| partition | func_name | docstring | path | code | repo | language | url | sha |
|---|---|---|---|---|---|---|---|---|
test
|
AirflowSecurityManager._sync_dag_view_permissions
|
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
{'can_dag_read'})
:type access_control: dict
|
airflow/www/security.py
|
def _sync_dag_view_permissions(self, dag_id, access_control):
"""Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
{'can_dag_read'})
:type access_control: dict
"""
def _get_or_create_dag_permission(perm_name):
dag_perm = self.find_permission_view_menu(perm_name, dag_id)
if not dag_perm:
self.log.info(
"Creating new permission '%s' on view '%s'",
perm_name, dag_id
)
dag_perm = self.add_permission_view_menu(perm_name, dag_id)
return dag_perm
def _revoke_stale_permissions(dag_view):
existing_dag_perms = self.find_permissions_view_menu(dag_view)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role
if role.name != 'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, {})
if perm.permission.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.permission, dag_id, role.name
)
self.del_permission_role(role, perm)
dag_view = self.find_view_menu(dag_id)
if dag_view:
_revoke_stale_permissions(dag_view)
for rolename, perms in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
"The access_control mapping for DAG '{}' includes a role "
"named '{}', but that role does not exist".format(
dag_id,
rolename))
perms = set(perms)
invalid_perms = perms - self.DAG_PERMS
if invalid_perms:
raise AirflowException(
"The access_control map for DAG '{}' includes the following "
"invalid permissions: {}; The set of valid permissions "
"is: {}".format(dag_id,
(perms - self.DAG_PERMS),
self.DAG_PERMS))
for perm_name in perms:
dag_perm = _get_or_create_dag_permission(perm_name)
self.add_permission_role(role, dag_perm)
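A minimal invocation sketch; `security_manager` is assumed to be an AirflowSecurityManager instance, and 'viewer_role' is a hypothetical role that must already exist (a missing role raises AirflowException, as the code shows):

# hypothetical: grant an existing role read-only access to one DAG
security_manager._sync_dag_view_permissions(
    dag_id='example_dag',
    access_control={'viewer_role': {'can_dag_read'}})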
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L500-L560
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AirflowSecurityManager.create_perm_vm_for_all_dag
|
Create perm-vm if it does not exist, and insert it into the FAB security model for all-dags.
|
airflow/www/security.py
|
def create_perm_vm_for_all_dag(self):
"""
Create perm-vm if it does not exist, and insert it into the FAB security model for all-dags.
"""
# create perm for global logical dag
for dag_vm in self.DAG_VMS:
for perm in self.DAG_PERMS:
self._merge_perm(permission_name=perm,
view_menu_name=dag_vm)
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L562-L570
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
get_fernet
|
Deferred load of Fernet key.
This function could fail either because Cryptography is not installed
or because the Fernet key is invalid.
:return: Fernet object
:raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
|
airflow/models/crypto.py
|
def get_fernet():
"""
Deferred load of Fernet key.
This function could fail either because Cryptography is not installed
or because the Fernet key is invalid.
:return: Fernet object
:raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
"""
global _fernet
log = LoggingMixin().log
if _fernet:
return _fernet
try:
from cryptography.fernet import Fernet, MultiFernet, InvalidToken
global InvalidFernetToken
InvalidFernetToken = InvalidToken
except BuiltinImportError:
log.warning(
"cryptography not found - values will not be stored encrypted."
)
_fernet = NullFernet()
return _fernet
try:
fernet_key = configuration.conf.get('core', 'FERNET_KEY')
if not fernet_key:
log.warning(
"empty cryptography key - values will not be stored encrypted."
)
_fernet = NullFernet()
else:
_fernet = MultiFernet([
Fernet(fernet_part.encode('utf-8'))
for fernet_part in fernet_key.split(',')
])
_fernet.is_encrypted = True
except (ValueError, TypeError) as ve:
raise AirflowException("Could not create Fernet object: {}".format(ve))
return _fernet
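A short usage sketch; the plaintext is a placeholder, and NullFernet is assumed to pass data through unchanged when cryptography or the key is unavailable:

from airflow.models.crypto import get_fernet

fernet = get_fernet()
token = fernet.encrypt(b'my secret value')  # bytes token (or the input itself under NullFernet)
assert fernet.decrypt(token) == b'my secret value'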
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/crypto.py#L54-L97
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AwsGlueCatalogPartitionSensor.poke
|
Checks for existence of the partition in the AWS Glue Catalog table
|
airflow/contrib/sensors/aws_glue_catalog_partition_sensor.py
|
def poke(self, context):
"""
Checks for existence of the partition in the AWS Glue Catalog table
"""
if '.' in self.table_name:
self.database_name, self.table_name = self.table_name.split('.')
self.log.info(
'Poking for table %s. %s, expression %s', self.database_name, self.table_name, self.expression
)
return self.get_hook().check_for_partition(
self.database_name, self.table_name, self.expression)
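A sketch of declaring the sensor; the constructor arguments are inferred from the attributes used in poke(), and all values are placeholders:

wait_for_partition = AwsGlueCatalogPartitionSensor(
    task_id='wait_for_partition',
    table_name='my_db.my_table',    # dotted name: poke() splits it into database and table
    expression="ds='2019-01-01'",   # partition predicate passed to check_for_partition
    aws_conn_id='aws_default',
    dag=dag)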
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/aws_glue_catalog_partition_sensor.py#L70-L81
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AwsGlueCatalogPartitionSensor.get_hook
|
Gets the AwsGlueCatalogHook
|
airflow/contrib/sensors/aws_glue_catalog_partition_sensor.py
|
def get_hook(self):
"""
Gets the AwsGlueCatalogHook
"""
if not hasattr(self, 'hook'):
from airflow.contrib.hooks.aws_glue_catalog_hook import AwsGlueCatalogHook
self.hook = AwsGlueCatalogHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name)
return self.hook
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/aws_glue_catalog_partition_sensor.py#L83-L93
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
SQSSensor.poke
|
Check for a message on the subscribed queue and write the message to XCom under the key ``messages``
:param context: the context object
:type context: dict
:return: ``True`` if a message is available, otherwise ``False``
|
airflow/contrib/sensors/aws_sqs_sensor.py
|
def poke(self, context):
"""
Check for a message on the subscribed queue and write the message to XCom under the key ``messages``
:param context: the context object
:type context: dict
:return: ``True`` if a message is available, otherwise ``False``
"""
sqs_hook = SQSHook(aws_conn_id=self.aws_conn_id)
sqs_conn = sqs_hook.get_conn()
self.log.info('SQSSensor checking for message on queue: %s', self.sqs_queue)
messages = sqs_conn.receive_message(QueueUrl=self.sqs_queue,
MaxNumberOfMessages=self.max_messages,
WaitTimeSeconds=self.wait_time_seconds)
self.log.info("reveived message %s", str(messages))
if 'Messages' in messages and len(messages['Messages']) > 0:
entries = [{'Id': message['MessageId'], 'ReceiptHandle': message['ReceiptHandle']}
for message in messages['Messages']]
result = sqs_conn.delete_message_batch(QueueUrl=self.sqs_queue,
Entries=entries)
if 'Successful' in result:
context['ti'].xcom_push(key='messages', value=messages)
return True
else:
raise AirflowException(
'Delete SQS Messages failed ' + str(result) + ' for messages ' + str(messages))
return False
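A wiring sketch; the queue URL and connection id are placeholders, and the constructor arguments are inferred from the attributes referenced in poke():

wait_for_message = SQSSensor(
    task_id='wait_for_sqs_message',
    sqs_queue='https://sqs.us-east-1.amazonaws.com/123456789012/example-queue',  # placeholder
    aws_conn_id='aws_default',
    max_messages=5,
    wait_time_seconds=1,
    dag=dag)
# on success, the full receive_message response is pushed to XCom under key 'messages'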
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/aws_sqs_sensor.py#L58-L93
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
tmp_configuration_copy
|
Returns a path for a temporary file including a full copy of the configuration
settings.
:return: a path to a temporary file
|
airflow/utils/configuration.py
|
def tmp_configuration_copy(chmod=0o600):
"""
Returns a path for a temporary file including a full copy of the configuration
settings.
:return: a path to a temporary file
"""
cfg_dict = conf.as_dict(display_sensitive=True, raw=True)
temp_fd, cfg_path = mkstemp()
with os.fdopen(temp_fd, 'w') as temp_file:
if chmod is not None:
os.fchmod(temp_fd, chmod)
json.dump(cfg_dict, temp_file)
return cfg_path
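A usage sketch; reading the file back with json.load mirrors the json.dump call above:

import json

from airflow.utils.configuration import tmp_configuration_copy

cfg_path = tmp_configuration_copy(chmod=0o600)
with open(cfg_path) as cfg_file:
    cfg = json.load(cfg_file)  # mapping of config sections to {option: value}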
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/configuration.py#L27-L41
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
HDFSHook.get_conn
|
Returns a snakebite HDFSClient object.
|
airflow/hooks/hdfs_hook.py
|
def get_conn(self):
"""
Returns a snakebite HDFSClient object.
"""
# When using HAClient, proxy_user must be the same, so is ok to always
# take the first.
effective_user = self.proxy_user
autoconfig = self.autoconfig
use_sasl = configuration.conf.get('core', 'security') == 'kerberos'
try:
connections = self.get_connections(self.hdfs_conn_id)
if not effective_user:
effective_user = connections[0].login
if not autoconfig:
autoconfig = connections[0].extra_dejson.get('autoconfig',
False)
hdfs_namenode_principal = connections[0].extra_dejson.get(
'hdfs_namenode_principal')
except AirflowException:
if not autoconfig:
raise
if autoconfig:
# will read config info from $HADOOP_HOME conf files
client = AutoConfigClient(effective_user=effective_user,
use_sasl=use_sasl)
elif len(connections) == 1:
client = Client(connections[0].host, connections[0].port,
effective_user=effective_user, use_sasl=use_sasl,
hdfs_namenode_principal=hdfs_namenode_principal)
elif len(connections) > 1:
nn = [Namenode(conn.host, conn.port) for conn in connections]
client = HAClient(nn, effective_user=effective_user,
use_sasl=use_sasl,
hdfs_namenode_principal=hdfs_namenode_principal)
else:
raise HDFSHookException("conn_id doesn't exist in the repository "
"and autoconfig is not specified")
return client
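A usage sketch, assuming the hook accepts the connection id shown and that the returned snakebite client exposes the usual ls() API (both assumptions, not shown in this snippet):

hook = HDFSHook(hdfs_conn_id='hdfs_default')
client = hook.get_conn()
# snakebite clients take a list of paths and yield a dict per entry
for entry in client.ls(['/tmp']):
    print(entry['path'])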
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hdfs_hook.py#L57-L98
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
WebHDFSHook.get_conn
|
Establishes a connection depending on the security mode set via config or environment variable.
:return: a hdfscli InsecureClient or KerberosClient object.
:rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient
|
airflow/hooks/webhdfs_hook.py
|
def get_conn(self):
"""
Establishes a connection depending on the security mode set via config or environment variable.
:return: a hdfscli InsecureClient or KerberosClient object.
:rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient
"""
connections = self.get_connections(self.webhdfs_conn_id)
for connection in connections:
try:
self.log.debug('Trying namenode %s', connection.host)
client = self._get_client(connection)
client.status('/')
self.log.debug('Using namenode %s for hook', connection.host)
return client
except HdfsError as hdfs_error:
self.log.debug('Read operation on namenode %s failed with error: %s',
connection.host, hdfs_error)
hosts = [connection.host for connection in connections]
error_message = 'Read operations failed on the namenodes below:\n{hosts}'.format(
hosts='\n'.join(hosts))
raise AirflowWebHDFSHookException(error_message)
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/webhdfs_hook.py#L56-L79
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
WebHDFSHook.check_for_path
|
Check for the existence of a path in HDFS by querying FileStatus.
:param hdfs_path: The path to check.
:type hdfs_path: str
:return: True if the path exists and False if not.
:rtype: bool
|
airflow/hooks/webhdfs_hook.py
|
def check_for_path(self, hdfs_path):
"""
Check for the existence of a path in HDFS by querying FileStatus.
:param hdfs_path: The path to check.
:type hdfs_path: str
:return: True if the path exists and False if not.
:rtype: bool
"""
conn = self.get_conn()
status = conn.status(hdfs_path, strict=False)
return bool(status)
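For example (connection id and path are placeholders):

hook = WebHDFSHook(webhdfs_conn_id='webhdfs_default')
if hook.check_for_path('/user/airflow/landing/_SUCCESS'):
    print('upstream export finished')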
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/webhdfs_hook.py#L92-L104
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
WebHDFSHook.load_file
|
r"""
Uploads a file to HDFS.
:param source: Local path to file or folder.
If it's a folder, all the files inside of it will be uploaded.
.. note:: This implies that folders empty of files will not be created remotely.
:type source: str
:param destination: Target HDFS path.
If it already exists and is a directory, files will be uploaded inside.
:type destination: str
:param overwrite: Overwrite any existing file or directory.
:type overwrite: bool
:param parallelism: Number of threads to use for parallelization.
A value of `0` (or negative) uses as many threads as there are files.
:type parallelism: int
:param \**kwargs: Keyword arguments forwarded to :meth:`hdfs.client.Client.upload`.
|
airflow/hooks/webhdfs_hook.py
|
def load_file(self, source, destination, overwrite=True, parallelism=1, **kwargs):
r"""
Uploads a file to HDFS.
:param source: Local path to file or folder.
If it's a folder, all the files inside of it will be uploaded.
.. note:: This implies that folders empty of files will not be created remotely.
:type source: str
:param destination: Target HDFS path.
If it already exists and is a directory, files will be uploaded inside.
:type destination: str
:param overwrite: Overwrite any existing file or directory.
:type overwrite: bool
:param parallelism: Number of threads to use for parallelization.
A value of `0` (or negative) uses as many threads as there are files.
:type parallelism: int
:param \**kwargs: Keyword arguments forwarded to :meth:`hdfs.client.Client.upload`.
"""
conn = self.get_conn()
conn.upload(hdfs_path=destination,
local_path=source,
overwrite=overwrite,
n_threads=parallelism,
**kwargs)
self.log.debug("Uploaded file %s to %s", source, destination)
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/webhdfs_hook.py#L106-L132
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
PinotDbApiHook.get_conn
|
Establish a connection to the pinot broker through the pinot dbapi.
|
airflow/contrib/hooks/pinot_hook.py
|
def get_conn(self):
"""
Establish a connection to the pinot broker through the pinot dbapi.
"""
conn = self.get_connection(self.pinot_broker_conn_id)
pinot_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/pql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
self.log.info('Get the connection to pinot '
'broker on {host}'.format(host=conn.host))
return pinot_broker_conn
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/pinot_hook.py#L36-L49
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
PinotDbApiHook.get_uri
|
Get the connection uri for pinot broker.
e.g.: http://localhost:9000/pql
|
airflow/contrib/hooks/pinot_hook.py
|
def get_uri(self):
"""
Get the connection uri for pinot broker.
e.g.: http://localhost:9000/pql
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
conn_type = 'http' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', 'pql')
return '{conn_type}://{host}/{endpoint}'.format(
conn_type=conn_type, host=host, endpoint=endpoint)
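Worked example: a connection with conn_type 'http', host 'localhost', port 9000 and no 'endpoint' extra yields 'http://localhost:9000/pql'; when no port is set, the ':{port}' suffix is simply omitted.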
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/pinot_hook.py#L51-L64
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
PinotDbApiHook.get_records
|
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
|
airflow/contrib/hooks/pinot_hook.py
|
def get_records(self, sql):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchall()
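A usage sketch; the connection id and query are placeholders, and passing the connection id as a keyword assumes the usual DbApiHook constructor convention:

hook = PinotDbApiHook(pinot_broker_conn_id='pinot_broker_default')  # assumed constructor kwarg
rows = hook.get_records('SELECT playerName FROM baseballStats LIMIT 5')  # placeholder PQL
for row in rows:
    print(row)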
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/pinot_hook.py#L66-L76
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
PinotDbApiHook.get_first
|
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
|
airflow/contrib/hooks/pinot_hook.py
|
def get_first(self, sql):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchone()
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/pinot_hook.py#L78-L88
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
smart_truncate
|
Truncate a string.
:param string (str): string for modification
:param max_length (int): output string length
:param word_boundary (bool): truncate only at a word boundary (on the separator)
:param save_order (bool): if True, preserve input word order by stopping at the first word that does not fit
:param separator (str): separator between words
:return: the truncated string
|
airflow/_vendor/slugify/slugify.py
|
def smart_truncate(string, max_length=0, word_boundary=False, separator=' ', save_order=False):
"""
Truncate a string.
:param string (str): string for modification
:param max_length (int): output string length
:param word_boundary (bool): truncate only at a word boundary (on the separator)
:param save_order (bool): if True, preserve input word order by stopping at the first word that does not fit
:param separator (str): separator between words
:return: the truncated string
"""
string = string.strip(separator)
if not max_length:
return string
if len(string) < max_length:
return string
if not word_boundary:
return string[:max_length].strip(separator)
if separator not in string:
return string[:max_length]
truncated = ''
for word in string.split(separator):
if word:
next_len = len(truncated) + len(word)
if next_len < max_length:
truncated += '{0}{1}'.format(word, separator)
elif next_len == max_length:
truncated += '{0}'.format(word)
break
else:
if save_order:
break
if not truncated: # pragma: no cover
truncated = string[:max_length]
return truncated.strip(separator)
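Worked examples, traced by hand from the branches above:

smart_truncate('one two three', max_length=9, word_boundary=True)  # 'one two' (third word dropped)
smart_truncate('one two three', max_length=9)                      # 'one two t' (hard cut)
smart_truncate('one two three')                                    # 'one two three' (max_length=0 disables truncation)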
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/slugify/slugify.py#L32-L71
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
slugify
|
Make a slug from the given text.
:param text (str): initial text
:param entities (bool): convert HTML character entity references (e.g. &amp;) to their characters
:param decimal (bool): convert decimal character references (e.g. &#38;) to their characters
:param hexadecimal (bool): convert hexadecimal character references (e.g. &#x26;) to their characters
:param max_length (int): output string length
:param word_boundary (bool): truncate only at a word boundary
:param save_order (bool): if True and max_length > 0, return whole words in the initial order
:param separator (str): separator between words
:param stopwords (iterable): words to discount
:param regex_pattern (str): regex pattern for allowed characters
:param lowercase (bool): set to False to preserve case
:param replacements (iterable): list of replacement rules e.g. [['|', 'or'], ['%', 'percent']]
:return (str): the slugified text
|
airflow/_vendor/slugify/slugify.py
|
def slugify(text, entities=True, decimal=True, hexadecimal=True, max_length=0, word_boundary=False,
separator=DEFAULT_SEPARATOR, save_order=False, stopwords=(), regex_pattern=None, lowercase=True,
replacements=()):
"""
Make a slug from the given text.
:param text (str): initial text
:param entities (bool): convert HTML character entity references (e.g. &amp;) to their characters
:param decimal (bool): convert decimal character references (e.g. &#38;) to their characters
:param hexadecimal (bool): convert hexadecimal character references (e.g. &#x26;) to their characters
:param max_length (int): output string length
:param word_boundary (bool): truncate only at a word boundary
:param save_order (bool): if True and max_length > 0, return whole words in the initial order
:param separator (str): separator between words
:param stopwords (iterable): words to discount
:param regex_pattern (str): regex pattern for allowed characters
:param lowercase (bool): set to False to preserve case
:param replacements (iterable): list of replacement rules e.g. [['|', 'or'], ['%', 'percent']]
:return (str): the slugified text
"""
# user-specific replacements
if replacements:
for old, new in replacements:
text = text.replace(old, new)
# ensure text is unicode
if not isinstance(text, _unicode_type):
text = _unicode(text, 'utf-8', 'ignore')
# replace quotes with dashes - pre-process
text = QUOTE_PATTERN.sub(DEFAULT_SEPARATOR, text)
# decode unicode
text = unidecode.unidecode(text)
# ensure text is still in unicode
if not isinstance(text, _unicode_type):
text = _unicode(text, 'utf-8', 'ignore')
# character entity reference
if entities:
text = CHAR_ENTITY_PATTERN.sub(lambda m: unichr(name2codepoint[m.group(1)]), text)
# decimal character reference
if decimal:
try:
text = DECIMAL_PATTERN.sub(lambda m: unichr(int(m.group(1))), text)
except Exception:
pass
# hexadecimal character reference
if hexadecimal:
try:
text = HEX_PATTERN.sub(lambda m: unichr(int(m.group(1), 16)), text)
except Exception:
pass
# translate
text = unicodedata.normalize('NFKD', text)
# make the text lowercase (optional)
if lowercase:
text = text.lower()
# remove generated quotes -- post-process
text = QUOTE_PATTERN.sub('', text)
# cleanup numbers
text = NUMBERS_PATTERN.sub('', text)
# replace all other unwanted characters
if lowercase:
pattern = regex_pattern or ALLOWED_CHARS_PATTERN
else:
pattern = regex_pattern or ALLOWED_CHARS_PATTERN_WITH_UPPERCASE
text = re.sub(pattern, DEFAULT_SEPARATOR, text)
# remove redundant
text = DUPLICATE_DASH_PATTERN.sub(DEFAULT_SEPARATOR, text).strip(DEFAULT_SEPARATOR)
# remove stopwords
if stopwords:
if lowercase:
stopwords_lower = [s.lower() for s in stopwords]
words = [w for w in text.split(DEFAULT_SEPARATOR) if w not in stopwords_lower]
else:
words = [w for w in text.split(DEFAULT_SEPARATOR) if w not in stopwords]
text = DEFAULT_SEPARATOR.join(words)
# finalize user-specific replacements
if replacements:
for old, new in replacements:
text = text.replace(old, new)
# smart truncate if requested
if max_length > 0:
text = smart_truncate(text, max_length, word_boundary, DEFAULT_SEPARATOR, save_order)
if separator != DEFAULT_SEPARATOR:
text = text.replace(DEFAULT_SEPARATOR, separator)
return text
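Worked examples with the default '-' separator, traced from the code above:

slugify('This is a test ---')                                # 'this-is-a-test'
slugify('This is a test ---', separator='_')                 # 'this_is_a_test'
slugify('one two three', max_length=9, word_boundary=True)   # 'one-two'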
|
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/slugify/slugify.py#L74-L175
|
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
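A minimal usage sketch for the vendored slugify function above. The import path is an assumption based on the file location shown (airflow/_vendor/slugify/slugify.py); the standalone python-slugify package exposes the same function as ``from slugify import slugify``.

from airflow._vendor.slugify.slugify import slugify  # assumed vendored path

# Basic slug: punctuation becomes the separator.
print(slugify('Hello, World!'))                            # 'hello-world'

# User replacements run before (and again after) slugification.
print(slugify('10 | 20 %', replacements=[['|', 'or'], ['%', 'percent']]))
# '10-or-20-percent'

# Stopwords are dropped from the result.
print(slugify('the quick brown fox', stopwords=['the']))   # 'quick-brown-fox'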
XCom.set
|
Store an XCom value.
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
:return: None
|
airflow/models/xcom.py
|
def set(
cls,
key,
value,
execution_date,
task_id,
dag_id,
session=None):
"""
Store an XCom value.
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
:return: None
"""
session.expunge_all()
enable_pickling = configuration.getboolean('core', 'enable_xcom_pickling')
if enable_pickling:
value = pickle.dumps(value)
else:
try:
value = json.dumps(value).encode('UTF-8')
except ValueError:
log = LoggingMixin().log
log.error("Could not serialize the XCOM value into JSON. "
"If you are using pickles instead of JSON "
"for XCOM, then you need to enable pickle "
"support for XCOM in your airflow config.")
raise
# remove any duplicate XComs
session.query(cls).filter(
cls.key == key,
cls.execution_date == execution_date,
cls.task_id == task_id,
cls.dag_id == dag_id).delete()
session.commit()
# insert new XCom
session.add(XCom(
key=key,
value=value,
execution_date=execution_date,
task_id=task_id,
dag_id=dag_id))
session.commit()
|
def set(
cls,
key,
value,
execution_date,
task_id,
dag_id,
session=None):
"""
Store an XCom value.
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
:return: None
"""
session.expunge_all()
enable_pickling = configuration.getboolean('core', 'enable_xcom_pickling')
if enable_pickling:
value = pickle.dumps(value)
else:
try:
value = json.dumps(value).encode('UTF-8')
except ValueError:
log = LoggingMixin().log
log.error("Could not serialize the XCOM value into JSON. "
"If you are using pickles instead of JSON "
"for XCOM, then you need to enable pickle "
"support for XCOM in your airflow config.")
raise
# remove any duplicate XComs
session.query(cls).filter(
cls.key == key,
cls.execution_date == execution_date,
cls.task_id == task_id,
cls.dag_id == dag_id).delete()
session.commit()
# insert new XCom
session.add(XCom(
key=key,
value=value,
execution_date=execution_date,
task_id=task_id,
dag_id=dag_id))
session.commit()
|
[
"Store",
"an",
"XCom",
"value",
".",
"TODO",
":",
"pickling",
"has",
"been",
"deprecated",
"and",
"JSON",
"is",
"preferred",
".",
"pickling",
"will",
"be",
"removed",
"in",
"Airflow",
"2",
".",
"0",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/xcom.py#L88-L136
|
[
"def",
"set",
"(",
"cls",
",",
"key",
",",
"value",
",",
"execution_date",
",",
"task_id",
",",
"dag_id",
",",
"session",
"=",
"None",
")",
":",
"session",
".",
"expunge_all",
"(",
")",
"enable_pickling",
"=",
"configuration",
".",
"getboolean",
"(",
"'core'",
",",
"'enable_xcom_pickling'",
")",
"if",
"enable_pickling",
":",
"value",
"=",
"pickle",
".",
"dumps",
"(",
"value",
")",
"else",
":",
"try",
":",
"value",
"=",
"json",
".",
"dumps",
"(",
"value",
")",
".",
"encode",
"(",
"'UTF-8'",
")",
"except",
"ValueError",
":",
"log",
"=",
"LoggingMixin",
"(",
")",
".",
"log",
"log",
".",
"error",
"(",
"\"Could not serialize the XCOM value into JSON. \"",
"\"If you are using pickles instead of JSON \"",
"\"for XCOM, then you need to enable pickle \"",
"\"support for XCOM in your airflow config.\"",
")",
"raise",
"# remove any duplicate XComs",
"session",
".",
"query",
"(",
"cls",
")",
".",
"filter",
"(",
"cls",
".",
"key",
"==",
"key",
",",
"cls",
".",
"execution_date",
"==",
"execution_date",
",",
"cls",
".",
"task_id",
"==",
"task_id",
",",
"cls",
".",
"dag_id",
"==",
"dag_id",
")",
".",
"delete",
"(",
")",
"session",
".",
"commit",
"(",
")",
"# insert new XCom",
"session",
".",
"add",
"(",
"XCom",
"(",
"key",
"=",
"key",
",",
"value",
"=",
"value",
",",
"execution_date",
"=",
"execution_date",
",",
"task_id",
"=",
"task_id",
",",
"dag_id",
"=",
"dag_id",
")",
")",
"session",
".",
"commit",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
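A hedged usage sketch for XCom.set above. Inside a running task you would normally call ``ti.xcom_push(...)``, which delegates to this classmethod; the session is presumably supplied by Airflow's ``@provide_session`` decorator, which the snippet does not show. The DAG and task ids below are illustrative.

from datetime import datetime
from airflow.models.xcom import XCom

XCom.set(
    key='rows_processed',
    value={'count': 42},                  # must be JSON-serializable unless
                                          # core.enable_xcom_pickling is True
    execution_date=datetime(2019, 1, 1),
    task_id='extract',
    dag_id='etl_pipeline',
)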
XCom.get_one
|
Retrieve an XCom value, optionally meeting certain criteria.
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
:return: XCom value
|
airflow/models/xcom.py
|
def get_one(cls,
execution_date,
key=None,
task_id=None,
dag_id=None,
include_prior_dates=False,
session=None):
"""
Retrieve an XCom value, optionally meeting certain criteria.
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
:return: XCom value
"""
filters = []
if key:
filters.append(cls.key == key)
if task_id:
filters.append(cls.task_id == task_id)
if dag_id:
filters.append(cls.dag_id == dag_id)
if include_prior_dates:
filters.append(cls.execution_date <= execution_date)
else:
filters.append(cls.execution_date == execution_date)
query = (
session.query(cls.value).filter(and_(*filters))
.order_by(cls.execution_date.desc(), cls.timestamp.desc()))
result = query.first()
if result:
enable_pickling = configuration.getboolean('core', 'enable_xcom_pickling')
if enable_pickling:
return pickle.loads(result.value)
else:
try:
return json.loads(result.value.decode('UTF-8'))
except ValueError:
log = LoggingMixin().log
log.error("Could not deserialize the XCOM value from JSON. "
"If you are using pickles instead of JSON "
"for XCOM, then you need to enable pickle "
"support for XCOM in your airflow config.")
raise
|
def get_one(cls,
execution_date,
key=None,
task_id=None,
dag_id=None,
include_prior_dates=False,
session=None):
"""
Retrieve an XCom value, optionally meeting certain criteria.
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
:return: XCom value
"""
filters = []
if key:
filters.append(cls.key == key)
if task_id:
filters.append(cls.task_id == task_id)
if dag_id:
filters.append(cls.dag_id == dag_id)
if include_prior_dates:
filters.append(cls.execution_date <= execution_date)
else:
filters.append(cls.execution_date == execution_date)
query = (
session.query(cls.value).filter(and_(*filters))
.order_by(cls.execution_date.desc(), cls.timestamp.desc()))
result = query.first()
if result:
enable_pickling = configuration.getboolean('core', 'enable_xcom_pickling')
if enable_pickling:
return pickle.loads(result.value)
else:
try:
return json.loads(result.value.decode('UTF-8'))
except ValueError:
log = LoggingMixin().log
log.error("Could not deserialize the XCOM value from JSON. "
"If you are using pickles instead of JSON "
"for XCOM, then you need to enable pickle "
"support for XCOM in your airflow config.")
raise
|
[
"Retrieve",
"an",
"XCom",
"value",
"optionally",
"meeting",
"certain",
"criteria",
".",
"TODO",
":",
"pickling",
"has",
"been",
"deprecated",
"and",
"JSON",
"is",
"preferred",
".",
"pickling",
"will",
"be",
"removed",
"in",
"Airflow",
"2",
".",
"0",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/xcom.py#L140-L184
|
[
"def",
"get_one",
"(",
"cls",
",",
"execution_date",
",",
"key",
"=",
"None",
",",
"task_id",
"=",
"None",
",",
"dag_id",
"=",
"None",
",",
"include_prior_dates",
"=",
"False",
",",
"session",
"=",
"None",
")",
":",
"filters",
"=",
"[",
"]",
"if",
"key",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"key",
"==",
"key",
")",
"if",
"task_id",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"task_id",
"==",
"task_id",
")",
"if",
"dag_id",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"dag_id",
"==",
"dag_id",
")",
"if",
"include_prior_dates",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"execution_date",
"<=",
"execution_date",
")",
"else",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"execution_date",
"==",
"execution_date",
")",
"query",
"=",
"(",
"session",
".",
"query",
"(",
"cls",
".",
"value",
")",
".",
"filter",
"(",
"and_",
"(",
"*",
"filters",
")",
")",
".",
"order_by",
"(",
"cls",
".",
"execution_date",
".",
"desc",
"(",
")",
",",
"cls",
".",
"timestamp",
".",
"desc",
"(",
")",
")",
")",
"result",
"=",
"query",
".",
"first",
"(",
")",
"if",
"result",
":",
"enable_pickling",
"=",
"configuration",
".",
"getboolean",
"(",
"'core'",
",",
"'enable_xcom_pickling'",
")",
"if",
"enable_pickling",
":",
"return",
"pickle",
".",
"loads",
"(",
"result",
".",
"value",
")",
"else",
":",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"result",
".",
"value",
".",
"decode",
"(",
"'UTF-8'",
")",
")",
"except",
"ValueError",
":",
"log",
"=",
"LoggingMixin",
"(",
")",
".",
"log",
"log",
".",
"error",
"(",
"\"Could not deserialize the XCOM value from JSON. \"",
"\"If you are using pickles instead of JSON \"",
"\"for XCOM, then you need to enable pickle \"",
"\"support for XCOM in your airflow config.\"",
")",
"raise"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
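A companion sketch for XCom.get_one above: it returns the deserialized value of the newest matching row, or None when nothing matches. Identifiers are illustrative.

from datetime import datetime
from airflow.models.xcom import XCom

value = XCom.get_one(
    execution_date=datetime(2019, 1, 1),
    key='rows_processed',
    task_id='extract',
    dag_id='etl_pipeline',
)
print(value)   # e.g. {'count': 42}, or None if no XCom matched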
XCom.get_many
|
Retrieve XCom values, optionally meeting certain criteria.
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
|
airflow/models/xcom.py
|
def get_many(cls,
execution_date,
key=None,
task_ids=None,
dag_ids=None,
include_prior_dates=False,
limit=100,
session=None):
"""
Retrieve an XCom value, optionally meeting certain criteria
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
"""
filters = []
if key:
filters.append(cls.key == key)
if task_ids:
filters.append(cls.task_id.in_(as_tuple(task_ids)))
if dag_ids:
filters.append(cls.dag_id.in_(as_tuple(dag_ids)))
if include_prior_dates:
filters.append(cls.execution_date <= execution_date)
else:
filters.append(cls.execution_date == execution_date)
query = (
session.query(cls).filter(and_(*filters))
.order_by(cls.execution_date.desc(), cls.timestamp.desc())
.limit(limit))
results = query.all()
return results
|
def get_many(cls,
execution_date,
key=None,
task_ids=None,
dag_ids=None,
include_prior_dates=False,
limit=100,
session=None):
"""
Retrieve an XCom value, optionally meeting certain criteria
TODO: "pickling" has been deprecated and JSON is preferred.
"pickling" will be removed in Airflow 2.0.
"""
filters = []
if key:
filters.append(cls.key == key)
if task_ids:
filters.append(cls.task_id.in_(as_tuple(task_ids)))
if dag_ids:
filters.append(cls.dag_id.in_(as_tuple(dag_ids)))
if include_prior_dates:
filters.append(cls.execution_date <= execution_date)
else:
filters.append(cls.execution_date == execution_date)
query = (
session.query(cls).filter(and_(*filters))
.order_by(cls.execution_date.desc(), cls.timestamp.desc())
.limit(limit))
results = query.all()
return results
|
[
"Retrieve",
"an",
"XCom",
"value",
"optionally",
"meeting",
"certain",
"criteria",
"TODO",
":",
"pickling",
"has",
"been",
"deprecated",
"and",
"JSON",
"is",
"preferred",
".",
"pickling",
"will",
"be",
"removed",
"in",
"Airflow",
"2",
".",
"0",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/xcom.py#L188-L218
|
[
"def",
"get_many",
"(",
"cls",
",",
"execution_date",
",",
"key",
"=",
"None",
",",
"task_ids",
"=",
"None",
",",
"dag_ids",
"=",
"None",
",",
"include_prior_dates",
"=",
"False",
",",
"limit",
"=",
"100",
",",
"session",
"=",
"None",
")",
":",
"filters",
"=",
"[",
"]",
"if",
"key",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"key",
"==",
"key",
")",
"if",
"task_ids",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"task_id",
".",
"in_",
"(",
"as_tuple",
"(",
"task_ids",
")",
")",
")",
"if",
"dag_ids",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"dag_id",
".",
"in_",
"(",
"as_tuple",
"(",
"dag_ids",
")",
")",
")",
"if",
"include_prior_dates",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"execution_date",
"<=",
"execution_date",
")",
"else",
":",
"filters",
".",
"append",
"(",
"cls",
".",
"execution_date",
"==",
"execution_date",
")",
"query",
"=",
"(",
"session",
".",
"query",
"(",
"cls",
")",
".",
"filter",
"(",
"and_",
"(",
"*",
"filters",
")",
")",
".",
"order_by",
"(",
"cls",
".",
"execution_date",
".",
"desc",
"(",
")",
",",
"cls",
".",
"timestamp",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"limit",
")",
")",
"results",
"=",
"query",
".",
"all",
"(",
")",
"return",
"results"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
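Note that, unlike get_one, get_many above returns the XCom rows themselves rather than deserialized values. An illustrative sketch (the dag id is hypothetical):

from datetime import datetime
from airflow.models.xcom import XCom

results = XCom.get_many(
    execution_date=datetime(2019, 1, 1),
    dag_ids='etl_pipeline',
    include_prior_dates=True,    # match this and all earlier execution dates
    limit=10,
)
for xcom in results:             # XCom model instances, not bare values
    print(xcom.task_id, xcom.key, xcom.execution_date)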
TransferJobPreprocessor._convert_date_to_dict
|
Convert a native Python ``datetime.date`` object to a format supported by the API
|
airflow/contrib/operators/gcp_transfer_operator.py
|
def _convert_date_to_dict(field_date):
"""
Convert native python ``datetime.date`` object to a format supported by the API
"""
return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}
|
def _convert_date_to_dict(field_date):
"""
Convert native python ``datetime.date`` object to a format supported by the API
"""
return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}
|
[
"Convert",
"native",
"python",
"datetime",
".",
"date",
"object",
"to",
"a",
"format",
"supported",
"by",
"the",
"API"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/gcp_transfer_operator.py#L106-L110
|
[
"def",
"_convert_date_to_dict",
"(",
"field_date",
")",
":",
"return",
"{",
"DAY",
":",
"field_date",
".",
"day",
",",
"MONTH",
":",
"field_date",
".",
"month",
",",
"YEAR",
":",
"field_date",
".",
"year",
"}"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
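The converter above simply splits a date into the day/month/year keys the transfer API expects; DAY, MONTH and YEAR are module-level constants in gcp_transfer_operator.py (presumably the strings 'day', 'month' and 'year'). An equivalent standalone sketch:

import datetime

field_date = datetime.date(2019, 4, 30)
# Same shape of output, with the assumed constant values inlined.
print({'day': field_date.day, 'month': field_date.month, 'year': field_date.year})
# {'day': 30, 'month': 4, 'year': 2019}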
TransferJobPreprocessor._convert_time_to_dict
|
Convert a native Python ``datetime.time`` object to a format supported by the API
|
airflow/contrib/operators/gcp_transfer_operator.py
|
def _convert_time_to_dict(time):
"""
Convert native python ``datetime.time`` object to a format supported by the API
"""
return {HOURS: time.hour, MINUTES: time.minute, SECONDS: time.second}
|
def _convert_time_to_dict(time):
"""
Convert native python ``datetime.time`` object to a format supported by the API
"""
return {HOURS: time.hour, MINUTES: time.minute, SECONDS: time.second}
|
[
"Convert",
"native",
"python",
"datetime",
".",
"time",
"object",
"to",
"a",
"format",
"supported",
"by",
"the",
"API"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/gcp_transfer_operator.py#L113-L117
|
[
"def",
"_convert_time_to_dict",
"(",
"time",
")",
":",
"return",
"{",
"HOURS",
":",
"time",
".",
"hour",
",",
"MINUTES",
":",
"time",
".",
"minute",
",",
"SECONDS",
":",
"time",
".",
"second",
"}"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
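Its sibling above does the same for times; HOURS, MINUTES and SECONDS are again module constants (presumably 'hours', 'minutes' and 'seconds'):

import datetime

t = datetime.time(23, 5, 10)
# Same shape of output, with the assumed constant values inlined.
print({'hours': t.hour, 'minutes': t.minute, 'seconds': t.second})
# {'hours': 23, 'minutes': 5, 'seconds': 10}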
RedisHook.get_conn
|
Returns a Redis connection.
|
airflow/contrib/hooks/redis_hook.py
|
def get_conn(self):
"""
Returns a Redis connection.
"""
conn = self.get_connection(self.redis_conn_id)
self.host = conn.host
self.port = conn.port
self.password = None if str(conn.password).lower() in ['none', 'false', ''] else conn.password
self.db = conn.extra_dejson.get('db', None)
if not self.redis:
self.log.debug(
'Initializing redis object for conn_id "%s" on %s:%s:%s',
self.redis_conn_id, self.host, self.port, self.db
)
self.redis = Redis(
host=self.host,
port=self.port,
password=self.password,
db=self.db)
return self.redis
|
def get_conn(self):
"""
Returns a Redis connection.
"""
conn = self.get_connection(self.redis_conn_id)
self.host = conn.host
self.port = conn.port
self.password = None if str(conn.password).lower() in ['none', 'false', ''] else conn.password
self.db = conn.extra_dejson.get('db', None)
if not self.redis:
self.log.debug(
'Initializing redis object for conn_id "%s" on %s:%s:%s',
self.redis_conn_id, self.host, self.port, self.db
)
self.redis = Redis(
host=self.host,
port=self.port,
password=self.password,
db=self.db)
return self.redis
|
[
"Returns",
"a",
"Redis",
"connection",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/redis_hook.py#L45-L66
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"conn",
"=",
"self",
".",
"get_connection",
"(",
"self",
".",
"redis_conn_id",
")",
"self",
".",
"host",
"=",
"conn",
".",
"host",
"self",
".",
"port",
"=",
"conn",
".",
"port",
"self",
".",
"password",
"=",
"None",
"if",
"str",
"(",
"conn",
".",
"password",
")",
".",
"lower",
"(",
")",
"in",
"[",
"'none'",
",",
"'false'",
",",
"''",
"]",
"else",
"conn",
".",
"password",
"self",
".",
"db",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'db'",
",",
"None",
")",
"if",
"not",
"self",
".",
"redis",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Initializing redis object for conn_id \"%s\" on %s:%s:%s'",
",",
"self",
".",
"redis_conn_id",
",",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"self",
".",
"db",
")",
"self",
".",
"redis",
"=",
"Redis",
"(",
"host",
"=",
"self",
".",
"host",
",",
"port",
"=",
"self",
".",
"port",
",",
"password",
"=",
"self",
".",
"password",
",",
"db",
"=",
"self",
".",
"db",
")",
"return",
"self",
".",
"redis"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
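An illustrative use of RedisHook above; the connection id must exist in the Airflow metastore, and the returned object is a plain redis-py client from then on:

from airflow.contrib.hooks.redis_hook import RedisHook

hook = RedisHook(redis_conn_id='redis_default')
client = hook.get_conn()          # Redis(host=..., port=..., password=..., db=...)
client.set('greeting', 'hello')
print(client.get('greeting'))     # b'hello'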
OracleHook.get_conn
|
Returns an Oracle connection object.
Optional parameters for using a custom DSN connection
(instead of using a server alias from tnsnames.ora).
The dsn (data source name) is the TNS entry
(from the Oracle names server or tnsnames.ora file)
or is a string like the one returned from makedsn().
:param dsn: the host address for the Oracle server
:param service_name: the db_unique_name of the database
that you are connecting to (CONNECT_DATA part of TNS)
You can set these parameters in the extra fields of your connection
as in ``{ "dsn":"some.host.address" , "service_name":"some.service.name" }``
See more parameter details in
`cx_Oracle.connect <https://cx-oracle.readthedocs.io/en/latest/module.html#cx_Oracle.connect>`_
|
airflow/hooks/oracle_hook.py
|
def get_conn(self):
"""
Returns a oracle connection object
Optional parameters for using a custom DSN connection
(instead of using a server alias from tnsnames.ora)
The dsn (data source name) is the TNS entry
(from the Oracle names server or tnsnames.ora file)
or is a string like the one returned from makedsn().
:param dsn: the host address for the Oracle server
:param service_name: the db_unique_name of the database
that you are connecting to (CONNECT_DATA part of TNS)
You can set these parameters in the extra fields of your connection
as in ``{ "dsn":"some.host.address" , "service_name":"some.service.name" }``
see more param detail in
`cx_Oracle.connect <https://cx-oracle.readthedocs.io/en/latest/module.html#cx_Oracle.connect>`_
"""
conn = self.get_connection(self.oracle_conn_id)
conn_config = {
'user': conn.login,
'password': conn.password
}
dsn = conn.extra_dejson.get('dsn', None)
sid = conn.extra_dejson.get('sid', None)
mod = conn.extra_dejson.get('module', None)
service_name = conn.extra_dejson.get('service_name', None)
port = conn.port if conn.port else 1521
if dsn and sid and not service_name:
conn_config['dsn'] = cx_Oracle.makedsn(dsn, port, sid)
elif dsn and service_name and not sid:
conn_config['dsn'] = cx_Oracle.makedsn(dsn, port, service_name=service_name)
else:
conn_config['dsn'] = conn.host
if 'encoding' in conn.extra_dejson:
conn_config['encoding'] = conn.extra_dejson.get('encoding')
# if `encoding` is specific but `nencoding` is not
# `nencoding` should use same values as `encoding` to set encoding, inspired by
# https://github.com/oracle/python-cx_Oracle/issues/157#issuecomment-371877993
if 'nencoding' not in conn.extra_dejson:
conn_config['nencoding'] = conn.extra_dejson.get('encoding')
if 'nencoding' in conn.extra_dejson:
conn_config['nencoding'] = conn.extra_dejson.get('nencoding')
if 'threaded' in conn.extra_dejson:
conn_config['threaded'] = conn.extra_dejson.get('threaded')
if 'events' in conn.extra_dejson:
conn_config['events'] = conn.extra_dejson.get('events')
mode = conn.extra_dejson.get('mode', '').lower()
if mode == 'sysdba':
conn_config['mode'] = cx_Oracle.SYSDBA
elif mode == 'sysasm':
conn_config['mode'] = cx_Oracle.SYSASM
elif mode == 'sysoper':
conn_config['mode'] = cx_Oracle.SYSOPER
elif mode == 'sysbkp':
conn_config['mode'] = cx_Oracle.SYSBKP
elif mode == 'sysdgd':
conn_config['mode'] = cx_Oracle.SYSDGD
elif mode == 'syskmt':
conn_config['mode'] = cx_Oracle.SYSKMT
elif mode == 'sysrac':
conn_config['mode'] = cx_Oracle.SYSRAC
purity = conn.extra_dejson.get('purity', '').lower()
if purity == 'new':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_NEW
elif purity == 'self':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_SELF
elif purity == 'default':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_DEFAULT
conn = cx_Oracle.connect(**conn_config)
if mod is not None:
conn.module = mod
return conn
|
def get_conn(self):
"""
Returns a oracle connection object
Optional parameters for using a custom DSN connection
(instead of using a server alias from tnsnames.ora)
The dsn (data source name) is the TNS entry
(from the Oracle names server or tnsnames.ora file)
or is a string like the one returned from makedsn().
:param dsn: the host address for the Oracle server
:param service_name: the db_unique_name of the database
that you are connecting to (CONNECT_DATA part of TNS)
You can set these parameters in the extra fields of your connection
as in ``{ "dsn":"some.host.address" , "service_name":"some.service.name" }``
see more param detail in
`cx_Oracle.connect <https://cx-oracle.readthedocs.io/en/latest/module.html#cx_Oracle.connect>`_
"""
conn = self.get_connection(self.oracle_conn_id)
conn_config = {
'user': conn.login,
'password': conn.password
}
dsn = conn.extra_dejson.get('dsn', None)
sid = conn.extra_dejson.get('sid', None)
mod = conn.extra_dejson.get('module', None)
service_name = conn.extra_dejson.get('service_name', None)
port = conn.port if conn.port else 1521
if dsn and sid and not service_name:
conn_config['dsn'] = cx_Oracle.makedsn(dsn, port, sid)
elif dsn and service_name and not sid:
conn_config['dsn'] = cx_Oracle.makedsn(dsn, port, service_name=service_name)
else:
conn_config['dsn'] = conn.host
if 'encoding' in conn.extra_dejson:
conn_config['encoding'] = conn.extra_dejson.get('encoding')
# if `encoding` is specific but `nencoding` is not
# `nencoding` should use same values as `encoding` to set encoding, inspired by
# https://github.com/oracle/python-cx_Oracle/issues/157#issuecomment-371877993
if 'nencoding' not in conn.extra_dejson:
conn_config['nencoding'] = conn.extra_dejson.get('encoding')
if 'nencoding' in conn.extra_dejson:
conn_config['nencoding'] = conn.extra_dejson.get('nencoding')
if 'threaded' in conn.extra_dejson:
conn_config['threaded'] = conn.extra_dejson.get('threaded')
if 'events' in conn.extra_dejson:
conn_config['events'] = conn.extra_dejson.get('events')
mode = conn.extra_dejson.get('mode', '').lower()
if mode == 'sysdba':
conn_config['mode'] = cx_Oracle.SYSDBA
elif mode == 'sysasm':
conn_config['mode'] = cx_Oracle.SYSASM
elif mode == 'sysoper':
conn_config['mode'] = cx_Oracle.SYSOPER
elif mode == 'sysbkp':
conn_config['mode'] = cx_Oracle.SYSBKP
elif mode == 'sysdgd':
conn_config['mode'] = cx_Oracle.SYSDGD
elif mode == 'syskmt':
conn_config['mode'] = cx_Oracle.SYSKMT
elif mode == 'sysrac':
conn_config['mode'] = cx_Oracle.SYSRAC
purity = conn.extra_dejson.get('purity', '').lower()
if purity == 'new':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_NEW
elif purity == 'self':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_SELF
elif purity == 'default':
conn_config['purity'] = cx_Oracle.ATTR_PURITY_DEFAULT
conn = cx_Oracle.connect(**conn_config)
if mod is not None:
conn.module = mod
return conn
|
[
"Returns",
"a",
"oracle",
"connection",
"object",
"Optional",
"parameters",
"for",
"using",
"a",
"custom",
"DSN",
"connection",
"(",
"instead",
"of",
"using",
"a",
"server",
"alias",
"from",
"tnsnames",
".",
"ora",
")",
"The",
"dsn",
"(",
"data",
"source",
"name",
")",
"is",
"the",
"TNS",
"entry",
"(",
"from",
"the",
"Oracle",
"names",
"server",
"or",
"tnsnames",
".",
"ora",
"file",
")",
"or",
"is",
"a",
"string",
"like",
"the",
"one",
"returned",
"from",
"makedsn",
"()",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/oracle_hook.py#L37-L115
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"conn",
"=",
"self",
".",
"get_connection",
"(",
"self",
".",
"oracle_conn_id",
")",
"conn_config",
"=",
"{",
"'user'",
":",
"conn",
".",
"login",
",",
"'password'",
":",
"conn",
".",
"password",
"}",
"dsn",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'dsn'",
",",
"None",
")",
"sid",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'sid'",
",",
"None",
")",
"mod",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'module'",
",",
"None",
")",
"service_name",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'service_name'",
",",
"None",
")",
"port",
"=",
"conn",
".",
"port",
"if",
"conn",
".",
"port",
"else",
"1521",
"if",
"dsn",
"and",
"sid",
"and",
"not",
"service_name",
":",
"conn_config",
"[",
"'dsn'",
"]",
"=",
"cx_Oracle",
".",
"makedsn",
"(",
"dsn",
",",
"port",
",",
"sid",
")",
"elif",
"dsn",
"and",
"service_name",
"and",
"not",
"sid",
":",
"conn_config",
"[",
"'dsn'",
"]",
"=",
"cx_Oracle",
".",
"makedsn",
"(",
"dsn",
",",
"port",
",",
"service_name",
"=",
"service_name",
")",
"else",
":",
"conn_config",
"[",
"'dsn'",
"]",
"=",
"conn",
".",
"host",
"if",
"'encoding'",
"in",
"conn",
".",
"extra_dejson",
":",
"conn_config",
"[",
"'encoding'",
"]",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'encoding'",
")",
"# if `encoding` is specific but `nencoding` is not",
"# `nencoding` should use same values as `encoding` to set encoding, inspired by",
"# https://github.com/oracle/python-cx_Oracle/issues/157#issuecomment-371877993",
"if",
"'nencoding'",
"not",
"in",
"conn",
".",
"extra_dejson",
":",
"conn_config",
"[",
"'nencoding'",
"]",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'encoding'",
")",
"if",
"'nencoding'",
"in",
"conn",
".",
"extra_dejson",
":",
"conn_config",
"[",
"'nencoding'",
"]",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'nencoding'",
")",
"if",
"'threaded'",
"in",
"conn",
".",
"extra_dejson",
":",
"conn_config",
"[",
"'threaded'",
"]",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'threaded'",
")",
"if",
"'events'",
"in",
"conn",
".",
"extra_dejson",
":",
"conn_config",
"[",
"'events'",
"]",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'events'",
")",
"mode",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'mode'",
",",
"''",
")",
".",
"lower",
"(",
")",
"if",
"mode",
"==",
"'sysdba'",
":",
"conn_config",
"[",
"'mode'",
"]",
"=",
"cx_Oracle",
".",
"SYSDBA",
"elif",
"mode",
"==",
"'sysasm'",
":",
"conn_config",
"[",
"'mode'",
"]",
"=",
"cx_Oracle",
".",
"SYSASM",
"elif",
"mode",
"==",
"'sysoper'",
":",
"conn_config",
"[",
"'mode'",
"]",
"=",
"cx_Oracle",
".",
"SYSOPER",
"elif",
"mode",
"==",
"'sysbkp'",
":",
"conn_config",
"[",
"'mode'",
"]",
"=",
"cx_Oracle",
".",
"SYSBKP",
"elif",
"mode",
"==",
"'sysdgd'",
":",
"conn_config",
"[",
"'mode'",
"]",
"=",
"cx_Oracle",
".",
"SYSDGD",
"elif",
"mode",
"==",
"'syskmt'",
":",
"conn_config",
"[",
"'mode'",
"]",
"=",
"cx_Oracle",
".",
"SYSKMT",
"elif",
"mode",
"==",
"'sysrac'",
":",
"conn_config",
"[",
"'mode'",
"]",
"=",
"cx_Oracle",
".",
"SYSRAC",
"purity",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'purity'",
",",
"''",
")",
".",
"lower",
"(",
")",
"if",
"purity",
"==",
"'new'",
":",
"conn_config",
"[",
"'purity'",
"]",
"=",
"cx_Oracle",
".",
"ATTR_PURITY_NEW",
"elif",
"purity",
"==",
"'self'",
":",
"conn_config",
"[",
"'purity'",
"]",
"=",
"cx_Oracle",
".",
"ATTR_PURITY_SELF",
"elif",
"purity",
"==",
"'default'",
":",
"conn_config",
"[",
"'purity'",
"]",
"=",
"cx_Oracle",
".",
"ATTR_PURITY_DEFAULT",
"conn",
"=",
"cx_Oracle",
".",
"connect",
"(",
"*",
"*",
"conn_config",
")",
"if",
"mod",
"is",
"not",
"None",
":",
"conn",
".",
"module",
"=",
"mod",
"return",
"conn"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
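A hedged sketch of OracleHook.get_conn above, showing how the connection's Extra field drives DSN construction: with 'dsn' and 'service_name' set and no 'sid', the hook calls cx_Oracle.makedsn(dsn, port, service_name=...). The connection id and extras are illustrative.

from airflow.hooks.oracle_hook import OracleHook

# Assumes an Airflow connection 'oracle_default' whose Extra field is:
#   {"dsn": "db.example.com", "service_name": "orclpdb1"}
hook = OracleHook(oracle_conn_id='oracle_default')
conn = hook.get_conn()                    # a cx_Oracle connection
cur = conn.cursor()
cur.execute("SELECT sysdate FROM dual")   # note: no trailing semicolon
print(cur.fetchone())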
OracleHook.insert_rows
|
A generic way to insert a set of tuples into a table;
the whole set of inserts is treated as one transaction.
Changes from the standard DbApiHook implementation:
- Oracle SQL queries in cx_Oracle cannot be terminated with a semicolon (`;`)
- Replace NaN values with NULL, detected via `numpy.isnan` (not
`is_nan()`, because of input type errors for strings)
- Coerce datetime cells to Oracle DATETIME format during insert
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table
:type target_fields: iterable of str
:param commit_every: the maximum number of rows to insert in one transaction.
Defaults to 1000; must be greater than 0.
Set to 1 to insert each row in its own transaction.
:type commit_every: int
|
airflow/hooks/oracle_hook.py
|
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
the whole set of inserts is treated as one transaction
Changes from standard DbApiHook implementation:
- Oracle SQL queries in cx_Oracle can not be terminated with a semicolon (`;`)
- Replace NaN values with NULL using `numpy.nan_to_num` (not using
`is_nan()` because of input types error for strings)
- Coerce datetime cells to Oracle DATETIME format during insert
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table
:type target_fields: iterable of str
:param commit_every: the maximum number of rows to insert in one transaction
Default 1000, Set greater than 0.
Set 1 to insert each row in each single transaction
:type commit_every: int
"""
if target_fields:
target_fields = ', '.join(target_fields)
target_fields = '({})'.format(target_fields)
else:
target_fields = ''
conn = self.get_conn()
cur = conn.cursor()
if self.supports_autocommit:
cur.execute('SET autocommit = 0')
conn.commit()
i = 0
for row in rows:
i += 1
lst = []
for cell in row:
if isinstance(cell, basestring):
lst.append("'" + str(cell).replace("'", "''") + "'")
elif cell is None:
lst.append('NULL')
elif type(cell) == float and \
numpy.isnan(cell): # coerce numpy NaN to NULL
lst.append('NULL')
elif isinstance(cell, numpy.datetime64):
lst.append("'" + str(cell) + "'")
elif isinstance(cell, datetime):
lst.append("to_date('" +
cell.strftime('%Y-%m-%d %H:%M:%S') +
"','YYYY-MM-DD HH24:MI:SS')")
else:
lst.append(str(cell))
values = tuple(lst)
sql = 'INSERT /*+ APPEND */ ' \
'INTO {0} {1} VALUES ({2})'.format(table,
target_fields,
','.join(values))
cur.execute(sql)
if i % commit_every == 0:
conn.commit()
self.log.info('Loaded %s into %s rows so far', i, table)
conn.commit()
cur.close()
conn.close()
self.log.info('Done loading. Loaded a total of %s rows', i)
|
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
the whole set of inserts is treated as one transaction
Changes from standard DbApiHook implementation:
- Oracle SQL queries in cx_Oracle can not be terminated with a semicolon (`;`)
- Replace NaN values with NULL using `numpy.nan_to_num` (not using
`is_nan()` because of input types error for strings)
- Coerce datetime cells to Oracle DATETIME format during insert
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table
:type target_fields: iterable of str
:param commit_every: the maximum number of rows to insert in one transaction
Default 1000, Set greater than 0.
Set 1 to insert each row in each single transaction
:type commit_every: int
"""
if target_fields:
target_fields = ', '.join(target_fields)
target_fields = '({})'.format(target_fields)
else:
target_fields = ''
conn = self.get_conn()
cur = conn.cursor()
if self.supports_autocommit:
cur.execute('SET autocommit = 0')
conn.commit()
i = 0
for row in rows:
i += 1
lst = []
for cell in row:
if isinstance(cell, basestring):
lst.append("'" + str(cell).replace("'", "''") + "'")
elif cell is None:
lst.append('NULL')
elif type(cell) == float and \
numpy.isnan(cell): # coerce numpy NaN to NULL
lst.append('NULL')
elif isinstance(cell, numpy.datetime64):
lst.append("'" + str(cell) + "'")
elif isinstance(cell, datetime):
lst.append("to_date('" +
cell.strftime('%Y-%m-%d %H:%M:%S') +
"','YYYY-MM-DD HH24:MI:SS')")
else:
lst.append(str(cell))
values = tuple(lst)
sql = 'INSERT /*+ APPEND */ ' \
'INTO {0} {1} VALUES ({2})'.format(table,
target_fields,
','.join(values))
cur.execute(sql)
if i % commit_every == 0:
conn.commit()
self.log.info('Loaded %s into %s rows so far', i, table)
conn.commit()
cur.close()
conn.close()
self.log.info('Done loading. Loaded a total of %s rows', i)
|
[
"A",
"generic",
"way",
"to",
"insert",
"a",
"set",
"of",
"tuples",
"into",
"a",
"table",
"the",
"whole",
"set",
"of",
"inserts",
"is",
"treated",
"as",
"one",
"transaction",
"Changes",
"from",
"standard",
"DbApiHook",
"implementation",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/oracle_hook.py#L117-L182
|
[
"def",
"insert_rows",
"(",
"self",
",",
"table",
",",
"rows",
",",
"target_fields",
"=",
"None",
",",
"commit_every",
"=",
"1000",
")",
":",
"if",
"target_fields",
":",
"target_fields",
"=",
"', '",
".",
"join",
"(",
"target_fields",
")",
"target_fields",
"=",
"'({})'",
".",
"format",
"(",
"target_fields",
")",
"else",
":",
"target_fields",
"=",
"''",
"conn",
"=",
"self",
".",
"get_conn",
"(",
")",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"if",
"self",
".",
"supports_autocommit",
":",
"cur",
".",
"execute",
"(",
"'SET autocommit = 0'",
")",
"conn",
".",
"commit",
"(",
")",
"i",
"=",
"0",
"for",
"row",
"in",
"rows",
":",
"i",
"+=",
"1",
"lst",
"=",
"[",
"]",
"for",
"cell",
"in",
"row",
":",
"if",
"isinstance",
"(",
"cell",
",",
"basestring",
")",
":",
"lst",
".",
"append",
"(",
"\"'\"",
"+",
"str",
"(",
"cell",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"''\"",
")",
"+",
"\"'\"",
")",
"elif",
"cell",
"is",
"None",
":",
"lst",
".",
"append",
"(",
"'NULL'",
")",
"elif",
"type",
"(",
"cell",
")",
"==",
"float",
"and",
"numpy",
".",
"isnan",
"(",
"cell",
")",
":",
"# coerce numpy NaN to NULL",
"lst",
".",
"append",
"(",
"'NULL'",
")",
"elif",
"isinstance",
"(",
"cell",
",",
"numpy",
".",
"datetime64",
")",
":",
"lst",
".",
"append",
"(",
"\"'\"",
"+",
"str",
"(",
"cell",
")",
"+",
"\"'\"",
")",
"elif",
"isinstance",
"(",
"cell",
",",
"datetime",
")",
":",
"lst",
".",
"append",
"(",
"\"to_date('\"",
"+",
"cell",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
"+",
"\"','YYYY-MM-DD HH24:MI:SS')\"",
")",
"else",
":",
"lst",
".",
"append",
"(",
"str",
"(",
"cell",
")",
")",
"values",
"=",
"tuple",
"(",
"lst",
")",
"sql",
"=",
"'INSERT /*+ APPEND */ '",
"'INTO {0} {1} VALUES ({2})'",
".",
"format",
"(",
"table",
",",
"target_fields",
",",
"','",
".",
"join",
"(",
"values",
")",
")",
"cur",
".",
"execute",
"(",
"sql",
")",
"if",
"i",
"%",
"commit_every",
"==",
"0",
":",
"conn",
".",
"commit",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Loaded %s into %s rows so far'",
",",
"i",
",",
"table",
")",
"conn",
".",
"commit",
"(",
")",
"cur",
".",
"close",
"(",
")",
"conn",
".",
"close",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Done loading. Loaded a total of %s rows'",
",",
"i",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
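An illustrative call to OracleHook.insert_rows above; note how the cell-coercion rules in the inner loop map Python values to SQL literals:

from datetime import datetime
from airflow.hooks.oracle_hook import OracleHook

hook = OracleHook(oracle_conn_id='oracle_default')
rows = [
    (1, "o'hara", datetime(2019, 1, 1, 12, 0)),  # quote doubled; datetime -> to_date(...)
    (2, 'bob', None),                            # None -> NULL
]
hook.insert_rows(table='users', rows=rows,
                 target_fields=['id', 'name', 'created_at'],
                 commit_every=1000)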
OracleHook.bulk_insert_rows
|
A performant bulk insert for cx_Oracle
that uses prepared statements via `executemany()`.
For best performance, pass in `rows` as an iterator.
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table, default None.
If None, each row should be in the same order as the table's columns.
:type target_fields: iterable of str or None
:param commit_every: the maximum number of rows to insert in one transaction.
Defaults to 5000; must be greater than 0. Set to 1 to insert each row
in its own transaction.
:type commit_every: int
|
airflow/hooks/oracle_hook.py
|
def bulk_insert_rows(self, table, rows, target_fields=None, commit_every=5000):
"""
A performant bulk insert for cx_Oracle
that uses prepared statements via `executemany()`.
For best performance, pass in `rows` as an iterator.
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table, default None.
If None, each rows should have some order as table columns name
:type target_fields: iterable of str Or None
:param commit_every: the maximum number of rows to insert in one transaction
Default 5000. Set greater than 0. Set 1 to insert each row in each transaction
:type commit_every: int
"""
if not rows:
raise ValueError("parameter rows could not be None or empty iterable")
conn = self.get_conn()
cursor = conn.cursor()
values_base = target_fields if target_fields else rows[0]
prepared_stm = 'insert into {tablename} {columns} values ({values})'.format(
tablename=table,
columns='({})'.format(', '.join(target_fields)) if target_fields else '',
values=', '.join(':%s' % i for i in range(1, len(values_base) + 1)),
)
row_count = 0
# Chunk the rows
row_chunk = []
for row in rows:
row_chunk.append(row)
row_count += 1
if row_count % commit_every == 0:
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit()
self.log.info('[%s] inserted %s rows', table, row_count)
# Empty chunk
row_chunk = []
# Commit the leftover chunk
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit()
self.log.info('[%s] inserted %s rows', table, row_count)
cursor.close()
conn.close()
|
def bulk_insert_rows(self, table, rows, target_fields=None, commit_every=5000):
"""
A performant bulk insert for cx_Oracle
that uses prepared statements via `executemany()`.
For best performance, pass in `rows` as an iterator.
:param table: target Oracle table, use dot notation to target a
specific database
:type table: str
:param rows: the rows to insert into the table
:type rows: iterable of tuples
:param target_fields: the names of the columns to fill in the table, default None.
If None, each rows should have some order as table columns name
:type target_fields: iterable of str Or None
:param commit_every: the maximum number of rows to insert in one transaction
Default 5000. Set greater than 0. Set 1 to insert each row in each transaction
:type commit_every: int
"""
if not rows:
raise ValueError("parameter rows could not be None or empty iterable")
conn = self.get_conn()
cursor = conn.cursor()
values_base = target_fields if target_fields else rows[0]
prepared_stm = 'insert into {tablename} {columns} values ({values})'.format(
tablename=table,
columns='({})'.format(', '.join(target_fields)) if target_fields else '',
values=', '.join(':%s' % i for i in range(1, len(values_base) + 1)),
)
row_count = 0
# Chunk the rows
row_chunk = []
for row in rows:
row_chunk.append(row)
row_count += 1
if row_count % commit_every == 0:
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit()
self.log.info('[%s] inserted %s rows', table, row_count)
# Empty chunk
row_chunk = []
# Commit the leftover chunk
cursor.prepare(prepared_stm)
cursor.executemany(None, row_chunk)
conn.commit()
self.log.info('[%s] inserted %s rows', table, row_count)
cursor.close()
conn.close()
|
[
"A",
"performant",
"bulk",
"insert",
"for",
"cx_Oracle",
"that",
"uses",
"prepared",
"statements",
"via",
"executemany",
"()",
".",
"For",
"best",
"performance",
"pass",
"in",
"rows",
"as",
"an",
"iterator",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/oracle_hook.py#L184-L231
|
[
"def",
"bulk_insert_rows",
"(",
"self",
",",
"table",
",",
"rows",
",",
"target_fields",
"=",
"None",
",",
"commit_every",
"=",
"5000",
")",
":",
"if",
"not",
"rows",
":",
"raise",
"ValueError",
"(",
"\"parameter rows could not be None or empty iterable\"",
")",
"conn",
"=",
"self",
".",
"get_conn",
"(",
")",
"cursor",
"=",
"conn",
".",
"cursor",
"(",
")",
"values_base",
"=",
"target_fields",
"if",
"target_fields",
"else",
"rows",
"[",
"0",
"]",
"prepared_stm",
"=",
"'insert into {tablename} {columns} values ({values})'",
".",
"format",
"(",
"tablename",
"=",
"table",
",",
"columns",
"=",
"'({})'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"target_fields",
")",
")",
"if",
"target_fields",
"else",
"''",
",",
"values",
"=",
"', '",
".",
"join",
"(",
"':%s'",
"%",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"values_base",
")",
"+",
"1",
")",
")",
",",
")",
"row_count",
"=",
"0",
"# Chunk the rows",
"row_chunk",
"=",
"[",
"]",
"for",
"row",
"in",
"rows",
":",
"row_chunk",
".",
"append",
"(",
"row",
")",
"row_count",
"+=",
"1",
"if",
"row_count",
"%",
"commit_every",
"==",
"0",
":",
"cursor",
".",
"prepare",
"(",
"prepared_stm",
")",
"cursor",
".",
"executemany",
"(",
"None",
",",
"row_chunk",
")",
"conn",
".",
"commit",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'[%s] inserted %s rows'",
",",
"table",
",",
"row_count",
")",
"# Empty chunk",
"row_chunk",
"=",
"[",
"]",
"# Commit the leftover chunk",
"cursor",
".",
"prepare",
"(",
"prepared_stm",
")",
"cursor",
".",
"executemany",
"(",
"None",
",",
"row_chunk",
")",
"conn",
".",
"commit",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'[%s] inserted %s rows'",
",",
"table",
",",
"row_count",
")",
"cursor",
".",
"close",
"(",
")",
"conn",
".",
"close",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
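By contrast, bulk_insert_rows above binds parameters through executemany() instead of interpolating literals. One subtlety: when target_fields is None the code reads rows[0] to size the bind list, so rows must then be indexable, not a bare iterator. An illustrative call:

from airflow.hooks.oracle_hook import OracleHook

hook = OracleHook(oracle_conn_id='oracle_default')
hook.bulk_insert_rows(
    table='events',
    rows=((i, 'event-%d' % i) for i in range(100000)),  # generator is fine here
    target_fields=['id', 'name'],                       # ...because this is set
    commit_every=5000,
)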
DbApiHook.get_conn
|
Returns a connection object
|
airflow/hooks/dbapi_hook.py
|
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
|
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
|
[
"Returns",
"a",
"connection",
"object"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L55-L63
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"db",
"=",
"self",
".",
"get_connection",
"(",
"getattr",
"(",
"self",
",",
"self",
".",
"conn_name_attr",
")",
")",
"return",
"self",
".",
"connector",
".",
"connect",
"(",
"host",
"=",
"db",
".",
"host",
",",
"port",
"=",
"db",
".",
"port",
",",
"username",
"=",
"db",
".",
"login",
",",
"schema",
"=",
"db",
".",
"schema",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
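DbApiHook.get_conn above assumes a subclass has set a DB-API ``connector`` module; most real hooks override it entirely. A self-contained stub showing the contract the default implementation expects:

import collections

Conn = collections.namedtuple('Conn', 'host port login schema')

class StubDriver(object):
    # Stands in for a DB-API module whose connect() takes these keywords.
    @staticmethod
    def connect(host, port, username, schema):
        return 'conn(%s:%s/%s as %s)' % (host, port, schema, username)

class StubHook(object):
    conn_name_attr = 'stub_conn_id'
    stub_conn_id = 'stub_default'
    connector = StubDriver

    def get_connection(self, conn_id):
        # Airflow would look this up in the metastore; stubbed here.
        return Conn('db.example.com', 5432, 'airflow', 'public')

    def get_conn(self):
        db = self.get_connection(getattr(self, self.conn_name_attr))
        return self.connector.connect(host=db.host, port=db.port,
                                      username=db.login, schema=db.schema)

print(StubHook().get_conn())   # conn(db.example.com:5432/public as airflow)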
DbApiHook.get_pandas_df
|
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
|
airflow/hooks/dbapi_hook.py
|
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
|
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
|
[
"Executes",
"the",
"sql",
"and",
"returns",
"a",
"pandas",
"dataframe"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L81-L94
|
[
"def",
"get_pandas_df",
"(",
"self",
",",
"sql",
",",
"parameters",
"=",
"None",
")",
":",
"import",
"pandas",
".",
"io",
".",
"sql",
"as",
"psql",
"with",
"closing",
"(",
"self",
".",
"get_conn",
"(",
")",
")",
"as",
"conn",
":",
"return",
"psql",
".",
"read_sql",
"(",
"sql",
",",
"con",
"=",
"conn",
",",
"params",
"=",
"parameters",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
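Illustrative use of get_pandas_df above through a concrete subclass (PostgresHook is one of the DbApiHook implementations in this tree); the connection id and table are hypothetical:

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='analytics_db')
df = hook.get_pandas_df(
    sql='SELECT id, name FROM users WHERE created_at >= %(since)s',
    parameters={'since': '2019-01-01'},   # psycopg2-style named placeholder
)
print(df.shape)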
DbApiHook.get_records
|
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
|
airflow/hooks/dbapi_hook.py
|
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
|
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
|
[
"Executes",
"the",
"sql",
"and",
"returns",
"a",
"set",
"of",
"records",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L96-L112
|
[
"def",
"get_records",
"(",
"self",
",",
"sql",
",",
"parameters",
"=",
"None",
")",
":",
"with",
"closing",
"(",
"self",
".",
"get_conn",
"(",
")",
")",
"as",
"conn",
":",
"with",
"closing",
"(",
"conn",
".",
"cursor",
"(",
")",
")",
"as",
"cur",
":",
"if",
"parameters",
"is",
"not",
"None",
":",
"cur",
".",
"execute",
"(",
"sql",
",",
"parameters",
")",
"else",
":",
"cur",
".",
"execute",
"(",
"sql",
")",
"return",
"cur",
".",
"fetchall",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
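The same pattern for get_records above, which hands back cursor.fetchall() directly (identifiers again hypothetical):

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='analytics_db')
rows = hook.get_records('SELECT id, name FROM users WHERE active = %s',
                        parameters=(True,))
for user_id, name in rows:
    print(user_id, name)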
DbApiHook.get_first
|
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
|
airflow/hooks/dbapi_hook.py
|
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
|
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
|
[
"Executes",
"the",
"sql",
"and",
"returns",
"the",
"first",
"resulting",
"row",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L114-L130
|
[
"def",
"get_first",
"(",
"self",
",",
"sql",
",",
"parameters",
"=",
"None",
")",
":",
"with",
"closing",
"(",
"self",
".",
"get_conn",
"(",
")",
")",
"as",
"conn",
":",
"with",
"closing",
"(",
"conn",
".",
"cursor",
"(",
")",
")",
"as",
"cur",
":",
"if",
"parameters",
"is",
"not",
"None",
":",
"cur",
".",
"execute",
"(",
"sql",
",",
"parameters",
")",
"else",
":",
"cur",
".",
"execute",
"(",
"sql",
")",
"return",
"cur",
".",
"fetchone",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
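get_first above is the single-row variant; it returns one tuple or None, so guard accordingly:

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='analytics_db')
row = hook.get_first('SELECT count(*) FROM users')
count = row[0] if row else 0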
DbApiHook.run
|
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
|
airflow/hooks/dbapi_hook.py
|
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if parameters is not None:
self.log.info("{} with parameters {}".format(s, parameters))
cur.execute(s, parameters)
else:
self.log.info(s)
cur.execute(s)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
|
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if parameters is not None:
self.log.info("{} with parameters {}".format(s, parameters))
cur.execute(s, parameters)
else:
self.log.info(s)
cur.execute(s)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
|
[
"Runs",
"a",
"command",
"or",
"a",
"list",
"of",
"commands",
".",
"Pass",
"a",
"list",
"of",
"sql",
"statements",
"to",
"the",
"sql",
"parameter",
"to",
"get",
"them",
"to",
"execute",
"sequentially"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L132-L166
|
[
"def",
"run",
"(",
"self",
",",
"sql",
",",
"autocommit",
"=",
"False",
",",
"parameters",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"sql",
",",
"basestring",
")",
":",
"sql",
"=",
"[",
"sql",
"]",
"with",
"closing",
"(",
"self",
".",
"get_conn",
"(",
")",
")",
"as",
"conn",
":",
"if",
"self",
".",
"supports_autocommit",
":",
"self",
".",
"set_autocommit",
"(",
"conn",
",",
"autocommit",
")",
"with",
"closing",
"(",
"conn",
".",
"cursor",
"(",
")",
")",
"as",
"cur",
":",
"for",
"s",
"in",
"sql",
":",
"if",
"parameters",
"is",
"not",
"None",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"{} with parameters {}\"",
".",
"format",
"(",
"s",
",",
"parameters",
")",
")",
"cur",
".",
"execute",
"(",
"s",
",",
"parameters",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"s",
")",
"cur",
".",
"execute",
"(",
"s",
")",
"# If autocommit was set to False for db that supports autocommit,",
"# or if db does not supports autocommit, we do a manual commit.",
"if",
"not",
"self",
".",
"get_autocommit",
"(",
"conn",
")",
":",
"conn",
".",
"commit",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
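A sketch of run above executing several statements on one connection; with autocommit left off, the final conn.commit() applies both statements together on databases that honor it. Table names are illustrative.

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='analytics_db')
hook.run(
    sql=[
        'DELETE FROM staging_users',
        'INSERT INTO staging_users SELECT * FROM users WHERE active',
    ],
    autocommit=False,
)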
DbApiHook.set_autocommit
|
Sets the autocommit flag on the connection
|
airflow/hooks/dbapi_hook.py
|
def set_autocommit(self, conn, autocommit):
"""
Sets the autocommit flag on the connection
"""
if not self.supports_autocommit and autocommit:
self.log.warn(
("%s connection doesn't support "
"autocommit but autocommit activated."),
getattr(self, self.conn_name_attr))
conn.autocommit = autocommit
|
def set_autocommit(self, conn, autocommit):
"""
Sets the autocommit flag on the connection
"""
if not self.supports_autocommit and autocommit:
self.log.warn(
("%s connection doesn't support "
"autocommit but autocommit activated."),
getattr(self, self.conn_name_attr))
conn.autocommit = autocommit
|
[
"Sets",
"the",
"autocommit",
"flag",
"on",
"the",
"connection"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L168-L177
|
[
"def",
"set_autocommit",
"(",
"self",
",",
"conn",
",",
"autocommit",
")",
":",
"if",
"not",
"self",
".",
"supports_autocommit",
"and",
"autocommit",
":",
"self",
".",
"log",
".",
"warn",
"(",
"(",
"\"%s connection doesn't support \"",
"\"autocommit but autocommit activated.\"",
")",
",",
"getattr",
"(",
"self",
",",
"self",
".",
"conn_name_attr",
")",
")",
"conn",
".",
"autocommit",
"=",
"autocommit"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
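set_autocommit above is mostly called internally by run() and insert_rows(), but it can be used directly; note that it still assigns conn.autocommit even after warning about a driver that does not support it:

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='analytics_db')
conn = hook.get_conn()
hook.set_autocommit(conn, True)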
DbApiHook.insert_rows
|
A generic way to insert a set of tuples into a table;
a new transaction is created every commit_every rows.
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
|
airflow/hooks/dbapi_hook.py
|
def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
placeholders = ["%s", ] * len(values)
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded %s into %s rows so far", i, table
)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
|
def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
placeholders = ["%s", ] * len(values)
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded %s into %s rows so far", i, table
)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
|
[
"A",
"generic",
"way",
"to",
"insert",
"a",
"set",
"of",
"tuples",
"into",
"a",
"table",
"a",
"new",
"transaction",
"is",
"created",
"every",
"commit_every",
"rows"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L200-L253
|
[
"def",
"insert_rows",
"(",
"self",
",",
"table",
",",
"rows",
",",
"target_fields",
"=",
"None",
",",
"commit_every",
"=",
"1000",
",",
"replace",
"=",
"False",
")",
":",
"if",
"target_fields",
":",
"target_fields",
"=",
"\", \"",
".",
"join",
"(",
"target_fields",
")",
"target_fields",
"=",
"\"({})\"",
".",
"format",
"(",
"target_fields",
")",
"else",
":",
"target_fields",
"=",
"''",
"i",
"=",
"0",
"with",
"closing",
"(",
"self",
".",
"get_conn",
"(",
")",
")",
"as",
"conn",
":",
"if",
"self",
".",
"supports_autocommit",
":",
"self",
".",
"set_autocommit",
"(",
"conn",
",",
"False",
")",
"conn",
".",
"commit",
"(",
")",
"with",
"closing",
"(",
"conn",
".",
"cursor",
"(",
")",
")",
"as",
"cur",
":",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"rows",
",",
"1",
")",
":",
"lst",
"=",
"[",
"]",
"for",
"cell",
"in",
"row",
":",
"lst",
".",
"append",
"(",
"self",
".",
"_serialize_cell",
"(",
"cell",
",",
"conn",
")",
")",
"values",
"=",
"tuple",
"(",
"lst",
")",
"placeholders",
"=",
"[",
"\"%s\"",
",",
"]",
"*",
"len",
"(",
"values",
")",
"if",
"not",
"replace",
":",
"sql",
"=",
"\"INSERT INTO \"",
"else",
":",
"sql",
"=",
"\"REPLACE INTO \"",
"sql",
"+=",
"\"{0} {1} VALUES ({2})\"",
".",
"format",
"(",
"table",
",",
"target_fields",
",",
"\",\"",
".",
"join",
"(",
"placeholders",
")",
")",
"cur",
".",
"execute",
"(",
"sql",
",",
"values",
")",
"if",
"commit_every",
"and",
"i",
"%",
"commit_every",
"==",
"0",
":",
"conn",
".",
"commit",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Loaded %s into %s rows so far\"",
",",
"i",
",",
"table",
")",
"conn",
".",
"commit",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Done loading. Loaded a total of %s rows\"",
",",
"i",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
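The commit-every-N batching in insert_rows is easy to exercise standalone. The sketch below reproduces it with sqlite3; note sqlite3 uses '?' placeholders while the hook above hard-codes '%s', so this illustrates the pattern rather than being a drop-in call (table name and rows are assumptions):

import sqlite3

def insert_rows(conn, table, rows, commit_every=1000):
    i = 0
    cur = conn.cursor()
    for i, row in enumerate(rows, 1):
        placeholders = ",".join("?" * len(row))  # '?' instead of the hook's '%s'
        cur.execute("INSERT INTO {} VALUES ({})".format(table, placeholders), row)
        if commit_every and i % commit_every == 0:
            conn.commit()  # close the transaction every commit_every rows
    conn.commit()  # final commit for the last partial batch
    return i

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (a INTEGER, b TEXT)")
print(insert_rows(conn, "t", [(1, "x"), (2, "y"), (3, "z")], commit_every=2))  # 3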
test
|
DbApiHook._serialize_cell
|
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
|
airflow/hooks/dbapi_hook.py
|
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
|
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
|
[
"Returns",
"the",
"SQL",
"literal",
"of",
"the",
"cell",
"as",
"a",
"string",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L256-L272
|
[
"def",
"_serialize_cell",
"(",
"cell",
",",
"conn",
"=",
"None",
")",
":",
"if",
"cell",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"cell",
",",
"datetime",
")",
":",
"return",
"cell",
".",
"isoformat",
"(",
")",
"return",
"str",
"(",
"cell",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
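_serialize_cell has exactly three branches, which can be checked in isolation: None passes through, datetimes become ISO-8601 strings, and everything else goes through str(). A self-contained check of the same logic:

from datetime import datetime

def serialize_cell(cell):
    # Same three branches as DbApiHook._serialize_cell above.
    if cell is None:
        return None
    if isinstance(cell, datetime):
        return cell.isoformat()
    return str(cell)

print(serialize_cell(None))                  # None
print(serialize_cell(datetime(2019, 1, 1)))  # 2019-01-01T00:00:00
print(serialize_cell(42))                    # 42 (as the string '42')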
test
|
Airflow.health
|
An endpoint helping check the health status of the Airflow instance,
including metadatabase and scheduler.
|
airflow/www/views.py
|
def health(self, session=None):
"""
An endpoint helping check the health status of the Airflow instance,
including metadatabase and scheduler.
"""
BJ = jobs.BaseJob
payload = {}
scheduler_health_check_threshold = timedelta(seconds=conf.getint('scheduler',
'scheduler_health_check_threshold'
))
latest_scheduler_heartbeat = None
payload['metadatabase'] = {'status': 'healthy'}
try:
latest_scheduler_heartbeat = session.query(func.max(BJ.latest_heartbeat)).\
filter(BJ.state == 'running', BJ.job_type == 'SchedulerJob').\
scalar()
except Exception:
payload['metadatabase']['status'] = 'unhealthy'
if not latest_scheduler_heartbeat:
scheduler_status = 'unhealthy'
else:
if timezone.utcnow() - latest_scheduler_heartbeat <= scheduler_health_check_threshold:
scheduler_status = 'healthy'
else:
scheduler_status = 'unhealthy'
payload['scheduler'] = {'status': scheduler_status,
'latest_scheduler_heartbeat': str(latest_scheduler_heartbeat)}
return wwwutils.json_response(payload)
|
def health(self, session=None):
"""
An endpoint helping check the health status of the Airflow instance,
including metadatabase and scheduler.
"""
BJ = jobs.BaseJob
payload = {}
scheduler_health_check_threshold = timedelta(seconds=conf.getint('scheduler',
'scheduler_health_check_threshold'
))
latest_scheduler_heartbeat = None
payload['metadatabase'] = {'status': 'healthy'}
try:
latest_scheduler_heartbeat = session.query(func.max(BJ.latest_heartbeat)).\
filter(BJ.state == 'running', BJ.job_type == 'SchedulerJob').\
scalar()
except Exception:
payload['metadatabase']['status'] = 'unhealthy'
if not latest_scheduler_heartbeat:
scheduler_status = 'unhealthy'
else:
if timezone.utcnow() - latest_scheduler_heartbeat <= scheduler_health_check_threshold:
scheduler_status = 'healthy'
else:
scheduler_status = 'unhealthy'
payload['scheduler'] = {'status': scheduler_status,
'latest_scheduler_heartbeat': str(latest_scheduler_heartbeat)}
return wwwutils.json_response(payload)
|
[
"An",
"endpoint",
"helping",
"check",
"the",
"health",
"status",
"of",
"the",
"Airflow",
"instance",
"including",
"metadatabase",
"and",
"scheduler",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/views.py#L158-L190
|
[
"def",
"health",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"BJ",
"=",
"jobs",
".",
"BaseJob",
"payload",
"=",
"{",
"}",
"scheduler_health_check_threshold",
"=",
"timedelta",
"(",
"seconds",
"=",
"conf",
".",
"getint",
"(",
"'scheduler'",
",",
"'scheduler_health_check_threshold'",
")",
")",
"latest_scheduler_heartbeat",
"=",
"None",
"payload",
"[",
"'metadatabase'",
"]",
"=",
"{",
"'status'",
":",
"'healthy'",
"}",
"try",
":",
"latest_scheduler_heartbeat",
"=",
"session",
".",
"query",
"(",
"func",
".",
"max",
"(",
"BJ",
".",
"latest_heartbeat",
")",
")",
".",
"filter",
"(",
"BJ",
".",
"state",
"==",
"'running'",
",",
"BJ",
".",
"job_type",
"==",
"'SchedulerJob'",
")",
".",
"scalar",
"(",
")",
"except",
"Exception",
":",
"payload",
"[",
"'metadatabase'",
"]",
"[",
"'status'",
"]",
"=",
"'unhealthy'",
"if",
"not",
"latest_scheduler_heartbeat",
":",
"scheduler_status",
"=",
"'unhealthy'",
"else",
":",
"if",
"timezone",
".",
"utcnow",
"(",
")",
"-",
"latest_scheduler_heartbeat",
"<=",
"scheduler_health_check_threshold",
":",
"scheduler_status",
"=",
"'healthy'",
"else",
":",
"scheduler_status",
"=",
"'unhealthy'",
"payload",
"[",
"'scheduler'",
"]",
"=",
"{",
"'status'",
":",
"scheduler_status",
",",
"'latest_scheduler_heartbeat'",
":",
"str",
"(",
"latest_scheduler_heartbeat",
")",
"}",
"return",
"wwwutils",
".",
"json_response",
"(",
"payload",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
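From a client, checking health is a single GET whose JSON payload has the shape built above. A hedged sketch assuming the webserver listens on localhost:8080 and serves the view at /health (host, port, and route are assumptions, not shown in the record):

import requests  # third-party: pip install requests

payload = requests.get("http://localhost:8080/health", timeout=5).json()
# Expected shape per the view above:
# {"metadatabase": {"status": ...},
#  "scheduler": {"status": ..., "latest_scheduler_heartbeat": ...}}
if payload["scheduler"]["status"] != "healthy":
    print("scheduler unhealthy; last heartbeat:",
          payload["scheduler"]["latest_scheduler_heartbeat"])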
test
|
Airflow.extra_links
|
A restful endpoint that returns external links for a given Operator
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
|
airflow/www/views.py
|
def extra_links(self):
"""
A restful endpoint that returns external links for a given Operator
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
link_name = request.args.get('link_name')
dttm = airflow.utils.timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
response = jsonify(
{'url': None,
'error': "can't find dag {dag} or task_id {task_id}".format(
dag=dag,
task_id=task_id
)}
)
response.status_code = 404
return response
task = dag.get_task(task_id)
try:
url = task.get_extra_links(dttm, link_name)
except ValueError as err:
response = jsonify({'url': None, 'error': str(err)})
response.status_code = 404
return response
if url:
response = jsonify({'error': None, 'url': url})
response.status_code = 200
return response
else:
response = jsonify(
{'url': None, 'error': 'No URL found for {dest}'.format(dest=link_name)})
response.status_code = 404
return response
|
def extra_links(self):
"""
A restful endpoint that returns external links for a given Operator
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
link_name = request.args.get('link_name')
dttm = airflow.utils.timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
response = jsonify(
{'url': None,
'error': "can't find dag {dag} or task_id {task_id}".format(
dag=dag,
task_id=task_id
)}
)
response.status_code = 404
return response
task = dag.get_task(task_id)
try:
url = task.get_extra_links(dttm, link_name)
except ValueError as err:
response = jsonify({'url': None, 'error': str(err)})
response.status_code = 404
return response
if url:
response = jsonify({'error': None, 'url': url})
response.status_code = 200
return response
else:
response = jsonify(
{'url': None, 'error': 'No URL found for {dest}'.format(dest=link_name)})
response.status_code = 404
return response
|
[
"A",
"restful",
"endpoint",
"that",
"returns",
"external",
"links",
"for",
"a",
"given",
"Operator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/views.py#L1772-L1825
|
[
"def",
"extra_links",
"(",
"self",
")",
":",
"dag_id",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'dag_id'",
")",
"task_id",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'task_id'",
")",
"execution_date",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'execution_date'",
")",
"link_name",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'link_name'",
")",
"dttm",
"=",
"airflow",
".",
"utils",
".",
"timezone",
".",
"parse",
"(",
"execution_date",
")",
"dag",
"=",
"dagbag",
".",
"get_dag",
"(",
"dag_id",
")",
"if",
"not",
"dag",
"or",
"task_id",
"not",
"in",
"dag",
".",
"task_ids",
":",
"response",
"=",
"jsonify",
"(",
"{",
"'url'",
":",
"None",
",",
"'error'",
":",
"\"can't find dag {dag} or task_id {task_id}\"",
".",
"format",
"(",
"dag",
"=",
"dag",
",",
"task_id",
"=",
"task_id",
")",
"}",
")",
"response",
".",
"status_code",
"=",
"404",
"return",
"response",
"task",
"=",
"dag",
".",
"get_task",
"(",
"task_id",
")",
"try",
":",
"url",
"=",
"task",
".",
"get_extra_links",
"(",
"dttm",
",",
"link_name",
")",
"except",
"ValueError",
"as",
"err",
":",
"response",
"=",
"jsonify",
"(",
"{",
"'url'",
":",
"None",
",",
"'error'",
":",
"str",
"(",
"err",
")",
"}",
")",
"response",
".",
"status_code",
"=",
"404",
"return",
"response",
"if",
"url",
":",
"response",
"=",
"jsonify",
"(",
"{",
"'error'",
":",
"None",
",",
"'url'",
":",
"url",
"}",
")",
"response",
".",
"status_code",
"=",
"200",
"return",
"response",
"else",
":",
"response",
"=",
"jsonify",
"(",
"{",
"'url'",
":",
"None",
",",
"'error'",
":",
"'No URL found for {dest}'",
".",
"format",
"(",
"dest",
"=",
"link_name",
")",
"}",
")",
"response",
".",
"status_code",
"=",
"404",
"return",
"response"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
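Exercising extra_links from a client is a GET with the four query parameters the view reads; success yields 200 with a url, failure 404 with an error message. A sketch assuming the route is mounted at /extra_links on a local webserver (route, host, and all parameter values are assumptions):

import requests

params = {
    "dag_id": "example_dag",        # hypothetical DAG id
    "task_id": "example_task",      # hypothetical task id
    "execution_date": "2019-01-01T00:00:00+00:00",
    "link_name": "Google",          # a link name the operator registers
}
resp = requests.get("http://localhost:8080/extra_links", params=params)
print(resp.status_code, resp.json())  # 200 {'error': None, 'url': ...} on success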
test
|
DagModelView.get_query
|
Default filters for model
|
airflow/www/views.py
|
def get_query(self):
"""
Default filters for model
"""
return (
super().get_query()
.filter(or_(models.DagModel.is_active,
models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
|
def get_query(self):
"""
Default filters for model
"""
return (
super().get_query()
.filter(or_(models.DagModel.is_active,
models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
|
[
"Default",
"filters",
"for",
"model"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/views.py#L2481-L2490
|
[
"def",
"get_query",
"(",
"self",
")",
":",
"return",
"(",
"super",
"(",
")",
".",
"get_query",
"(",
")",
".",
"filter",
"(",
"or_",
"(",
"models",
".",
"DagModel",
".",
"is_active",
",",
"models",
".",
"DagModel",
".",
"is_paused",
")",
")",
".",
"filter",
"(",
"~",
"models",
".",
"DagModel",
".",
"is_subdag",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
DagModelView.get_count_query
|
Default filters for model
|
airflow/www/views.py
|
def get_count_query(self):
"""
Default filters for model
"""
return (
super().get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
|
def get_count_query(self):
"""
Default filters for model
"""
return (
super().get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
|
[
"Default",
"filters",
"for",
"model"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/views.py#L2492-L2500
|
[
"def",
"get_count_query",
"(",
"self",
")",
":",
"return",
"(",
"super",
"(",
")",
".",
"get_count_query",
"(",
")",
".",
"filter",
"(",
"models",
".",
"DagModel",
".",
"is_active",
")",
".",
"filter",
"(",
"~",
"models",
".",
"DagModel",
".",
"is_subdag",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
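get_query and get_count_query compose the same SQLAlchemy operators (or_ for active-or-paused, ~ to exclude subdags). A self-contained toy model makes the combination concrete (the Dag class below is a stand-in for models.DagModel, not Airflow's own):

from sqlalchemy import Boolean, Column, Integer, create_engine, or_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Dag(Base):  # toy stand-in for models.DagModel
    __tablename__ = "dag"
    id = Column(Integer, primary_key=True)
    is_active = Column(Boolean)
    is_paused = Column(Boolean)
    is_subdag = Column(Boolean)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Dag(is_active=True, is_paused=False, is_subdag=False),
                 Dag(is_active=False, is_paused=True, is_subdag=True)])

# Same filters as get_query: active-or-paused DAGs, subdags excluded.
q = (session.query(Dag)
     .filter(or_(Dag.is_active, Dag.is_paused))
     .filter(~Dag.is_subdag))
print(q.count())  # 1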
test
|
CloudantHook.get_conn
|
Opens a connection to the cloudant service and closes it automatically if used as a context manager.
.. note::
In the connection form:
- 'host' equals the 'Account' (optional)
- 'login' equals the 'Username (or API Key)' (required)
- 'password' equals the 'Password' (required)
:return: an authorized cloudant session context manager object.
:rtype: cloudant
|
airflow/contrib/hooks/cloudant_hook.py
|
def get_conn(self):
"""
Opens a connection to the cloudant service and closes it automatically if used as context manager.
.. note::
In the connection form:
- 'host' equals the 'Account' (optional)
- 'login' equals the 'Username (or API Key)' (required)
- 'password' equals the 'Password' (required)
:return: an authorized cloudant session context manager object.
:rtype: cloudant
"""
conn = self.get_connection(self.cloudant_conn_id)
self._validate_connection(conn)
cloudant_session = cloudant(user=conn.login, passwd=conn.password, account=conn.host)
return cloudant_session
|
def get_conn(self):
"""
Opens a connection to the cloudant service and closes it automatically if used as context manager.
.. note::
In the connection form:
- 'host' equals the 'Account' (optional)
- 'login' equals the 'Username (or API Key)' (required)
- 'password' equals the 'Password' (required)
:return: an authorized cloudant session context manager object.
:rtype: cloudant
"""
conn = self.get_connection(self.cloudant_conn_id)
self._validate_connection(conn)
cloudant_session = cloudant(user=conn.login, passwd=conn.password, account=conn.host)
return cloudant_session
|
[
"Opens",
"a",
"connection",
"to",
"the",
"cloudant",
"service",
"and",
"closes",
"it",
"automatically",
"if",
"used",
"as",
"context",
"manager",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/cloudant_hook.py#L40-L59
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"conn",
"=",
"self",
".",
"get_connection",
"(",
"self",
".",
"cloudant_conn_id",
")",
"self",
".",
"_validate_connection",
"(",
"conn",
")",
"cloudant_session",
"=",
"cloudant",
"(",
"user",
"=",
"conn",
".",
"login",
",",
"passwd",
"=",
"conn",
".",
"password",
",",
"account",
"=",
"conn",
".",
"host",
")",
"return",
"cloudant_session"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
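Because get_conn returns the cloudant context-manager object, typical usage wraps it in a with block so the session is closed automatically. A hedged sketch (the connection id 'cloudant_default' and database name 'mydb' are assumptions):

from airflow.contrib.hooks.cloudant_hook import CloudantHook

hook = CloudantHook(cloudant_conn_id="cloudant_default")  # assumed conn id
with hook.get_conn() as client:   # session opened here, closed on exit
    db = client["mydb"]           # hypothetical database name
    for doc in db:
        print(doc["_id"])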
test
|
SlackWebhookOperator.execute
|
Call the SlackWebhookHook to post the provided Slack message
|
airflow/contrib/operators/slack_webhook_operator.py
|
def execute(self, context):
"""
Call the SlackWebhookHook to post the provided Slack message
"""
self.hook = SlackWebhookHook(
self.http_conn_id,
self.webhook_token,
self.message,
self.attachments,
self.channel,
self.username,
self.icon_emoji,
self.link_names,
self.proxy
)
self.hook.execute()
|
def execute(self, context):
"""
Call the SlackWebhookHook to post the provided Slack message
"""
self.hook = SlackWebhookHook(
self.http_conn_id,
self.webhook_token,
self.message,
self.attachments,
self.channel,
self.username,
self.icon_emoji,
self.link_names,
self.proxy
)
self.hook.execute()
|
[
"Call",
"the",
"SlackWebhookHook",
"to",
"post",
"the",
"provided",
"Slack",
"message"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/slack_webhook_operator.py#L84-L99
|
[
"def",
"execute",
"(",
"self",
",",
"context",
")",
":",
"self",
".",
"hook",
"=",
"SlackWebhookHook",
"(",
"self",
".",
"http_conn_id",
",",
"self",
".",
"webhook_token",
",",
"self",
".",
"message",
",",
"self",
".",
"attachments",
",",
"self",
".",
"channel",
",",
"self",
".",
"username",
",",
"self",
".",
"icon_emoji",
",",
"self",
".",
"link_names",
",",
"self",
".",
"proxy",
")",
"self",
".",
"hook",
".",
"execute",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
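In a DAG the operator is configured up front; when the task runs, execute(context) builds the SlackWebhookHook from those fields and posts the message. A sketch with hypothetical values for every argument:

from airflow.contrib.operators.slack_webhook_operator import SlackWebhookOperator

notify = SlackWebhookOperator(
    task_id="notify_slack",
    http_conn_id="slack_webhook",  # hypothetical Airflow connection id
    message="Pipeline finished.",
    channel="#data-eng",           # hypothetical channel
    username="airflow",
)
# notify.execute(context) then constructs the hook with these fields
# (webhook_token, attachments, icon_emoji, link_names, proxy keep their
# defaults here) and calls hook.execute() to post.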
test
|
GoogleCloudBaseHook._get_credentials
|
Returns the Credentials object for Google API
|
airflow/contrib/hooks/gcp_api_base_hook.py
|
def _get_credentials(self):
"""
Returns the Credentials object for Google API
"""
key_path = self._get_field('key_path', False)
keyfile_dict = self._get_field('keyfile_dict', False)
scope = self._get_field('scope', None)
if scope:
scopes = [s.strip() for s in scope.split(',')]
else:
scopes = _DEFAULT_SCOPES
if not key_path and not keyfile_dict:
self.log.info('Getting connection using `google.auth.default()` '
'since no key file is defined for hook.')
credentials, _ = google.auth.default(scopes=scopes)
elif key_path:
# Get credentials from a JSON file.
if key_path.endswith('.json'):
self.log.debug('Getting connection using JSON key file %s' % key_path)
credentials = (
google.oauth2.service_account.Credentials.from_service_account_file(
key_path, scopes=scopes)
)
elif key_path.endswith('.p12'):
raise AirflowException('Legacy P12 key file are not supported, '
'use a JSON key file.')
else:
raise AirflowException('Unrecognised extension for key file.')
else:
# Get credentials from JSON data provided in the UI.
try:
keyfile_dict = json.loads(keyfile_dict)
# Depending on how the JSON was formatted, it may contain
# escaped newlines. Convert those to actual newlines.
keyfile_dict['private_key'] = keyfile_dict['private_key'].replace(
'\\n', '\n')
credentials = (
google.oauth2.service_account.Credentials.from_service_account_info(
keyfile_dict, scopes=scopes)
)
except json.decoder.JSONDecodeError:
raise AirflowException('Invalid key JSON.')
return credentials.with_subject(self.delegate_to) \
if self.delegate_to else credentials
|
def _get_credentials(self):
"""
Returns the Credentials object for Google API
"""
key_path = self._get_field('key_path', False)
keyfile_dict = self._get_field('keyfile_dict', False)
scope = self._get_field('scope', None)
if scope:
scopes = [s.strip() for s in scope.split(',')]
else:
scopes = _DEFAULT_SCOPES
if not key_path and not keyfile_dict:
self.log.info('Getting connection using `google.auth.default()` '
'since no key file is defined for hook.')
credentials, _ = google.auth.default(scopes=scopes)
elif key_path:
# Get credentials from a JSON file.
if key_path.endswith('.json'):
self.log.debug('Getting connection using JSON key file %s' % key_path)
credentials = (
google.oauth2.service_account.Credentials.from_service_account_file(
key_path, scopes=scopes)
)
elif key_path.endswith('.p12'):
raise AirflowException('Legacy P12 key file are not supported, '
'use a JSON key file.')
else:
raise AirflowException('Unrecognised extension for key file.')
else:
# Get credentials from JSON data provided in the UI.
try:
keyfile_dict = json.loads(keyfile_dict)
# Depending on how the JSON was formatted, it may contain
# escaped newlines. Convert those to actual newlines.
keyfile_dict['private_key'] = keyfile_dict['private_key'].replace(
'\\n', '\n')
credentials = (
google.oauth2.service_account.Credentials.from_service_account_info(
keyfile_dict, scopes=scopes)
)
except json.decoder.JSONDecodeError:
raise AirflowException('Invalid key JSON.')
return credentials.with_subject(self.delegate_to) \
if self.delegate_to else credentials
|
[
"Returns",
"the",
"Credentials",
"object",
"for",
"Google",
"API"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_api_base_hook.py#L82-L129
|
[
"def",
"_get_credentials",
"(",
"self",
")",
":",
"key_path",
"=",
"self",
".",
"_get_field",
"(",
"'key_path'",
",",
"False",
")",
"keyfile_dict",
"=",
"self",
".",
"_get_field",
"(",
"'keyfile_dict'",
",",
"False",
")",
"scope",
"=",
"self",
".",
"_get_field",
"(",
"'scope'",
",",
"None",
")",
"if",
"scope",
":",
"scopes",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"scope",
".",
"split",
"(",
"','",
")",
"]",
"else",
":",
"scopes",
"=",
"_DEFAULT_SCOPES",
"if",
"not",
"key_path",
"and",
"not",
"keyfile_dict",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Getting connection using `google.auth.default()` '",
"'since no key file is defined for hook.'",
")",
"credentials",
",",
"_",
"=",
"google",
".",
"auth",
".",
"default",
"(",
"scopes",
"=",
"scopes",
")",
"elif",
"key_path",
":",
"# Get credentials from a JSON file.",
"if",
"key_path",
".",
"endswith",
"(",
"'.json'",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Getting connection using JSON key file %s'",
"%",
"key_path",
")",
"credentials",
"=",
"(",
"google",
".",
"oauth2",
".",
"service_account",
".",
"Credentials",
".",
"from_service_account_file",
"(",
"key_path",
",",
"scopes",
"=",
"scopes",
")",
")",
"elif",
"key_path",
".",
"endswith",
"(",
"'.p12'",
")",
":",
"raise",
"AirflowException",
"(",
"'Legacy P12 key file are not supported, '",
"'use a JSON key file.'",
")",
"else",
":",
"raise",
"AirflowException",
"(",
"'Unrecognised extension for key file.'",
")",
"else",
":",
"# Get credentials from JSON data provided in the UI.",
"try",
":",
"keyfile_dict",
"=",
"json",
".",
"loads",
"(",
"keyfile_dict",
")",
"# Depending on how the JSON was formatted, it may contain",
"# escaped newlines. Convert those to actual newlines.",
"keyfile_dict",
"[",
"'private_key'",
"]",
"=",
"keyfile_dict",
"[",
"'private_key'",
"]",
".",
"replace",
"(",
"'\\\\n'",
",",
"'\\n'",
")",
"credentials",
"=",
"(",
"google",
".",
"oauth2",
".",
"service_account",
".",
"Credentials",
".",
"from_service_account_info",
"(",
"keyfile_dict",
",",
"scopes",
"=",
"scopes",
")",
")",
"except",
"json",
".",
"decoder",
".",
"JSONDecodeError",
":",
"raise",
"AirflowException",
"(",
"'Invalid key JSON.'",
")",
"return",
"credentials",
".",
"with_subject",
"(",
"self",
".",
"delegate_to",
")",
"if",
"self",
".",
"delegate_to",
"else",
"credentials"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
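The three _get_field lookups at the top correspond to prefixed keys in the extra JSON of a google_cloud_platform connection. A sketch of what an admin might store there (all values are assumptions; normally at most one of key_path or keyfile_dict is set):

import json

# Hypothetical `extra` field of a google_cloud_platform connection.
extras = {
    "extra__google_cloud_platform__key_path": "/opt/keys/svc-account.json",
    "extra__google_cloud_platform__scope":
        "https://www.googleapis.com/auth/cloud-platform",
    # Alternative to key_path: paste the service-account JSON inline.
    # "extra__google_cloud_platform__keyfile_dict": "{...}",
}
print(json.dumps(extras, indent=2))

With neither key set, the code above falls back to google.auth.default(), i.e. application-default credentials.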
test
|
GoogleCloudBaseHook._authorize
|
Returns an authorized HTTP object to be used to build a Google cloud
service hook connection.
|
airflow/contrib/hooks/gcp_api_base_hook.py
|
def _authorize(self):
"""
Returns an authorized HTTP object to be used to build a Google cloud
service hook connection.
"""
credentials = self._get_credentials()
http = httplib2.Http()
authed_http = google_auth_httplib2.AuthorizedHttp(
credentials, http=http)
return authed_http
|
def _authorize(self):
"""
Returns an authorized HTTP object to be used to build a Google cloud
service hook connection.
"""
credentials = self._get_credentials()
http = httplib2.Http()
authed_http = google_auth_httplib2.AuthorizedHttp(
credentials, http=http)
return authed_http
|
[
"Returns",
"an",
"authorized",
"HTTP",
"object",
"to",
"be",
"used",
"to",
"build",
"a",
"Google",
"cloud",
"service",
"hook",
"connection",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_api_base_hook.py#L137-L146
|
[
"def",
"_authorize",
"(",
"self",
")",
":",
"credentials",
"=",
"self",
".",
"_get_credentials",
"(",
")",
"http",
"=",
"httplib2",
".",
"Http",
"(",
")",
"authed_http",
"=",
"google_auth_httplib2",
".",
"AuthorizedHttp",
"(",
"credentials",
",",
"http",
"=",
"http",
")",
"return",
"authed_http"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
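The authorized Http object is what hooks hand to googleapiclient when constructing a service client. A hedged sketch (service name/version, connection id, and project are assumptions; _authorize is a private method, called directly here only for illustration):

from googleapiclient.discovery import build  # google-api-python-client
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook

hook = GoogleCloudBaseHook(gcp_conn_id="google_cloud_default")
service = build("storage", "v1", http=hook._authorize(), cache_discovery=False)
buckets = service.buckets().list(project="my-project").execute()  # hypothetical project
print(buckets.get("items", []))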
test
|
GoogleCloudBaseHook._get_field
|
Fetches a field from extras, and returns it. This is some Airflow
magic. The google_cloud_platform hook type adds custom UI elements
to the hook page, which allow admins to specify service_account,
key_path, etc. They get formatted as shown below.
|
airflow/contrib/hooks/gcp_api_base_hook.py
|
def _get_field(self, f, default=None):
"""
Fetches a field from extras, and returns it. This is some Airflow
magic. The google_cloud_platform hook type adds custom UI elements
to the hook page, which allow admins to specify service_account,
key_path, etc. They get formatted as shown below.
"""
long_f = 'extra__google_cloud_platform__{}'.format(f)
if hasattr(self, 'extras') and long_f in self.extras:
return self.extras[long_f]
else:
return default
|
def _get_field(self, f, default=None):
"""
Fetches a field from extras, and returns it. This is some Airflow
magic. The google_cloud_platform hook type adds custom UI elements
to the hook page, which allow admins to specify service_account,
key_path, etc. They get formatted as shown below.
"""
long_f = 'extra__google_cloud_platform__{}'.format(f)
if hasattr(self, 'extras') and long_f in self.extras:
return self.extras[long_f]
else:
return default
|
[
"Fetches",
"a",
"field",
"from",
"extras",
"and",
"returns",
"it",
".",
"This",
"is",
"some",
"Airflow",
"magic",
".",
"The",
"google_cloud_platform",
"hook",
"type",
"adds",
"custom",
"UI",
"elements",
"to",
"the",
"hook",
"page",
"which",
"allow",
"admins",
"to",
"specify",
"service_account",
"key_path",
"etc",
".",
"They",
"get",
"formatted",
"as",
"shown",
"below",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_api_base_hook.py#L148-L159
|
[
"def",
"_get_field",
"(",
"self",
",",
"f",
",",
"default",
"=",
"None",
")",
":",
"long_f",
"=",
"'extra__google_cloud_platform__{}'",
".",
"format",
"(",
"f",
")",
"if",
"hasattr",
"(",
"self",
",",
"'extras'",
")",
"and",
"long_f",
"in",
"self",
".",
"extras",
":",
"return",
"self",
".",
"extras",
"[",
"long_f",
"]",
"else",
":",
"return",
"default"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
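A standalone sketch of the key-prefixing lookup (the extras dict stands in for the connection's parsed extra JSON; the 'project' value is hypothetical):

def get_field(extras, f, default=None):
    # Same lookup as _get_field: the UI stores fields under a long prefix.
    long_f = "extra__google_cloud_platform__{}".format(f)
    return extras.get(long_f, default)

extras = {"extra__google_cloud_platform__project": "my-project"}
print(get_field(extras, "project"))           # my-project
print(get_field(extras, "key_path", False))   # False (the default)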
test
|
GoogleCloudBaseHook.catch_http_exception
|
Function decorator that intercepts HTTP Errors and raises AirflowException
with a more informative message.
|
airflow/contrib/hooks/gcp_api_base_hook.py
|
def catch_http_exception(func):
"""
Function decorator that intercepts HTTP Errors and raises AirflowException
with more informative message.
"""
@functools.wraps(func)
def wrapper_decorator(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except GoogleAPICallError as e:
if isinstance(e, AlreadyExists):
raise e
else:
self.log.error('The request failed:\n%s', str(e))
raise AirflowException(e)
except RetryError as e:
self.log.error('The request failed due to a retryable error and retry attempts failed.')
raise AirflowException(e)
except ValueError as e:
self.log.error('The request failed, the parameters are invalid.')
raise AirflowException(e)
except HttpError as e:
self.log.error('The request failed:\n%s', str(e))
raise AirflowException(e)
return wrapper_decorator
|
def catch_http_exception(func):
"""
Function decorator that intercepts HTTP Errors and raises AirflowException
with more informative message.
"""
@functools.wraps(func)
def wrapper_decorator(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except GoogleAPICallError as e:
if isinstance(e, AlreadyExists):
raise e
else:
self.log.error('The request failed:\n%s', str(e))
raise AirflowException(e)
except RetryError as e:
self.log.error('The request failed due to a retryable error and retry attempts failed.')
raise AirflowException(e)
except ValueError as e:
self.log.error('The request failed, the parameters are invalid.')
raise AirflowException(e)
except HttpError as e:
self.log.error('The request failed:\n%s', str(e))
raise AirflowException(e)
return wrapper_decorator
|
[
"Function",
"decorator",
"that",
"intercepts",
"HTTP",
"Errors",
"and",
"raises",
"AirflowException",
"with",
"more",
"informative",
"message",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_api_base_hook.py#L166-L192
|
[
"def",
"catch_http_exception",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper_decorator",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"GoogleAPICallError",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"AlreadyExists",
")",
":",
"raise",
"e",
"else",
":",
"self",
".",
"log",
".",
"error",
"(",
"'The request failed:\\n%s'",
",",
"str",
"(",
"e",
")",
")",
"raise",
"AirflowException",
"(",
"e",
")",
"except",
"RetryError",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"'The request failed due to a retryable error and retry attempts failed.'",
")",
"raise",
"AirflowException",
"(",
"e",
")",
"except",
"ValueError",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"'The request failed, the parameters are invalid.'",
")",
"raise",
"AirflowException",
"(",
"e",
")",
"except",
"HttpError",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"'The request failed:\\n%s'",
",",
"str",
"(",
"e",
")",
")",
"raise",
"AirflowException",
"(",
"e",
")",
"return",
"wrapper_decorator"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
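catch_http_exception is a standard wrap-and-translate decorator. A self-contained analogue showing the same shape with stdlib exceptions (the exception types are illustrative stand-ins for the GCP ones above):

import functools

def catch_and_translate(func):
    @functools.wraps(func)  # preserves __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ValueError as e:  # stand-in for GoogleAPICallError / HttpError
            raise RuntimeError("The request failed: {}".format(e))
    return wrapper

@catch_and_translate
def flaky(x):
    if x < 0:
        raise ValueError("negative input")
    return x

print(flaky(1))        # 1
print(flaky.__name__)  # 'flaky' -- thanks to functools.wraps
# flaky(-1) raises RuntimeError('The request failed: negative input')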
test
|
GoogleCloudBaseHook.fallback_to_default_project_id
|
Decorator that provides fallback for Google Cloud Platform project id. If
the project is None it will be replaced with the project_id from the
service account the Hook is authenticated with. Project id can be specified
either via the project_id kwarg or via the first positional argument.
:param func: function to wrap
:return: result of the function call
|
airflow/contrib/hooks/gcp_api_base_hook.py
|
def fallback_to_default_project_id(func):
"""
Decorator that provides fallback for Google Cloud Platform project id. If
the project is None it will be replaced with the project_id from the
service account the Hook is authenticated with. Project id can be specified
either via project_id kwarg or via first parameter in positional args.
:param func: function to wrap
:return: result of the function call
"""
@functools.wraps(func)
def inner_wrapper(self, *args, **kwargs):
if len(args) > 0:
raise AirflowException(
"You must use keyword arguments in this methods rather than"
" positional")
if 'project_id' in kwargs:
kwargs['project_id'] = self._get_project_id(kwargs['project_id'])
else:
kwargs['project_id'] = self._get_project_id(None)
if not kwargs['project_id']:
raise AirflowException("The project id must be passed either as "
"keyword project_id parameter or as project_id extra "
"in GCP connection definition. Both are not set!")
return func(self, *args, **kwargs)
return inner_wrapper
|
def fallback_to_default_project_id(func):
"""
Decorator that provides fallback for Google Cloud Platform project id. If
the project is None it will be replaced with the project_id from the
service account the Hook is authenticated with. Project id can be specified
either via project_id kwarg or via first parameter in positional args.
:param func: function to wrap
:return: result of the function call
"""
@functools.wraps(func)
def inner_wrapper(self, *args, **kwargs):
if len(args) > 0:
raise AirflowException(
"You must use keyword arguments in this methods rather than"
" positional")
if 'project_id' in kwargs:
kwargs['project_id'] = self._get_project_id(kwargs['project_id'])
else:
kwargs['project_id'] = self._get_project_id(None)
if not kwargs['project_id']:
raise AirflowException("The project id must be passed either as "
"keyword project_id parameter or as project_id extra "
"in GCP connection definition. Both are not set!")
return func(self, *args, **kwargs)
return inner_wrapper
|
[
"Decorator",
"that",
"provides",
"fallback",
"for",
"Google",
"Cloud",
"Platform",
"project",
"id",
".",
"If",
"the",
"project",
"is",
"None",
"it",
"will",
"be",
"replaced",
"with",
"the",
"project_id",
"from",
"the",
"service",
"account",
"the",
"Hook",
"is",
"authenticated",
"with",
".",
"Project",
"id",
"can",
"be",
"specified",
"either",
"via",
"project_id",
"kwarg",
"or",
"via",
"first",
"parameter",
"in",
"positional",
"args",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_api_base_hook.py#L195-L220
|
[
"def",
"fallback_to_default_project_id",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"inner_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"0",
":",
"raise",
"AirflowException",
"(",
"\"You must use keyword arguments in this methods rather than\"",
"\" positional\"",
")",
"if",
"'project_id'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'project_id'",
"]",
"=",
"self",
".",
"_get_project_id",
"(",
"kwargs",
"[",
"'project_id'",
"]",
")",
"else",
":",
"kwargs",
"[",
"'project_id'",
"]",
"=",
"self",
".",
"_get_project_id",
"(",
"None",
")",
"if",
"not",
"kwargs",
"[",
"'project_id'",
"]",
":",
"raise",
"AirflowException",
"(",
"\"The project id must be passed either as \"",
"\"keyword project_id parameter or as project_id extra \"",
"\"in GCP connection definition. Both are not set!\"",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"inner_wrapper"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
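The same idea in miniature: forbid positional arguments, then fill project_id from the hook when the caller omits it. A self-contained sketch (the stub hook and default project id are assumptions; the real decorator resolves the id via self._get_project_id):

import functools

def fallback_to_default_project_id(func):
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        if args:
            raise TypeError("use keyword arguments only")
        kwargs["project_id"] = kwargs.get("project_id") or self.default_project_id
        if not kwargs["project_id"]:
            raise ValueError("project_id set neither as kwarg nor on the hook")
        return func(self, **kwargs)
    return inner

class StubHook:
    default_project_id = "my-default-project"  # hypothetical

    @fallback_to_default_project_id
    def create_bucket(self, project_id=None, name=None):
        return project_id, name

print(StubHook().create_bucket(name="b"))  # ('my-default-project', 'b')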
test
|
State.unfinished
|
A list of states indicating that a task either has not completed
a run or has not even started.
|
airflow/utils/state.py
|
def unfinished(cls):
"""
A list of states indicating that a task either has not completed
a run or has not even started.
"""
return [
cls.NONE,
cls.SCHEDULED,
cls.QUEUED,
cls.RUNNING,
cls.SHUTDOWN,
cls.UP_FOR_RETRY,
cls.UP_FOR_RESCHEDULE
]
|
def unfinished(cls):
"""
A list of states indicating that a task either has not completed
a run or has not even started.
"""
return [
cls.NONE,
cls.SCHEDULED,
cls.QUEUED,
cls.RUNNING,
cls.SHUTDOWN,
cls.UP_FOR_RETRY,
cls.UP_FOR_RESCHEDULE
]
|
[
"A",
"list",
"of",
"states",
"indicating",
"that",
"a",
"task",
"either",
"has",
"not",
"completed",
"a",
"run",
"or",
"has",
"not",
"even",
"started",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/state.py#L107-L120
|
[
"def",
"unfinished",
"(",
"cls",
")",
":",
"return",
"[",
"cls",
".",
"NONE",
",",
"cls",
".",
"SCHEDULED",
",",
"cls",
".",
"QUEUED",
",",
"cls",
".",
"RUNNING",
",",
"cls",
".",
"SHUTDOWN",
",",
"cls",
".",
"UP_FOR_RETRY",
",",
"cls",
".",
"UP_FOR_RESCHEDULE",
"]"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
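Typical use is a membership test when triaging task instances; a short hedged sketch assuming unfinished() is exposed as a classmethod on State (as the cls parameter above suggests):

from airflow.utils.state import State

state = "queued"  # hypothetical task-instance state
if state in State.unfinished():
    print(state, "still needs scheduler attention")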
test
|
delete_dag
|
:param dag_id: the dag_id of the DAG to delete
:type dag_id: str
:param keep_records_in_log: whether to keep records of the given dag_id
in the Log table in the backend database (for reasons like auditing).
The default value is True.
:type keep_records_in_log: bool
|
airflow/api/common/experimental/delete_dag.py
|
def delete_dag(dag_id, keep_records_in_log=True, session=None):
"""
:param dag_id: the dag_id of the DAG to delete
:type dag_id: str
:param keep_records_in_log: whether keep records of the given dag_id
in the Log table in the backend database (for reasons like auditing).
The default value is True.
:type keep_records_in_log: bool
"""
DM = models.DagModel
dag = session.query(DM).filter(DM.dag_id == dag_id).first()
if dag is None:
raise DagNotFound("Dag id {} not found".format(dag_id))
if dag.fileloc and os.path.exists(dag.fileloc):
raise DagFileExists("Dag id {} is still in DagBag. "
"Remove the DAG file first: {}".format(dag_id, dag.fileloc))
count = 0
# noinspection PyUnresolvedReferences,PyProtectedMember
for m in models.base.Base._decl_class_registry.values():
if hasattr(m, "dag_id"):
if keep_records_in_log and m.__name__ == 'Log':
continue
cond = or_(m.dag_id == dag_id, m.dag_id.like(dag_id + ".%"))
count += session.query(m).filter(cond).delete(synchronize_session='fetch')
if dag.is_subdag:
p, c = dag_id.rsplit(".", 1)
for m in models.DagRun, TaskFail, models.TaskInstance:
count += session.query(m).filter(m.dag_id == p, m.task_id == c).delete()
return count
|
def delete_dag(dag_id, keep_records_in_log=True, session=None):
"""
:param dag_id: the dag_id of the DAG to delete
:type dag_id: str
:param keep_records_in_log: whether keep records of the given dag_id
in the Log table in the backend database (for reasons like auditing).
The default value is True.
:type keep_records_in_log: bool
"""
DM = models.DagModel
dag = session.query(DM).filter(DM.dag_id == dag_id).first()
if dag is None:
raise DagNotFound("Dag id {} not found".format(dag_id))
if dag.fileloc and os.path.exists(dag.fileloc):
raise DagFileExists("Dag id {} is still in DagBag. "
"Remove the DAG file first: {}".format(dag_id, dag.fileloc))
count = 0
# noinspection PyUnresolvedReferences,PyProtectedMember
for m in models.base.Base._decl_class_registry.values():
if hasattr(m, "dag_id"):
if keep_records_in_log and m.__name__ == 'Log':
continue
cond = or_(m.dag_id == dag_id, m.dag_id.like(dag_id + ".%"))
count += session.query(m).filter(cond).delete(synchronize_session='fetch')
if dag.is_subdag:
p, c = dag_id.rsplit(".", 1)
for m in models.DagRun, TaskFail, models.TaskInstance:
count += session.query(m).filter(m.dag_id == p, m.task_id == c).delete()
return count
|
[
":",
"param",
"dag_id",
":",
"the",
"dag_id",
"of",
"the",
"DAG",
"to",
"delete",
":",
"type",
"dag_id",
":",
"str",
":",
"param",
"keep_records_in_log",
":",
"whether",
"keep",
"records",
"of",
"the",
"given",
"dag_id",
"in",
"the",
"Log",
"table",
"in",
"the",
"backend",
"database",
"(",
"for",
"reasons",
"like",
"auditing",
")",
".",
"The",
"default",
"value",
"is",
"True",
".",
":",
"type",
"keep_records_in_log",
":",
"bool"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/delete_dag.py#L31-L64
|
[
"def",
"delete_dag",
"(",
"dag_id",
",",
"keep_records_in_log",
"=",
"True",
",",
"session",
"=",
"None",
")",
":",
"DM",
"=",
"models",
".",
"DagModel",
"dag",
"=",
"session",
".",
"query",
"(",
"DM",
")",
".",
"filter",
"(",
"DM",
".",
"dag_id",
"==",
"dag_id",
")",
".",
"first",
"(",
")",
"if",
"dag",
"is",
"None",
":",
"raise",
"DagNotFound",
"(",
"\"Dag id {} not found\"",
".",
"format",
"(",
"dag_id",
")",
")",
"if",
"dag",
".",
"fileloc",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"dag",
".",
"fileloc",
")",
":",
"raise",
"DagFileExists",
"(",
"\"Dag id {} is still in DagBag. \"",
"\"Remove the DAG file first: {}\"",
".",
"format",
"(",
"dag_id",
",",
"dag",
".",
"fileloc",
")",
")",
"count",
"=",
"0",
"# noinspection PyUnresolvedReferences,PyProtectedMember",
"for",
"m",
"in",
"models",
".",
"base",
".",
"Base",
".",
"_decl_class_registry",
".",
"values",
"(",
")",
":",
"if",
"hasattr",
"(",
"m",
",",
"\"dag_id\"",
")",
":",
"if",
"keep_records_in_log",
"and",
"m",
".",
"__name__",
"==",
"'Log'",
":",
"continue",
"cond",
"=",
"or_",
"(",
"m",
".",
"dag_id",
"==",
"dag_id",
",",
"m",
".",
"dag_id",
".",
"like",
"(",
"dag_id",
"+",
"\".%\"",
")",
")",
"count",
"+=",
"session",
".",
"query",
"(",
"m",
")",
".",
"filter",
"(",
"cond",
")",
".",
"delete",
"(",
"synchronize_session",
"=",
"'fetch'",
")",
"if",
"dag",
".",
"is_subdag",
":",
"p",
",",
"c",
"=",
"dag_id",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"for",
"m",
"in",
"models",
".",
"DagRun",
",",
"TaskFail",
",",
"models",
".",
"TaskInstance",
":",
"count",
"+=",
"session",
".",
"query",
"(",
"m",
")",
".",
"filter",
"(",
"m",
".",
"dag_id",
"==",
"p",
",",
"m",
".",
"task_id",
"==",
"c",
")",
".",
"delete",
"(",
")",
"return",
"count"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
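Call-site sketch for the experimental API (the DAG id is hypothetical; the session argument can be omitted because Airflow injects one via its provide_session decorator):

from airflow.api.common.experimental.delete_dag import delete_dag

# Raises DagFileExists unless the DAG file itself was removed first,
# and DagNotFound if the id is unknown.
count = delete_dag("my_old_dag", keep_records_in_log=True)
print("deleted %d metadata rows" % count)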
test
|
SparkSqlHook._prepare_command
|
Construct the spark-sql command to execute. Verbose output is enabled
by default.
:param cmd: command to append to the spark-sql command
:type cmd: str
:return: full command to be executed
|
airflow/contrib/hooks/spark_sql_hook.py
|
def _prepare_command(self, cmd):
"""
Construct the spark-sql command to execute. Verbose output is enabled
as default.
:param cmd: command to append to the spark-sql command
:type cmd: str
:return: full command to be executed
"""
connection_cmd = ["spark-sql"]
if self._conf:
for conf_el in self._conf.split(","):
connection_cmd += ["--conf", conf_el]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._sql:
sql = self._sql.strip()
if sql.endswith(".sql") or sql.endswith(".hql"):
connection_cmd += ["-f", sql]
else:
connection_cmd += ["-e", sql]
if self._master:
connection_cmd += ["--master", self._master]
if self._name:
connection_cmd += ["--name", self._name]
if self._verbose:
connection_cmd += ["--verbose"]
if self._yarn_queue:
connection_cmd += ["--queue", self._yarn_queue]
connection_cmd += cmd
self.log.debug("Spark-Sql cmd: %s", connection_cmd)
return connection_cmd
|
def _prepare_command(self, cmd):
"""
Construct the spark-sql command to execute. Verbose output is enabled
as default.
:param cmd: command to append to the spark-sql command
:type cmd: str
:return: full command to be executed
"""
connection_cmd = ["spark-sql"]
if self._conf:
for conf_el in self._conf.split(","):
connection_cmd += ["--conf", conf_el]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._sql:
sql = self._sql.strip()
if sql.endswith(".sql") or sql.endswith(".hql"):
connection_cmd += ["-f", sql]
else:
connection_cmd += ["-e", sql]
if self._master:
connection_cmd += ["--master", self._master]
if self._name:
connection_cmd += ["--name", self._name]
if self._verbose:
connection_cmd += ["--verbose"]
if self._yarn_queue:
connection_cmd += ["--queue", self._yarn_queue]
connection_cmd += cmd
self.log.debug("Spark-Sql cmd: %s", connection_cmd)
return connection_cmd
|
[
"Construct",
"the",
"spark",
"-",
"sql",
"command",
"to",
"execute",
".",
"Verbose",
"output",
"is",
"enabled",
"as",
"default",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_sql_hook.py#L91-L134
|
[
"def",
"_prepare_command",
"(",
"self",
",",
"cmd",
")",
":",
"connection_cmd",
"=",
"[",
"\"spark-sql\"",
"]",
"if",
"self",
".",
"_conf",
":",
"for",
"conf_el",
"in",
"self",
".",
"_conf",
".",
"split",
"(",
"\",\"",
")",
":",
"connection_cmd",
"+=",
"[",
"\"--conf\"",
",",
"conf_el",
"]",
"if",
"self",
".",
"_total_executor_cores",
":",
"connection_cmd",
"+=",
"[",
"\"--total-executor-cores\"",
",",
"str",
"(",
"self",
".",
"_total_executor_cores",
")",
"]",
"if",
"self",
".",
"_executor_cores",
":",
"connection_cmd",
"+=",
"[",
"\"--executor-cores\"",
",",
"str",
"(",
"self",
".",
"_executor_cores",
")",
"]",
"if",
"self",
".",
"_executor_memory",
":",
"connection_cmd",
"+=",
"[",
"\"--executor-memory\"",
",",
"self",
".",
"_executor_memory",
"]",
"if",
"self",
".",
"_keytab",
":",
"connection_cmd",
"+=",
"[",
"\"--keytab\"",
",",
"self",
".",
"_keytab",
"]",
"if",
"self",
".",
"_principal",
":",
"connection_cmd",
"+=",
"[",
"\"--principal\"",
",",
"self",
".",
"_principal",
"]",
"if",
"self",
".",
"_num_executors",
":",
"connection_cmd",
"+=",
"[",
"\"--num-executors\"",
",",
"str",
"(",
"self",
".",
"_num_executors",
")",
"]",
"if",
"self",
".",
"_sql",
":",
"sql",
"=",
"self",
".",
"_sql",
".",
"strip",
"(",
")",
"if",
"sql",
".",
"endswith",
"(",
"\".sql\"",
")",
"or",
"sql",
".",
"endswith",
"(",
"\".hql\"",
")",
":",
"connection_cmd",
"+=",
"[",
"\"-f\"",
",",
"sql",
"]",
"else",
":",
"connection_cmd",
"+=",
"[",
"\"-e\"",
",",
"sql",
"]",
"if",
"self",
".",
"_master",
":",
"connection_cmd",
"+=",
"[",
"\"--master\"",
",",
"self",
".",
"_master",
"]",
"if",
"self",
".",
"_name",
":",
"connection_cmd",
"+=",
"[",
"\"--name\"",
",",
"self",
".",
"_name",
"]",
"if",
"self",
".",
"_verbose",
":",
"connection_cmd",
"+=",
"[",
"\"--verbose\"",
"]",
"if",
"self",
".",
"_yarn_queue",
":",
"connection_cmd",
"+=",
"[",
"\"--queue\"",
",",
"self",
".",
"_yarn_queue",
"]",
"connection_cmd",
"+=",
"cmd",
"self",
".",
"log",
".",
"debug",
"(",
"\"Spark-Sql cmd: %s\"",
",",
"connection_cmd",
")",
"return",
"connection_cmd"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
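Since _prepare_command is pure list assembly, its output is easy to preview; the stub below hard-codes the branch outcomes for one hypothetical configuration to show the shape of the resulting argv (all field values are assumptions):

class StubSparkSqlHook:
    # Hypothetical settings; unset flags are simply skipped by the real method.
    _conf = "spark.ui.enabled=false"
    _executor_memory = "2g"
    _sql = "SELECT 1"
    _master = "yarn"
    _name = "probe"
    _yarn_queue = "default"

    def _prepare_command(self, cmd):
        # Inline SQL (no .sql/.hql suffix) takes the "-e" branch above.
        argv = ["spark-sql",
                "--conf", self._conf,
                "--executor-memory", self._executor_memory,
                "-e", self._sql.strip(),
                "--master", self._master,
                "--name", self._name,
                "--verbose",
                "--queue", self._yarn_queue]
        return argv + list(cmd)

print(StubSparkSqlHook()._prepare_command([]))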
test
|
SparkSqlHook.run_query
|
Remote Popen (actually execute the Spark-sql query)
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
|
airflow/contrib/hooks/spark_sql_hook.py
|
def run_query(self, cmd="", **kwargs):
"""
Remote Popen (actually execute the Spark-sql query)
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_sql_cmd = self._prepare_command(cmd)
self._sp = subprocess.Popen(spark_sql_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs)
for line in iter(self._sp.stdout.readline, ''):
self.log.info(line)
returncode = self._sp.wait()
if returncode:
raise AirflowException(
"Cannot execute {} on {}. Process exit code: {}.".format(
cmd, self._conn.host, returncode
)
)
|
def run_query(self, cmd="", **kwargs):
"""
Remote Popen (actually execute the Spark-sql query)
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_sql_cmd = self._prepare_command(cmd)
self._sp = subprocess.Popen(spark_sql_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs)
for line in iter(self._sp.stdout.readline, ''):
self.log.info(line)
returncode = self._sp.wait()
if returncode:
raise AirflowException(
"Cannot execute {} on {}. Process exit code: {}.".format(
cmd, self._conn.host, returncode
)
)
|
[
"Remote",
"Popen",
"(",
"actually",
"execute",
"the",
"Spark",
"-",
"sql",
"query",
")"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_sql_hook.py#L136-L159
|
[
"def",
"run_query",
"(",
"self",
",",
"cmd",
"=",
"\"\"",
",",
"*",
"*",
"kwargs",
")",
":",
"spark_sql_cmd",
"=",
"self",
".",
"_prepare_command",
"(",
"cmd",
")",
"self",
".",
"_sp",
"=",
"subprocess",
".",
"Popen",
"(",
"spark_sql_cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"*",
"*",
"kwargs",
")",
"for",
"line",
"in",
"iter",
"(",
"self",
".",
"_sp",
".",
"stdout",
".",
"readline",
",",
"''",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"line",
")",
"returncode",
"=",
"self",
".",
"_sp",
".",
"wait",
"(",
")",
"if",
"returncode",
":",
"raise",
"AirflowException",
"(",
"\"Cannot execute {} on {}. Process exit code: {}.\"",
".",
"format",
"(",
"cmd",
",",
"self",
".",
"_conn",
".",
"host",
",",
"returncode",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
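End-to-end usage pairs the two methods: configure the hook, then run_query() assembles the argv via _prepare_command, launches spark-sql, streams its merged stdout/stderr into the log, and raises on a non-zero exit code. A sketch with hypothetical arguments (requires spark-sql on PATH):

from airflow.contrib.hooks.spark_sql_hook import SparkSqlHook

hook = SparkSqlHook(
    sql="SELECT count(*) FROM my_table",  # hypothetical table
    conn_id="spark_sql_default",
    name="row_count_probe",
)
hook.run_query()  # AirflowException if the process exits non-zero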
test
|
vgg11_bn
|
VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
torchvision/models/vgg.py
|
def vgg11_bn(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
return model
|
def vgg11_bn(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
return model
|
[
"VGG",
"11",
"-",
"layer",
"model",
"(",
"configuration",
"A",
")",
"with",
"batch",
"normalization"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/vgg.py#L100-L111
|
[
"def",
"vgg11_bn",
"(",
"pretrained",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pretrained",
":",
"kwargs",
"[",
"'init_weights'",
"]",
"=",
"False",
"model",
"=",
"VGG",
"(",
"make_layers",
"(",
"cfg",
"[",
"'A'",
"]",
",",
"batch_norm",
"=",
"True",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"pretrained",
":",
"model",
".",
"load_state_dict",
"(",
"model_zoo",
".",
"load_url",
"(",
"model_urls",
"[",
"'vgg11_bn'",
"]",
")",
")",
"return",
"model"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
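The torchvision constructors in this and the following records (vgg13, alexnet, densenet121) all share one pattern: build the architecture, then optionally load ImageNet weights from the model zoo. A usage sketch (downloads weights on first call, so network access is assumed):

import torch
import torchvision.models as models

model = models.vgg11_bn(pretrained=True)  # fetches weights into the local cache
model.eval()  # inference mode: disables dropout and BN running-stat updates
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # one fake 224x224 RGB image
print(logits.shape)  # torch.Size([1, 1000]) -- ImageNet's 1000 classes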
test
|
vgg13
|
VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
torchvision/models/vgg.py
|
def vgg13(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['B']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
return model
|
def vgg13(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['B']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
return model
|
[
"VGG",
"13",
"-",
"layer",
"model",
"(",
"configuration",
"B",
")"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/vgg.py#L114-L125
|
[
"def",
"vgg13",
"(",
"pretrained",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pretrained",
":",
"kwargs",
"[",
"'init_weights'",
"]",
"=",
"False",
"model",
"=",
"VGG",
"(",
"make_layers",
"(",
"cfg",
"[",
"'B'",
"]",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"pretrained",
":",
"model",
".",
"load_state_dict",
"(",
"model_zoo",
".",
"load_url",
"(",
"model_urls",
"[",
"'vgg13'",
"]",
")",
")",
"return",
"model"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
alexnet
|
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
torchvision/models/alexnet.py
|
def alexnet(pretrained=False, **kwargs):
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = AlexNet(**kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
return model
|
def alexnet(pretrained=False, **kwargs):
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = AlexNet(**kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
return model
|
[
"r",
"AlexNet",
"model",
"architecture",
"from",
"the",
"One",
"weird",
"trick",
"...",
"<https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1404",
".",
"5997",
">",
"_",
"paper",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/alexnet.py#L51-L61
|
[
"def",
"alexnet",
"(",
"pretrained",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"model",
"=",
"AlexNet",
"(",
"*",
"*",
"kwargs",
")",
"if",
"pretrained",
":",
"model",
".",
"load_state_dict",
"(",
"model_zoo",
".",
"load_url",
"(",
"model_urls",
"[",
"'alexnet'",
"]",
")",
")",
"return",
"model"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
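A hedged sketch of the alexnet entry point; note that, unlike the VGG constructors above, no init_weights kwarg is touched:

    import torch
    from torchvision.models import alexnet

    model = alexnet(pretrained=False)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(2, 3, 224, 224))  # a hypothetical batch of two images
    print(out.shape)                               # torch.Size([2, 1000])
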
test
|
densenet121
|
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
torchvision/models/densenet.py
|
def densenet121(pretrained=False, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
**kwargs)
if pretrained:
_load_state_dict(model, model_urls['densenet121'])
return model
|
def densenet121(pretrained=False, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
**kwargs)
if pretrained:
_load_state_dict(model, model_urls['densenet121'])
return model
|
[
"r",
"Densenet",
"-",
"121",
"model",
"from",
"Densely",
"Connected",
"Convolutional",
"Networks",
"<https",
":",
"//",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1608",
".",
"06993",
".",
"pdf",
">",
"_"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/densenet.py#L137-L148
|
[
"def",
"densenet121",
"(",
"pretrained",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"model",
"=",
"DenseNet",
"(",
"num_init_features",
"=",
"64",
",",
"growth_rate",
"=",
"32",
",",
"block_config",
"=",
"(",
"6",
",",
"12",
",",
"24",
",",
"16",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"pretrained",
":",
"_load_state_dict",
"(",
"model",
",",
"model_urls",
"[",
"'densenet121'",
"]",
")",
"return",
"model"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
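A hedged sketch for densenet121; num_classes is a DenseNet keyword assumed here to pass through **kwargs:

    from torchvision.models import densenet121

    # block_config=(6, 12, 24, 16) is what makes this the 121-layer variant
    model = densenet121(pretrained=False, num_classes=10)
    print(model.classifier)  # the final Linear layer, reshaped to 10 output classes
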
test
|
to_tensor
|
Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
|
torchvision/transforms/functional.py
|
def to_tensor(pic):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not(_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
# handle numpy array
if pic.ndim == 2:
pic = pic[:, :, None]
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
elif pic.mode == 'F':
img = torch.from_numpy(np.array(pic, np.float32, copy=False))
elif pic.mode == '1':
img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
|
def to_tensor(pic):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not(_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
# handle numpy array
if pic.ndim == 2:
pic = pic[:, :, None]
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
elif pic.mode == 'F':
img = torch.from_numpy(np.array(pic, np.float32, copy=False))
elif pic.mode == '1':
img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
|
[
"Convert",
"a",
"PIL",
"Image",
"or",
"numpy",
".",
"ndarray",
"to",
"tensor",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L38-L94
|
[
"def",
"to_tensor",
"(",
"pic",
")",
":",
"if",
"not",
"(",
"_is_pil_image",
"(",
"pic",
")",
"or",
"_is_numpy_image",
"(",
"pic",
")",
")",
":",
"raise",
"TypeError",
"(",
"'pic should be PIL Image or ndarray. Got {}'",
".",
"format",
"(",
"type",
"(",
"pic",
")",
")",
")",
"if",
"isinstance",
"(",
"pic",
",",
"np",
".",
"ndarray",
")",
":",
"# handle numpy array",
"if",
"pic",
".",
"ndim",
"==",
"2",
":",
"pic",
"=",
"pic",
"[",
":",
",",
":",
",",
"None",
"]",
"img",
"=",
"torch",
".",
"from_numpy",
"(",
"pic",
".",
"transpose",
"(",
"(",
"2",
",",
"0",
",",
"1",
")",
")",
")",
"# backward compatibility",
"if",
"isinstance",
"(",
"img",
",",
"torch",
".",
"ByteTensor",
")",
":",
"return",
"img",
".",
"float",
"(",
")",
".",
"div",
"(",
"255",
")",
"else",
":",
"return",
"img",
"if",
"accimage",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"pic",
",",
"accimage",
".",
"Image",
")",
":",
"nppic",
"=",
"np",
".",
"zeros",
"(",
"[",
"pic",
".",
"channels",
",",
"pic",
".",
"height",
",",
"pic",
".",
"width",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"pic",
".",
"copyto",
"(",
"nppic",
")",
"return",
"torch",
".",
"from_numpy",
"(",
"nppic",
")",
"# handle PIL Image",
"if",
"pic",
".",
"mode",
"==",
"'I'",
":",
"img",
"=",
"torch",
".",
"from_numpy",
"(",
"np",
".",
"array",
"(",
"pic",
",",
"np",
".",
"int32",
",",
"copy",
"=",
"False",
")",
")",
"elif",
"pic",
".",
"mode",
"==",
"'I;16'",
":",
"img",
"=",
"torch",
".",
"from_numpy",
"(",
"np",
".",
"array",
"(",
"pic",
",",
"np",
".",
"int16",
",",
"copy",
"=",
"False",
")",
")",
"elif",
"pic",
".",
"mode",
"==",
"'F'",
":",
"img",
"=",
"torch",
".",
"from_numpy",
"(",
"np",
".",
"array",
"(",
"pic",
",",
"np",
".",
"float32",
",",
"copy",
"=",
"False",
")",
")",
"elif",
"pic",
".",
"mode",
"==",
"'1'",
":",
"img",
"=",
"255",
"*",
"torch",
".",
"from_numpy",
"(",
"np",
".",
"array",
"(",
"pic",
",",
"np",
".",
"uint8",
",",
"copy",
"=",
"False",
")",
")",
"else",
":",
"img",
"=",
"torch",
".",
"ByteTensor",
"(",
"torch",
".",
"ByteStorage",
".",
"from_buffer",
"(",
"pic",
".",
"tobytes",
"(",
")",
")",
")",
"# PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK",
"if",
"pic",
".",
"mode",
"==",
"'YCbCr'",
":",
"nchannel",
"=",
"3",
"elif",
"pic",
".",
"mode",
"==",
"'I;16'",
":",
"nchannel",
"=",
"1",
"else",
":",
"nchannel",
"=",
"len",
"(",
"pic",
".",
"mode",
")",
"img",
"=",
"img",
".",
"view",
"(",
"pic",
".",
"size",
"[",
"1",
"]",
",",
"pic",
".",
"size",
"[",
"0",
"]",
",",
"nchannel",
")",
"# put it from HWC to CHW format",
"# yikes, this transpose takes 80% of the loading time/CPU",
"img",
"=",
"img",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"transpose",
"(",
"0",
",",
"2",
")",
".",
"contiguous",
"(",
")",
"if",
"isinstance",
"(",
"img",
",",
"torch",
".",
"ByteTensor",
")",
":",
"return",
"img",
".",
"float",
"(",
")",
".",
"div",
"(",
"255",
")",
"else",
":",
"return",
"img"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
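A hedged round-trip check for to_tensor: an 8-bit PIL image becomes a CHW float tensor scaled into [0, 1]:

    import torch
    from PIL import Image
    from torchvision.transforms import functional as F

    img = Image.new('RGB', (32, 24), color=(255, 0, 0))  # width 32, height 24, all-red
    t = F.to_tensor(img)
    print(t.shape, t.dtype)   # torch.Size([3, 24, 32]) torch.float32
    print(t[0].max().item())  # 1.0 -- the byte values were divided by 255
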
test
|
to_pil_image
|
Convert a tensor or an ndarray to PIL Image.
See :class:`~torchvision.transforms.ToPILImage` for more details.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
Returns:
PIL Image: Image converted to PIL Image.
|
torchvision/transforms/functional.py
|
def to_pil_image(pic, mode=None):
"""Convert a tensor or an ndarray to PIL Image.
See :class:`~torchvision.transforms.ToPILImage` for more details.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
Returns:
PIL Image: Image converted to PIL Image.
"""
if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
elif isinstance(pic, torch.Tensor):
if pic.ndimension() not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))
elif pic.ndimension() == 2:
# if 2D image, add channel dimension (CHW)
pic = pic.unsqueeze(0)
elif isinstance(pic, np.ndarray):
if pic.ndim not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
elif pic.ndim == 2:
# if 2D image, add channel dimension (HWC)
pic = np.expand_dims(pic, 2)
npimg = pic
if isinstance(pic, torch.FloatTensor):
pic = pic.mul(255).byte()
if isinstance(pic, torch.Tensor):
npimg = np.transpose(pic.numpy(), (1, 2, 0))
if not isinstance(npimg, np.ndarray):
raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
'not {}'.format(type(npimg)))
if npimg.shape[2] == 1:
expected_mode = None
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
expected_mode = 'L'
elif npimg.dtype == np.int16:
expected_mode = 'I;16'
elif npimg.dtype == np.int32:
expected_mode = 'I'
elif npimg.dtype == np.float32:
expected_mode = 'F'
if mode is not None and mode != expected_mode:
raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
mode = expected_mode
elif npimg.shape[2] == 2:
permitted_2_channel_modes = ['LA']
if mode is not None and mode not in permitted_2_channel_modes:
raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'LA'
elif npimg.shape[2] == 4:
permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
if mode is not None and mode not in permitted_4_channel_modes:
raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGBA'
else:
permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
if mode is not None and mode not in permitted_3_channel_modes:
raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGB'
if mode is None:
raise TypeError('Input type {} is not supported'.format(npimg.dtype))
return Image.fromarray(npimg, mode=mode)
|
def to_pil_image(pic, mode=None):
"""Convert a tensor or an ndarray to PIL Image.
See :class:`~torchvision.transforms.ToPILImage` for more details.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
Returns:
PIL Image: Image converted to PIL Image.
"""
if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
elif isinstance(pic, torch.Tensor):
if pic.ndimension() not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))
elif pic.ndimension() == 2:
# if 2D image, add channel dimension (CHW)
pic = pic.unsqueeze(0)
elif isinstance(pic, np.ndarray):
if pic.ndim not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
elif pic.ndim == 2:
# if 2D image, add channel dimension (HWC)
pic = np.expand_dims(pic, 2)
npimg = pic
if isinstance(pic, torch.FloatTensor):
pic = pic.mul(255).byte()
if isinstance(pic, torch.Tensor):
npimg = np.transpose(pic.numpy(), (1, 2, 0))
if not isinstance(npimg, np.ndarray):
raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
'not {}'.format(type(npimg)))
if npimg.shape[2] == 1:
expected_mode = None
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
expected_mode = 'L'
elif npimg.dtype == np.int16:
expected_mode = 'I;16'
elif npimg.dtype == np.int32:
expected_mode = 'I'
elif npimg.dtype == np.float32:
expected_mode = 'F'
if mode is not None and mode != expected_mode:
raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
mode = expected_mode
elif npimg.shape[2] == 2:
permitted_2_channel_modes = ['LA']
if mode is not None and mode not in permitted_2_channel_modes:
raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'LA'
elif npimg.shape[2] == 4:
permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
if mode is not None and mode not in permitted_4_channel_modes:
raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGBA'
else:
permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
if mode is not None and mode not in permitted_3_channel_modes:
raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGB'
if mode is None:
raise TypeError('Input type {} is not supported'.format(npimg.dtype))
return Image.fromarray(npimg, mode=mode)
|
[
"Convert",
"a",
"tensor",
"or",
"an",
"ndarray",
"to",
"PIL",
"Image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L97-L181
|
[
"def",
"to_pil_image",
"(",
"pic",
",",
"mode",
"=",
"None",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"pic",
",",
"torch",
".",
"Tensor",
")",
"or",
"isinstance",
"(",
"pic",
",",
"np",
".",
"ndarray",
")",
")",
":",
"raise",
"TypeError",
"(",
"'pic should be Tensor or ndarray. Got {}.'",
".",
"format",
"(",
"type",
"(",
"pic",
")",
")",
")",
"elif",
"isinstance",
"(",
"pic",
",",
"torch",
".",
"Tensor",
")",
":",
"if",
"pic",
".",
"ndimension",
"(",
")",
"not",
"in",
"{",
"2",
",",
"3",
"}",
":",
"raise",
"ValueError",
"(",
"'pic should be 2/3 dimensional. Got {} dimensions.'",
".",
"format",
"(",
"pic",
".",
"ndimension",
"(",
")",
")",
")",
"elif",
"pic",
".",
"ndimension",
"(",
")",
"==",
"2",
":",
"# if 2D image, add channel dimension (CHW)",
"pic",
"=",
"pic",
".",
"unsqueeze",
"(",
"0",
")",
"elif",
"isinstance",
"(",
"pic",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"pic",
".",
"ndim",
"not",
"in",
"{",
"2",
",",
"3",
"}",
":",
"raise",
"ValueError",
"(",
"'pic should be 2/3 dimensional. Got {} dimensions.'",
".",
"format",
"(",
"pic",
".",
"ndim",
")",
")",
"elif",
"pic",
".",
"ndim",
"==",
"2",
":",
"# if 2D image, add channel dimension (HWC)",
"pic",
"=",
"np",
".",
"expand_dims",
"(",
"pic",
",",
"2",
")",
"npimg",
"=",
"pic",
"if",
"isinstance",
"(",
"pic",
",",
"torch",
".",
"FloatTensor",
")",
":",
"pic",
"=",
"pic",
".",
"mul",
"(",
"255",
")",
".",
"byte",
"(",
")",
"if",
"isinstance",
"(",
"pic",
",",
"torch",
".",
"Tensor",
")",
":",
"npimg",
"=",
"np",
".",
"transpose",
"(",
"pic",
".",
"numpy",
"(",
")",
",",
"(",
"1",
",",
"2",
",",
"0",
")",
")",
"if",
"not",
"isinstance",
"(",
"npimg",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"TypeError",
"(",
"'Input pic must be a torch.Tensor or NumPy ndarray, '",
"+",
"'not {}'",
".",
"format",
"(",
"type",
"(",
"npimg",
")",
")",
")",
"if",
"npimg",
".",
"shape",
"[",
"2",
"]",
"==",
"1",
":",
"expected_mode",
"=",
"None",
"npimg",
"=",
"npimg",
"[",
":",
",",
":",
",",
"0",
"]",
"if",
"npimg",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"expected_mode",
"=",
"'L'",
"elif",
"npimg",
".",
"dtype",
"==",
"np",
".",
"int16",
":",
"expected_mode",
"=",
"'I;16'",
"elif",
"npimg",
".",
"dtype",
"==",
"np",
".",
"int32",
":",
"expected_mode",
"=",
"'I'",
"elif",
"npimg",
".",
"dtype",
"==",
"np",
".",
"float32",
":",
"expected_mode",
"=",
"'F'",
"if",
"mode",
"is",
"not",
"None",
"and",
"mode",
"!=",
"expected_mode",
":",
"raise",
"ValueError",
"(",
"\"Incorrect mode ({}) supplied for input type {}. Should be {}\"",
".",
"format",
"(",
"mode",
",",
"np",
".",
"dtype",
",",
"expected_mode",
")",
")",
"mode",
"=",
"expected_mode",
"elif",
"npimg",
".",
"shape",
"[",
"2",
"]",
"==",
"2",
":",
"permitted_2_channel_modes",
"=",
"[",
"'LA'",
"]",
"if",
"mode",
"is",
"not",
"None",
"and",
"mode",
"not",
"in",
"permitted_2_channel_modes",
":",
"raise",
"ValueError",
"(",
"\"Only modes {} are supported for 2D inputs\"",
".",
"format",
"(",
"permitted_2_channel_modes",
")",
")",
"if",
"mode",
"is",
"None",
"and",
"npimg",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"mode",
"=",
"'LA'",
"elif",
"npimg",
".",
"shape",
"[",
"2",
"]",
"==",
"4",
":",
"permitted_4_channel_modes",
"=",
"[",
"'RGBA'",
",",
"'CMYK'",
",",
"'RGBX'",
"]",
"if",
"mode",
"is",
"not",
"None",
"and",
"mode",
"not",
"in",
"permitted_4_channel_modes",
":",
"raise",
"ValueError",
"(",
"\"Only modes {} are supported for 4D inputs\"",
".",
"format",
"(",
"permitted_4_channel_modes",
")",
")",
"if",
"mode",
"is",
"None",
"and",
"npimg",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"mode",
"=",
"'RGBA'",
"else",
":",
"permitted_3_channel_modes",
"=",
"[",
"'RGB'",
",",
"'YCbCr'",
",",
"'HSV'",
"]",
"if",
"mode",
"is",
"not",
"None",
"and",
"mode",
"not",
"in",
"permitted_3_channel_modes",
":",
"raise",
"ValueError",
"(",
"\"Only modes {} are supported for 3D inputs\"",
".",
"format",
"(",
"permitted_3_channel_modes",
")",
")",
"if",
"mode",
"is",
"None",
"and",
"npimg",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"mode",
"=",
"'RGB'",
"if",
"mode",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'Input type {} is not supported'",
".",
"format",
"(",
"npimg",
".",
"dtype",
")",
")",
"return",
"Image",
".",
"fromarray",
"(",
"npimg",
",",
"mode",
"=",
"mode",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
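The inverse direction, as a hedged sketch: a float CHW tensor is multiplied by 255 and mapped to an 8-bit RGB image:

    import torch
    from torchvision.transforms import functional as F

    t = torch.rand(3, 24, 32)        # CHW in [0, 1]
    img = F.to_pil_image(t)
    print(img.mode, img.size)        # RGB (32, 24) -- PIL reports (width, height)
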
test
|
normalize
|
Normalize a tensor image with mean and standard deviation.
.. note::
This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
Returns:
Tensor: Normalized Tensor image.
|
torchvision/transforms/functional.py
|
def normalize(tensor, mean, std, inplace=False):
"""Normalize a tensor image with mean and standard deviation.
.. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
Returns:
Tensor: Normalized Tensor image.
"""
if not _is_tensor_image(tensor):
raise TypeError('tensor is not a torch image.')
if not inplace:
tensor = tensor.clone()
mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)
std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)
tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
return tensor
|
def normalize(tensor, mean, std, inplace=False):
"""Normalize a tensor image with mean and standard deviation.
.. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
Returns:
Tensor: Normalized Tensor image.
"""
if not _is_tensor_image(tensor):
raise TypeError('tensor is not a torch image.')
if not inplace:
tensor = tensor.clone()
mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)
std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)
tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
return tensor
|
[
"Normalize",
"a",
"tensor",
"image",
"with",
"mean",
"and",
"standard",
"deviation",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L184-L209
|
[
"def",
"normalize",
"(",
"tensor",
",",
"mean",
",",
"std",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"not",
"_is_tensor_image",
"(",
"tensor",
")",
":",
"raise",
"TypeError",
"(",
"'tensor is not a torch image.'",
")",
"if",
"not",
"inplace",
":",
"tensor",
"=",
"tensor",
".",
"clone",
"(",
")",
"mean",
"=",
"torch",
".",
"as_tensor",
"(",
"mean",
",",
"dtype",
"=",
"torch",
".",
"float32",
",",
"device",
"=",
"tensor",
".",
"device",
")",
"std",
"=",
"torch",
".",
"as_tensor",
"(",
"std",
",",
"dtype",
"=",
"torch",
".",
"float32",
",",
"device",
"=",
"tensor",
".",
"device",
")",
"tensor",
".",
"sub_",
"(",
"mean",
"[",
":",
",",
"None",
",",
"None",
"]",
")",
".",
"div_",
"(",
"std",
"[",
":",
",",
"None",
",",
"None",
"]",
")",
"return",
"tensor"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
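A hedged sketch of normalize; the mean/std values are the customary ImageNet statistics, not taken from this record:

    import torch
    from torchvision.transforms import functional as F

    t = torch.rand(3, 8, 8)
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    out = F.normalize(t, mean, std)            # out of place: t is cloned first
    F.normalize(t, mean, std, inplace=True)    # mutates t directly, skipping the clone
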
test
|
resize
|
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
    the smaller edge of the image will be matched to this number maintaining
    the aspect ratio, i.e., if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
|
torchvision/transforms/functional.py
|
def resize(img, size, interpolation=Image.BILINEAR):
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio, i.e., if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::-1], interpolation)
|
def resize(img, size, interpolation=Image.BILINEAR):
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio, i.e., if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::-1], interpolation)
|
[
"r",
"Resize",
"the",
"input",
"PIL",
"Image",
"to",
"the",
"given",
"size",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L212-L246
|
[
"def",
"resize",
"(",
"img",
",",
"size",
",",
"interpolation",
"=",
"Image",
".",
"BILINEAR",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"if",
"not",
"(",
"isinstance",
"(",
"size",
",",
"int",
")",
"or",
"(",
"isinstance",
"(",
"size",
",",
"Iterable",
")",
"and",
"len",
"(",
"size",
")",
"==",
"2",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Got inappropriate size arg: {}'",
".",
"format",
"(",
"size",
")",
")",
"if",
"isinstance",
"(",
"size",
",",
"int",
")",
":",
"w",
",",
"h",
"=",
"img",
".",
"size",
"if",
"(",
"w",
"<=",
"h",
"and",
"w",
"==",
"size",
")",
"or",
"(",
"h",
"<=",
"w",
"and",
"h",
"==",
"size",
")",
":",
"return",
"img",
"if",
"w",
"<",
"h",
":",
"ow",
"=",
"size",
"oh",
"=",
"int",
"(",
"size",
"*",
"h",
"/",
"w",
")",
"return",
"img",
".",
"resize",
"(",
"(",
"ow",
",",
"oh",
")",
",",
"interpolation",
")",
"else",
":",
"oh",
"=",
"size",
"ow",
"=",
"int",
"(",
"size",
"*",
"w",
"/",
"h",
")",
"return",
"img",
".",
"resize",
"(",
"(",
"ow",
",",
"oh",
")",
",",
"interpolation",
")",
"else",
":",
"return",
"img",
".",
"resize",
"(",
"size",
"[",
":",
":",
"-",
"1",
"]",
",",
"interpolation",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
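A hedged sketch showing both branches of resize: an int matches the smaller edge, a (h, w) pair is exact:

    from PIL import Image
    from torchvision.transforms import functional as F

    img = Image.new('RGB', (400, 300))       # PIL size is (width, height)
    print(F.resize(img, 150).size)           # (200, 150): smaller edge -> 150, aspect ratio kept
    print(F.resize(img, (100, 200)).size)    # (200, 100): the (h, w) pair is reversed into PIL's (w, h)
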
test
|
pad
|
r"""Pad the given PIL Image on all sides with specified padding mode and fill value.
Args:
img (PIL Image): Image to be padded.
padding (int or tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image: Padded image.
|
torchvision/transforms/functional.py
|
def pad(img, padding, fill=0, padding_mode='constant'):
r"""Pad the given PIL Image on all sides with specified padding mode and fill value.
Args:
img (PIL Image): Image to be padded.
padding (int or tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image: Padded image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not isinstance(padding, (numbers.Number, tuple)):
raise TypeError('Got inappropriate padding arg')
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError('Got inappropriate fill arg')
if not isinstance(padding_mode, str):
raise TypeError('Got inappropriate padding_mode arg')
if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
'Padding mode should be either constant, edge, reflect or symmetric'
if padding_mode == 'constant':
if img.mode == 'P':
palette = img.getpalette()
image = ImageOps.expand(img, border=padding, fill=fill)
image.putpalette(palette)
return image
return ImageOps.expand(img, border=padding, fill=fill)
else:
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, Sequence) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, Sequence) and len(padding) == 4:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
if img.mode == 'P':
palette = img.getpalette()
img = np.asarray(img)
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
img = Image.fromarray(img)
img.putpalette(palette)
return img
img = np.asarray(img)
# RGB image
if len(img.shape) == 3:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
# Grayscale image
if len(img.shape) == 2:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
return Image.fromarray(img)
|
def pad(img, padding, fill=0, padding_mode='constant'):
r"""Pad the given PIL Image on all sides with specified padding mode and fill value.
Args:
img (PIL Image): Image to be padded.
padding (int or tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image: Padded image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not isinstance(padding, (numbers.Number, tuple)):
raise TypeError('Got inappropriate padding arg')
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError('Got inappropriate fill arg')
if not isinstance(padding_mode, str):
raise TypeError('Got inappropriate padding_mode arg')
if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
'Padding mode should be either constant, edge, reflect or symmetric'
if padding_mode == 'constant':
if img.mode == 'P':
palette = img.getpalette()
image = ImageOps.expand(img, border=padding, fill=fill)
image.putpalette(palette)
return image
return ImageOps.expand(img, border=padding, fill=fill)
else:
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, Sequence) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, Sequence) and len(padding) == 4:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
if img.mode == 'P':
palette = img.getpalette()
img = np.asarray(img)
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
img = Image.fromarray(img)
img.putpalette(palette)
return img
img = np.asarray(img)
# RGB image
if len(img.shape) == 3:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
# Grayscale image
if len(img.shape) == 2:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
return Image.fromarray(img)
|
[
"r",
"Pad",
"the",
"given",
"PIL",
"Image",
"on",
"all",
"sides",
"with",
"specified",
"padding",
"mode",
"and",
"fill",
"value",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L255-L340
|
[
"def",
"pad",
"(",
"img",
",",
"padding",
",",
"fill",
"=",
"0",
",",
"padding_mode",
"=",
"'constant'",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"padding",
",",
"(",
"numbers",
".",
"Number",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Got inappropriate padding arg'",
")",
"if",
"not",
"isinstance",
"(",
"fill",
",",
"(",
"numbers",
".",
"Number",
",",
"str",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Got inappropriate fill arg'",
")",
"if",
"not",
"isinstance",
"(",
"padding_mode",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'Got inappropriate padding_mode arg'",
")",
"if",
"isinstance",
"(",
"padding",
",",
"Sequence",
")",
"and",
"len",
"(",
"padding",
")",
"not",
"in",
"[",
"2",
",",
"4",
"]",
":",
"raise",
"ValueError",
"(",
"\"Padding must be an int or a 2, or 4 element tuple, not a \"",
"+",
"\"{} element tuple\"",
".",
"format",
"(",
"len",
"(",
"padding",
")",
")",
")",
"assert",
"padding_mode",
"in",
"[",
"'constant'",
",",
"'edge'",
",",
"'reflect'",
",",
"'symmetric'",
"]",
",",
"'Padding mode should be either constant, edge, reflect or symmetric'",
"if",
"padding_mode",
"==",
"'constant'",
":",
"if",
"img",
".",
"mode",
"==",
"'P'",
":",
"palette",
"=",
"img",
".",
"getpalette",
"(",
")",
"image",
"=",
"ImageOps",
".",
"expand",
"(",
"img",
",",
"border",
"=",
"padding",
",",
"fill",
"=",
"fill",
")",
"image",
".",
"putpalette",
"(",
"palette",
")",
"return",
"image",
"return",
"ImageOps",
".",
"expand",
"(",
"img",
",",
"border",
"=",
"padding",
",",
"fill",
"=",
"fill",
")",
"else",
":",
"if",
"isinstance",
"(",
"padding",
",",
"int",
")",
":",
"pad_left",
"=",
"pad_right",
"=",
"pad_top",
"=",
"pad_bottom",
"=",
"padding",
"if",
"isinstance",
"(",
"padding",
",",
"Sequence",
")",
"and",
"len",
"(",
"padding",
")",
"==",
"2",
":",
"pad_left",
"=",
"pad_right",
"=",
"padding",
"[",
"0",
"]",
"pad_top",
"=",
"pad_bottom",
"=",
"padding",
"[",
"1",
"]",
"if",
"isinstance",
"(",
"padding",
",",
"Sequence",
")",
"and",
"len",
"(",
"padding",
")",
"==",
"4",
":",
"pad_left",
"=",
"padding",
"[",
"0",
"]",
"pad_top",
"=",
"padding",
"[",
"1",
"]",
"pad_right",
"=",
"padding",
"[",
"2",
"]",
"pad_bottom",
"=",
"padding",
"[",
"3",
"]",
"if",
"img",
".",
"mode",
"==",
"'P'",
":",
"palette",
"=",
"img",
".",
"getpalette",
"(",
")",
"img",
"=",
"np",
".",
"asarray",
"(",
"img",
")",
"img",
"=",
"np",
".",
"pad",
"(",
"img",
",",
"(",
"(",
"pad_top",
",",
"pad_bottom",
")",
",",
"(",
"pad_left",
",",
"pad_right",
")",
")",
",",
"padding_mode",
")",
"img",
"=",
"Image",
".",
"fromarray",
"(",
"img",
")",
"img",
".",
"putpalette",
"(",
"palette",
")",
"return",
"img",
"img",
"=",
"np",
".",
"asarray",
"(",
"img",
")",
"# RGB image",
"if",
"len",
"(",
"img",
".",
"shape",
")",
"==",
"3",
":",
"img",
"=",
"np",
".",
"pad",
"(",
"img",
",",
"(",
"(",
"pad_top",
",",
"pad_bottom",
")",
",",
"(",
"pad_left",
",",
"pad_right",
")",
",",
"(",
"0",
",",
"0",
")",
")",
",",
"padding_mode",
")",
"# Grayscale image",
"if",
"len",
"(",
"img",
".",
"shape",
")",
"==",
"2",
":",
"img",
"=",
"np",
".",
"pad",
"(",
"img",
",",
"(",
"(",
"pad_top",
",",
"pad_bottom",
")",
",",
"(",
"pad_left",
",",
"pad_right",
")",
")",
",",
"padding_mode",
")",
"return",
"Image",
".",
"fromarray",
"(",
"img",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
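A hedged sketch of pad covering the constant and reflect paths:

    from PIL import Image
    from torchvision.transforms import functional as F

    img = Image.new('RGB', (10, 10))
    print(F.pad(img, 2).size)                               # (14, 14): 2 px of constant fill on every side
    print(F.pad(img, (1, 3), padding_mode='reflect').size)  # (12, 16): 1 px left/right, 3 px top/bottom, mirrored
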
test
|
crop
|
Crop the given PIL Image.
Args:
img (PIL Image): Image to be cropped.
    i (int): i in (i, j), i.e., coordinates of the upper left corner.
    j (int): j in (i, j), i.e., coordinates of the upper left corner.
h (int): Height of the cropped image.
w (int): Width of the cropped image.
Returns:
PIL Image: Cropped image.
|
torchvision/transforms/functional.py
|
def crop(img, i, j, h, w):
"""Crop the given PIL Image.
Args:
img (PIL Image): Image to be cropped.
        i (int): i in (i, j), i.e., coordinates of the upper left corner.
        j (int): j in (i, j), i.e., coordinates of the upper left corner.
h (int): Height of the cropped image.
w (int): Width of the cropped image.
Returns:
PIL Image: Cropped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.crop((j, i, j + w, i + h))
|
def crop(img, i, j, h, w):
"""Crop the given PIL Image.
Args:
img (PIL Image): Image to be cropped.
        i (int): i in (i, j), i.e., coordinates of the upper left corner.
        j (int): j in (i, j), i.e., coordinates of the upper left corner.
h (int): Height of the cropped image.
w (int): Width of the cropped image.
Returns:
PIL Image: Cropped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.crop((j, i, j + w, i + h))
|
[
"Crop",
"the",
"given",
"PIL",
"Image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L343-L359
|
[
"def",
"crop",
"(",
"img",
",",
"i",
",",
"j",
",",
"h",
",",
"w",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"return",
"img",
".",
"crop",
"(",
"(",
"j",
",",
"i",
",",
"j",
"+",
"w",
",",
"i",
"+",
"h",
")",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
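A hedged sketch of crop; note the (i, j) arguments are row/column while PIL's crop box is (left, upper, right, lower):

    from PIL import Image
    from torchvision.transforms import functional as F

    img = Image.new('RGB', (100, 80))
    patch = F.crop(img, 10, 20, 30, 40)  # i=10 (top), j=20 (left), h=30, w=40
    print(patch.size)                    # (40, 30) as (width, height)
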
test
|
resized_crop
|
Crop the given PIL Image and resize it to desired size.
Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
Args:
img (PIL Image): Image to be cropped.
    i (int): i in (i, j), i.e., coordinates of the upper left corner.
    j (int): j in (i, j), i.e., coordinates of the upper left corner.
h (int): Height of the cropped image.
w (int): Width of the cropped image.
size (sequence or int): Desired output size. Same semantics as ``resize``.
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``.
Returns:
PIL Image: Cropped image.
|
torchvision/transforms/functional.py
|
def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
"""Crop the given PIL Image and resize it to desired size.
Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
Args:
img (PIL Image): Image to be cropped.
        i (int): i in (i, j), i.e., coordinates of the upper left corner.
        j (int): j in (i, j), i.e., coordinates of the upper left corner.
h (int): Height of the cropped image.
w (int): Width of the cropped image.
size (sequence or int): Desired output size. Same semantics as ``resize``.
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``.
Returns:
PIL Image: Cropped image.
"""
assert _is_pil_image(img), 'img should be PIL Image'
img = crop(img, i, j, h, w)
img = resize(img, size, interpolation)
return img
|
def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
"""Crop the given PIL Image and resize it to desired size.
Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
Args:
img (PIL Image): Image to be cropped.
        i (int): i in (i, j), i.e., coordinates of the upper left corner.
        j (int): j in (i, j), i.e., coordinates of the upper left corner.
h (int): Height of the cropped image.
w (int): Width of the cropped image.
size (sequence or int): Desired output size. Same semantics as ``resize``.
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``.
Returns:
PIL Image: Cropped image.
"""
assert _is_pil_image(img), 'img should be PIL Image'
img = crop(img, i, j, h, w)
img = resize(img, size, interpolation)
return img
|
[
"Crop",
"the",
"given",
"PIL",
"Image",
"and",
"resize",
"it",
"to",
"desired",
"size",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L372-L392
|
[
"def",
"resized_crop",
"(",
"img",
",",
"i",
",",
"j",
",",
"h",
",",
"w",
",",
"size",
",",
"interpolation",
"=",
"Image",
".",
"BILINEAR",
")",
":",
"assert",
"_is_pil_image",
"(",
"img",
")",
",",
"'img should be PIL Image'",
"img",
"=",
"crop",
"(",
"img",
",",
"i",
",",
"j",
",",
"h",
",",
"w",
")",
"img",
"=",
"resize",
"(",
"img",
",",
"size",
",",
"interpolation",
")",
"return",
"img"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
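A hedged sketch of resized_crop, which is simply crop followed by resize:

    from PIL import Image
    from torchvision.transforms import functional as F

    img = Image.new('RGB', (100, 80))
    out = F.resized_crop(img, 0, 0, 40, 40, size=64)  # take the top-left 40x40, then resize to 64
    print(out.size)                                   # (64, 64)
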
test
|
hflip
|
Horizontally flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
    PIL Image: Horizontally flipped image.
|
torchvision/transforms/functional.py
|
def hflip(img):
"""Horizontally flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
        PIL Image: Horizontally flipped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_LEFT_RIGHT)
|
def hflip(img):
"""Horizontally flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
        PIL Image: Horizontally flipped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_LEFT_RIGHT)
|
[
"Horizontally",
"flip",
"the",
"given",
"PIL",
"Image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L395-L407
|
[
"def",
"hflip",
"(",
"img",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"return",
"img",
".",
"transpose",
"(",
"Image",
".",
"FLIP_LEFT_RIGHT",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
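A hedged one-pixel check that hflip mirrors across the vertical axis:

    from PIL import Image
    from torchvision.transforms import functional as F

    img = Image.new('RGB', (4, 2))
    img.putpixel((0, 0), (255, 0, 0))     # mark the top-left pixel red
    print(F.hflip(img).getpixel((3, 0)))  # (255, 0, 0): the mark moved to the top-right
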
test
|
_get_perspective_coeffs
|
Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.
In Perspective Transform each pixel (x, y) in the original image gets transformed as,
(x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )
Args:
List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed
image
Returns:
octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
|
torchvision/transforms/functional.py
|
def _get_perspective_coeffs(startpoints, endpoints):
"""Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.
    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
    (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )
    Args:
        List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed
image
Returns:
octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
"""
matrix = []
for p1, p2 in zip(endpoints, startpoints):
matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
A = torch.tensor(matrix, dtype=torch.float)
B = torch.tensor(startpoints, dtype=torch.float).view(8)
res = torch.gels(B, A)[0]
return res.squeeze_(1).tolist()
|
def _get_perspective_coeffs(startpoints, endpoints):
"""Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.
    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
    (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )
    Args:
        List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed
image
Returns:
octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
"""
matrix = []
for p1, p2 in zip(endpoints, startpoints):
matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
A = torch.tensor(matrix, dtype=torch.float)
B = torch.tensor(startpoints, dtype=torch.float).view(8)
res = torch.gels(B, A)[0]
return res.squeeze_(1).tolist()
|
[
"Helper",
"function",
"to",
"get",
"the",
"coefficients",
"(",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
")",
"for",
"the",
"perspective",
"transforms",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L410-L432
|
[
"def",
"_get_perspective_coeffs",
"(",
"startpoints",
",",
"endpoints",
")",
":",
"matrix",
"=",
"[",
"]",
"for",
"p1",
",",
"p2",
"in",
"zip",
"(",
"endpoints",
",",
"startpoints",
")",
":",
"matrix",
".",
"append",
"(",
"[",
"p1",
"[",
"0",
"]",
",",
"p1",
"[",
"1",
"]",
",",
"1",
",",
"0",
",",
"0",
",",
"0",
",",
"-",
"p2",
"[",
"0",
"]",
"*",
"p1",
"[",
"0",
"]",
",",
"-",
"p2",
"[",
"0",
"]",
"*",
"p1",
"[",
"1",
"]",
"]",
")",
"matrix",
".",
"append",
"(",
"[",
"0",
",",
"0",
",",
"0",
",",
"p1",
"[",
"0",
"]",
",",
"p1",
"[",
"1",
"]",
",",
"1",
",",
"-",
"p2",
"[",
"1",
"]",
"*",
"p1",
"[",
"0",
"]",
",",
"-",
"p2",
"[",
"1",
"]",
"*",
"p1",
"[",
"1",
"]",
"]",
")",
"A",
"=",
"torch",
".",
"tensor",
"(",
"matrix",
",",
"dtype",
"=",
"torch",
".",
"float",
")",
"B",
"=",
"torch",
".",
"tensor",
"(",
"startpoints",
",",
"dtype",
"=",
"torch",
".",
"float",
")",
".",
"view",
"(",
"8",
")",
"res",
"=",
"torch",
".",
"gels",
"(",
"B",
",",
"A",
")",
"[",
"0",
"]",
"return",
"res",
".",
"squeeze_",
"(",
"1",
")",
".",
"tolist",
"(",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
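A hedged sanity check on this private helper: mapping a square onto itself should recover the identity coefficients, (a, e) = (1, 1) with the rest near zero. Note it relies on torch.gels, so a torch version contemporary with this commit is assumed (torch.gels was later removed):

    from torchvision.transforms import functional as F

    square = [(0, 0), (10, 0), (10, 10), (0, 10)]  # [top-left, top-right, bottom-right, bottom-left]
    coeffs = F._get_perspective_coeffs(square, square)
    print([round(c, 4) for c in coeffs])           # approximately [1, 0, 0, 0, 1, 0, 0, 0]
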
test
|
perspective
|
Perform perspective transform of the given PIL Image.
Args:
img (PIL Image): Image to be transformed.
startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image.
endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
interpolation: Desired interpolation. Default is Image.BICUBIC.
Returns:
    PIL Image: Perspective-transformed image.
|
torchvision/transforms/functional.py
|
def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC):
"""Perform perspective transform of the given PIL Image.
Args:
img (PIL Image): Image to be transformed.
        startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image.
        endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
        interpolation: Desired interpolation. Default is Image.BICUBIC.
    Returns:
        PIL Image: Perspective-transformed image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
coeffs = _get_perspective_coeffs(startpoints, endpoints)
return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation)
|
def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC):
"""Perform perspective transform of the given PIL Image.
Args:
img (PIL Image): Image to be transformed.
        startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image.
        endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
        interpolation: Desired interpolation. Default is Image.BICUBIC.
    Returns:
        PIL Image: Perspective-transformed image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
coeffs = _get_perspective_coeffs(startpoints, endpoints)
return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation)
|
[
"Perform",
"perspective",
"transform",
"of",
"the",
"given",
"PIL",
"Image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L435-L450
|
[
"def",
"perspective",
"(",
"img",
",",
"startpoints",
",",
"endpoints",
",",
"interpolation",
"=",
"Image",
".",
"BICUBIC",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"coeffs",
"=",
"_get_perspective_coeffs",
"(",
"startpoints",
",",
"endpoints",
")",
"return",
"img",
".",
"transform",
"(",
"img",
".",
"size",
",",
"Image",
".",
"PERSPECTIVE",
",",
"coeffs",
",",
"interpolation",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
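A hedged sketch of perspective; the start/end corner lists are hypothetical values chosen for illustration:

    from PIL import Image
    from torchvision.transforms import functional as F

    img = Image.new('RGB', (100, 100))
    start = [(0, 0), (99, 0), (99, 99), (0, 99)]  # corners of the original image
    end = [(10, 5), (95, 0), (99, 99), (0, 90)]   # where those corners should land
    warped = F.perspective(img, start, end)       # output keeps the input size, 100x100
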
test
|
vflip
|
Vertically flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Vertically flipped image.
|
torchvision/transforms/functional.py
|
def vflip(img):
"""Vertically flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Vertically flipped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_TOP_BOTTOM)
|
def vflip(img):
"""Vertically flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Vertically flipped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_TOP_BOTTOM)
|
[
"Vertically",
"flip",
"the",
"given",
"PIL",
"Image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L453-L465
|
[
"def",
"vflip",
"(",
"img",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"return",
"img",
".",
"transpose",
"(",
"Image",
".",
"FLIP_TOP_BOTTOM",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
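The vertical counterpart of the hflip check above, again hedged:

    from PIL import Image
    from torchvision.transforms import functional as F

    img = Image.new('RGB', (2, 4))
    img.putpixel((0, 0), (0, 255, 0))     # mark the top-left pixel green
    print(F.vflip(img).getpixel((0, 3)))  # (0, 255, 0): the mark moved to the bottom-left
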
test
|
five_crop
|
Crop the given PIL Image into four corners and the central crop.
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
Returns:
tuple: tuple (tl, tr, bl, br, center)
Corresponding top left, top right, bottom left, bottom right and center crop.
|
torchvision/transforms/functional.py
|
def five_crop(img, size):
"""Crop the given PIL Image into four corners and the central crop.
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
Returns:
tuple: tuple (tl, tr, bl, br, center)
Corresponding top left, top right, bottom left, bottom right and center crop.
"""
if isinstance(size, numbers.Number):
size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
w, h = img.size
crop_h, crop_w = size
if crop_w > w or crop_h > h:
raise ValueError("Requested crop size {} is bigger than input size {}".format(size,
(h, w)))
tl = img.crop((0, 0, crop_w, crop_h))
tr = img.crop((w - crop_w, 0, w, crop_h))
bl = img.crop((0, h - crop_h, crop_w, h))
br = img.crop((w - crop_w, h - crop_h, w, h))
center = center_crop(img, (crop_h, crop_w))
return (tl, tr, bl, br, center)
|
def five_crop(img, size):
"""Crop the given PIL Image into four corners and the central crop.
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
Returns:
tuple: tuple (tl, tr, bl, br, center)
Corresponding top left, top right, bottom left, bottom right and center crop.
"""
if isinstance(size, numbers.Number):
size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
w, h = img.size
crop_h, crop_w = size
if crop_w > w or crop_h > h:
raise ValueError("Requested crop size {} is bigger than input size {}".format(size,
(h, w)))
tl = img.crop((0, 0, crop_w, crop_h))
tr = img.crop((w - crop_w, 0, w, crop_h))
bl = img.crop((0, h - crop_h, crop_w, h))
br = img.crop((w - crop_w, h - crop_h, w, h))
center = center_crop(img, (crop_h, crop_w))
return (tl, tr, bl, br, center)
|
[
"Crop",
"the",
"given",
"PIL",
"Image",
"into",
"four",
"corners",
"and",
"the",
"central",
"crop",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L468-L499
|
[
"def",
"five_crop",
"(",
"img",
",",
"size",
")",
":",
"if",
"isinstance",
"(",
"size",
",",
"numbers",
".",
"Number",
")",
":",
"size",
"=",
"(",
"int",
"(",
"size",
")",
",",
"int",
"(",
"size",
")",
")",
"else",
":",
"assert",
"len",
"(",
"size",
")",
"==",
"2",
",",
"\"Please provide only two dimensions (h, w) for size.\"",
"w",
",",
"h",
"=",
"img",
".",
"size",
"crop_h",
",",
"crop_w",
"=",
"size",
"if",
"crop_w",
">",
"w",
"or",
"crop_h",
">",
"h",
":",
"raise",
"ValueError",
"(",
"\"Requested crop size {} is bigger than input size {}\"",
".",
"format",
"(",
"size",
",",
"(",
"h",
",",
"w",
")",
")",
")",
"tl",
"=",
"img",
".",
"crop",
"(",
"(",
"0",
",",
"0",
",",
"crop_w",
",",
"crop_h",
")",
")",
"tr",
"=",
"img",
".",
"crop",
"(",
"(",
"w",
"-",
"crop_w",
",",
"0",
",",
"w",
",",
"crop_h",
")",
")",
"bl",
"=",
"img",
".",
"crop",
"(",
"(",
"0",
",",
"h",
"-",
"crop_h",
",",
"crop_w",
",",
"h",
")",
")",
"br",
"=",
"img",
".",
"crop",
"(",
"(",
"w",
"-",
"crop_w",
",",
"h",
"-",
"crop_h",
",",
"w",
",",
"h",
")",
")",
"center",
"=",
"center_crop",
"(",
"img",
",",
"(",
"crop_h",
",",
"crop_w",
")",
")",
"return",
"(",
"tl",
",",
"tr",
",",
"bl",
",",
"br",
",",
"center",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
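A hedged sketch of five_crop; an int size yields five square crops:

    from PIL import Image
    from torchvision.transforms import functional as F

    img = Image.new('RGB', (64, 48))
    tl, tr, bl, br, center = F.five_crop(img, 32)  # four corners plus the center crop
    assert all(c.size == (32, 32) for c in (tl, tr, bl, br, center))
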
test
|
ten_crop
|
r"""Crop the given PIL Image into four corners and the central crop plus the
flipped version of these (horizontal flipping is used by default).
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
vertical_flip (bool): Use vertical flipping instead of horizontal
Returns:
tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
Corresponding top left, top right, bottom left, bottom right and center crop
and same for the flipped image.
|
torchvision/transforms/functional.py
|
def ten_crop(img, size, vertical_flip=False):
r"""Crop the given PIL Image into four corners and the central crop plus the
flipped version of these (horizontal flipping is used by default).
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
vertical_flip (bool): Use vertical flipping instead of horizontal
Returns:
tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
Corresponding top left, top right, bottom left, bottom right and center crop
and same for the flipped image.
"""
if isinstance(size, numbers.Number):
size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
first_five = five_crop(img, size)
if vertical_flip:
img = vflip(img)
else:
img = hflip(img)
second_five = five_crop(img, size)
return first_five + second_five
|
def ten_crop(img, size, vertical_flip=False):
r"""Crop the given PIL Image into four corners and the central crop plus the
flipped version of these (horizontal flipping is used by default).
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
vertical_flip (bool): Use vertical flipping instead of horizontal
Returns:
tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
Corresponding top left, top right, bottom left, bottom right and center crop
and same for the flipped image.
"""
if isinstance(size, numbers.Number):
size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
first_five = five_crop(img, size)
if vertical_flip:
img = vflip(img)
else:
img = hflip(img)
second_five = five_crop(img, size)
return first_five + second_five
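A minimal usage sketch under the same assumptions ('example.jpg' is hypothetical):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.open('example.jpg')   # hypothetical input file
crops = F.ten_crop(img, 224)      # horizontal flips by default
assert len(crops) == 10           # five crops plus their mirrored versions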
|
[
"r",
"Crop",
"the",
"given",
"PIL",
"Image",
"into",
"four",
"corners",
"and",
"the",
"central",
"crop",
"plus",
"the",
"flipped",
"version",
"of",
"these",
"(",
"horizontal",
"flipping",
"is",
"used",
"by",
"default",
")",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L502-L534
|
[
"def",
"ten_crop",
"(",
"img",
",",
"size",
",",
"vertical_flip",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"size",
",",
"numbers",
".",
"Number",
")",
":",
"size",
"=",
"(",
"int",
"(",
"size",
")",
",",
"int",
"(",
"size",
")",
")",
"else",
":",
"assert",
"len",
"(",
"size",
")",
"==",
"2",
",",
"\"Please provide only two dimensions (h, w) for size.\"",
"first_five",
"=",
"five_crop",
"(",
"img",
",",
"size",
")",
"if",
"vertical_flip",
":",
"img",
"=",
"vflip",
"(",
"img",
")",
"else",
":",
"img",
"=",
"hflip",
"(",
"img",
")",
"second_five",
"=",
"five_crop",
"(",
"img",
",",
"size",
")",
"return",
"first_five",
"+",
"second_five"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
adjust_brightness
|
Adjust brightness of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be
            any non-negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL Image: Brightness adjusted image.
|
torchvision/transforms/functional.py
|
def adjust_brightness(img, brightness_factor):
"""Adjust brightness of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be
            any non-negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL Image: Brightness adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightness_factor)
return img
|
def adjust_brightness(img, brightness_factor):
"""Adjust brightness of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be
            any non-negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL Image: Brightness adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightness_factor)
return img
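A minimal usage sketch (assumes torchvision with Pillow; the path is hypothetical):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.open('example.jpg')           # hypothetical input file
brighter = F.adjust_brightness(img, 1.5)  # 1.5x brighter
black = F.adjust_brightness(img, 0.0)     # factor 0 yields an all-black image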
|
[
"Adjust",
"brightness",
"of",
"an",
"Image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L537-L554
|
[
"def",
"adjust_brightness",
"(",
"img",
",",
"brightness_factor",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"enhancer",
"=",
"ImageEnhance",
".",
"Brightness",
"(",
"img",
")",
"img",
"=",
"enhancer",
".",
"enhance",
"(",
"brightness_factor",
")",
"return",
"img"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
adjust_contrast
|
Adjust contrast of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
            non-negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image: Contrast adjusted image.
|
torchvision/transforms/functional.py
|
def adjust_contrast(img, contrast_factor):
"""Adjust contrast of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
            non-negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image: Contrast adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img
|
def adjust_contrast(img, contrast_factor):
"""Adjust contrast of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
            non-negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image: Contrast adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img
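A minimal usage sketch under the same assumptions ('example.jpg' is hypothetical):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.open('example.jpg')           # hypothetical input file
punchier = F.adjust_contrast(img, 2.0)    # doubled contrast
flat = F.adjust_contrast(img, 0.0)        # factor 0 yields a solid gray image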
|
[
"Adjust",
"contrast",
"of",
"an",
"Image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L557-L574
|
[
"def",
"adjust_contrast",
"(",
"img",
",",
"contrast_factor",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"enhancer",
"=",
"ImageEnhance",
".",
"Contrast",
"(",
"img",
")",
"img",
"=",
"enhancer",
".",
"enhance",
"(",
"contrast_factor",
")",
"return",
"img"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
adjust_saturation
|
Adjust color saturation of an image.
Args:
img (PIL Image): PIL Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image: Saturation adjusted image.
|
torchvision/transforms/functional.py
|
def adjust_saturation(img, saturation_factor):
"""Adjust color saturation of an image.
Args:
img (PIL Image): PIL Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image: Saturation adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img
|
def adjust_saturation(img, saturation_factor):
"""Adjust color saturation of an image.
Args:
img (PIL Image): PIL Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image: Saturation adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img
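A minimal usage sketch under the same assumptions ('example.jpg' is hypothetical):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.open('example.jpg')           # hypothetical input file
vivid = F.adjust_saturation(img, 2.0)     # doubled saturation
mono = F.adjust_saturation(img, 0.0)      # factor 0 yields a black-and-white image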
|
[
"Adjust",
"color",
"saturation",
"of",
"an",
"image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L577-L594
|
[
"def",
"adjust_saturation",
"(",
"img",
",",
"saturation_factor",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"enhancer",
"=",
"ImageEnhance",
".",
"Color",
"(",
"img",
")",
"img",
"=",
"enhancer",
".",
"enhance",
"(",
"saturation_factor",
")",
"return",
"img"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
adjust_hue
|
Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See `Hue`_ for more details.
.. _Hue: https://en.wikipedia.org/wiki/Hue
Args:
img (PIL Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL Image: Hue adjusted image.
|
torchvision/transforms/functional.py
|
def adjust_hue(img, hue_factor):
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See `Hue`_ for more details.
.. _Hue: https://en.wikipedia.org/wiki/Hue
Args:
img (PIL Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL Image: Hue adjusted image.
"""
if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
input_mode = img.mode
if input_mode in {'L', '1', 'I', 'F'}:
return img
h, s, v = img.convert('HSV').split()
np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
with np.errstate(over='ignore'):
np_h += np.uint8(hue_factor * 255)
h = Image.fromarray(np_h, 'L')
img = Image.merge('HSV', (h, s, v)).convert(input_mode)
return img
|
def adjust_hue(img, hue_factor):
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See `Hue`_ for more details.
.. _Hue: https://en.wikipedia.org/wiki/Hue
Args:
img (PIL Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL Image: Hue adjusted image.
"""
if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
input_mode = img.mode
if input_mode in {'L', '1', 'I', 'F'}:
return img
h, s, v = img.convert('HSV').split()
np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
with np.errstate(over='ignore'):
np_h += np.uint8(hue_factor * 255)
h = Image.fromarray(np_h, 'L')
img = Image.merge('HSV', (h, s, v)).convert(input_mode)
return img
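A minimal usage sketch under the same assumptions ('example.jpg' is hypothetical); valid factors lie in [-0.5, 0.5]:

from PIL import Image
import torchvision.transforms.functional as F

img = Image.open('example.jpg')           # hypothetical input file
shifted = F.adjust_hue(img, 0.25)         # quarter-turn around the hue wheel
inverted = F.adjust_hue(img, 0.5)         # complementary colors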
|
[
"Adjust",
"hue",
"of",
"an",
"image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L597-L641
|
[
"def",
"adjust_hue",
"(",
"img",
",",
"hue_factor",
")",
":",
"if",
"not",
"(",
"-",
"0.5",
"<=",
"hue_factor",
"<=",
"0.5",
")",
":",
"raise",
"ValueError",
"(",
"'hue_factor is not in [-0.5, 0.5].'",
".",
"format",
"(",
"hue_factor",
")",
")",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"input_mode",
"=",
"img",
".",
"mode",
"if",
"input_mode",
"in",
"{",
"'L'",
",",
"'1'",
",",
"'I'",
",",
"'F'",
"}",
":",
"return",
"img",
"h",
",",
"s",
",",
"v",
"=",
"img",
".",
"convert",
"(",
"'HSV'",
")",
".",
"split",
"(",
")",
"np_h",
"=",
"np",
".",
"array",
"(",
"h",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"# uint8 addition take cares of rotation across boundaries",
"with",
"np",
".",
"errstate",
"(",
"over",
"=",
"'ignore'",
")",
":",
"np_h",
"+=",
"np",
".",
"uint8",
"(",
"hue_factor",
"*",
"255",
")",
"h",
"=",
"Image",
".",
"fromarray",
"(",
"np_h",
",",
"'L'",
")",
"img",
"=",
"Image",
".",
"merge",
"(",
"'HSV'",
",",
"(",
"h",
",",
"s",
",",
"v",
")",
")",
".",
"convert",
"(",
"input_mode",
")",
"return",
"img"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
adjust_gamma
|
r"""Perform gamma correction on an image.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
.. math::
I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}
See `Gamma Correction`_ for more details.
.. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction
Args:
img (PIL Image): PIL Image to be adjusted.
        gamma (float): Non-negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
gain (float): The constant multiplier.
|
torchvision/transforms/functional.py
|
def adjust_gamma(img, gamma, gain=1):
r"""Perform gamma correction on an image.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
.. math::
I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}
See `Gamma Correction`_ for more details.
.. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction
Args:
img (PIL Image): PIL Image to be adjusted.
        gamma (float): Non-negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
gain (float): The constant multiplier.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if gamma < 0:
raise ValueError('Gamma should be a non-negative real number')
input_mode = img.mode
img = img.convert('RGB')
gamma_map = [255 * gain * pow(ele / 255., gamma) for ele in range(256)] * 3
img = img.point(gamma_map) # use PIL's point-function to accelerate this part
img = img.convert(input_mode)
return img
|
def adjust_gamma(img, gamma, gain=1):
r"""Perform gamma correction on an image.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
.. math::
I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}
See `Gamma Correction`_ for more details.
.. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction
Args:
img (PIL Image): PIL Image to be adjusted.
        gamma (float): Non-negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
gain (float): The constant multiplier.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if gamma < 0:
raise ValueError('Gamma should be a non-negative real number')
input_mode = img.mode
img = img.convert('RGB')
gamma_map = [255 * gain * pow(ele / 255., gamma) for ele in range(256)] * 3
img = img.point(gamma_map) # use PIL's point-function to accelerate this part
img = img.convert(input_mode)
return img
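A minimal usage sketch under the same assumptions ('example.jpg' is hypothetical):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.open('example.jpg')           # hypothetical input file
darker = F.adjust_gamma(img, 2.0)         # gamma > 1 darkens shadows
lighter = F.adjust_gamma(img, 0.5)        # gamma < 1 lightens dark regions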
|
[
"r",
"Perform",
"gamma",
"correction",
"on",
"an",
"image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L644-L677
|
[
"def",
"adjust_gamma",
"(",
"img",
",",
"gamma",
",",
"gain",
"=",
"1",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"if",
"gamma",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'Gamma should be a non-negative real number'",
")",
"input_mode",
"=",
"img",
".",
"mode",
"img",
"=",
"img",
".",
"convert",
"(",
"'RGB'",
")",
"gamma_map",
"=",
"[",
"255",
"*",
"gain",
"*",
"pow",
"(",
"ele",
"/",
"255.",
",",
"gamma",
")",
"for",
"ele",
"in",
"range",
"(",
"256",
")",
"]",
"*",
"3",
"img",
"=",
"img",
".",
"point",
"(",
"gamma_map",
")",
"# use PIL's point-function to accelerate this part",
"img",
"=",
"img",
".",
"convert",
"(",
"input_mode",
")",
"return",
"img"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
rotate
|
Rotate the image by angle.
Args:
img (PIL Image): PIL Image to be rotated.
        angle (float or int): Rotation angle in degrees, counter clockwise.
resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
An optional resampling filter. See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
|
torchvision/transforms/functional.py
|
def rotate(img, angle, resample=False, expand=False, center=None):
"""Rotate the image by angle.
Args:
img (PIL Image): PIL Image to be rotated.
        angle (float or int): Rotation angle in degrees, counter clockwise.
resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
An optional resampling filter. See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.rotate(angle, resample, expand, center)
|
def rotate(img, angle, resample=False, expand=False, center=None):
"""Rotate the image by angle.
Args:
img (PIL Image): PIL Image to be rotated.
        angle (float or int): Rotation angle in degrees, counter clockwise.
resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
An optional resampling filter. See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.rotate(angle, resample, expand, center)
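A minimal usage sketch under the same assumptions ('example.jpg' is hypothetical):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.open('example.jpg')           # hypothetical input file
clipped = F.rotate(img, 45)               # output keeps the input size; corners are cut
grown = F.rotate(img, 45, expand=True)    # canvas expands to hold the full rotation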
|
[
"Rotate",
"the",
"image",
"by",
"angle",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L680-L705
|
[
"def",
"rotate",
"(",
"img",
",",
"angle",
",",
"resample",
"=",
"False",
",",
"expand",
"=",
"False",
",",
"center",
"=",
"None",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"return",
"img",
".",
"rotate",
"(",
"angle",
",",
"resample",
",",
"expand",
",",
"center",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
affine
|
Apply affine transformation on the image keeping image center invariant
Args:
img (PIL Image): PIL Image to be rotated.
angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
scale (float): overall scale
shear (float): shear angle value in degrees between -180 to 180, clockwise direction.
resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
An optional resampling filter.
See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
|
torchvision/transforms/functional.py
|
def affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):
"""Apply affine transformation on the image keeping image center invariant
Args:
img (PIL Image): PIL Image to be rotated.
angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
scale (float): overall scale
shear (float): shear angle value in degrees between -180 to 180, clockwise direction.
resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
An optional resampling filter.
See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"Argument translate should be a list or tuple of length 2"
assert scale > 0.0, "Argument scale should be positive"
output_size = img.size
center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5)
matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
kwargs = {"fillcolor": fillcolor} if PILLOW_VERSION[0] == '5' else {}
return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs)
|
def affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):
"""Apply affine transformation on the image keeping image center invariant
Args:
img (PIL Image): PIL Image to be rotated.
angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
scale (float): overall scale
shear (float): shear angle value in degrees between -180 to 180, clockwise direction.
resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
An optional resampling filter.
See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"Argument translate should be a list or tuple of length 2"
assert scale > 0.0, "Argument scale should be positive"
output_size = img.size
center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5)
matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
kwargs = {"fillcolor": fillcolor} if PILLOW_VERSION[0] == '5' else {}
return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs)
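A minimal usage sketch under the same assumptions ('example.jpg' is hypothetical):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.open('example.jpg')           # hypothetical input file
out = F.affine(img, angle=10, translate=(5, -5), scale=1.2, shear=0)
# rotate 10 degrees, translate by (5, -5) pixels, zoom 1.2x, no shear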
|
[
"Apply",
"affine",
"transformation",
"on",
"the",
"image",
"keeping",
"image",
"center",
"invariant"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L743-L770
|
[
"def",
"affine",
"(",
"img",
",",
"angle",
",",
"translate",
",",
"scale",
",",
"shear",
",",
"resample",
"=",
"0",
",",
"fillcolor",
"=",
"None",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"assert",
"isinstance",
"(",
"translate",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"len",
"(",
"translate",
")",
"==",
"2",
",",
"\"Argument translate should be a list or tuple of length 2\"",
"assert",
"scale",
">",
"0.0",
",",
"\"Argument scale should be positive\"",
"output_size",
"=",
"img",
".",
"size",
"center",
"=",
"(",
"img",
".",
"size",
"[",
"0",
"]",
"*",
"0.5",
"+",
"0.5",
",",
"img",
".",
"size",
"[",
"1",
"]",
"*",
"0.5",
"+",
"0.5",
")",
"matrix",
"=",
"_get_inverse_affine_matrix",
"(",
"center",
",",
"angle",
",",
"translate",
",",
"scale",
",",
"shear",
")",
"kwargs",
"=",
"{",
"\"fillcolor\"",
":",
"fillcolor",
"}",
"if",
"PILLOW_VERSION",
"[",
"0",
"]",
"==",
"'5'",
"else",
"{",
"}",
"return",
"img",
".",
"transform",
"(",
"output_size",
",",
"Image",
".",
"AFFINE",
",",
"matrix",
",",
"resample",
",",
"*",
"*",
"kwargs",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
to_grayscale
|
Convert image to grayscale version of image.
Args:
img (PIL Image): Image to be converted to grayscale.
Returns:
PIL Image: Grayscale version of the image.
if num_output_channels = 1 : returned image is single channel
if num_output_channels = 3 : returned image is 3 channel with r = g = b
|
torchvision/transforms/functional.py
|
def to_grayscale(img, num_output_channels=1):
"""Convert image to grayscale version of image.
Args:
img (PIL Image): Image to be converted to grayscale.
Returns:
PIL Image: Grayscale version of the image.
if num_output_channels = 1 : returned image is single channel
if num_output_channels = 3 : returned image is 3 channel with r = g = b
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if num_output_channels == 1:
img = img.convert('L')
elif num_output_channels == 3:
img = img.convert('L')
np_img = np.array(img, dtype=np.uint8)
np_img = np.dstack([np_img, np_img, np_img])
img = Image.fromarray(np_img, 'RGB')
else:
raise ValueError('num_output_channels should be either 1 or 3')
return img
|
def to_grayscale(img, num_output_channels=1):
"""Convert image to grayscale version of image.
Args:
img (PIL Image): Image to be converted to grayscale.
Returns:
PIL Image: Grayscale version of the image.
if num_output_channels = 1 : returned image is single channel
if num_output_channels = 3 : returned image is 3 channel with r = g = b
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if num_output_channels == 1:
img = img.convert('L')
elif num_output_channels == 3:
img = img.convert('L')
np_img = np.array(img, dtype=np.uint8)
np_img = np.dstack([np_img, np_img, np_img])
img = Image.fromarray(np_img, 'RGB')
else:
raise ValueError('num_output_channels should be either 1 or 3')
return img
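A minimal usage sketch under the same assumptions ('example.jpg' is hypothetical):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.open('example.jpg')                      # hypothetical input file
gray1 = F.to_grayscale(img)                          # single-channel ('L') output
gray3 = F.to_grayscale(img, num_output_channels=3)   # 3-channel with r == g == b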
|
[
"Convert",
"image",
"to",
"grayscale",
"version",
"of",
"image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L773-L798
|
[
"def",
"to_grayscale",
"(",
"img",
",",
"num_output_channels",
"=",
"1",
")",
":",
"if",
"not",
"_is_pil_image",
"(",
"img",
")",
":",
"raise",
"TypeError",
"(",
"'img should be PIL Image. Got {}'",
".",
"format",
"(",
"type",
"(",
"img",
")",
")",
")",
"if",
"num_output_channels",
"==",
"1",
":",
"img",
"=",
"img",
".",
"convert",
"(",
"'L'",
")",
"elif",
"num_output_channels",
"==",
"3",
":",
"img",
"=",
"img",
".",
"convert",
"(",
"'L'",
")",
"np_img",
"=",
"np",
".",
"array",
"(",
"img",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"np_img",
"=",
"np",
".",
"dstack",
"(",
"[",
"np_img",
",",
"np_img",
",",
"np_img",
"]",
")",
"img",
"=",
"Image",
".",
"fromarray",
"(",
"np_img",
",",
"'RGB'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'num_output_channels should be either 1 or 3'",
")",
"return",
"img"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
make_grid
|
Make a grid of images.
Args:
tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
or a list of images all of the same size.
nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is (B / nrow, nrow). Default is 8.
padding (int, optional): amount of padding. Default is 2.
normalize (bool, optional): If True, shift the image to the range (0, 1),
by subtracting the minimum and dividing by the maximum pixel value.
range (tuple, optional): tuple (min, max) where min and max are numbers,
then these numbers are used to normalize the image. By default, min and max
are computed from the tensor.
scale_each (bool, optional): If True, scale each image in the batch of
images separately rather than the (min, max) over all images.
pad_value (float, optional): Value for the padded pixels.
Example:
See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
|
torchvision/utils.py
|
def make_grid(tensor, nrow=8, padding=2,
normalize=False, range=None, scale_each=False, pad_value=0):
"""Make a grid of images.
Args:
tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
or a list of images all of the same size.
nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is (B / nrow, nrow). Default is 8.
padding (int, optional): amount of padding. Default is 2.
normalize (bool, optional): If True, shift the image to the range (0, 1),
by subtracting the minimum and dividing by the maximum pixel value.
range (tuple, optional): tuple (min, max) where min and max are numbers,
then these numbers are used to normalize the image. By default, min and max
are computed from the tensor.
scale_each (bool, optional): If True, scale each image in the batch of
images separately rather than the (min, max) over all images.
pad_value (float, optional): Value for the padded pixels.
Example:
See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
"""
if not (torch.is_tensor(tensor) or
(isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
# if list of tensors, convert to a 4D mini-batch Tensor
if isinstance(tensor, list):
tensor = torch.stack(tensor, dim=0)
if tensor.dim() == 2: # single image H x W
tensor = tensor.unsqueeze(0)
if tensor.dim() == 3: # single image
if tensor.size(0) == 1: # if single-channel, convert to 3-channel
tensor = torch.cat((tensor, tensor, tensor), 0)
tensor = tensor.unsqueeze(0)
if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
tensor = torch.cat((tensor, tensor, tensor), 1)
if normalize is True:
tensor = tensor.clone() # avoid modifying tensor in-place
if range is not None:
assert isinstance(range, tuple), \
"range has to be a tuple (min, max) if specified. min and max are numbers"
def norm_ip(img, min, max):
img.clamp_(min=min, max=max)
img.add_(-min).div_(max - min + 1e-5)
def norm_range(t, range):
if range is not None:
norm_ip(t, range[0], range[1])
else:
norm_ip(t, float(t.min()), float(t.max()))
if scale_each is True:
for t in tensor: # loop over mini-batch dimension
norm_range(t, range)
else:
norm_range(tensor, range)
if tensor.size(0) == 1:
return tensor.squeeze()
# make the mini-batch of images into a grid
nmaps = tensor.size(0)
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil(float(nmaps) / xmaps))
height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)
k = 0
for y in irange(ymaps):
for x in irange(xmaps):
if k >= nmaps:
break
grid.narrow(1, y * height + padding, height - padding)\
.narrow(2, x * width + padding, width - padding)\
.copy_(tensor[k])
k = k + 1
return grid
|
def make_grid(tensor, nrow=8, padding=2,
normalize=False, range=None, scale_each=False, pad_value=0):
"""Make a grid of images.
Args:
tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
or a list of images all of the same size.
nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is (B / nrow, nrow). Default is 8.
padding (int, optional): amount of padding. Default is 2.
normalize (bool, optional): If True, shift the image to the range (0, 1),
by subtracting the minimum and dividing by the maximum pixel value.
range (tuple, optional): tuple (min, max) where min and max are numbers,
then these numbers are used to normalize the image. By default, min and max
are computed from the tensor.
scale_each (bool, optional): If True, scale each image in the batch of
images separately rather than the (min, max) over all images.
pad_value (float, optional): Value for the padded pixels.
Example:
See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
"""
if not (torch.is_tensor(tensor) or
(isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
# if list of tensors, convert to a 4D mini-batch Tensor
if isinstance(tensor, list):
tensor = torch.stack(tensor, dim=0)
if tensor.dim() == 2: # single image H x W
tensor = tensor.unsqueeze(0)
if tensor.dim() == 3: # single image
if tensor.size(0) == 1: # if single-channel, convert to 3-channel
tensor = torch.cat((tensor, tensor, tensor), 0)
tensor = tensor.unsqueeze(0)
if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
tensor = torch.cat((tensor, tensor, tensor), 1)
if normalize is True:
tensor = tensor.clone() # avoid modifying tensor in-place
if range is not None:
assert isinstance(range, tuple), \
"range has to be a tuple (min, max) if specified. min and max are numbers"
def norm_ip(img, min, max):
img.clamp_(min=min, max=max)
img.add_(-min).div_(max - min + 1e-5)
def norm_range(t, range):
if range is not None:
norm_ip(t, range[0], range[1])
else:
norm_ip(t, float(t.min()), float(t.max()))
if scale_each is True:
for t in tensor: # loop over mini-batch dimension
norm_range(t, range)
else:
norm_range(tensor, range)
if tensor.size(0) == 1:
return tensor.squeeze()
# make the mini-batch of images into a grid
nmaps = tensor.size(0)
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil(float(nmaps) / xmaps))
height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)
k = 0
for y in irange(ymaps):
for x in irange(xmaps):
if k >= nmaps:
break
grid.narrow(1, y * height + padding, height - padding)\
.narrow(2, x * width + padding, width - padding)\
.copy_(tensor[k])
k = k + 1
return grid
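A minimal usage sketch (not from the source); the output shape below follows from the code above: each 32x32 image is padded to 34x34, tiled 4x4, plus one leading padding row/column:

import torch
from torchvision.utils import make_grid

batch = torch.rand(16, 3, 32, 32)            # 16 random RGB images
grid = make_grid(batch, nrow=4, padding=2)   # 4x4 tiling -> shape (3, 138, 138)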
|
[
"Make",
"a",
"grid",
"of",
"images",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/utils.py#L6-L87
|
[
"def",
"make_grid",
"(",
"tensor",
",",
"nrow",
"=",
"8",
",",
"padding",
"=",
"2",
",",
"normalize",
"=",
"False",
",",
"range",
"=",
"None",
",",
"scale_each",
"=",
"False",
",",
"pad_value",
"=",
"0",
")",
":",
"if",
"not",
"(",
"torch",
".",
"is_tensor",
"(",
"tensor",
")",
"or",
"(",
"isinstance",
"(",
"tensor",
",",
"list",
")",
"and",
"all",
"(",
"torch",
".",
"is_tensor",
"(",
"t",
")",
"for",
"t",
"in",
"tensor",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'tensor or list of tensors expected, got {}'",
".",
"format",
"(",
"type",
"(",
"tensor",
")",
")",
")",
"# if list of tensors, convert to a 4D mini-batch Tensor",
"if",
"isinstance",
"(",
"tensor",
",",
"list",
")",
":",
"tensor",
"=",
"torch",
".",
"stack",
"(",
"tensor",
",",
"dim",
"=",
"0",
")",
"if",
"tensor",
".",
"dim",
"(",
")",
"==",
"2",
":",
"# single image H x W",
"tensor",
"=",
"tensor",
".",
"unsqueeze",
"(",
"0",
")",
"if",
"tensor",
".",
"dim",
"(",
")",
"==",
"3",
":",
"# single image",
"if",
"tensor",
".",
"size",
"(",
"0",
")",
"==",
"1",
":",
"# if single-channel, convert to 3-channel",
"tensor",
"=",
"torch",
".",
"cat",
"(",
"(",
"tensor",
",",
"tensor",
",",
"tensor",
")",
",",
"0",
")",
"tensor",
"=",
"tensor",
".",
"unsqueeze",
"(",
"0",
")",
"if",
"tensor",
".",
"dim",
"(",
")",
"==",
"4",
"and",
"tensor",
".",
"size",
"(",
"1",
")",
"==",
"1",
":",
"# single-channel images",
"tensor",
"=",
"torch",
".",
"cat",
"(",
"(",
"tensor",
",",
"tensor",
",",
"tensor",
")",
",",
"1",
")",
"if",
"normalize",
"is",
"True",
":",
"tensor",
"=",
"tensor",
".",
"clone",
"(",
")",
"# avoid modifying tensor in-place",
"if",
"range",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"range",
",",
"tuple",
")",
",",
"\"range has to be a tuple (min, max) if specified. min and max are numbers\"",
"def",
"norm_ip",
"(",
"img",
",",
"min",
",",
"max",
")",
":",
"img",
".",
"clamp_",
"(",
"min",
"=",
"min",
",",
"max",
"=",
"max",
")",
"img",
".",
"add_",
"(",
"-",
"min",
")",
".",
"div_",
"(",
"max",
"-",
"min",
"+",
"1e-5",
")",
"def",
"norm_range",
"(",
"t",
",",
"range",
")",
":",
"if",
"range",
"is",
"not",
"None",
":",
"norm_ip",
"(",
"t",
",",
"range",
"[",
"0",
"]",
",",
"range",
"[",
"1",
"]",
")",
"else",
":",
"norm_ip",
"(",
"t",
",",
"float",
"(",
"t",
".",
"min",
"(",
")",
")",
",",
"float",
"(",
"t",
".",
"max",
"(",
")",
")",
")",
"if",
"scale_each",
"is",
"True",
":",
"for",
"t",
"in",
"tensor",
":",
"# loop over mini-batch dimension",
"norm_range",
"(",
"t",
",",
"range",
")",
"else",
":",
"norm_range",
"(",
"tensor",
",",
"range",
")",
"if",
"tensor",
".",
"size",
"(",
"0",
")",
"==",
"1",
":",
"return",
"tensor",
".",
"squeeze",
"(",
")",
"# make the mini-batch of images into a grid",
"nmaps",
"=",
"tensor",
".",
"size",
"(",
"0",
")",
"xmaps",
"=",
"min",
"(",
"nrow",
",",
"nmaps",
")",
"ymaps",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"float",
"(",
"nmaps",
")",
"/",
"xmaps",
")",
")",
"height",
",",
"width",
"=",
"int",
"(",
"tensor",
".",
"size",
"(",
"2",
")",
"+",
"padding",
")",
",",
"int",
"(",
"tensor",
".",
"size",
"(",
"3",
")",
"+",
"padding",
")",
"grid",
"=",
"tensor",
".",
"new_full",
"(",
"(",
"3",
",",
"height",
"*",
"ymaps",
"+",
"padding",
",",
"width",
"*",
"xmaps",
"+",
"padding",
")",
",",
"pad_value",
")",
"k",
"=",
"0",
"for",
"y",
"in",
"irange",
"(",
"ymaps",
")",
":",
"for",
"x",
"in",
"irange",
"(",
"xmaps",
")",
":",
"if",
"k",
">=",
"nmaps",
":",
"break",
"grid",
".",
"narrow",
"(",
"1",
",",
"y",
"*",
"height",
"+",
"padding",
",",
"height",
"-",
"padding",
")",
".",
"narrow",
"(",
"2",
",",
"x",
"*",
"width",
"+",
"padding",
",",
"width",
"-",
"padding",
")",
".",
"copy_",
"(",
"tensor",
"[",
"k",
"]",
")",
"k",
"=",
"k",
"+",
"1",
"return",
"grid"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
save_image
|
Save a given Tensor into an image file.
Args:
tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
saves the tensor as a grid of images by calling ``make_grid``.
        filename (string): Path to the output image file.
        The remaining arguments (nrow, padding, normalize, range, scale_each,
        pad_value) are documented in ``make_grid``.
|
torchvision/utils.py
|
def save_image(tensor, filename, nrow=8, padding=2,
normalize=False, range=None, scale_each=False, pad_value=0):
"""Save a given Tensor into an image file.
Args:
tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
saves the tensor as a grid of images by calling ``make_grid``.
        filename (string): Path to the output image file.
        The remaining arguments (nrow, padding, normalize, range, scale_each,
        pad_value) are documented in ``make_grid``.
"""
from PIL import Image
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
normalize=normalize, range=range, scale_each=scale_each)
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
im = Image.fromarray(ndarr)
im.save(filename)
|
def save_image(tensor, filename, nrow=8, padding=2,
normalize=False, range=None, scale_each=False, pad_value=0):
"""Save a given Tensor into an image file.
Args:
tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
saves the tensor as a grid of images by calling ``make_grid``.
        filename (string): Path to the output image file.
        The remaining arguments (nrow, padding, normalize, range, scale_each,
        pad_value) are documented in ``make_grid``.
"""
from PIL import Image
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
normalize=normalize, range=range, scale_each=scale_each)
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
im = Image.fromarray(ndarr)
im.save(filename)
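A minimal usage sketch (not from the source; the output path is hypothetical):

import torch
from torchvision.utils import save_image

batch = torch.rand(16, 3, 32, 32)
save_image(batch, 'grid.png', nrow=4)     # writes a 4x4 grid; path is hypothetical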
|
[
"Save",
"a",
"given",
"Tensor",
"into",
"an",
"image",
"file",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/utils.py#L90-L105
|
[
"def",
"save_image",
"(",
"tensor",
",",
"filename",
",",
"nrow",
"=",
"8",
",",
"padding",
"=",
"2",
",",
"normalize",
"=",
"False",
",",
"range",
"=",
"None",
",",
"scale_each",
"=",
"False",
",",
"pad_value",
"=",
"0",
")",
":",
"from",
"PIL",
"import",
"Image",
"grid",
"=",
"make_grid",
"(",
"tensor",
",",
"nrow",
"=",
"nrow",
",",
"padding",
"=",
"padding",
",",
"pad_value",
"=",
"pad_value",
",",
"normalize",
"=",
"normalize",
",",
"range",
"=",
"range",
",",
"scale_each",
"=",
"scale_each",
")",
"# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer",
"ndarr",
"=",
"grid",
".",
"mul_",
"(",
"255",
")",
".",
"add_",
"(",
"0.5",
")",
".",
"clamp_",
"(",
"0",
",",
"255",
")",
".",
"permute",
"(",
"1",
",",
"2",
",",
"0",
")",
".",
"to",
"(",
"'cpu'",
",",
"torch",
".",
"uint8",
")",
".",
"numpy",
"(",
")",
"im",
"=",
"Image",
".",
"fromarray",
"(",
"ndarr",
")",
"im",
".",
"save",
"(",
"filename",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
DatasetFolder._find_classes
|
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
|
torchvision/datasets/folder.py
|
def _find_classes(self, dir):
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
|
def _find_classes(self, dir):
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
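A standalone sketch of the same scan (Python 3.5+ branch only, not part of the source), assuming a hypothetical layout root/<class_name>/<images>:

import os

def find_classes(root):
    # Sorted class-folder names and a name -> index mapping, as above.
    classes = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
    return classes, {cls: idx for idx, cls in enumerate(classes)}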
|
[
"Finds",
"the",
"class",
"folders",
"in",
"a",
"dataset",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/folder.py#L107-L127
|
[
"def",
"_find_classes",
"(",
"self",
",",
"dir",
")",
":",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"5",
")",
":",
"# Faster and available in Python 3.5 and above",
"classes",
"=",
"[",
"d",
".",
"name",
"for",
"d",
"in",
"os",
".",
"scandir",
"(",
"dir",
")",
"if",
"d",
".",
"is_dir",
"(",
")",
"]",
"else",
":",
"classes",
"=",
"[",
"d",
"for",
"d",
"in",
"os",
".",
"listdir",
"(",
"dir",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"d",
")",
")",
"]",
"classes",
".",
"sort",
"(",
")",
"class_to_idx",
"=",
"{",
"classes",
"[",
"i",
"]",
":",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"classes",
")",
")",
"}",
"return",
"classes",
",",
"class_to_idx"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
read_image_file
|
Return a Tensor containing the patches
|
torchvision/datasets/phototour.py
|
def read_image_file(data_dir, image_ext, n):
"""Return a Tensor containing the patches
"""
def PIL2array(_img):
"""Convert PIL image type to numpy 2D array
"""
return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)
def find_files(_data_dir, _image_ext):
"""Return a list with the file names of the images containing the patches
"""
files = []
# find those files with the specified extension
for file_dir in os.listdir(_data_dir):
if file_dir.endswith(_image_ext):
files.append(os.path.join(_data_dir, file_dir))
        return sorted(files)  # sort files in ascending order to keep relations
patches = []
list_files = find_files(data_dir, image_ext)
for fpath in list_files:
img = Image.open(fpath)
for y in range(0, 1024, 64):
for x in range(0, 1024, 64):
patch = img.crop((x, y, x + 64, y + 64))
patches.append(PIL2array(patch))
return torch.ByteTensor(np.array(patches[:n]))
|
def read_image_file(data_dir, image_ext, n):
"""Return a Tensor containing the patches
"""
def PIL2array(_img):
"""Convert PIL image type to numpy 2D array
"""
return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)
def find_files(_data_dir, _image_ext):
"""Return a list with the file names of the images containing the patches
"""
files = []
# find those files with the specified extension
for file_dir in os.listdir(_data_dir):
if file_dir.endswith(_image_ext):
files.append(os.path.join(_data_dir, file_dir))
        return sorted(files)  # sort files in ascending order to keep relations
patches = []
list_files = find_files(data_dir, image_ext)
for fpath in list_files:
img = Image.open(fpath)
for y in range(0, 1024, 64):
for x in range(0, 1024, 64):
patch = img.crop((x, y, x + 64, y + 64))
patches.append(PIL2array(patch))
return torch.ByteTensor(np.array(patches[:n]))
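A quick sanity check on the patch arithmetic implied by the nested loops above (each bitmap is a 1024x1024 mosaic of 64x64 patches):

patches_per_image = (1024 // 64) ** 2   # 16x16 grid -> 256 patches per image file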
|
[
"Return",
"a",
"Tensor",
"containing",
"the",
"patches"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/phototour.py#L158-L186
|
[
"def",
"read_image_file",
"(",
"data_dir",
",",
"image_ext",
",",
"n",
")",
":",
"def",
"PIL2array",
"(",
"_img",
")",
":",
"\"\"\"Convert PIL image type to numpy 2D array\n \"\"\"",
"return",
"np",
".",
"array",
"(",
"_img",
".",
"getdata",
"(",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
".",
"reshape",
"(",
"64",
",",
"64",
")",
"def",
"find_files",
"(",
"_data_dir",
",",
"_image_ext",
")",
":",
"\"\"\"Return a list with the file names of the images containing the patches\n \"\"\"",
"files",
"=",
"[",
"]",
"# find those files with the specified extension",
"for",
"file_dir",
"in",
"os",
".",
"listdir",
"(",
"_data_dir",
")",
":",
"if",
"file_dir",
".",
"endswith",
"(",
"_image_ext",
")",
":",
"files",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"_data_dir",
",",
"file_dir",
")",
")",
"return",
"sorted",
"(",
"files",
")",
"# sort files in ascend order to keep relations",
"patches",
"=",
"[",
"]",
"list_files",
"=",
"find_files",
"(",
"data_dir",
",",
"image_ext",
")",
"for",
"fpath",
"in",
"list_files",
":",
"img",
"=",
"Image",
".",
"open",
"(",
"fpath",
")",
"for",
"y",
"in",
"range",
"(",
"0",
",",
"1024",
",",
"64",
")",
":",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"1024",
",",
"64",
")",
":",
"patch",
"=",
"img",
".",
"crop",
"(",
"(",
"x",
",",
"y",
",",
"x",
"+",
"64",
",",
"y",
"+",
"64",
")",
")",
"patches",
".",
"append",
"(",
"PIL2array",
"(",
"patch",
")",
")",
"return",
"torch",
".",
"ByteTensor",
"(",
"np",
".",
"array",
"(",
"patches",
"[",
":",
"n",
"]",
")",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
read_info_file
|
Return a Tensor containing the list of labels
Read the file and keep only the ID of the 3D point.
|
torchvision/datasets/phototour.py
|
def read_info_file(data_dir, info_file):
"""Return a Tensor containing the list of labels
Read the file and keep only the ID of the 3D point.
"""
labels = []
with open(os.path.join(data_dir, info_file), 'r') as f:
labels = [int(line.split()[0]) for line in f]
return torch.LongTensor(labels)
|
def read_info_file(data_dir, info_file):
"""Return a Tensor containing the list of labels
Read the file and keep only the ID of the 3D point.
"""
labels = []
with open(os.path.join(data_dir, info_file), 'r') as f:
labels = [int(line.split()[0]) for line in f]
return torch.LongTensor(labels)
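A sketch of the parsing over hypothetical file contents; only the first column (the 3D point ID) is kept:

import torch

lines = ['0 0', '0 1', '1 0']   # hypothetical info-file lines
labels = torch.LongTensor([int(line.split()[0]) for line in lines])
# labels -> tensor([0, 0, 1]); equal IDs mark patches of the same 3D point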
|
[
"Return",
"a",
"Tensor",
"containing",
"the",
"list",
"of",
"labels",
"Read",
"the",
"file",
"and",
"keep",
"only",
"the",
"ID",
"of",
"the",
"3D",
"point",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/phototour.py#L189-L196
|
[
"def",
"read_info_file",
"(",
"data_dir",
",",
"info_file",
")",
":",
"labels",
"=",
"[",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"info_file",
")",
",",
"'r'",
")",
"as",
"f",
":",
"labels",
"=",
"[",
"int",
"(",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"for",
"line",
"in",
"f",
"]",
"return",
"torch",
".",
"LongTensor",
"(",
"labels",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
read_matches_files
|
Return a Tensor containing the ground truth matches
Read the file and keep only 3D point ID.
Matches are represented with a 1, non matches with a 0.
|
torchvision/datasets/phototour.py
|
def read_matches_files(data_dir, matches_file):
"""Return a Tensor containing the ground truth matches
Read the file and keep only 3D point ID.
Matches are represented with a 1, non matches with a 0.
"""
matches = []
with open(os.path.join(data_dir, matches_file), 'r') as f:
for line in f:
line_split = line.split()
matches.append([int(line_split[0]), int(line_split[3]),
int(line_split[1] == line_split[4])])
return torch.LongTensor(matches)
|
def read_matches_files(data_dir, matches_file):
"""Return a Tensor containing the ground truth matches
Read the file and keep only 3D point ID.
Matches are represented with a 1, non matches with a 0.
"""
matches = []
with open(os.path.join(data_dir, matches_file), 'r') as f:
for line in f:
line_split = line.split()
matches.append([int(line_split[0]), int(line_split[3]),
int(line_split[1] == line_split[4])])
return torch.LongTensor(matches)
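A sketch of how one (hypothetical) line is parsed: columns 0 and 3 are patch indices, columns 1 and 4 their 3D point IDs, and equal point IDs mean a match:

line = '10 7 0 25 7 0'   # hypothetical matches-file line
cols = line.split()
triple = [int(cols[0]), int(cols[3]), int(cols[1] == cols[4])]   # -> [10, 25, 1]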
|
[
"Return",
"a",
"Tensor",
"containing",
"the",
"ground",
"truth",
"matches",
"Read",
"the",
"file",
"and",
"keep",
"only",
"3D",
"point",
"ID",
".",
"Matches",
"are",
"represented",
"with",
"a",
"1",
"non",
"matches",
"with",
"a",
"0",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/phototour.py#L199-L210
|
[
"def",
"read_matches_files",
"(",
"data_dir",
",",
"matches_file",
")",
":",
"matches",
"=",
"[",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"matches_file",
")",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"line_split",
"=",
"line",
".",
"split",
"(",
")",
"matches",
".",
"append",
"(",
"[",
"int",
"(",
"line_split",
"[",
"0",
"]",
")",
",",
"int",
"(",
"line_split",
"[",
"3",
"]",
")",
",",
"int",
"(",
"line_split",
"[",
"1",
"]",
"==",
"line_split",
"[",
"4",
"]",
")",
"]",
")",
"return",
"torch",
".",
"LongTensor",
"(",
"matches",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
conv1x1
|
1x1 convolution
|
torchvision/models/resnet.py
|
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
|
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
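Usage sketch, assuming the conv1x1 definition above (with its torch.nn import as nn) is in scope:

import torch

conv = conv1x1(in_planes=64, out_planes=128, stride=2)
out = conv(torch.randn(1, 64, 56, 56))   # -> torch.Size([1, 128, 28, 28])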
|
[
"1x1",
"convolution"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/resnet.py#L24-L26
|
[
"def",
"conv1x1",
"(",
"in_planes",
",",
"out_planes",
",",
"stride",
"=",
"1",
")",
":",
"return",
"nn",
".",
"Conv2d",
"(",
"in_planes",
",",
"out_planes",
",",
"kernel_size",
"=",
"1",
",",
"stride",
"=",
"stride",
",",
"bias",
"=",
"False",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
accuracy
|
Computes the accuracy over the k top predictions for the specified values of k
|
references/classification/utils.py
|
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
|
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
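Usage sketch, assuming the accuracy function above is in scope:

import torch

logits = torch.randn(8, 10)                          # batch of 8 over 10 classes
targets = torch.randint(0, 10, (8,))
top1, top5 = accuracy(logits, targets, topk=(1, 5))  # percentages as 0-dim tensors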
|
[
"Computes",
"the",
"accuracy",
"over",
"the",
"k",
"top",
"predictions",
"for",
"the",
"specified",
"values",
"of",
"k"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/references/classification/utils.py#L147-L161
|
[
"def",
"accuracy",
"(",
"output",
",",
"target",
",",
"topk",
"=",
"(",
"1",
",",
")",
")",
":",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"maxk",
"=",
"max",
"(",
"topk",
")",
"batch_size",
"=",
"target",
".",
"size",
"(",
"0",
")",
"_",
",",
"pred",
"=",
"output",
".",
"topk",
"(",
"maxk",
",",
"1",
",",
"True",
",",
"True",
")",
"pred",
"=",
"pred",
".",
"t",
"(",
")",
"correct",
"=",
"pred",
".",
"eq",
"(",
"target",
"[",
"None",
"]",
")",
"res",
"=",
"[",
"]",
"for",
"k",
"in",
"topk",
":",
"correct_k",
"=",
"correct",
"[",
":",
"k",
"]",
".",
"flatten",
"(",
")",
".",
"sum",
"(",
"dtype",
"=",
"torch",
".",
"float32",
")",
"res",
".",
"append",
"(",
"correct_k",
"*",
"(",
"100.0",
"/",
"batch_size",
")",
")",
"return",
"res"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
setup_for_distributed
|
This function disables printing when not in master process
|
references/classification/utils.py
|
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
|
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
|
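A minimal sketch of how this is typically wired up, assuming a RANK environment variable as set by the usual torch.distributed launchers:

import os

rank = int(os.environ.get('RANK', 0))
setup_for_distributed(is_master=(rank == 0))

print('shown only on the master process')
print('shown everywhere', force=True)  # the patched print pops the force kwarg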
[
"This",
"function",
"disables",
"printing",
"when",
"not",
"in",
"master",
"process"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/references/classification/utils.py#L172-L184
|
[
"def",
"setup_for_distributed",
"(",
"is_master",
")",
":",
"import",
"builtins",
"as",
"__builtin__",
"builtin_print",
"=",
"__builtin__",
".",
"print",
"def",
"print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"force",
"=",
"kwargs",
".",
"pop",
"(",
"'force'",
",",
"False",
")",
"if",
"is_master",
"or",
"force",
":",
"builtin_print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"__builtin__",
".",
"print",
"=",
"print"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
SmoothedValue.synchronize_between_processes
|
Warning: does not synchronize the deque!
|
references/classification/utils.py
|
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
|
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
|
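The same pack/all-reduce/unpack pattern outside the class, as a sketch; it assumes an already-initialized process group and a CUDA device (e.g. under a torch.distributed launcher) and is not runnable standalone:

import torch
import torch.distributed as dist

def sync_count_total(count, total):
    # Pack per-process scalars into one tensor, sum across processes, unpack.
    t = torch.tensor([count, total], dtype=torch.float64, device='cuda')
    dist.barrier()
    dist.all_reduce(t)
    count, total = t.tolist()
    return int(count), total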
[
"Warning",
":",
"does",
"not",
"synchronize",
"the",
"deque!"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/references/classification/utils.py#L30-L41
|
[
"def",
"synchronize_between_processes",
"(",
"self",
")",
":",
"if",
"not",
"is_dist_avail_and_initialized",
"(",
")",
":",
"return",
"t",
"=",
"torch",
".",
"tensor",
"(",
"[",
"self",
".",
"count",
",",
"self",
".",
"total",
"]",
",",
"dtype",
"=",
"torch",
".",
"float64",
",",
"device",
"=",
"'cuda'",
")",
"dist",
".",
"barrier",
"(",
")",
"dist",
".",
"all_reduce",
"(",
"t",
")",
"t",
"=",
"t",
".",
"tolist",
"(",
")",
"self",
".",
"count",
"=",
"int",
"(",
"t",
"[",
"0",
"]",
")",
"self",
".",
"total",
"=",
"t",
"[",
"1",
"]"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
squeezenet1_1
|
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
torchvision/models/squeezenet.py
|
def squeezenet1_1(pretrained=False, **kwargs):
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = SqueezeNet(version=1.1, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1']))
return model
|
def squeezenet1_1(pretrained=False, **kwargs):
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = SqueezeNet(version=1.1, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1']))
return model
|
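A usage sketch against the torchvision version pinned by this record; pretrained=True would additionally download ImageNet weights:

import torch
from torchvision.models import squeezenet1_1

model = squeezenet1_1(pretrained=False)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000])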
[
"r",
"SqueezeNet",
"1",
".",
"1",
"model",
"from",
"the",
"official",
"SqueezeNet",
"repo",
"<https",
":",
"//",
"github",
".",
"com",
"/",
"DeepScale",
"/",
"SqueezeNet",
"/",
"tree",
"/",
"master",
"/",
"SqueezeNet_v1",
".",
"1",
">",
"_",
".",
"SqueezeNet",
"1",
".",
"1",
"has",
"2",
".",
"4x",
"less",
"computation",
"and",
"slightly",
"fewer",
"parameters",
"than",
"SqueezeNet",
"1",
".",
"0",
"without",
"sacrificing",
"accuracy",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/squeezenet.py#L117-L129
|
[
"def",
"squeezenet1_1",
"(",
"pretrained",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"model",
"=",
"SqueezeNet",
"(",
"version",
"=",
"1.1",
",",
"*",
"*",
"kwargs",
")",
"if",
"pretrained",
":",
"model",
".",
"load_state_dict",
"(",
"model_zoo",
".",
"load_url",
"(",
"model_urls",
"[",
"'squeezenet1_1'",
"]",
")",
")",
"return",
"model"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
makedir_exist_ok
|
Python2 support for os.makedirs(.., exist_ok=True)
|
torchvision/datasets/utils.py
|
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
|
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
|
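A quick sketch of the idempotent behaviour, plus the Python 3 one-liner it emulates:

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'nested', 'dirs')
makedir_exist_ok(path)  # creates the intermediate directories
makedir_exist_ok(path)  # second call is a no-op rather than raising
os.makedirs(path, exist_ok=True)  # the Python 3 equivalent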
[
"Python2",
"support",
"for",
"os",
".",
"makedirs",
"(",
"..",
"exist_ok",
"=",
"True",
")"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L41-L51
|
[
"def",
"makedir_exist_ok",
"(",
"dirpath",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"dirpath",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
":",
"pass",
"else",
":",
"raise"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
download_url
|
Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
|
torchvision/datasets/utils.py
|
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except OSError:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
|
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except OSError:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
|
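A hypothetical call showing the parameters; the URL below is a placeholder, not a real file:

download_url(
    url='https://example.com/data/archive.tar.gz',  # placeholder
    root='~/datasets',   # expanded via os.path.expanduser
    filename=None,       # defaults to the basename of the URL
    md5=None,            # skip the integrity check
)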
[
"Download",
"a",
"file",
"from",
"a",
"url",
"and",
"place",
"it",
"in",
"root",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L54-L90
|
[
"def",
"download_url",
"(",
"url",
",",
"root",
",",
"filename",
"=",
"None",
",",
"md5",
"=",
"None",
")",
":",
"from",
"six",
".",
"moves",
"import",
"urllib",
"root",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"root",
")",
"if",
"not",
"filename",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"url",
")",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
"makedir_exist_ok",
"(",
"root",
")",
"# downloads file",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fpath",
")",
"and",
"check_integrity",
"(",
"fpath",
",",
"md5",
")",
":",
"print",
"(",
"'Using downloaded and verified file: '",
"+",
"fpath",
")",
"else",
":",
"try",
":",
"print",
"(",
"'Downloading '",
"+",
"url",
"+",
"' to '",
"+",
"fpath",
")",
"urllib",
".",
"request",
".",
"urlretrieve",
"(",
"url",
",",
"fpath",
",",
"reporthook",
"=",
"gen_bar_updater",
"(",
")",
")",
"except",
"OSError",
":",
"if",
"url",
"[",
":",
"5",
"]",
"==",
"'https'",
":",
"url",
"=",
"url",
".",
"replace",
"(",
"'https:'",
",",
"'http:'",
")",
"print",
"(",
"'Failed download. Trying https -> http instead.'",
"' Downloading '",
"+",
"url",
"+",
"' to '",
"+",
"fpath",
")",
"urllib",
".",
"request",
".",
"urlretrieve",
"(",
"url",
",",
"fpath",
",",
"reporthook",
"=",
"gen_bar_updater",
"(",
")",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
list_dir
|
List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
|
torchvision/datasets/utils.py
|
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
|
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
|
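A self-contained sketch showing that only directories are returned:

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'a'))
os.makedirs(os.path.join(root, 'b'))
open(os.path.join(root, 'file.txt'), 'w').close()

print(sorted(list_dir(root)))               # ['a', 'b'] -- the file is skipped
print(sorted(list_dir(root, prefix=True)))  # same names joined onto root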
[
"List",
"all",
"directories",
"at",
"a",
"given",
"root"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L93-L112
|
[
"def",
"list_dir",
"(",
"root",
",",
"prefix",
"=",
"False",
")",
":",
"root",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"root",
")",
"directories",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"p",
":",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"p",
")",
")",
",",
"os",
".",
"listdir",
"(",
"root",
")",
")",
")",
"if",
"prefix",
"is",
"True",
":",
"directories",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"d",
")",
"for",
"d",
"in",
"directories",
"]",
"return",
"directories"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
list_files
|
List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
|
torchvision/datasets/utils.py
|
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
|
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
|
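A self-contained sketch; as the docstring notes, suffix may be a str or a tuple, exactly what str.endswith accepts:

import os
import tempfile

root = tempfile.mkdtemp()
for name in ('img1.png', 'img2.jpg', 'notes.txt'):
    open(os.path.join(root, name), 'w').close()

print(sorted(list_files(root, suffix=('.png', '.jpg'))))  # ['img1.png', 'img2.jpg']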
[
"List",
"all",
"files",
"ending",
"with",
"a",
"suffix",
"at",
"a",
"given",
"root"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L115-L136
|
[
"def",
"list_files",
"(",
"root",
",",
"suffix",
",",
"prefix",
"=",
"False",
")",
":",
"root",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"root",
")",
"files",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"p",
":",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"p",
")",
")",
"and",
"p",
".",
"endswith",
"(",
"suffix",
")",
",",
"os",
".",
"listdir",
"(",
"root",
")",
")",
")",
"if",
"prefix",
"is",
"True",
":",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"d",
")",
"for",
"d",
"in",
"files",
"]",
"return",
"files"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
download_file_from_google_drive
|
Download a Google Drive file and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
|
torchvision/datasets/utils.py
|
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
|
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
|
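A hypothetical call; the file id below is a placeholder, not a real Drive file:

download_file_from_google_drive(
    file_id='0B-placeholder-id',  # placeholder
    root='~/datasets',
    filename='archive.zip',       # defaults to the file id when omitted
    md5=None,
)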
[
"Download",
"a",
"Google",
"Drive",
"file",
"from",
"and",
"place",
"it",
"in",
"root",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L139-L171
|
[
"def",
"download_file_from_google_drive",
"(",
"file_id",
",",
"root",
",",
"filename",
"=",
"None",
",",
"md5",
"=",
"None",
")",
":",
"# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url",
"import",
"requests",
"url",
"=",
"\"https://docs.google.com/uc?export=download\"",
"root",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"root",
")",
"if",
"not",
"filename",
":",
"filename",
"=",
"file_id",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
"makedir_exist_ok",
"(",
"root",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fpath",
")",
"and",
"check_integrity",
"(",
"fpath",
",",
"md5",
")",
":",
"print",
"(",
"'Using downloaded and verified file: '",
"+",
"fpath",
")",
"else",
":",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"response",
"=",
"session",
".",
"get",
"(",
"url",
",",
"params",
"=",
"{",
"'id'",
":",
"file_id",
"}",
",",
"stream",
"=",
"True",
")",
"token",
"=",
"_get_confirm_token",
"(",
"response",
")",
"if",
"token",
":",
"params",
"=",
"{",
"'id'",
":",
"file_id",
",",
"'confirm'",
":",
"token",
"}",
"response",
"=",
"session",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
",",
"stream",
"=",
"True",
")",
"_save_response_content",
"(",
"response",
",",
"fpath",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
RandomCrop.get_params
|
Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
|
torchvision/transforms/transforms.py
|
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = img.size
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
|
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = img.size
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
|
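A usage sketch via the static method on RandomCrop in the torchvision version pinned by this record; note that PIL's crop box is (left, upper, right, lower):

from PIL import Image
from torchvision.transforms import RandomCrop

img = Image.new('RGB', (100, 80))  # width=100, height=80
i, j, h, w = RandomCrop.get_params(img, output_size=(64, 64))
patch = img.crop((j, i, j + w, i + h))
print(patch.size)  # (64, 64)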
[
"Get",
"parameters",
"for",
"crop",
"for",
"a",
"random",
"crop",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/transforms.py#L435-L452
|
[
"def",
"get_params",
"(",
"img",
",",
"output_size",
")",
":",
"w",
",",
"h",
"=",
"img",
".",
"size",
"th",
",",
"tw",
"=",
"output_size",
"if",
"w",
"==",
"tw",
"and",
"h",
"==",
"th",
":",
"return",
"0",
",",
"0",
",",
"h",
",",
"w",
"i",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"h",
"-",
"th",
")",
"j",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"w",
"-",
"tw",
")",
"return",
"i",
",",
"j",
",",
"th",
",",
"tw"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
RandomPerspective.get_params
|
Get parameters for ``perspective`` for a random perspective transform.
Args:
width : width of the image.
height : height of the image.
Returns:
List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
|
torchvision/transforms/transforms.py
|
def get_params(width, height, distortion_scale):
"""Get parameters for ``perspective`` for a random perspective transform.
Args:
width : width of the image.
height : height of the image.
Returns:
        List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
"""
half_height = int(height / 2)
half_width = int(width / 2)
topleft = (random.randint(0, int(distortion_scale * half_width)),
random.randint(0, int(distortion_scale * half_height)))
topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
random.randint(0, int(distortion_scale * half_height)))
botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
botleft = (random.randint(0, int(distortion_scale * half_width)),
random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
endpoints = [topleft, topright, botright, botleft]
return startpoints, endpoints
|
def get_params(width, height, distortion_scale):
"""Get parameters for ``perspective`` for a random perspective transform.
Args:
width : width of the image.
height : height of the image.
Returns:
        List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
"""
half_height = int(height / 2)
half_width = int(width / 2)
topleft = (random.randint(0, int(distortion_scale * half_width)),
random.randint(0, int(distortion_scale * half_height)))
topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
random.randint(0, int(distortion_scale * half_height)))
botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
botleft = (random.randint(0, int(distortion_scale * half_width)),
random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
endpoints = [topleft, topright, botright, botleft]
return startpoints, endpoints
|
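A sketch via the static method on RandomPerspective (assuming the class is exported by this torchvision version); startpoints are always the four image corners, endpoints are their random displacements:

from torchvision.transforms import RandomPerspective

startpoints, endpoints = RandomPerspective.get_params(width=100, height=80,
                                                      distortion_scale=0.5)
print(startpoints)  # [(0, 0), (99, 0), (99, 79), (0, 79)]
print(endpoints)    # corners pushed inward by up to distortion_scale * half-extent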
[
"Get",
"parameters",
"for",
"perspective",
"for",
"a",
"random",
"perspective",
"transform",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/transforms.py#L567-L590
|
[
"def",
"get_params",
"(",
"width",
",",
"height",
",",
"distortion_scale",
")",
":",
"half_height",
"=",
"int",
"(",
"height",
"/",
"2",
")",
"half_width",
"=",
"int",
"(",
"width",
"/",
"2",
")",
"topleft",
"=",
"(",
"random",
".",
"randint",
"(",
"0",
",",
"int",
"(",
"distortion_scale",
"*",
"half_width",
")",
")",
",",
"random",
".",
"randint",
"(",
"0",
",",
"int",
"(",
"distortion_scale",
"*",
"half_height",
")",
")",
")",
"topright",
"=",
"(",
"random",
".",
"randint",
"(",
"width",
"-",
"int",
"(",
"distortion_scale",
"*",
"half_width",
")",
"-",
"1",
",",
"width",
"-",
"1",
")",
",",
"random",
".",
"randint",
"(",
"0",
",",
"int",
"(",
"distortion_scale",
"*",
"half_height",
")",
")",
")",
"botright",
"=",
"(",
"random",
".",
"randint",
"(",
"width",
"-",
"int",
"(",
"distortion_scale",
"*",
"half_width",
")",
"-",
"1",
",",
"width",
"-",
"1",
")",
",",
"random",
".",
"randint",
"(",
"height",
"-",
"int",
"(",
"distortion_scale",
"*",
"half_height",
")",
"-",
"1",
",",
"height",
"-",
"1",
")",
")",
"botleft",
"=",
"(",
"random",
".",
"randint",
"(",
"0",
",",
"int",
"(",
"distortion_scale",
"*",
"half_width",
")",
")",
",",
"random",
".",
"randint",
"(",
"height",
"-",
"int",
"(",
"distortion_scale",
"*",
"half_height",
")",
"-",
"1",
",",
"height",
"-",
"1",
")",
")",
"startpoints",
"=",
"[",
"(",
"0",
",",
"0",
")",
",",
"(",
"width",
"-",
"1",
",",
"0",
")",
",",
"(",
"width",
"-",
"1",
",",
"height",
"-",
"1",
")",
",",
"(",
"0",
",",
"height",
"-",
"1",
")",
"]",
"endpoints",
"=",
"[",
"topleft",
",",
"topright",
",",
"botright",
",",
"botleft",
"]",
"return",
"startpoints",
",",
"endpoints"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
RandomResizedCrop.get_params
|
Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of the fraction of the original image area to crop
ratio (tuple): range of aspect ratios for the crop
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
|
torchvision/transforms/transforms.py
|
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
        scale (tuple): range of the fraction of the original image area to crop
        ratio (tuple): range of aspect ratios for the crop
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if (in_ratio < min(ratio)):
w = img.size[0]
h = w / min(ratio)
elif (in_ratio > max(ratio)):
h = img.size[1]
w = h * max(ratio)
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
|
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
        scale (tuple): range of the fraction of the original image area to crop
        ratio (tuple): range of aspect ratios for the crop
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if (in_ratio < min(ratio)):
w = img.size[0]
h = w / min(ratio)
elif (in_ratio > max(ratio)):
h = img.size[1]
w = h * max(ratio)
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
|
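A sketch using torchvision's default scale and ratio ranges (8%-100% of the area, aspect ratio between 3/4 and 4/3):

from PIL import Image
from torchvision.transforms import RandomResizedCrop

img = Image.new('RGB', (640, 480))
i, j, h, w = RandomResizedCrop.get_params(img, scale=(0.08, 1.0),
                                          ratio=(3. / 4., 4. / 3.))
print(i, j, h, w)  # top, left, height, width of the sampled crop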
[
"Get",
"parameters",
"for",
"crop",
"for",
"a",
"random",
"sized",
"crop",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/transforms.py#L624-L664
|
[
"def",
"get_params",
"(",
"img",
",",
"scale",
",",
"ratio",
")",
":",
"area",
"=",
"img",
".",
"size",
"[",
"0",
"]",
"*",
"img",
".",
"size",
"[",
"1",
"]",
"for",
"attempt",
"in",
"range",
"(",
"10",
")",
":",
"target_area",
"=",
"random",
".",
"uniform",
"(",
"*",
"scale",
")",
"*",
"area",
"log_ratio",
"=",
"(",
"math",
".",
"log",
"(",
"ratio",
"[",
"0",
"]",
")",
",",
"math",
".",
"log",
"(",
"ratio",
"[",
"1",
"]",
")",
")",
"aspect_ratio",
"=",
"math",
".",
"exp",
"(",
"random",
".",
"uniform",
"(",
"*",
"log_ratio",
")",
")",
"w",
"=",
"int",
"(",
"round",
"(",
"math",
".",
"sqrt",
"(",
"target_area",
"*",
"aspect_ratio",
")",
")",
")",
"h",
"=",
"int",
"(",
"round",
"(",
"math",
".",
"sqrt",
"(",
"target_area",
"/",
"aspect_ratio",
")",
")",
")",
"if",
"w",
"<=",
"img",
".",
"size",
"[",
"0",
"]",
"and",
"h",
"<=",
"img",
".",
"size",
"[",
"1",
"]",
":",
"i",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"img",
".",
"size",
"[",
"1",
"]",
"-",
"h",
")",
"j",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"img",
".",
"size",
"[",
"0",
"]",
"-",
"w",
")",
"return",
"i",
",",
"j",
",",
"h",
",",
"w",
"# Fallback to central crop",
"in_ratio",
"=",
"img",
".",
"size",
"[",
"0",
"]",
"/",
"img",
".",
"size",
"[",
"1",
"]",
"if",
"(",
"in_ratio",
"<",
"min",
"(",
"ratio",
")",
")",
":",
"w",
"=",
"img",
".",
"size",
"[",
"0",
"]",
"h",
"=",
"w",
"/",
"min",
"(",
"ratio",
")",
"elif",
"(",
"in_ratio",
">",
"max",
"(",
"ratio",
")",
")",
":",
"h",
"=",
"img",
".",
"size",
"[",
"1",
"]",
"w",
"=",
"h",
"*",
"max",
"(",
"ratio",
")",
"else",
":",
"# whole image",
"w",
"=",
"img",
".",
"size",
"[",
"0",
"]",
"h",
"=",
"img",
".",
"size",
"[",
"1",
"]",
"i",
"=",
"(",
"img",
".",
"size",
"[",
"1",
"]",
"-",
"h",
")",
"//",
"2",
"j",
"=",
"(",
"img",
".",
"size",
"[",
"0",
"]",
"-",
"w",
")",
"//",
"2",
"return",
"i",
",",
"j",
",",
"h",
",",
"w"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
ColorJitter.get_params
|
Get a randomized transform to be applied on image.
Arguments are the same as those of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
|
torchvision/transforms/transforms.py
|
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
    Arguments are the same as those of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness is not None:
brightness_factor = random.uniform(brightness[0], brightness[1])
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast is not None:
contrast_factor = random.uniform(contrast[0], contrast[1])
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation is not None:
saturation_factor = random.uniform(saturation[0], saturation[1])
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue is not None:
hue_factor = random.uniform(hue[0], hue[1])
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
|
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
    Arguments are the same as those of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness is not None:
brightness_factor = random.uniform(brightness[0], brightness[1])
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast is not None:
contrast_factor = random.uniform(contrast[0], contrast[1])
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation is not None:
saturation_factor = random.uniform(saturation[0], saturation[1])
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue is not None:
hue_factor = random.uniform(hue[0], hue[1])
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
|
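A sketch via the static method on ColorJitter; each argument is a (min, max) factor range, or None to skip that adjustment:

from PIL import Image
from torchvision.transforms import ColorJitter

img = Image.new('RGB', (32, 32), color=(128, 128, 128))
transform = ColorJitter.get_params(brightness=(0.8, 1.2), contrast=(0.8, 1.2),
                                   saturation=None, hue=None)
out = transform(img)  # brightness and contrast applied in a random order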
[
"Get",
"a",
"randomized",
"transform",
"to",
"be",
"applied",
"on",
"image",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/transforms.py#L875-L905
|
[
"def",
"get_params",
"(",
"brightness",
",",
"contrast",
",",
"saturation",
",",
"hue",
")",
":",
"transforms",
"=",
"[",
"]",
"if",
"brightness",
"is",
"not",
"None",
":",
"brightness_factor",
"=",
"random",
".",
"uniform",
"(",
"brightness",
"[",
"0",
"]",
",",
"brightness",
"[",
"1",
"]",
")",
"transforms",
".",
"append",
"(",
"Lambda",
"(",
"lambda",
"img",
":",
"F",
".",
"adjust_brightness",
"(",
"img",
",",
"brightness_factor",
")",
")",
")",
"if",
"contrast",
"is",
"not",
"None",
":",
"contrast_factor",
"=",
"random",
".",
"uniform",
"(",
"contrast",
"[",
"0",
"]",
",",
"contrast",
"[",
"1",
"]",
")",
"transforms",
".",
"append",
"(",
"Lambda",
"(",
"lambda",
"img",
":",
"F",
".",
"adjust_contrast",
"(",
"img",
",",
"contrast_factor",
")",
")",
")",
"if",
"saturation",
"is",
"not",
"None",
":",
"saturation_factor",
"=",
"random",
".",
"uniform",
"(",
"saturation",
"[",
"0",
"]",
",",
"saturation",
"[",
"1",
"]",
")",
"transforms",
".",
"append",
"(",
"Lambda",
"(",
"lambda",
"img",
":",
"F",
".",
"adjust_saturation",
"(",
"img",
",",
"saturation_factor",
")",
")",
")",
"if",
"hue",
"is",
"not",
"None",
":",
"hue_factor",
"=",
"random",
".",
"uniform",
"(",
"hue",
"[",
"0",
"]",
",",
"hue",
"[",
"1",
"]",
")",
"transforms",
".",
"append",
"(",
"Lambda",
"(",
"lambda",
"img",
":",
"F",
".",
"adjust_hue",
"(",
"img",
",",
"hue_factor",
")",
")",
")",
"random",
".",
"shuffle",
"(",
"transforms",
")",
"transform",
"=",
"Compose",
"(",
"transforms",
")",
"return",
"transform"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
RandomAffine.get_params
|
Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
|
torchvision/transforms/transforms.py
|
def get_params(degrees, translate, scale_ranges, shears, img_size):
"""Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
"""
angle = random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(random.uniform(-max_dx, max_dx)),
np.round(random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = random.uniform(scale_ranges[0], scale_ranges[1])
else:
scale = 1.0
if shears is not None:
shear = random.uniform(shears[0], shears[1])
else:
shear = 0.0
return angle, translations, scale, shear
|
def get_params(degrees, translate, scale_ranges, shears, img_size):
"""Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
"""
angle = random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(random.uniform(-max_dx, max_dx)),
np.round(random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = random.uniform(scale_ranges[0], scale_ranges[1])
else:
scale = 1.0
if shears is not None:
shear = random.uniform(shears[0], shears[1])
else:
shear = 0.0
return angle, translations, scale, shear
|
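A sketch via the static method on RandomAffine; translate is given as fractions of image size, so (0.1, 0.1) allows shifts of up to about 22 pixels on a 224x224 image:

from torchvision.transforms import RandomAffine

angle, translations, scale, shear = RandomAffine.get_params(
    degrees=(-10, 10),      # rotation range in degrees
    translate=(0.1, 0.1),   # max shift as a fraction of width/height
    scale_ranges=(0.9, 1.1),
    shears=(-5, 5),
    img_size=(224, 224),
)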
[
"Get",
"parameters",
"for",
"affine",
"transformation"
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/transforms.py#L1064-L1089
|
[
"def",
"get_params",
"(",
"degrees",
",",
"translate",
",",
"scale_ranges",
",",
"shears",
",",
"img_size",
")",
":",
"angle",
"=",
"random",
".",
"uniform",
"(",
"degrees",
"[",
"0",
"]",
",",
"degrees",
"[",
"1",
"]",
")",
"if",
"translate",
"is",
"not",
"None",
":",
"max_dx",
"=",
"translate",
"[",
"0",
"]",
"*",
"img_size",
"[",
"0",
"]",
"max_dy",
"=",
"translate",
"[",
"1",
"]",
"*",
"img_size",
"[",
"1",
"]",
"translations",
"=",
"(",
"np",
".",
"round",
"(",
"random",
".",
"uniform",
"(",
"-",
"max_dx",
",",
"max_dx",
")",
")",
",",
"np",
".",
"round",
"(",
"random",
".",
"uniform",
"(",
"-",
"max_dy",
",",
"max_dy",
")",
")",
")",
"else",
":",
"translations",
"=",
"(",
"0",
",",
"0",
")",
"if",
"scale_ranges",
"is",
"not",
"None",
":",
"scale",
"=",
"random",
".",
"uniform",
"(",
"scale_ranges",
"[",
"0",
"]",
",",
"scale_ranges",
"[",
"1",
"]",
")",
"else",
":",
"scale",
"=",
"1.0",
"if",
"shears",
"is",
"not",
"None",
":",
"shear",
"=",
"random",
".",
"uniform",
"(",
"shears",
"[",
"0",
"]",
",",
"shears",
"[",
"1",
"]",
")",
"else",
":",
"shear",
"=",
"0.0",
"return",
"angle",
",",
"translations",
",",
"scale",
",",
"shear"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
inception_v3
|
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
**Important**: In contrast to the other models, inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, add an auxiliary branch that can improve training.
Default: *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
|
torchvision/models/inception.py
|
def inception_v3(pretrained=False, **kwargs):
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
        **Important**: In contrast to the other models, inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, add an auxiliary branch that can improve training.
Default: *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' in kwargs:
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
else:
original_aux_logits = True
model = Inception3(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))
if not original_aux_logits:
model.aux_logits = False
del model.AuxLogits
return model
return Inception3(**kwargs)
|
def inception_v3(pretrained=False, **kwargs):
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
        **Important**: In contrast to the other models, inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, add an auxiliary branch that can improve training.
Default: *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' in kwargs:
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
else:
original_aux_logits = True
model = Inception3(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))
if not original_aux_logits:
model.aux_logits = False
del model.AuxLogits
return model
return Inception3(**kwargs)
|
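A usage sketch; note the 299x299 input the docstring insists on (in eval mode the auxiliary head is not used, so a single logits tensor comes back):

import torch
from torchvision.models import inception_v3

model = inception_v3(pretrained=False, aux_logits=True)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 299, 299))
print(out.shape)  # torch.Size([1, 1000])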
[
"r",
"Inception",
"v3",
"model",
"architecture",
"from",
"Rethinking",
"the",
"Inception",
"Architecture",
"for",
"Computer",
"Vision",
"<http",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1512",
".",
"00567",
">",
"_",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/inception.py#L19-L49
|
[
"def",
"inception_v3",
"(",
"pretrained",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pretrained",
":",
"if",
"'transform_input'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'transform_input'",
"]",
"=",
"True",
"if",
"'aux_logits'",
"in",
"kwargs",
":",
"original_aux_logits",
"=",
"kwargs",
"[",
"'aux_logits'",
"]",
"kwargs",
"[",
"'aux_logits'",
"]",
"=",
"True",
"else",
":",
"original_aux_logits",
"=",
"True",
"model",
"=",
"Inception3",
"(",
"*",
"*",
"kwargs",
")",
"model",
".",
"load_state_dict",
"(",
"model_zoo",
".",
"load_url",
"(",
"model_urls",
"[",
"'inception_v3_google'",
"]",
")",
")",
"if",
"not",
"original_aux_logits",
":",
"model",
".",
"aux_logits",
"=",
"False",
"del",
"model",
".",
"AuxLogits",
"return",
"model",
"return",
"Inception3",
"(",
"*",
"*",
"kwargs",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
SBU.download
|
Download and extract the tarball, and download each individual photo.
|
torchvision/datasets/sbu.py
|
def download(self):
"""Download and extract the tarball, and download each individual photo."""
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
download_url(self.url, self.root, self.filename, self.md5_checksum)
# Extract file
with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar:
tar.extractall(path=self.root)
# Download individual photos
with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, 'dataset'))
except OSError:
# The images point to public images on Flickr.
                    # Note: Images might be removed by users at any time.
pass
|
def download(self):
"""Download and extract the tarball, and download each individual photo."""
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
download_url(self.url, self.root, self.filename, self.md5_checksum)
# Extract file
with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar:
tar.extractall(path=self.root)
# Download individual photos
with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, 'dataset'))
except OSError:
# The images point to public images on Flickr.
                    # Note: Images might be removed by users at any time.
pass
|
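A sketch of the public entry point that triggers this method; the root path is a placeholder, and the full photo download is large:

from torchvision.datasets import SBU

dataset = SBU(root='~/datasets/sbu', download=True)  # runs download() above
img, caption = dataset[0]  # a PIL image and its caption string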
[
"Download",
"and",
"extract",
"the",
"tarball",
"and",
"download",
"each",
"individual",
"photo",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/sbu.py#L87-L110
|
[
"def",
"download",
"(",
"self",
")",
":",
"import",
"tarfile",
"if",
"self",
".",
"_check_integrity",
"(",
")",
":",
"print",
"(",
"'Files already downloaded and verified'",
")",
"return",
"download_url",
"(",
"self",
".",
"url",
",",
"self",
".",
"root",
",",
"self",
".",
"filename",
",",
"self",
".",
"md5_checksum",
")",
"# Extract file",
"with",
"tarfile",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"root",
",",
"self",
".",
"filename",
")",
",",
"'r:gz'",
")",
"as",
"tar",
":",
"tar",
".",
"extractall",
"(",
"path",
"=",
"self",
".",
"root",
")",
"# Download individual photos",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"root",
",",
"'dataset'",
",",
"'SBU_captioned_photo_dataset_urls.txt'",
")",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"url",
"=",
"line",
".",
"rstrip",
"(",
")",
"try",
":",
"download_url",
"(",
"url",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"root",
",",
"'dataset'",
")",
")",
"except",
"OSError",
":",
"# The images point to public images on Flickr.",
"# Note: Images might be removed by users at anytime.",
"pass"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
googlenet
|
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
|
torchvision/models/googlenet.py
|
def googlenet(pretrained=False, **kwargs):
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' not in kwargs:
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
'so make sure to train them')
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
kwargs['init_weights'] = False
model = GoogLeNet(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['googlenet']))
if not original_aux_logits:
model.aux_logits = False
del model.aux1, model.aux2
return model
return GoogLeNet(**kwargs)
|
def googlenet(pretrained=False, **kwargs):
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' not in kwargs:
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
'so make sure to train them')
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
kwargs['init_weights'] = False
model = GoogLeNet(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['googlenet']))
if not original_aux_logits:
model.aux_logits = False
del model.aux1, model.aux2
return model
return GoogLeNet(**kwargs)
|
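A usage sketch; with aux_logits=False and eval mode the model returns a single logits tensor:

import torch
from torchvision.models import googlenet

model = googlenet(pretrained=False, aux_logits=False)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000])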
[
"r",
"GoogLeNet",
"(",
"Inception",
"v1",
")",
"model",
"architecture",
"from",
"Going",
"Deeper",
"with",
"Convolutions",
"<http",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1409",
".",
"4842",
">",
"_",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/googlenet.py#L18-L47
|
[
"def",
"googlenet",
"(",
"pretrained",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pretrained",
":",
"if",
"'transform_input'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'transform_input'",
"]",
"=",
"True",
"if",
"'aux_logits'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'aux_logits'",
"]",
"=",
"False",
"if",
"kwargs",
"[",
"'aux_logits'",
"]",
":",
"warnings",
".",
"warn",
"(",
"'auxiliary heads in the pretrained googlenet model are NOT pretrained, '",
"'so make sure to train them'",
")",
"original_aux_logits",
"=",
"kwargs",
"[",
"'aux_logits'",
"]",
"kwargs",
"[",
"'aux_logits'",
"]",
"=",
"True",
"kwargs",
"[",
"'init_weights'",
"]",
"=",
"False",
"model",
"=",
"GoogLeNet",
"(",
"*",
"*",
"kwargs",
")",
"model",
".",
"load_state_dict",
"(",
"model_zoo",
".",
"load_url",
"(",
"model_urls",
"[",
"'googlenet'",
"]",
")",
")",
"if",
"not",
"original_aux_logits",
":",
"model",
".",
"aux_logits",
"=",
"False",
"del",
"model",
".",
"aux1",
",",
"model",
".",
"aux2",
"return",
"model",
"return",
"GoogLeNet",
"(",
"*",
"*",
"kwargs",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
MNIST.download
|
Download the MNIST data if it doesn't exist in processed_folder already.
|
torchvision/datasets/mnist.py
|
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
for url in self.urls:
filename = url.rpartition('/')[2]
file_path = os.path.join(self.raw_folder, filename)
download_url(url, root=self.raw_folder, filename=filename, md5=None)
self.extract_gzip(gzip_path=file_path, remove_finished=True)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
|
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
for url in self.urls:
filename = url.rpartition('/')[2]
file_path = os.path.join(self.raw_folder, filename)
download_url(url, root=self.raw_folder, filename=filename, md5=None)
self.extract_gzip(gzip_path=file_path, remove_finished=True)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
|
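A sketch of the public entry point; the first construction runs the method above, later ones find the processed files and hit the early return (the root path here is a placeholder):

from torchvision.datasets import MNIST

train_set = MNIST(root='~/datasets/mnist', train=True, download=True)
print(len(train_set))  # 60000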
[
"Download",
"the",
"MNIST",
"data",
"if",
"it",
"doesn",
"t",
"exist",
"in",
"processed_folder",
"already",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/mnist.py#L132-L164
|
[
"def",
"download",
"(",
"self",
")",
":",
"if",
"self",
".",
"_check_exists",
"(",
")",
":",
"return",
"makedir_exist_ok",
"(",
"self",
".",
"raw_folder",
")",
"makedir_exist_ok",
"(",
"self",
".",
"processed_folder",
")",
"# download files",
"for",
"url",
"in",
"self",
".",
"urls",
":",
"filename",
"=",
"url",
".",
"rpartition",
"(",
"'/'",
")",
"[",
"2",
"]",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"raw_folder",
",",
"filename",
")",
"download_url",
"(",
"url",
",",
"root",
"=",
"self",
".",
"raw_folder",
",",
"filename",
"=",
"filename",
",",
"md5",
"=",
"None",
")",
"self",
".",
"extract_gzip",
"(",
"gzip_path",
"=",
"file_path",
",",
"remove_finished",
"=",
"True",
")",
"# process and save as torch files",
"print",
"(",
"'Processing...'",
")",
"training_set",
"=",
"(",
"read_image_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"raw_folder",
",",
"'train-images-idx3-ubyte'",
")",
")",
",",
"read_label_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"raw_folder",
",",
"'train-labels-idx1-ubyte'",
")",
")",
")",
"test_set",
"=",
"(",
"read_image_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"raw_folder",
",",
"'t10k-images-idx3-ubyte'",
")",
")",
",",
"read_label_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"raw_folder",
",",
"'t10k-labels-idx1-ubyte'",
")",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"processed_folder",
",",
"self",
".",
"training_file",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"torch",
".",
"save",
"(",
"training_set",
",",
"f",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"processed_folder",
",",
"self",
".",
"test_file",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"torch",
".",
"save",
"(",
"test_set",
",",
"f",
")",
"print",
"(",
"'Done!'",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|
test
|
EMNIST.download
|
Download the EMNIST data if it doesn't exist in processed_folder already.
|
torchvision/datasets/mnist.py
|
def download(self):
"""Download the EMNIST data if it doesn't exist in processed_folder already."""
import shutil
import zipfile
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
filename = self.url.rpartition('/')[2]
file_path = os.path.join(self.raw_folder, filename)
download_url(self.url, root=self.raw_folder, filename=filename, md5=None)
print('Extracting zip archive')
with zipfile.ZipFile(file_path) as zip_f:
zip_f.extractall(self.raw_folder)
os.unlink(file_path)
gzip_folder = os.path.join(self.raw_folder, 'gzip')
for gzip_file in os.listdir(gzip_folder):
if gzip_file.endswith('.gz'):
self.extract_gzip(gzip_path=os.path.join(gzip_folder, gzip_file))
# process and save as torch files
for split in self.splits:
print('Processing ' + split)
training_set = (
read_image_file(os.path.join(gzip_folder, 'emnist-{}-train-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(gzip_folder, 'emnist-{}-train-labels-idx1-ubyte'.format(split)))
)
test_set = (
read_image_file(os.path.join(gzip_folder, 'emnist-{}-test-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(gzip_folder, 'emnist-{}-test-labels-idx1-ubyte'.format(split)))
)
with open(os.path.join(self.processed_folder, self._training_file(split)), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self._test_file(split)), 'wb') as f:
torch.save(test_set, f)
shutil.rmtree(gzip_folder)
print('Done!')
|
def download(self):
"""Download the EMNIST data if it doesn't exist in processed_folder already."""
import shutil
import zipfile
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
filename = self.url.rpartition('/')[2]
file_path = os.path.join(self.raw_folder, filename)
download_url(self.url, root=self.raw_folder, filename=filename, md5=None)
print('Extracting zip archive')
with zipfile.ZipFile(file_path) as zip_f:
zip_f.extractall(self.raw_folder)
os.unlink(file_path)
gzip_folder = os.path.join(self.raw_folder, 'gzip')
for gzip_file in os.listdir(gzip_folder):
if gzip_file.endswith('.gz'):
self.extract_gzip(gzip_path=os.path.join(gzip_folder, gzip_file))
# process and save as torch files
for split in self.splits:
print('Processing ' + split)
training_set = (
read_image_file(os.path.join(gzip_folder, 'emnist-{}-train-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(gzip_folder, 'emnist-{}-train-labels-idx1-ubyte'.format(split)))
)
test_set = (
read_image_file(os.path.join(gzip_folder, 'emnist-{}-test-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(gzip_folder, 'emnist-{}-test-labels-idx1-ubyte'.format(split)))
)
with open(os.path.join(self.processed_folder, self._training_file(split)), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self._test_file(split)), 'wb') as f:
torch.save(test_set, f)
shutil.rmtree(gzip_folder)
print('Done!')
|
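In normal use the download routine above is not called directly; it runs when the public torchvision constructor is invoked with download=True. A minimal usage sketch (the root and split values are illustrative):

from torchvision import datasets

# download=True triggers EMNIST.download(), which fetches the zip,
# unpacks the per-split gzip files, and saves the processed .pt tensors.
train_set = datasets.EMNIST(root='./data', split='balanced', train=True, download=True)
image, label = train_set[0]

Because the raw file names are templated per split ('emnist-{}-train-images-idx3-ubyte'.format(split)), a single download serves every EMNIST split.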
[
"Download",
"the",
"EMNIST",
"data",
"if",
"it",
"doesn",
"t",
"exist",
"in",
"processed_folder",
"already",
"."
] |
pytorch/vision
|
python
|
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/mnist.py#L262-L304
|
[
"def",
"download",
"(",
"self",
")",
":",
"import",
"shutil",
"import",
"zipfile",
"if",
"self",
".",
"_check_exists",
"(",
")",
":",
"return",
"makedir_exist_ok",
"(",
"self",
".",
"raw_folder",
")",
"makedir_exist_ok",
"(",
"self",
".",
"processed_folder",
")",
"# download files",
"filename",
"=",
"self",
".",
"url",
".",
"rpartition",
"(",
"'/'",
")",
"[",
"2",
"]",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"raw_folder",
",",
"filename",
")",
"download_url",
"(",
"self",
".",
"url",
",",
"root",
"=",
"self",
".",
"raw_folder",
",",
"filename",
"=",
"filename",
",",
"md5",
"=",
"None",
")",
"print",
"(",
"'Extracting zip archive'",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"file_path",
")",
"as",
"zip_f",
":",
"zip_f",
".",
"extractall",
"(",
"self",
".",
"raw_folder",
")",
"os",
".",
"unlink",
"(",
"file_path",
")",
"gzip_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"raw_folder",
",",
"'gzip'",
")",
"for",
"gzip_file",
"in",
"os",
".",
"listdir",
"(",
"gzip_folder",
")",
":",
"if",
"gzip_file",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"self",
".",
"extract_gzip",
"(",
"gzip_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"gzip_folder",
",",
"gzip_file",
")",
")",
"# process and save as torch files",
"for",
"split",
"in",
"self",
".",
"splits",
":",
"print",
"(",
"'Processing '",
"+",
"split",
")",
"training_set",
"=",
"(",
"read_image_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"gzip_folder",
",",
"'emnist-{}-train-images-idx3-ubyte'",
".",
"format",
"(",
"split",
")",
")",
")",
",",
"read_label_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"gzip_folder",
",",
"'emnist-{}-train-labels-idx1-ubyte'",
".",
"format",
"(",
"split",
")",
")",
")",
")",
"test_set",
"=",
"(",
"read_image_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"gzip_folder",
",",
"'emnist-{}-test-images-idx3-ubyte'",
".",
"format",
"(",
"split",
")",
")",
")",
",",
"read_label_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"gzip_folder",
",",
"'emnist-{}-test-labels-idx1-ubyte'",
".",
"format",
"(",
"split",
")",
")",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"processed_folder",
",",
"self",
".",
"_training_file",
"(",
"split",
")",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"torch",
".",
"save",
"(",
"training_set",
",",
"f",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"processed_folder",
",",
"self",
".",
"_test_file",
"(",
"split",
")",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"torch",
".",
"save",
"(",
"test_set",
",",
"f",
")",
"shutil",
".",
"rmtree",
"(",
"gzip_folder",
")",
"print",
"(",
"'Done!'",
")"
] |
3afcf3cd49661c466c75ea536b0b2a7ff57f9a05
|