| partition (stringclasses, 3 values) | func_name (string, 1–134 chars) | docstring (string, 1–46.9k chars) | path (string, 4–223 chars) | original_string (string, 75–104k chars) | code (string, 75–104k chars) | docstring_tokens (list, 1–1.97k tokens) | repo (string, 7–55 chars) | language (stringclasses, 1 value) | url (string, 87–315 chars) | code_tokens (list, 19–28.4k tokens) | sha (string, 40 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
GoogleCloudStorageHook.get_conn
|
Returns a Google Cloud Storage service object.
|
airflow/contrib/hooks/gcs_hook.py
|
def get_conn(self):
"""
Returns a Google Cloud Storage service object.
"""
if not self._conn:
self._conn = storage.Client(credentials=self._get_credentials())
return self._conn
|
def get_conn(self):
"""
Returns a Google Cloud Storage service object.
"""
if not self._conn:
self._conn = storage.Client(credentials=self._get_credentials())
return self._conn
|
[
"Returns",
"a",
"Google",
"Cloud",
"Storage",
"service",
"object",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L45-L52
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_conn",
":",
"self",
".",
"_conn",
"=",
"storage",
".",
"Client",
"(",
"credentials",
"=",
"self",
".",
"_get_credentials",
"(",
")",
")",
"return",
"self",
".",
"_conn"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
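The `get_conn` row above shows that the hook lazily builds and caches a `google.cloud.storage` client. A minimal usage sketch, assuming a default GCP connection is configured and that the hook can be constructed without arguments (the constructor is not shown in this excerpt):

```python
# Sketch only: hook constructed with assumed defaults; a GCP connection is assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
client = hook.get_conn()            # builds storage.Client on first use
assert client is hook.get_conn()    # later calls return the cached client
```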
test
|
GoogleCloudStorageHook.copy
|
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination of the object to copied to.
Can be omitted; then the same bucket is used.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
|
airflow/contrib/hooks/gcs_hook.py
|
def copy(self, source_bucket, source_object, destination_bucket=None,
destination_object=None):
"""
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination of the object to copied to.
Can be omitted; then the same bucket is used.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and \
source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.get_bucket(source_bucket)
source_object = source_bucket.blob(source_object)
destination_bucket = client.get_bucket(destination_bucket)
destination_object = source_bucket.copy_blob(
blob=source_object,
destination_bucket=destination_bucket,
new_name=destination_object)
self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object.name, destination_bucket.name)
|
def copy(self, source_bucket, source_object, destination_bucket=None,
destination_object=None):
"""
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination of the object to copied to.
Can be omitted; then the same bucket is used.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and \
source_object == destination_object:
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.get_bucket(source_bucket)
source_object = source_bucket.blob(source_object)
destination_bucket = client.get_bucket(destination_bucket)
destination_object = source_bucket.copy_blob(
blob=source_object,
destination_bucket=destination_bucket,
new_name=destination_object)
self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object.name, destination_bucket.name)
|
[
"Copies",
"an",
"object",
"from",
"a",
"bucket",
"to",
"another",
"with",
"renaming",
"if",
"requested",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L54-L96
|
[
"def",
"copy",
"(",
"self",
",",
"source_bucket",
",",
"source_object",
",",
"destination_bucket",
"=",
"None",
",",
"destination_object",
"=",
"None",
")",
":",
"destination_bucket",
"=",
"destination_bucket",
"or",
"source_bucket",
"destination_object",
"=",
"destination_object",
"or",
"source_object",
"if",
"source_bucket",
"==",
"destination_bucket",
"and",
"source_object",
"==",
"destination_object",
":",
"raise",
"ValueError",
"(",
"'Either source/destination bucket or source/destination object '",
"'must be different, not both the same: bucket=%s, object=%s'",
"%",
"(",
"source_bucket",
",",
"source_object",
")",
")",
"if",
"not",
"source_bucket",
"or",
"not",
"source_object",
":",
"raise",
"ValueError",
"(",
"'source_bucket and source_object cannot be empty.'",
")",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"source_bucket",
"=",
"client",
".",
"get_bucket",
"(",
"source_bucket",
")",
"source_object",
"=",
"source_bucket",
".",
"blob",
"(",
"source_object",
")",
"destination_bucket",
"=",
"client",
".",
"get_bucket",
"(",
"destination_bucket",
")",
"destination_object",
"=",
"source_bucket",
".",
"copy_blob",
"(",
"blob",
"=",
"source_object",
",",
"destination_bucket",
"=",
"destination_bucket",
",",
"new_name",
"=",
"destination_object",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Object %s in bucket %s copied to object %s in bucket %s'",
",",
"source_object",
".",
"name",
",",
"source_bucket",
".",
"name",
",",
"destination_object",
".",
"name",
",",
"destination_bucket",
".",
"name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
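A minimal usage sketch for the `copy` row above; the bucket and object names are hypothetical and the hook construction is assumed as in the earlier sketch:

```python
# Sketch only: bucket/object names are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
# Omitting destination_object keeps the source object name;
# source and destination may not both be identical.
hook.copy(source_bucket='my-src-bucket',
          source_object='data/file.csv',
          destination_bucket='my-dst-bucket')
```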
test
|
GoogleCloudStorageHook.rewrite
|
Has the same functionality as copy, except that will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination of the object to copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
|
airflow/contrib/hooks/gcs_hook.py
|
def rewrite(self, source_bucket, source_object, destination_bucket,
destination_object=None):
"""
Has the same functionality as copy, except that will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination of the object to copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_object = destination_object or source_object
if (source_bucket == destination_bucket and
source_object == destination_object):
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.get_bucket(bucket_name=source_bucket)
source_object = source_bucket.blob(blob_name=source_object)
destination_bucket = client.get_bucket(bucket_name=destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object, token=token
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object, destination_bucket.name)
|
def rewrite(self, source_bucket, source_object, destination_bucket,
destination_object=None):
"""
Has the same functionality as copy, except that will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination of the object to copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_object = destination_object or source_object
if (source_bucket == destination_bucket and
source_object == destination_object):
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.get_bucket(bucket_name=source_bucket)
source_object = source_bucket.blob(blob_name=source_object)
destination_bucket = client.get_bucket(bucket_name=destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object, token=token
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object, destination_bucket.name)
|
[
"Has",
"the",
"same",
"functionality",
"as",
"copy",
"except",
"that",
"will",
"work",
"on",
"files",
"over",
"5",
"TB",
"as",
"well",
"as",
"when",
"copying",
"between",
"locations",
"and",
"/",
"or",
"storage",
"classes",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L98-L150
|
[
"def",
"rewrite",
"(",
"self",
",",
"source_bucket",
",",
"source_object",
",",
"destination_bucket",
",",
"destination_object",
"=",
"None",
")",
":",
"destination_object",
"=",
"destination_object",
"or",
"source_object",
"if",
"(",
"source_bucket",
"==",
"destination_bucket",
"and",
"source_object",
"==",
"destination_object",
")",
":",
"raise",
"ValueError",
"(",
"'Either source/destination bucket or source/destination object '",
"'must be different, not both the same: bucket=%s, object=%s'",
"%",
"(",
"source_bucket",
",",
"source_object",
")",
")",
"if",
"not",
"source_bucket",
"or",
"not",
"source_object",
":",
"raise",
"ValueError",
"(",
"'source_bucket and source_object cannot be empty.'",
")",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"source_bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
"=",
"source_bucket",
")",
"source_object",
"=",
"source_bucket",
".",
"blob",
"(",
"blob_name",
"=",
"source_object",
")",
"destination_bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
"=",
"destination_bucket",
")",
"token",
",",
"bytes_rewritten",
",",
"total_bytes",
"=",
"destination_bucket",
".",
"blob",
"(",
"blob_name",
"=",
"destination_object",
")",
".",
"rewrite",
"(",
"source",
"=",
"source_object",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Total Bytes: %s | Bytes Written: %s'",
",",
"total_bytes",
",",
"bytes_rewritten",
")",
"while",
"token",
"is",
"not",
"None",
":",
"token",
",",
"bytes_rewritten",
",",
"total_bytes",
"=",
"destination_bucket",
".",
"blob",
"(",
"blob_name",
"=",
"destination_object",
")",
".",
"rewrite",
"(",
"source",
"=",
"source_object",
",",
"token",
"=",
"token",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Total Bytes: %s | Bytes Written: %s'",
",",
"total_bytes",
",",
"bytes_rewritten",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Object %s in bucket %s copied to object %s in bucket %s'",
",",
"source_object",
".",
"name",
",",
"source_bucket",
".",
"name",
",",
"destination_object",
",",
"destination_bucket",
".",
"name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
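A usage sketch for the `rewrite` row above, which loops on the rewrite token so it also works for objects over 5 TB and across locations or storage classes; names are hypothetical:

```python
# Sketch only: bucket/object names are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
# destination_bucket is required here; destination_object defaults to the source name.
hook.rewrite(source_bucket='my-src-bucket',
             source_object='exports/huge-file.bin',
             destination_bucket='my-dst-bucket')
```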
test
|
GoogleCloudStorageHook.download
|
Get a file from Google Cloud Storage.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param filename: If set, a local file path where the file should be written to.
:type filename: str
|
airflow/contrib/hooks/gcs_hook.py
|
def download(self, bucket_name, object_name, filename=None):
"""
Get a file from Google Cloud Storage.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param filename: If set, a local file path where the file should be written to.
:type filename: str
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
if filename:
blob.download_to_filename(filename)
self.log.info('File downloaded to %s', filename)
return blob.download_as_string()
|
def download(self, bucket_name, object_name, filename=None):
"""
Get a file from Google Cloud Storage.
:param bucket_name: The bucket to fetch from.
:type bucket_name: str
:param object_name: The object to fetch.
:type object_name: str
:param filename: If set, a local file path where the file should be written to.
:type filename: str
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
if filename:
blob.download_to_filename(filename)
self.log.info('File downloaded to %s', filename)
return blob.download_as_string()
|
[
"Get",
"a",
"file",
"from",
"Google",
"Cloud",
"Storage",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L152-L171
|
[
"def",
"download",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
",",
"filename",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"blob",
"(",
"blob_name",
"=",
"object_name",
")",
"if",
"filename",
":",
"blob",
".",
"download_to_filename",
"(",
"filename",
")",
"self",
".",
"log",
".",
"info",
"(",
"'File downloaded to %s'",
",",
"filename",
")",
"return",
"blob",
".",
"download_as_string",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
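A usage sketch for the `download` row above; per the code, the object bytes are always returned, and a local copy is written only when `filename` is given (names and paths are hypothetical):

```python
# Sketch only: bucket/object names and local path are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
data = hook.download(bucket_name='my-bucket', object_name='data/file.csv')
hook.download(bucket_name='my-bucket', object_name='data/file.csv',
              filename='/tmp/file.csv')   # also writes the object to disk
```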
test
|
GoogleCloudStorageHook.upload
|
Uploads a local file to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param object_name: The object name to set when uploading the local file.
:type object_name: str
:param filename: The local file path to the file to be uploaded.
:type filename: str
:param mime_type: The MIME type to set when uploading the file.
:type mime_type: str
:param gzip: Option to compress file for upload
:type gzip: bool
|
airflow/contrib/hooks/gcs_hook.py
|
def upload(self, bucket_name, object_name, filename,
mime_type='application/octet-stream', gzip=False):
"""
Uploads a local file to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param object_name: The object name to set when uploading the local file.
:type object_name: str
:param filename: The local file path to the file to be uploaded.
:type filename: str
:param mime_type: The MIME type to set when uploading the file.
:type mime_type: str
:param gzip: Option to compress file for upload
:type gzip: bool
"""
if gzip:
filename_gz = filename + '.gz'
with open(filename, 'rb') as f_in:
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.upload_from_filename(filename=filename,
content_type=mime_type)
if gzip:
os.remove(filename)
self.log.info('File %s uploaded to %s in %s bucket', filename, object_name, bucket_name)
|
def upload(self, bucket_name, object_name, filename,
mime_type='application/octet-stream', gzip=False):
"""
Uploads a local file to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param object_name: The object name to set when uploading the local file.
:type object_name: str
:param filename: The local file path to the file to be uploaded.
:type filename: str
:param mime_type: The MIME type to set when uploading the file.
:type mime_type: str
:param gzip: Option to compress file for upload
:type gzip: bool
"""
if gzip:
filename_gz = filename + '.gz'
with open(filename, 'rb') as f_in:
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.upload_from_filename(filename=filename,
content_type=mime_type)
if gzip:
os.remove(filename)
self.log.info('File %s uploaded to %s in %s bucket', filename, object_name, bucket_name)
|
[
"Uploads",
"a",
"local",
"file",
"to",
"Google",
"Cloud",
"Storage",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L173-L206
|
[
"def",
"upload",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
",",
"filename",
",",
"mime_type",
"=",
"'application/octet-stream'",
",",
"gzip",
"=",
"False",
")",
":",
"if",
"gzip",
":",
"filename_gz",
"=",
"filename",
"+",
"'.gz'",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f_in",
":",
"with",
"gz",
".",
"open",
"(",
"filename_gz",
",",
"'wb'",
")",
"as",
"f_out",
":",
"shutil",
".",
"copyfileobj",
"(",
"f_in",
",",
"f_out",
")",
"filename",
"=",
"filename_gz",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"blob",
"(",
"blob_name",
"=",
"object_name",
")",
"blob",
".",
"upload_from_filename",
"(",
"filename",
"=",
"filename",
",",
"content_type",
"=",
"mime_type",
")",
"if",
"gzip",
":",
"os",
".",
"remove",
"(",
"filename",
")",
"self",
".",
"log",
".",
"info",
"(",
"'File %s uploaded to %s in %s bucket'",
",",
"filename",
",",
"object_name",
",",
"bucket_name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
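A usage sketch for the `upload` row above; with `gzip=True` the local file is compressed to `<filename>.gz`, uploaded under the given object name, and the temporary archive is removed afterwards (names and paths are hypothetical):

```python
# Sketch only: bucket/object names and local path are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
hook.upload(bucket_name='my-bucket',
            object_name='data/file.csv.gz',
            filename='/tmp/file.csv',
            mime_type='application/gzip',
            gzip=True)
```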
test
|
GoogleCloudStorageHook.exists
|
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
:type object_name: str
|
airflow/contrib/hooks/gcs_hook.py
|
def exists(self, bucket_name, object_name):
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
:type object_name: str
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
return blob.exists()
|
def exists(self, bucket_name, object_name):
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
:type object_name: str
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
return blob.exists()
|
[
"Checks",
"for",
"the",
"existence",
"of",
"a",
"file",
"in",
"Google",
"Cloud",
"Storage",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L208-L221
|
[
"def",
"exists",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"blob",
"(",
"blob_name",
"=",
"object_name",
")",
"return",
"blob",
".",
"exists",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
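A one-line usage sketch for the `exists` row above (hypothetical names):

```python
# Sketch only: names are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
present = hook.exists(bucket_name='my-bucket', object_name='data/file.csv')  # True/False
```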
test
|
GoogleCloudStorageHook.is_updated_after
|
Checks if an blob_name is updated in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
|
airflow/contrib/hooks/gcs_hook.py
|
def is_updated_after(self, bucket_name, object_name, ts):
"""
Checks if an blob_name is updated in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
client = self.get_conn()
bucket = storage.Bucket(client=client, name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_update_time = blob.updated
if blob_update_time is not None:
import dateutil.tz
if not ts.tzinfo:
ts = ts.replace(tzinfo=dateutil.tz.tzutc())
self.log.info("Verify object date: %s > %s", blob_update_time, ts)
if blob_update_time > ts:
return True
return False
|
def is_updated_after(self, bucket_name, object_name, ts):
"""
Checks if an blob_name is updated in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the object is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:type object_name: str
:param ts: The timestamp to check against.
:type ts: datetime.datetime
"""
client = self.get_conn()
bucket = storage.Bucket(client=client, name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_update_time = blob.updated
if blob_update_time is not None:
import dateutil.tz
if not ts.tzinfo:
ts = ts.replace(tzinfo=dateutil.tz.tzutc())
self.log.info("Verify object date: %s > %s", blob_update_time, ts)
if blob_update_time > ts:
return True
return False
|
[
"Checks",
"if",
"an",
"blob_name",
"is",
"updated",
"in",
"Google",
"Cloud",
"Storage",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L223-L253
|
[
"def",
"is_updated_after",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
",",
"ts",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"storage",
".",
"Bucket",
"(",
"client",
"=",
"client",
",",
"name",
"=",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"get_blob",
"(",
"blob_name",
"=",
"object_name",
")",
"blob",
".",
"reload",
"(",
")",
"blob_update_time",
"=",
"blob",
".",
"updated",
"if",
"blob_update_time",
"is",
"not",
"None",
":",
"import",
"dateutil",
".",
"tz",
"if",
"not",
"ts",
".",
"tzinfo",
":",
"ts",
"=",
"ts",
".",
"replace",
"(",
"tzinfo",
"=",
"dateutil",
".",
"tz",
".",
"tzutc",
"(",
")",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Verify object date: %s > %s\"",
",",
"blob_update_time",
",",
"ts",
")",
"if",
"blob_update_time",
">",
"ts",
":",
"return",
"True",
"return",
"False"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
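A usage sketch for the `is_updated_after` row above; per the code, a timezone-naive `ts` is interpreted as UTC (names are hypothetical):

```python
# Sketch only: names are hypothetical; naive datetimes are treated as UTC by the hook.
from datetime import datetime, timedelta
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
cutoff = datetime.utcnow() - timedelta(hours=1)
changed = hook.is_updated_after(bucket_name='my-bucket',
                                object_name='data/file.csv',
                                ts=cutoff)
```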
test
|
GoogleCloudStorageHook.delete
|
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:type bucket_name: str
:param object_name: name of the object to delete
:type object_name: str
|
airflow/contrib/hooks/gcs_hook.py
|
def delete(self, bucket_name, object_name):
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:type bucket_name: str
:param object_name: name of the object to delete
:type object_name: str
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info('Blob %s deleted.', object_name)
|
def delete(self, bucket_name, object_name):
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:type bucket_name: str
:param object_name: name of the object to delete
:type object_name: str
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info('Blob %s deleted.', object_name)
|
[
"Deletes",
"an",
"object",
"from",
"the",
"bucket",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L255-L269
|
[
"def",
"delete",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"blob",
"(",
"blob_name",
"=",
"object_name",
")",
"blob",
".",
"delete",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Blob %s deleted.'",
",",
"object_name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
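A one-line usage sketch for the `delete` row above (hypothetical names):

```python
# Sketch only: names are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
hook.delete(bucket_name='my-bucket', object_name='tmp/obsolete.csv')
```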
test
|
GoogleCloudStorageHook.list
|
List all objects from the bucket with the give string prefix in name
:param bucket_name: bucket name
:type bucket_name: str
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
:param prefix: prefix string which filters objects whose name begin with
this prefix
:type prefix: str
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
:type delimiter: str
:return: a stream of object names matching the filtering criteria
|
airflow/contrib/hooks/gcs_hook.py
|
def list(self, bucket_name, versions=None, max_results=None, prefix=None, delimiter=None):
"""
List all objects from the bucket with the give string prefix in name
:param bucket_name: bucket name
:type bucket_name: str
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
:param prefix: prefix string which filters objects whose name begin with
this prefix
:type prefix: str
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
:type delimiter: str
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
ids = []
pageToken = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=pageToken,
prefix=prefix,
delimiter=delimiter,
versions=versions
)
blob_names = []
for blob in blobs:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
pageToken = blobs.next_page_token
if pageToken is None:
# empty next page token
break
return ids
|
def list(self, bucket_name, versions=None, max_results=None, prefix=None, delimiter=None):
"""
List all objects from the bucket with the give string prefix in name
:param bucket_name: bucket name
:type bucket_name: str
:param versions: if true, list all versions of the objects
:type versions: bool
:param max_results: max count of items to return in a single page of responses
:type max_results: int
:param prefix: prefix string which filters objects whose name begin with
this prefix
:type prefix: str
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
:type delimiter: str
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
ids = []
pageToken = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=pageToken,
prefix=prefix,
delimiter=delimiter,
versions=versions
)
blob_names = []
for blob in blobs:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
pageToken = blobs.next_page_token
if pageToken is None:
# empty next page token
break
return ids
|
[
"List",
"all",
"objects",
"from",
"the",
"bucket",
"with",
"the",
"give",
"string",
"prefix",
"in",
"name"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L271-L316
|
[
"def",
"list",
"(",
"self",
",",
"bucket_name",
",",
"versions",
"=",
"None",
",",
"max_results",
"=",
"None",
",",
"prefix",
"=",
"None",
",",
"delimiter",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"ids",
"=",
"[",
"]",
"pageToken",
"=",
"None",
"while",
"True",
":",
"blobs",
"=",
"bucket",
".",
"list_blobs",
"(",
"max_results",
"=",
"max_results",
",",
"page_token",
"=",
"pageToken",
",",
"prefix",
"=",
"prefix",
",",
"delimiter",
"=",
"delimiter",
",",
"versions",
"=",
"versions",
")",
"blob_names",
"=",
"[",
"]",
"for",
"blob",
"in",
"blobs",
":",
"blob_names",
".",
"append",
"(",
"blob",
".",
"name",
")",
"prefixes",
"=",
"blobs",
".",
"prefixes",
"if",
"prefixes",
":",
"ids",
"+=",
"list",
"(",
"prefixes",
")",
"else",
":",
"ids",
"+=",
"blob_names",
"pageToken",
"=",
"blobs",
".",
"next_page_token",
"if",
"pageToken",
"is",
"None",
":",
"# empty next page token",
"break",
"return",
"ids"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
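A usage sketch for the `list` row above; the method pages through results itself and returns either matching object names or, when the delimiter produces them, directory-style prefixes (names are hypothetical):

```python
# Sketch only: names are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
names = hook.list(bucket_name='my-bucket',
                  prefix='data/2019-',
                  delimiter='.csv')   # all pages are collected before returning
```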
test
|
GoogleCloudStorageHook.get_size
|
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
:type object_name: str
|
airflow/contrib/hooks/gcs_hook.py
|
def get_size(self, bucket_name, object_name):
"""
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
:type object_name: str
"""
self.log.info('Checking the file size of object: %s in bucket_name: %s',
object_name,
bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_size = blob.size
self.log.info('The file size of %s is %s bytes.', object_name, blob_size)
return blob_size
|
def get_size(self, bucket_name, object_name):
"""
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
:type object_name: str
"""
self.log.info('Checking the file size of object: %s in bucket_name: %s',
object_name,
bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_size = blob.size
self.log.info('The file size of %s is %s bytes.', object_name, blob_size)
return blob_size
|
[
"Gets",
"the",
"size",
"of",
"a",
"file",
"in",
"Google",
"Cloud",
"Storage",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L318-L338
|
[
"def",
"get_size",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Checking the file size of object: %s in bucket_name: %s'",
",",
"object_name",
",",
"bucket_name",
")",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"get_blob",
"(",
"blob_name",
"=",
"object_name",
")",
"blob",
".",
"reload",
"(",
")",
"blob_size",
"=",
"blob",
".",
"size",
"self",
".",
"log",
".",
"info",
"(",
"'The file size of %s is %s bytes.'",
",",
"object_name",
",",
"blob_size",
")",
"return",
"blob_size"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
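A one-line usage sketch for the `get_size` row above (hypothetical names):

```python
# Sketch only: names are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
size_bytes = hook.get_size(bucket_name='my-bucket', object_name='data/file.csv')
```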
test
|
GoogleCloudStorageHook.get_crc32c
|
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
|
airflow/contrib/hooks/gcs_hook.py
|
def get_crc32c(self, bucket_name, object_name):
"""
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the crc32c checksum of '
'object_name: %s in bucket_name: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_crc32c = blob.crc32c
self.log.info('The crc32c checksum of %s is %s', object_name, blob_crc32c)
return blob_crc32c
|
def get_crc32c(self, bucket_name, object_name):
"""
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the crc32c checksum of '
'object_name: %s in bucket_name: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_crc32c = blob.crc32c
self.log.info('The crc32c checksum of %s is %s', object_name, blob_crc32c)
return blob_crc32c
|
[
"Gets",
"the",
"CRC32c",
"checksum",
"of",
"an",
"object",
"in",
"Google",
"Cloud",
"Storage",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L340-L358
|
[
"def",
"get_crc32c",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Retrieving the crc32c checksum of '",
"'object_name: %s in bucket_name: %s'",
",",
"object_name",
",",
"bucket_name",
")",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"get_blob",
"(",
"blob_name",
"=",
"object_name",
")",
"blob",
".",
"reload",
"(",
")",
"blob_crc32c",
"=",
"blob",
".",
"crc32c",
"self",
".",
"log",
".",
"info",
"(",
"'The crc32c checksum of %s is %s'",
",",
"object_name",
",",
"blob_crc32c",
")",
"return",
"blob_crc32c"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
GoogleCloudStorageHook.get_md5hash
|
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
|
airflow/contrib/hooks/gcs_hook.py
|
def get_md5hash(self, bucket_name, object_name):
"""
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the MD5 hash of '
'object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_md5hash = blob.md5_hash
self.log.info('The md5Hash of %s is %s', object_name, blob_md5hash)
return blob_md5hash
|
def get_md5hash(self, bucket_name, object_name):
"""
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google cloud storage bucket where the blob_name is.
:type bucket_name: str
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
:type object_name: str
"""
self.log.info('Retrieving the MD5 hash of '
'object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name=bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob.reload()
blob_md5hash = blob.md5_hash
self.log.info('The md5Hash of %s is %s', object_name, blob_md5hash)
return blob_md5hash
|
[
"Gets",
"the",
"MD5",
"hash",
"of",
"an",
"object",
"in",
"Google",
"Cloud",
"Storage",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L360-L378
|
[
"def",
"get_md5hash",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Retrieving the MD5 hash of '",
"'object: %s in bucket: %s'",
",",
"object_name",
",",
"bucket_name",
")",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"get_blob",
"(",
"blob_name",
"=",
"object_name",
")",
"blob",
".",
"reload",
"(",
")",
"blob_md5hash",
"=",
"blob",
".",
"md5_hash",
"self",
".",
"log",
".",
"info",
"(",
"'The md5Hash of %s is %s'",
",",
"object_name",
",",
"blob_md5hash",
")",
"return",
"blob_md5hash"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
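A usage sketch covering the `get_crc32c` and `get_md5hash` rows above, which return the checksum values exposed on the blob metadata (hypothetical names):

```python
# Sketch only: names are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
crc32c = hook.get_crc32c(bucket_name='my-bucket', object_name='data/file.csv')
md5 = hook.get_md5hash(bucket_name='my-bucket', object_name='data/file.csv')
```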
test
|
GoogleCloudStorageHook.create_bucket
|
Creates a new bucket. Google Cloud Storage uses a flat namespace, so
you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:type bucket_name: str
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:type resource: dict
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage. Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:type storage_class: str
:param location: The location of the bucket.
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:type location: str
:param project_id: The ID of the GCP Project.
:type project_id: str
:param labels: User-provided labels, in key/value pairs.
:type labels: dict
:return: If successful, it returns the ``id`` of the bucket.
|
airflow/contrib/hooks/gcs_hook.py
|
def create_bucket(self,
bucket_name,
resource=None,
storage_class='MULTI_REGIONAL',
location='US',
project_id=None,
labels=None
):
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace, so
you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:type bucket_name: str
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:type resource: dict
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage. Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:type storage_class: str
:param location: The location of the bucket.
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:type location: str
:param project_id: The ID of the GCP Project.
:type project_id: str
:param labels: User-provided labels, in key/value pairs.
:type labels: dict
:return: If successful, it returns the ``id`` of the bucket.
"""
self.log.info('Creating Bucket: %s; Location: %s; Storage Class: %s',
bucket_name, location, storage_class)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket_resource = resource or {}
for item in bucket_resource:
if item != "name":
bucket._patch_property(name=item, value=resource[item])
bucket.storage_class = storage_class
bucket.labels = labels or {}
bucket.create(project=project_id, location=location)
return bucket.id
|
def create_bucket(self,
bucket_name,
resource=None,
storage_class='MULTI_REGIONAL',
location='US',
project_id=None,
labels=None
):
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace, so
you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:type bucket_name: str
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:type resource: dict
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage. Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:type storage_class: str
:param location: The location of the bucket.
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:type location: str
:param project_id: The ID of the GCP Project.
:type project_id: str
:param labels: User-provided labels, in key/value pairs.
:type labels: dict
:return: If successful, it returns the ``id`` of the bucket.
"""
self.log.info('Creating Bucket: %s; Location: %s; Storage Class: %s',
bucket_name, location, storage_class)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket_resource = resource or {}
for item in bucket_resource:
if item != "name":
bucket._patch_property(name=item, value=resource[item])
bucket.storage_class = storage_class
bucket.labels = labels or {}
bucket.create(project=project_id, location=location)
return bucket.id
|
[
"Creates",
"a",
"new",
"bucket",
".",
"Google",
"Cloud",
"Storage",
"uses",
"a",
"flat",
"namespace",
"so",
"you",
"can",
"t",
"create",
"a",
"bucket",
"with",
"a",
"name",
"that",
"is",
"already",
"in",
"use",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L382-L445
|
[
"def",
"create_bucket",
"(",
"self",
",",
"bucket_name",
",",
"resource",
"=",
"None",
",",
"storage_class",
"=",
"'MULTI_REGIONAL'",
",",
"location",
"=",
"'US'",
",",
"project_id",
"=",
"None",
",",
"labels",
"=",
"None",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Creating Bucket: %s; Location: %s; Storage Class: %s'",
",",
"bucket_name",
",",
"location",
",",
"storage_class",
")",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"bucket_resource",
"=",
"resource",
"or",
"{",
"}",
"for",
"item",
"in",
"bucket_resource",
":",
"if",
"item",
"!=",
"\"name\"",
":",
"bucket",
".",
"_patch_property",
"(",
"name",
"=",
"item",
",",
"value",
"=",
"resource",
"[",
"item",
"]",
")",
"bucket",
".",
"storage_class",
"=",
"storage_class",
"bucket",
".",
"labels",
"=",
"labels",
"or",
"{",
"}",
"bucket",
".",
"create",
"(",
"project",
"=",
"project_id",
",",
"location",
"=",
"location",
")",
"return",
"bucket",
".",
"id"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
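A usage sketch for the `create_bucket` row above; everything besides the method name and its parameters is hypothetical:

```python
# Sketch only: bucket name, project id and labels are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
bucket_id = hook.create_bucket(bucket_name='my-new-bucket',
                               storage_class='REGIONAL',
                               location='EU',
                               project_id='my-gcp-project',
                               labels={'env': 'dev'})
```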
test
|
GoogleCloudStorageHook.insert_bucket_acl
|
Creates a new ACL entry on the specified bucket_name.
See: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers.
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER", "WRITER".
:type role: str
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
|
airflow/contrib/hooks/gcs_hook.py
|
def insert_bucket_acl(self, bucket_name, entity, role, user_project=None):
"""
Creates a new ACL entry on the specified bucket_name.
See: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers.
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER", "WRITER".
:type role: str
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry in bucket: %s', bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket.acl.reload()
bucket.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
bucket.acl.user_project = user_project
bucket.acl.save()
self.log.info('A new ACL entry created in bucket: %s', bucket_name)
|
def insert_bucket_acl(self, bucket_name, entity, role, user_project=None):
"""
Creates a new ACL entry on the specified bucket_name.
See: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers.
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER", "WRITER".
:type role: str
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry in bucket: %s', bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket.acl.reload()
bucket.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
bucket.acl.user_project = user_project
bucket.acl.save()
self.log.info('A new ACL entry created in bucket: %s', bucket_name)
|
[
"Creates",
"a",
"new",
"ACL",
"entry",
"on",
"the",
"specified",
"bucket_name",
".",
"See",
":",
"https",
":",
"//",
"cloud",
".",
"google",
".",
"com",
"/",
"storage",
"/",
"docs",
"/",
"json_api",
"/",
"v1",
"/",
"bucketAccessControls",
"/",
"insert"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L447-L475
|
[
"def",
"insert_bucket_acl",
"(",
"self",
",",
"bucket_name",
",",
"entity",
",",
"role",
",",
"user_project",
"=",
"None",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Creating a new ACL entry in bucket: %s'",
",",
"bucket_name",
")",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"bucket",
".",
"acl",
".",
"reload",
"(",
")",
"bucket",
".",
"acl",
".",
"entity_from_dict",
"(",
"entity_dict",
"=",
"{",
"\"entity\"",
":",
"entity",
",",
"\"role\"",
":",
"role",
"}",
")",
"if",
"user_project",
":",
"bucket",
".",
"acl",
".",
"user_project",
"=",
"user_project",
"bucket",
".",
"acl",
".",
"save",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'A new ACL entry created in bucket: %s'",
",",
"bucket_name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
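A usage sketch for the `insert_bucket_acl` row above, granting a hypothetical user read access on a hypothetical bucket:

```python
# Sketch only: bucket name and entity are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
hook.insert_bucket_acl(bucket_name='my-bucket',
                       entity='user-someone@example.com',
                       role='READER')
```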
test
|
GoogleCloudStorageHook.insert_object_acl
|
Creates a new ACL entry on the specified object.
See: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param object_name: Name of the object. For information about how to URL encode
object names to be path safe, see:
https://cloud.google.com/storage/docs/json_api/#encoding
:type object_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER".
:type role: str
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
|
airflow/contrib/hooks/gcs_hook.py
|
def insert_object_acl(self, bucket_name, object_name, entity, role, user_project=None):
"""
Creates a new ACL entry on the specified object.
See: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param object_name: Name of the object. For information about how to URL encode
object names to be path safe, see:
https://cloud.google.com/storage/docs/json_api/#encoding
:type object_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER".
:type role: str
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry for object: %s in bucket: %s',
object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
blob = bucket.blob(object_name)
# Reload fetches the current ACL from Cloud Storage.
blob.acl.reload()
blob.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
blob.acl.user_project = user_project
blob.acl.save()
self.log.info('A new ACL entry created for object: %s in bucket: %s',
object_name, bucket_name)
|
def insert_object_acl(self, bucket_name, object_name, entity, role, user_project=None):
"""
Creates a new ACL entry on the specified object.
See: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert
:param bucket_name: Name of a bucket_name.
:type bucket_name: str
:param object_name: Name of the object. For information about how to URL encode
object names to be path safe, see:
https://cloud.google.com/storage/docs/json_api/#encoding
:type object_name: str
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:type entity: str
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER".
:type role: str
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:type user_project: str
"""
self.log.info('Creating a new ACL entry for object: %s in bucket: %s',
object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
blob = bucket.blob(object_name)
# Reload fetches the current ACL from Cloud Storage.
blob.acl.reload()
blob.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
blob.acl.user_project = user_project
blob.acl.save()
self.log.info('A new ACL entry created for object: %s in bucket: %s',
object_name, bucket_name)
|
[
"Creates",
"a",
"new",
"ACL",
"entry",
"on",
"the",
"specified",
"object",
".",
"See",
":",
"https",
":",
"//",
"cloud",
".",
"google",
".",
"com",
"/",
"storage",
"/",
"docs",
"/",
"json_api",
"/",
"v1",
"/",
"objectAccessControls",
"/",
"insert"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L477-L513
|
[
"def",
"insert_object_acl",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
",",
"entity",
",",
"role",
",",
"user_project",
"=",
"None",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Creating a new ACL entry for object: %s in bucket: %s'",
",",
"object_name",
",",
"bucket_name",
")",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"bucket",
"(",
"bucket_name",
"=",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"blob",
"(",
"object_name",
")",
"# Reload fetches the current ACL from Cloud Storage.",
"blob",
".",
"acl",
".",
"reload",
"(",
")",
"blob",
".",
"acl",
".",
"entity_from_dict",
"(",
"entity_dict",
"=",
"{",
"\"entity\"",
":",
"entity",
",",
"\"role\"",
":",
"role",
"}",
")",
"if",
"user_project",
":",
"blob",
".",
"acl",
".",
"user_project",
"=",
"user_project",
"blob",
".",
"acl",
".",
"save",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'A new ACL entry created for object: %s in bucket: %s'",
",",
"object_name",
",",
"bucket_name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
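A usage sketch for the `insert_object_acl` row above, making a hypothetical object readable by all users:

```python
# Sketch only: bucket and object names are hypothetical; hook defaults assumed.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()
hook.insert_object_acl(bucket_name='my-bucket',
                       object_name='data/file.csv',
                       entity='allUsers',
                       role='READER')
```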
test
|
GoogleCloudStorageHook.compose
|
Composes a list of existing object into a new object in the same storage bucket_name
Currently it only supports up to 32 objects that can be concatenated
in a single operation
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:type bucket_name: str
:param source_objects: The list of source objects that will be composed
into a single object.
:type source_objects: list
:param destination_object: The path of the object if given.
:type destination_object: str
|
airflow/contrib/hooks/gcs_hook.py
|
def compose(self, bucket_name, source_objects, destination_object):
"""
Composes a list of existing object into a new object in the same storage bucket_name
Currently it only supports up to 32 objects that can be concatenated
in a single operation
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:type bucket_name: str
:param source_objects: The list of source objects that will be composed
into a single object.
:type source_objects: list
:param destination_object: The path of the object if given.
:type destination_object: str
"""
if not source_objects or not len(source_objects):
raise ValueError('source_objects cannot be empty.')
if not bucket_name or not destination_object:
raise ValueError('bucket_name and destination_object cannot be empty.')
self.log.info("Composing %s to %s in the bucket %s",
source_objects, destination_object, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name)
destination_blob = bucket.blob(destination_object)
destination_blob.compose(
sources=[
bucket.blob(blob_name=source_object) for source_object in source_objects
])
self.log.info("Completed successfully.")
|
def compose(self, bucket_name, source_objects, destination_object):
"""
Composes a list of existing object into a new object in the same storage bucket_name
Currently it only supports up to 32 objects that can be concatenated
in a single operation
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:type bucket_name: str
:param source_objects: The list of source objects that will be composed
into a single object.
:type source_objects: list
:param destination_object: The path of the object if given.
:type destination_object: str
"""
if not source_objects or not len(source_objects):
raise ValueError('source_objects cannot be empty.')
if not bucket_name or not destination_object:
raise ValueError('bucket_name and destination_object cannot be empty.')
self.log.info("Composing %s to %s in the bucket %s",
source_objects, destination_object, bucket_name)
client = self.get_conn()
bucket = client.get_bucket(bucket_name)
destination_blob = bucket.blob(destination_object)
destination_blob.compose(
sources=[
bucket.blob(blob_name=source_object) for source_object in source_objects
])
self.log.info("Completed successfully.")
|
[
"Composes",
"a",
"list",
"of",
"existing",
"object",
"into",
"a",
"new",
"object",
"in",
"the",
"same",
"storage",
"bucket_name"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L515-L550
|
[
"def",
"compose",
"(",
"self",
",",
"bucket_name",
",",
"source_objects",
",",
"destination_object",
")",
":",
"if",
"not",
"source_objects",
"or",
"not",
"len",
"(",
"source_objects",
")",
":",
"raise",
"ValueError",
"(",
"'source_objects cannot be empty.'",
")",
"if",
"not",
"bucket_name",
"or",
"not",
"destination_object",
":",
"raise",
"ValueError",
"(",
"'bucket_name and destination_object cannot be empty.'",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Composing %s to %s in the bucket %s\"",
",",
"source_objects",
",",
"destination_object",
",",
"bucket_name",
")",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket_name",
")",
"destination_blob",
"=",
"bucket",
".",
"blob",
"(",
"destination_object",
")",
"destination_blob",
".",
"compose",
"(",
"sources",
"=",
"[",
"bucket",
".",
"blob",
"(",
"blob_name",
"=",
"source_object",
")",
"for",
"source_object",
"in",
"source_objects",
"]",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Completed successfully.\"",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
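A minimal sketch of calling GoogleCloudStorageHook.compose from the row above; the bucket and object names are invented, and the GCP connection is assumed to be configured.

from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()  # default google_cloud_default connection
# Concatenate two part files into a single object in the same bucket
# (the 32-source limit is enforced by the GCS API, not by the hook).
hook.compose(
    bucket_name='example-bucket',
    source_objects=['logs/part-0000.csv', 'logs/part-0001.csv'],
    destination_object='logs/combined.csv',
)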
test
|
argmin
|
Return the index, i, in arr that minimizes f(arr[i])
|
airflow/contrib/hooks/sagemaker_hook.py
|
def argmin(arr, f):
"""Return the index, i, in arr that minimizes f(arr[i])"""
m = None
i = None
for idx, item in enumerate(arr):
if item is not None:
if m is None or f(item) < m:
m = f(item)
i = idx
return i
|
def argmin(arr, f):
"""Return the index, i, in arr that minimizes f(arr[i])"""
m = None
i = None
for idx, item in enumerate(arr):
if item is not None:
if m is None or f(item) < m:
m = f(item)
i = idx
return i
|
[
"Return",
"the",
"index",
"i",
"in",
"arr",
"that",
"minimizes",
"f",
"(",
"arr",
"[",
"i",
"]",
")"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L47-L56
|
[
"def",
"argmin",
"(",
"arr",
",",
"f",
")",
":",
"m",
"=",
"None",
"i",
"=",
"None",
"for",
"idx",
",",
"item",
"in",
"enumerate",
"(",
"arr",
")",
":",
"if",
"item",
"is",
"not",
"None",
":",
"if",
"m",
"is",
"None",
"or",
"f",
"(",
"item",
")",
"<",
"m",
":",
"m",
"=",
"f",
"(",
"item",
")",
"i",
"=",
"idx",
"return",
"i"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
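argmin is a plain helper with no AWS dependency, so it can be exercised directly; the sample list below is made up.

from airflow.contrib.hooks.sagemaker_hook import argmin

timestamps = [None, 1546300800, 1546300500, None, 1546300900]
# Index of the item minimizing f(item); None entries are skipped.
print(argmin(timestamps, lambda t: t))  # -> 2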
test
|
secondary_training_status_changed
|
Returns true if training job's secondary status message has changed.
:param current_job_description: Current job description, returned from DescribeTrainingJob call.
:type current_job_description: dict
:param prev_job_description: Previous job description, returned from DescribeTrainingJob call.
:type prev_job_description: dict
:return: Whether the secondary status message of a training job changed or not.
|
airflow/contrib/hooks/sagemaker_hook.py
|
def secondary_training_status_changed(current_job_description, prev_job_description):
"""
Returns true if training job's secondary status message has changed.
:param current_job_description: Current job description, returned from DescribeTrainingJob call.
:type current_job_description: dict
:param prev_job_description: Previous job description, returned from DescribeTrainingJob call.
:type prev_job_description: dict
:return: Whether the secondary status message of a training job changed or not.
"""
current_secondary_status_transitions = current_job_description.get('SecondaryStatusTransitions')
if current_secondary_status_transitions is None or len(current_secondary_status_transitions) == 0:
return False
prev_job_secondary_status_transitions = prev_job_description.get('SecondaryStatusTransitions') \
if prev_job_description is not None else None
last_message = prev_job_secondary_status_transitions[-1]['StatusMessage'] \
if prev_job_secondary_status_transitions is not None \
and len(prev_job_secondary_status_transitions) > 0 else ''
message = current_job_description['SecondaryStatusTransitions'][-1]['StatusMessage']
return message != last_message
|
def secondary_training_status_changed(current_job_description, prev_job_description):
"""
Returns true if training job's secondary status message has changed.
:param current_job_description: Current job description, returned from DescribeTrainingJob call.
:type current_job_description: dict
:param prev_job_description: Previous job description, returned from DescribeTrainingJob call.
:type prev_job_description: dict
:return: Whether the secondary status message of a training job changed or not.
"""
current_secondary_status_transitions = current_job_description.get('SecondaryStatusTransitions')
if current_secondary_status_transitions is None or len(current_secondary_status_transitions) == 0:
return False
prev_job_secondary_status_transitions = prev_job_description.get('SecondaryStatusTransitions') \
if prev_job_description is not None else None
last_message = prev_job_secondary_status_transitions[-1]['StatusMessage'] \
if prev_job_secondary_status_transitions is not None \
and len(prev_job_secondary_status_transitions) > 0 else ''
message = current_job_description['SecondaryStatusTransitions'][-1]['StatusMessage']
return message != last_message
|
[
"Returns",
"true",
"if",
"training",
"job",
"s",
"secondary",
"status",
"message",
"has",
"changed",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L59-L83
|
[
"def",
"secondary_training_status_changed",
"(",
"current_job_description",
",",
"prev_job_description",
")",
":",
"current_secondary_status_transitions",
"=",
"current_job_description",
".",
"get",
"(",
"'SecondaryStatusTransitions'",
")",
"if",
"current_secondary_status_transitions",
"is",
"None",
"or",
"len",
"(",
"current_secondary_status_transitions",
")",
"==",
"0",
":",
"return",
"False",
"prev_job_secondary_status_transitions",
"=",
"prev_job_description",
".",
"get",
"(",
"'SecondaryStatusTransitions'",
")",
"if",
"prev_job_description",
"is",
"not",
"None",
"else",
"None",
"last_message",
"=",
"prev_job_secondary_status_transitions",
"[",
"-",
"1",
"]",
"[",
"'StatusMessage'",
"]",
"if",
"prev_job_secondary_status_transitions",
"is",
"not",
"None",
"and",
"len",
"(",
"prev_job_secondary_status_transitions",
")",
">",
"0",
"else",
"''",
"message",
"=",
"current_job_description",
"[",
"'SecondaryStatusTransitions'",
"]",
"[",
"-",
"1",
"]",
"[",
"'StatusMessage'",
"]",
"return",
"message",
"!=",
"last_message"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
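The helper only inspects the SecondaryStatusTransitions key, so a hand-built fragment of a DescribeTrainingJob response is enough to demonstrate it; the status messages below are placeholders.

from airflow.contrib.hooks.sagemaker_hook import secondary_training_status_changed

prev = {'SecondaryStatusTransitions': [
    {'Status': 'Starting', 'StatusMessage': 'Preparing the instances for training'}]}
curr = {'SecondaryStatusTransitions': [
    {'Status': 'Downloading', 'StatusMessage': 'Downloading input data'}]}

print(secondary_training_status_changed(curr, prev))  # True  - last message changed
print(secondary_training_status_changed(curr, curr))  # False - same last message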
test
|
secondary_training_status_message
|
Returns a string contains start time and the secondary training job status message.
:param job_description: Returned response from DescribeTrainingJob call
:type job_description: dict
:param prev_description: Previous job description from DescribeTrainingJob call
:type prev_description: dict
:return: Job status string to be printed.
|
airflow/contrib/hooks/sagemaker_hook.py
|
def secondary_training_status_message(job_description, prev_description):
"""
Returns a string contains start time and the secondary training job status message.
:param job_description: Returned response from DescribeTrainingJob call
:type job_description: dict
:param prev_description: Previous job description from DescribeTrainingJob call
:type prev_description: dict
:return: Job status string to be printed.
"""
if job_description is None or job_description.get('SecondaryStatusTransitions') is None\
or len(job_description.get('SecondaryStatusTransitions')) == 0:
return ''
prev_description_secondary_transitions = prev_description.get('SecondaryStatusTransitions')\
if prev_description is not None else None
prev_transitions_num = len(prev_description['SecondaryStatusTransitions'])\
if prev_description_secondary_transitions is not None else 0
current_transitions = job_description['SecondaryStatusTransitions']
transitions_to_print = current_transitions[-1:] if len(current_transitions) == prev_transitions_num else \
current_transitions[prev_transitions_num - len(current_transitions):]
status_strs = []
for transition in transitions_to_print:
message = transition['StatusMessage']
time_str = timezone.convert_to_utc(job_description['LastModifiedTime']).strftime('%Y-%m-%d %H:%M:%S')
status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message))
return '\n'.join(status_strs)
|
def secondary_training_status_message(job_description, prev_description):
"""
Returns a string contains start time and the secondary training job status message.
:param job_description: Returned response from DescribeTrainingJob call
:type job_description: dict
:param prev_description: Previous job description from DescribeTrainingJob call
:type prev_description: dict
:return: Job status string to be printed.
"""
if job_description is None or job_description.get('SecondaryStatusTransitions') is None\
or len(job_description.get('SecondaryStatusTransitions')) == 0:
return ''
prev_description_secondary_transitions = prev_description.get('SecondaryStatusTransitions')\
if prev_description is not None else None
prev_transitions_num = len(prev_description['SecondaryStatusTransitions'])\
if prev_description_secondary_transitions is not None else 0
current_transitions = job_description['SecondaryStatusTransitions']
transitions_to_print = current_transitions[-1:] if len(current_transitions) == prev_transitions_num else \
current_transitions[prev_transitions_num - len(current_transitions):]
status_strs = []
for transition in transitions_to_print:
message = transition['StatusMessage']
time_str = timezone.convert_to_utc(job_description['LastModifiedTime']).strftime('%Y-%m-%d %H:%M:%S')
status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message))
return '\n'.join(status_strs)
|
[
"Returns",
"a",
"string",
"contains",
"start",
"time",
"and",
"the",
"secondary",
"training",
"job",
"status",
"message",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L86-L117
|
[
"def",
"secondary_training_status_message",
"(",
"job_description",
",",
"prev_description",
")",
":",
"if",
"job_description",
"is",
"None",
"or",
"job_description",
".",
"get",
"(",
"'SecondaryStatusTransitions'",
")",
"is",
"None",
"or",
"len",
"(",
"job_description",
".",
"get",
"(",
"'SecondaryStatusTransitions'",
")",
")",
"==",
"0",
":",
"return",
"''",
"prev_description_secondary_transitions",
"=",
"prev_description",
".",
"get",
"(",
"'SecondaryStatusTransitions'",
")",
"if",
"prev_description",
"is",
"not",
"None",
"else",
"None",
"prev_transitions_num",
"=",
"len",
"(",
"prev_description",
"[",
"'SecondaryStatusTransitions'",
"]",
")",
"if",
"prev_description_secondary_transitions",
"is",
"not",
"None",
"else",
"0",
"current_transitions",
"=",
"job_description",
"[",
"'SecondaryStatusTransitions'",
"]",
"transitions_to_print",
"=",
"current_transitions",
"[",
"-",
"1",
":",
"]",
"if",
"len",
"(",
"current_transitions",
")",
"==",
"prev_transitions_num",
"else",
"current_transitions",
"[",
"prev_transitions_num",
"-",
"len",
"(",
"current_transitions",
")",
":",
"]",
"status_strs",
"=",
"[",
"]",
"for",
"transition",
"in",
"transitions_to_print",
":",
"message",
"=",
"transition",
"[",
"'StatusMessage'",
"]",
"time_str",
"=",
"timezone",
".",
"convert_to_utc",
"(",
"job_description",
"[",
"'LastModifiedTime'",
"]",
")",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
"status_strs",
".",
"append",
"(",
"'{} {} - {}'",
".",
"format",
"(",
"time_str",
",",
"transition",
"[",
"'Status'",
"]",
",",
"message",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"status_strs",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
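A standalone sketch of secondary_training_status_message; the trimmed description dict mimics the shape of a DescribeTrainingJob response with invented values.

from datetime import datetime, timezone
from airflow.contrib.hooks.sagemaker_hook import secondary_training_status_message

description = {
    'LastModifiedTime': datetime(2019, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
    'SecondaryStatusTransitions': [
        {'Status': 'Starting', 'StatusMessage': 'Preparing the instances for training'},
        {'Status': 'Downloading', 'StatusMessage': 'Downloading input data'},
    ],
}
# With no previous description, every transition is formatted as
# "<utc time> <Status> - <StatusMessage>", one per line.
print(secondary_training_status_message(description, None))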
test
|
SageMakerHook.tar_and_s3_upload
|
Tar the local file or directory and upload to s3
:param path: local file or directory
:type path: str
:param key: s3 key
:type key: str
:param bucket: s3 bucket
:type bucket: str
:return: None
|
airflow/contrib/hooks/sagemaker_hook.py
|
def tar_and_s3_upload(self, path, key, bucket):
"""
Tar the local file or directory and upload to s3
:param path: local file or directory
:type path: str
:param key: s3 key
:type key: str
:param bucket: s3 bucket
:type bucket: str
:return: None
"""
with tempfile.TemporaryFile() as temp_file:
if os.path.isdir(path):
files = [os.path.join(path, name) for name in os.listdir(path)]
else:
files = [path]
with tarfile.open(mode='w:gz', fileobj=temp_file) as tar_file:
for f in files:
tar_file.add(f, arcname=os.path.basename(f))
temp_file.seek(0)
self.s3_hook.load_file_obj(temp_file, key, bucket, replace=True)
|
def tar_and_s3_upload(self, path, key, bucket):
"""
Tar the local file or directory and upload to s3
:param path: local file or directory
:type path: str
:param key: s3 key
:type key: str
:param bucket: s3 bucket
:type bucket: str
:return: None
"""
with tempfile.TemporaryFile() as temp_file:
if os.path.isdir(path):
files = [os.path.join(path, name) for name in os.listdir(path)]
else:
files = [path]
with tarfile.open(mode='w:gz', fileobj=temp_file) as tar_file:
for f in files:
tar_file.add(f, arcname=os.path.basename(f))
temp_file.seek(0)
self.s3_hook.load_file_obj(temp_file, key, bucket, replace=True)
|
[
"Tar",
"the",
"local",
"file",
"or",
"directory",
"and",
"upload",
"to",
"s3"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L134-L155
|
[
"def",
"tar_and_s3_upload",
"(",
"self",
",",
"path",
",",
"key",
",",
"bucket",
")",
":",
"with",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"as",
"temp_file",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"name",
")",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
"]",
"else",
":",
"files",
"=",
"[",
"path",
"]",
"with",
"tarfile",
".",
"open",
"(",
"mode",
"=",
"'w:gz'",
",",
"fileobj",
"=",
"temp_file",
")",
"as",
"tar_file",
":",
"for",
"f",
"in",
"files",
":",
"tar_file",
".",
"add",
"(",
"f",
",",
"arcname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
")",
"temp_file",
".",
"seek",
"(",
"0",
")",
"self",
".",
"s3_hook",
".",
"load_file_obj",
"(",
"temp_file",
",",
"key",
",",
"bucket",
",",
"replace",
"=",
"True",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
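A usage sketch for SageMakerHook.tar_and_s3_upload; it assumes a configured aws_default connection with S3 access, and the local path, key and bucket are placeholders.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')
# Gzip-tar a local script directory and upload it as a single S3 object.
hook.tar_and_s3_upload(
    path='/tmp/training_scripts',
    key='sagemaker/source/sourcedir.tar.gz',
    bucket='example-sagemaker-bucket',
)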
test
|
SageMakerHook.configure_s3_resources
|
Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
:type config: dict
:rtype: dict
|
airflow/contrib/hooks/sagemaker_hook.py
|
def configure_s3_resources(self, config):
"""
Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
:type config: dict
:rtype: dict
"""
s3_operations = config.pop('S3Operations', None)
if s3_operations is not None:
create_bucket_ops = s3_operations.get('S3CreateBucket', [])
upload_ops = s3_operations.get('S3Upload', [])
for op in create_bucket_ops:
self.s3_hook.create_bucket(bucket_name=op['Bucket'])
for op in upload_ops:
if op['Tar']:
self.tar_and_s3_upload(op['Path'], op['Key'],
op['Bucket'])
else:
self.s3_hook.load_file(op['Path'], op['Key'],
op['Bucket'])
|
def configure_s3_resources(self, config):
"""
Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
:type config: dict
:rtype: dict
"""
s3_operations = config.pop('S3Operations', None)
if s3_operations is not None:
create_bucket_ops = s3_operations.get('S3CreateBucket', [])
upload_ops = s3_operations.get('S3Upload', [])
for op in create_bucket_ops:
self.s3_hook.create_bucket(bucket_name=op['Bucket'])
for op in upload_ops:
if op['Tar']:
self.tar_and_s3_upload(op['Path'], op['Key'],
op['Bucket'])
else:
self.s3_hook.load_file(op['Path'], op['Key'],
op['Bucket'])
|
[
"Extract",
"the",
"S3",
"operations",
"from",
"the",
"configuration",
"and",
"execute",
"them",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L157-L178
|
[
"def",
"configure_s3_resources",
"(",
"self",
",",
"config",
")",
":",
"s3_operations",
"=",
"config",
".",
"pop",
"(",
"'S3Operations'",
",",
"None",
")",
"if",
"s3_operations",
"is",
"not",
"None",
":",
"create_bucket_ops",
"=",
"s3_operations",
".",
"get",
"(",
"'S3CreateBucket'",
",",
"[",
"]",
")",
"upload_ops",
"=",
"s3_operations",
".",
"get",
"(",
"'S3Upload'",
",",
"[",
"]",
")",
"for",
"op",
"in",
"create_bucket_ops",
":",
"self",
".",
"s3_hook",
".",
"create_bucket",
"(",
"bucket_name",
"=",
"op",
"[",
"'Bucket'",
"]",
")",
"for",
"op",
"in",
"upload_ops",
":",
"if",
"op",
"[",
"'Tar'",
"]",
":",
"self",
".",
"tar_and_s3_upload",
"(",
"op",
"[",
"'Path'",
"]",
",",
"op",
"[",
"'Key'",
"]",
",",
"op",
"[",
"'Bucket'",
"]",
")",
"else",
":",
"self",
".",
"s3_hook",
".",
"load_file",
"(",
"op",
"[",
"'Path'",
"]",
",",
"op",
"[",
"'Key'",
"]",
",",
"op",
"[",
"'Bucket'",
"]",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
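A sketch of configure_s3_resources with an invented config: the S3Operations section is popped and executed (one bucket creation plus one plain upload here), while the rest of the dict is left untouched for the later SageMaker call.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')  # assumed AWS connection
config = {
    'TrainingJobName': 'example-training-job',
    'S3Operations': {
        'S3CreateBucket': [{'Bucket': 'example-sagemaker-bucket'}],
        'S3Upload': [{
            'Path': '/tmp/train.csv',
            'Key': 'input/train.csv',
            'Bucket': 'example-sagemaker-bucket',
            'Tar': False,            # True would route through tar_and_s3_upload
        }],
    },
}
hook.configure_s3_resources(config)
# 'S3Operations' has been removed from config at this point.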
test
|
SageMakerHook.check_s3_url
|
Check if an S3 URL exists
:param s3url: S3 url
:type s3url: str
:rtype: bool
|
airflow/contrib/hooks/sagemaker_hook.py
|
def check_s3_url(self, s3url):
"""
Check if an S3 URL exists
:param s3url: S3 url
:type s3url: str
:rtype: bool
"""
bucket, key = S3Hook.parse_s3_url(s3url)
if not self.s3_hook.check_for_bucket(bucket_name=bucket):
raise AirflowException(
"The input S3 Bucket {} does not exist ".format(bucket))
if key and not self.s3_hook.check_for_key(key=key, bucket_name=bucket)\
and not self.s3_hook.check_for_prefix(
prefix=key, bucket_name=bucket, delimiter='/'):
# check if s3 key exists in the case user provides a single file
# or if s3 prefix exists in the case user provides multiple files in
# a prefix
raise AirflowException("The input S3 Key "
"or Prefix {} does not exist in the Bucket {}"
.format(s3url, bucket))
return True
|
def check_s3_url(self, s3url):
"""
Check if an S3 URL exists
:param s3url: S3 url
:type s3url: str
:rtype: bool
"""
bucket, key = S3Hook.parse_s3_url(s3url)
if not self.s3_hook.check_for_bucket(bucket_name=bucket):
raise AirflowException(
"The input S3 Bucket {} does not exist ".format(bucket))
if key and not self.s3_hook.check_for_key(key=key, bucket_name=bucket)\
and not self.s3_hook.check_for_prefix(
prefix=key, bucket_name=bucket, delimiter='/'):
# check if s3 key exists in the case user provides a single file
# or if s3 prefix exists in the case user provides multiple files in
# a prefix
raise AirflowException("The input S3 Key "
"or Prefix {} does not exist in the Bucket {}"
.format(s3url, bucket))
return True
|
[
"Check",
"if",
"an",
"S3",
"URL",
"exists"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L180-L201
|
[
"def",
"check_s3_url",
"(",
"self",
",",
"s3url",
")",
":",
"bucket",
",",
"key",
"=",
"S3Hook",
".",
"parse_s3_url",
"(",
"s3url",
")",
"if",
"not",
"self",
".",
"s3_hook",
".",
"check_for_bucket",
"(",
"bucket_name",
"=",
"bucket",
")",
":",
"raise",
"AirflowException",
"(",
"\"The input S3 Bucket {} does not exist \"",
".",
"format",
"(",
"bucket",
")",
")",
"if",
"key",
"and",
"not",
"self",
".",
"s3_hook",
".",
"check_for_key",
"(",
"key",
"=",
"key",
",",
"bucket_name",
"=",
"bucket",
")",
"and",
"not",
"self",
".",
"s3_hook",
".",
"check_for_prefix",
"(",
"prefix",
"=",
"key",
",",
"bucket_name",
"=",
"bucket",
",",
"delimiter",
"=",
"'/'",
")",
":",
"# check if s3 key exists in the case user provides a single file",
"# or if s3 prefix exists in the case user provides multiple files in",
"# a prefix",
"raise",
"AirflowException",
"(",
"\"The input S3 Key \"",
"\"or Prefix {} does not exist in the Bucket {}\"",
".",
"format",
"(",
"s3url",
",",
"bucket",
")",
")",
"return",
"True"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
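check_s3_url can be exercised against any s3:// URI reachable through the configured connection; the URI below is a placeholder. It returns True when the bucket and the key (or prefix) exist, and raises AirflowException otherwise.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')
hook.check_s3_url('s3://example-sagemaker-bucket/input/train.csv')  # -> True if present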
test
|
SageMakerHook.get_log_conn
|
Establish an AWS connection for retrieving logs during training
:rtype: CloudWatchLogs.Client
|
airflow/contrib/hooks/sagemaker_hook.py
|
def get_log_conn(self):
"""
Establish an AWS connection for retrieving logs during training
:rtype: CloudWatchLogs.Client
"""
config = botocore.config.Config(retries={'max_attempts': 15})
return self.get_client_type('logs', config=config)
|
def get_log_conn(self):
"""
Establish an AWS connection for retrieving logs during training
:rtype: CloudWatchLogs.Client
"""
config = botocore.config.Config(retries={'max_attempts': 15})
return self.get_client_type('logs', config=config)
|
[
"Establish",
"an",
"AWS",
"connection",
"for",
"retrieving",
"logs",
"during",
"training"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L233-L240
|
[
"def",
"get_log_conn",
"(",
"self",
")",
":",
"config",
"=",
"botocore",
".",
"config",
".",
"Config",
"(",
"retries",
"=",
"{",
"'max_attempts'",
":",
"15",
"}",
")",
"return",
"self",
".",
"get_client_type",
"(",
"'logs'",
",",
"config",
"=",
"config",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
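get_log_conn hands back a plain boto3 CloudWatch Logs client (with retries raised to 15), so any Logs API call can be issued on it; the log-stream prefix below is a placeholder job name.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')
logs = hook.get_log_conn()
streams = logs.describe_log_streams(
    logGroupName='/aws/sagemaker/TrainingJobs',
    logStreamNamePrefix='example-training-job/',
)
print([s['logStreamName'] for s in streams['logStreams']])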
test
|
SageMakerHook.create_training_job
|
Create a training job
:param config: the config for training
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to training job creation
|
airflow/contrib/hooks/sagemaker_hook.py
|
def create_training_job(self, config, wait_for_completion=True, print_log=True,
check_interval=30, max_ingestion_time=None):
"""
Create a training job
:param config: the config for training
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to training job creation
"""
self.check_training_config(config)
response = self.get_conn().create_training_job(**config)
if print_log:
self.check_training_status_with_log(config['TrainingJobName'],
self.non_terminal_states,
self.failed_states,
wait_for_completion,
check_interval, max_ingestion_time
)
elif wait_for_completion:
describe_response = self.check_status(config['TrainingJobName'],
'TrainingJobStatus',
self.describe_training_job,
check_interval, max_ingestion_time
)
billable_time = \
(describe_response['TrainingEndTime'] - describe_response['TrainingStartTime']) * \
describe_response['ResourceConfig']['InstanceCount']
self.log.info('Billable seconds:{}'.format(int(billable_time.total_seconds()) + 1))
return response
|
def create_training_job(self, config, wait_for_completion=True, print_log=True,
check_interval=30, max_ingestion_time=None):
"""
Create a training job
:param config: the config for training
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to training job creation
"""
self.check_training_config(config)
response = self.get_conn().create_training_job(**config)
if print_log:
self.check_training_status_with_log(config['TrainingJobName'],
self.non_terminal_states,
self.failed_states,
wait_for_completion,
check_interval, max_ingestion_time
)
elif wait_for_completion:
describe_response = self.check_status(config['TrainingJobName'],
'TrainingJobStatus',
self.describe_training_job,
check_interval, max_ingestion_time
)
billable_time = \
(describe_response['TrainingEndTime'] - describe_response['TrainingStartTime']) * \
describe_response['ResourceConfig']['InstanceCount']
self.log.info('Billable seconds:{}'.format(int(billable_time.total_seconds()) + 1))
return response
|
[
"Create",
"a",
"training",
"job"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L325-L366
|
[
"def",
"create_training_job",
"(",
"self",
",",
"config",
",",
"wait_for_completion",
"=",
"True",
",",
"print_log",
"=",
"True",
",",
"check_interval",
"=",
"30",
",",
"max_ingestion_time",
"=",
"None",
")",
":",
"self",
".",
"check_training_config",
"(",
"config",
")",
"response",
"=",
"self",
".",
"get_conn",
"(",
")",
".",
"create_training_job",
"(",
"*",
"*",
"config",
")",
"if",
"print_log",
":",
"self",
".",
"check_training_status_with_log",
"(",
"config",
"[",
"'TrainingJobName'",
"]",
",",
"self",
".",
"non_terminal_states",
",",
"self",
".",
"failed_states",
",",
"wait_for_completion",
",",
"check_interval",
",",
"max_ingestion_time",
")",
"elif",
"wait_for_completion",
":",
"describe_response",
"=",
"self",
".",
"check_status",
"(",
"config",
"[",
"'TrainingJobName'",
"]",
",",
"'TrainingJobStatus'",
",",
"self",
".",
"describe_training_job",
",",
"check_interval",
",",
"max_ingestion_time",
")",
"billable_time",
"=",
"(",
"describe_response",
"[",
"'TrainingEndTime'",
"]",
"-",
"describe_response",
"[",
"'TrainingStartTime'",
"]",
")",
"*",
"describe_response",
"[",
"'ResourceConfig'",
"]",
"[",
"'InstanceCount'",
"]",
"self",
".",
"log",
".",
"info",
"(",
"'Billable seconds:{}'",
".",
"format",
"(",
"int",
"(",
"billable_time",
".",
"total_seconds",
"(",
")",
")",
"+",
"1",
")",
")",
"return",
"response"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
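A pared-down create_training_job call: the config follows the boto3 CreateTrainingJob request shape, and every name, image URI, ARN and S3 path below is a placeholder that would have to exist in the target account.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')
training_config = {
    'TrainingJobName': 'example-training-job',
    'AlgorithmSpecification': {
        'TrainingImage': '123456789012.dkr.ecr.us-east-1.amazonaws.com/example:latest',
        'TrainingInputMode': 'File',
    },
    'RoleArn': 'arn:aws:iam::123456789012:role/example-sagemaker-role',
    'InputDataConfig': [{
        'ChannelName': 'train',
        'DataSource': {'S3DataSource': {
            'S3DataType': 'S3Prefix',
            'S3Uri': 's3://example-sagemaker-bucket/input/',
        }},
    }],
    'OutputDataConfig': {'S3OutputPath': 's3://example-sagemaker-bucket/output/'},
    'ResourceConfig': {'InstanceCount': 1, 'InstanceType': 'ml.m5.large', 'VolumeSizeInGB': 10},
    'StoppingCondition': {'MaxRuntimeInSeconds': 3600},
}
response = hook.create_training_job(
    training_config,
    wait_for_completion=True,   # block until the job reaches a terminal state
    print_log=True,             # tail CloudWatch logs while waiting
    check_interval=30,
)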
test
|
SageMakerHook.create_tuning_job
|
Create a tuning job
:param config: the config for tuning
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to tuning job creation
|
airflow/contrib/hooks/sagemaker_hook.py
|
def create_tuning_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create a tuning job
:param config: the config for tuning
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to tuning job creation
"""
self.check_tuning_config(config)
response = self.get_conn().create_hyper_parameter_tuning_job(**config)
if wait_for_completion:
self.check_status(config['HyperParameterTuningJobName'],
'HyperParameterTuningJobStatus',
self.describe_tuning_job,
check_interval, max_ingestion_time
)
return response
|
def create_tuning_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create a tuning job
:param config: the config for tuning
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to tuning job creation
"""
self.check_tuning_config(config)
response = self.get_conn().create_hyper_parameter_tuning_job(**config)
if wait_for_completion:
self.check_status(config['HyperParameterTuningJobName'],
'HyperParameterTuningJobStatus',
self.describe_tuning_job,
check_interval, max_ingestion_time
)
return response
|
[
"Create",
"a",
"tuning",
"job"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L368-L396
|
[
"def",
"create_tuning_job",
"(",
"self",
",",
"config",
",",
"wait_for_completion",
"=",
"True",
",",
"check_interval",
"=",
"30",
",",
"max_ingestion_time",
"=",
"None",
")",
":",
"self",
".",
"check_tuning_config",
"(",
"config",
")",
"response",
"=",
"self",
".",
"get_conn",
"(",
")",
".",
"create_hyper_parameter_tuning_job",
"(",
"*",
"*",
"config",
")",
"if",
"wait_for_completion",
":",
"self",
".",
"check_status",
"(",
"config",
"[",
"'HyperParameterTuningJobName'",
"]",
",",
"'HyperParameterTuningJobStatus'",
",",
"self",
".",
"describe_tuning_job",
",",
"check_interval",
",",
"max_ingestion_time",
")",
"return",
"response"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
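The call pattern for create_tuning_job mirrors the training case; the config is a boto3 CreateHyperParameterTuningJob request, and the required HyperParameterTuningJobConfig and TrainingJobDefinition sections are deliberately elided below because they are lengthy and account-specific, so this is a call-pattern sketch rather than a complete request.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')
tuning_config = {
    'HyperParameterTuningJobName': 'example-tuning-job',  # placeholder name
    # 'HyperParameterTuningJobConfig': {...},  # strategy, objective, ranges, limits
    # 'TrainingJobDefinition': {...},          # image, role, channels, resources
}
response = hook.create_tuning_job(
    tuning_config,
    wait_for_completion=True,
    check_interval=60,          # poll HyperParameterTuningJobStatus every minute
)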
test
|
SageMakerHook.create_transform_job
|
Create a transform job
:param config: the config for transform job
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to transform job creation
|
airflow/contrib/hooks/sagemaker_hook.py
|
def create_transform_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create a transform job
:param config: the config for transform job
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to transform job creation
"""
self.check_s3_url(config['TransformInput']['DataSource']['S3DataSource']['S3Uri'])
response = self.get_conn().create_transform_job(**config)
if wait_for_completion:
self.check_status(config['TransformJobName'],
'TransformJobStatus',
self.describe_transform_job,
check_interval, max_ingestion_time
)
return response
|
def create_transform_job(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create a transform job
:param config: the config for transform job
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to transform job creation
"""
self.check_s3_url(config['TransformInput']['DataSource']['S3DataSource']['S3Uri'])
response = self.get_conn().create_transform_job(**config)
if wait_for_completion:
self.check_status(config['TransformJobName'],
'TransformJobStatus',
self.describe_transform_job,
check_interval, max_ingestion_time
)
return response
|
[
"Create",
"a",
"transform",
"job"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L398-L426
|
[
"def",
"create_transform_job",
"(",
"self",
",",
"config",
",",
"wait_for_completion",
"=",
"True",
",",
"check_interval",
"=",
"30",
",",
"max_ingestion_time",
"=",
"None",
")",
":",
"self",
".",
"check_s3_url",
"(",
"config",
"[",
"'TransformInput'",
"]",
"[",
"'DataSource'",
"]",
"[",
"'S3DataSource'",
"]",
"[",
"'S3Uri'",
"]",
")",
"response",
"=",
"self",
".",
"get_conn",
"(",
")",
".",
"create_transform_job",
"(",
"*",
"*",
"config",
")",
"if",
"wait_for_completion",
":",
"self",
".",
"check_status",
"(",
"config",
"[",
"'TransformJobName'",
"]",
",",
"'TransformJobStatus'",
",",
"self",
".",
"describe_transform_job",
",",
"check_interval",
",",
"max_ingestion_time",
")",
"return",
"response"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
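A minimal create_transform_job sketch: the model is assumed to already be registered in SageMaker, every name and S3 path is a placeholder, and check_s3_url is run on the input URI before the job is submitted.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')
transform_config = {
    'TransformJobName': 'example-transform-job',
    'ModelName': 'example-model',               # must already exist in SageMaker
    'TransformInput': {
        'DataSource': {'S3DataSource': {
            'S3DataType': 'S3Prefix',
            'S3Uri': 's3://example-sagemaker-bucket/batch-input/',
        }},
    },
    'TransformOutput': {'S3OutputPath': 's3://example-sagemaker-bucket/batch-output/'},
    'TransformResources': {'InstanceType': 'ml.m5.large', 'InstanceCount': 1},
}
response = hook.create_transform_job(transform_config, wait_for_completion=True)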
test
|
SageMakerHook.create_endpoint
|
Create an endpoint
:param config: the config for endpoint
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to endpoint creation
|
airflow/contrib/hooks/sagemaker_hook.py
|
def create_endpoint(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create an endpoint
:param config: the config for endpoint
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to endpoint creation
"""
response = self.get_conn().create_endpoint(**config)
if wait_for_completion:
self.check_status(config['EndpointName'],
'EndpointStatus',
self.describe_endpoint,
check_interval, max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states
)
return response
|
def create_endpoint(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create an endpoint
:param config: the config for endpoint
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to endpoint creation
"""
response = self.get_conn().create_endpoint(**config)
if wait_for_completion:
self.check_status(config['EndpointName'],
'EndpointStatus',
self.describe_endpoint,
check_interval, max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states
)
return response
|
[
"Create",
"an",
"endpoint"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L450-L477
|
[
"def",
"create_endpoint",
"(",
"self",
",",
"config",
",",
"wait_for_completion",
"=",
"True",
",",
"check_interval",
"=",
"30",
",",
"max_ingestion_time",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"get_conn",
"(",
")",
".",
"create_endpoint",
"(",
"*",
"*",
"config",
")",
"if",
"wait_for_completion",
":",
"self",
".",
"check_status",
"(",
"config",
"[",
"'EndpointName'",
"]",
",",
"'EndpointStatus'",
",",
"self",
".",
"describe_endpoint",
",",
"check_interval",
",",
"max_ingestion_time",
",",
"non_terminal_states",
"=",
"self",
".",
"endpoint_non_terminal_states",
")",
"return",
"response"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
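create_endpoint only needs the endpoint name and the name of an existing endpoint configuration; both values below are placeholders. While waiting, it polls EndpointStatus against the endpoint-specific set of non-terminal states.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')
response = hook.create_endpoint(
    {'EndpointName': 'example-endpoint',
     'EndpointConfigName': 'example-endpoint-config'},  # config must already exist
    wait_for_completion=True,
    check_interval=30,
)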
test
|
SageMakerHook.describe_training_job_with_log
|
Return the training job info associated with job_name and print CloudWatch logs
|
airflow/contrib/hooks/sagemaker_hook.py
|
def describe_training_job_with_log(self, job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call):
"""
Return the training job info associated with job_name and print CloudWatch logs
"""
log_group = '/aws/sagemaker/TrainingJobs'
if len(stream_names) < instance_count:
# Log streams are created whenever a container starts writing to stdout/err, so this list
# may be dynamic until we have a stream for every instance.
logs_conn = self.get_log_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + '/',
orderBy='LogStreamName',
limit=instance_count
)
stream_names = [s['logStreamName'] for s in streams['logStreams']]
positions.update([(s, Position(timestamp=0, skip=0))
for s in stream_names if s not in positions])
except logs_conn.exceptions.ResourceNotFoundException:
# On the very first training job run on an account, there's no log group until
# the container starts logging, so ignore any errors thrown about that
pass
if len(stream_names) > 0:
for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
self.log.info(event['message'])
ts, count = positions[stream_names[idx]]
if event['timestamp'] == ts:
positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
else:
positions[stream_names[idx]] = Position(timestamp=event['timestamp'], skip=1)
if state == LogState.COMPLETE:
return state, last_description, last_describe_job_call
if state == LogState.JOB_COMPLETE:
state = LogState.COMPLETE
elif time.time() - last_describe_job_call >= 30:
description = self.describe_training_job(job_name)
last_describe_job_call = time.time()
if secondary_training_status_changed(description, last_description):
self.log.info(secondary_training_status_message(description, last_description))
last_description = description
status = description['TrainingJobStatus']
if status not in self.non_terminal_states:
state = LogState.JOB_COMPLETE
return state, last_description, last_describe_job_call
|
def describe_training_job_with_log(self, job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call):
"""
Return the training job info associated with job_name and print CloudWatch logs
"""
log_group = '/aws/sagemaker/TrainingJobs'
if len(stream_names) < instance_count:
# Log streams are created whenever a container starts writing to stdout/err, so this list
# may be dynamic until we have a stream for every instance.
logs_conn = self.get_log_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + '/',
orderBy='LogStreamName',
limit=instance_count
)
stream_names = [s['logStreamName'] for s in streams['logStreams']]
positions.update([(s, Position(timestamp=0, skip=0))
for s in stream_names if s not in positions])
except logs_conn.exceptions.ResourceNotFoundException:
# On the very first training job run on an account, there's no log group until
# the container starts logging, so ignore any errors thrown about that
pass
if len(stream_names) > 0:
for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
self.log.info(event['message'])
ts, count = positions[stream_names[idx]]
if event['timestamp'] == ts:
positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
else:
positions[stream_names[idx]] = Position(timestamp=event['timestamp'], skip=1)
if state == LogState.COMPLETE:
return state, last_description, last_describe_job_call
if state == LogState.JOB_COMPLETE:
state = LogState.COMPLETE
elif time.time() - last_describe_job_call >= 30:
description = self.describe_training_job(job_name)
last_describe_job_call = time.time()
if secondary_training_status_changed(description, last_description):
self.log.info(secondary_training_status_message(description, last_description))
last_description = description
status = description['TrainingJobStatus']
if status not in self.non_terminal_states:
state = LogState.JOB_COMPLETE
return state, last_description, last_describe_job_call
|
[
"Return",
"the",
"training",
"job",
"info",
"associated",
"with",
"job_name",
"and",
"print",
"CloudWatch",
"logs"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L519-L572
|
[
"def",
"describe_training_job_with_log",
"(",
"self",
",",
"job_name",
",",
"positions",
",",
"stream_names",
",",
"instance_count",
",",
"state",
",",
"last_description",
",",
"last_describe_job_call",
")",
":",
"log_group",
"=",
"'/aws/sagemaker/TrainingJobs'",
"if",
"len",
"(",
"stream_names",
")",
"<",
"instance_count",
":",
"# Log streams are created whenever a container starts writing to stdout/err, so this list",
"# may be dynamic until we have a stream for every instance.",
"logs_conn",
"=",
"self",
".",
"get_log_conn",
"(",
")",
"try",
":",
"streams",
"=",
"logs_conn",
".",
"describe_log_streams",
"(",
"logGroupName",
"=",
"log_group",
",",
"logStreamNamePrefix",
"=",
"job_name",
"+",
"'/'",
",",
"orderBy",
"=",
"'LogStreamName'",
",",
"limit",
"=",
"instance_count",
")",
"stream_names",
"=",
"[",
"s",
"[",
"'logStreamName'",
"]",
"for",
"s",
"in",
"streams",
"[",
"'logStreams'",
"]",
"]",
"positions",
".",
"update",
"(",
"[",
"(",
"s",
",",
"Position",
"(",
"timestamp",
"=",
"0",
",",
"skip",
"=",
"0",
")",
")",
"for",
"s",
"in",
"stream_names",
"if",
"s",
"not",
"in",
"positions",
"]",
")",
"except",
"logs_conn",
".",
"exceptions",
".",
"ResourceNotFoundException",
":",
"# On the very first training job run on an account, there's no log group until",
"# the container starts logging, so ignore any errors thrown about that",
"pass",
"if",
"len",
"(",
"stream_names",
")",
">",
"0",
":",
"for",
"idx",
",",
"event",
"in",
"self",
".",
"multi_stream_iter",
"(",
"log_group",
",",
"stream_names",
",",
"positions",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"event",
"[",
"'message'",
"]",
")",
"ts",
",",
"count",
"=",
"positions",
"[",
"stream_names",
"[",
"idx",
"]",
"]",
"if",
"event",
"[",
"'timestamp'",
"]",
"==",
"ts",
":",
"positions",
"[",
"stream_names",
"[",
"idx",
"]",
"]",
"=",
"Position",
"(",
"timestamp",
"=",
"ts",
",",
"skip",
"=",
"count",
"+",
"1",
")",
"else",
":",
"positions",
"[",
"stream_names",
"[",
"idx",
"]",
"]",
"=",
"Position",
"(",
"timestamp",
"=",
"event",
"[",
"'timestamp'",
"]",
",",
"skip",
"=",
"1",
")",
"if",
"state",
"==",
"LogState",
".",
"COMPLETE",
":",
"return",
"state",
",",
"last_description",
",",
"last_describe_job_call",
"if",
"state",
"==",
"LogState",
".",
"JOB_COMPLETE",
":",
"state",
"=",
"LogState",
".",
"COMPLETE",
"elif",
"time",
".",
"time",
"(",
")",
"-",
"last_describe_job_call",
">=",
"30",
":",
"description",
"=",
"self",
".",
"describe_training_job",
"(",
"job_name",
")",
"last_describe_job_call",
"=",
"time",
".",
"time",
"(",
")",
"if",
"secondary_training_status_changed",
"(",
"description",
",",
"last_description",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"secondary_training_status_message",
"(",
"description",
",",
"last_description",
")",
")",
"last_description",
"=",
"description",
"status",
"=",
"description",
"[",
"'TrainingJobStatus'",
"]",
"if",
"status",
"not",
"in",
"self",
".",
"non_terminal_states",
":",
"state",
"=",
"LogState",
".",
"JOB_COMPLETE",
"return",
"state",
",",
"last_description",
",",
"last_describe_job_call"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
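describe_training_job_with_log is normally driven by check_training_status_with_log, but a single polling step can be sketched directly; the job name is a placeholder, and the empty positions/stream_names arguments follow the module's own conventions for the first call.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook, LogState

hook = SageMakerHook(aws_conn_id='aws_default')
job_name = 'example-training-job'
description = hook.describe_training_job(job_name)

state, description, last_call = hook.describe_training_job_with_log(
    job_name,
    positions={},            # per-stream read positions, empty on the first call
    stream_names=[],         # discovered from CloudWatch as containers start logging
    instance_count=description['ResourceConfig']['InstanceCount'],
    state=LogState.TAILING,
    last_description=description,
    last_describe_job_call=0,   # forces an immediate status refresh (>30s ago)
)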
test
|
SageMakerHook.check_status
|
Check status of a SageMaker job
:param job_name: name of the job to check status
:type job_name: str
:param key: the key of the response dict
that points to the state
:type key: str
:param describe_function: the function used to retrieve the status
:type describe_function: python callable
:param args: the arguments for the function
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:param non_terminal_states: the set of nonterminal states
:type non_terminal_states: set
:return: response of describe call after job is done
|
airflow/contrib/hooks/sagemaker_hook.py
|
def check_status(self, job_name, key,
describe_function, check_interval,
max_ingestion_time,
non_terminal_states=None):
"""
Check status of a SageMaker job
:param job_name: name of the job to check status
:type job_name: str
:param key: the key of the response dict
that points to the state
:type key: str
:param describe_function: the function used to retrieve the status
:type describe_function: python callable
:param args: the arguments for the function
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:param non_terminal_states: the set of nonterminal states
:type non_terminal_states: set
:return: response of describe call after job is done
"""
if not non_terminal_states:
non_terminal_states = self.non_terminal_states
sec = 0
running = True
while running:
time.sleep(check_interval)
sec = sec + check_interval
try:
response = describe_function(job_name)
status = response[key]
self.log.info('Job still running for %s seconds... '
'current status is %s' % (sec, status))
except KeyError:
raise AirflowException('Could not get status of the SageMaker job')
except ClientError:
raise AirflowException('AWS request failed, check logs for more info')
if status in non_terminal_states:
running = True
elif status in self.failed_states:
raise AirflowException('SageMaker job failed because %s' % response['FailureReason'])
else:
running = False
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
raise AirflowException('SageMaker job took more than %s seconds', max_ingestion_time)
self.log.info('SageMaker Job Compeleted')
response = describe_function(job_name)
return response
|
def check_status(self, job_name, key,
describe_function, check_interval,
max_ingestion_time,
non_terminal_states=None):
"""
Check status of a SageMaker job
:param job_name: name of the job to check status
:type job_name: str
:param key: the key of the response dict
that points to the state
:type key: str
:param describe_function: the function used to retrieve the status
:type describe_function: python callable
:param args: the arguments for the function
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:param non_terminal_states: the set of nonterminal states
:type non_terminal_states: set
:return: response of describe call after job is done
"""
if not non_terminal_states:
non_terminal_states = self.non_terminal_states
sec = 0
running = True
while running:
time.sleep(check_interval)
sec = sec + check_interval
try:
response = describe_function(job_name)
status = response[key]
self.log.info('Job still running for %s seconds... '
'current status is %s' % (sec, status))
except KeyError:
raise AirflowException('Could not get status of the SageMaker job')
except ClientError:
raise AirflowException('AWS request failed, check logs for more info')
if status in non_terminal_states:
running = True
elif status in self.failed_states:
raise AirflowException('SageMaker job failed because %s' % response['FailureReason'])
else:
running = False
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
raise AirflowException('SageMaker job took more than %s seconds', max_ingestion_time)
self.log.info('SageMaker Job Compeleted')
response = describe_function(job_name)
return response
|
[
"Check",
"status",
"of",
"a",
"SageMaker",
"job"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L627-L686
|
[
"def",
"check_status",
"(",
"self",
",",
"job_name",
",",
"key",
",",
"describe_function",
",",
"check_interval",
",",
"max_ingestion_time",
",",
"non_terminal_states",
"=",
"None",
")",
":",
"if",
"not",
"non_terminal_states",
":",
"non_terminal_states",
"=",
"self",
".",
"non_terminal_states",
"sec",
"=",
"0",
"running",
"=",
"True",
"while",
"running",
":",
"time",
".",
"sleep",
"(",
"check_interval",
")",
"sec",
"=",
"sec",
"+",
"check_interval",
"try",
":",
"response",
"=",
"describe_function",
"(",
"job_name",
")",
"status",
"=",
"response",
"[",
"key",
"]",
"self",
".",
"log",
".",
"info",
"(",
"'Job still running for %s seconds... '",
"'current status is %s'",
"%",
"(",
"sec",
",",
"status",
")",
")",
"except",
"KeyError",
":",
"raise",
"AirflowException",
"(",
"'Could not get status of the SageMaker job'",
")",
"except",
"ClientError",
":",
"raise",
"AirflowException",
"(",
"'AWS request failed, check logs for more info'",
")",
"if",
"status",
"in",
"non_terminal_states",
":",
"running",
"=",
"True",
"elif",
"status",
"in",
"self",
".",
"failed_states",
":",
"raise",
"AirflowException",
"(",
"'SageMaker job failed because %s'",
"%",
"response",
"[",
"'FailureReason'",
"]",
")",
"else",
":",
"running",
"=",
"False",
"if",
"max_ingestion_time",
"and",
"sec",
">",
"max_ingestion_time",
":",
"# ensure that the job gets killed if the max ingestion time is exceeded",
"raise",
"AirflowException",
"(",
"'SageMaker job took more than %s seconds'",
",",
"max_ingestion_time",
")",
"self",
".",
"log",
".",
"info",
"(",
"'SageMaker Job Compeleted'",
")",
"response",
"=",
"describe_function",
"(",
"job_name",
")",
"return",
"response"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
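check_status works with any of the hook's describe_* helpers; the sketch below polls a training job that is assumed to have been submitted already, with a placeholder name and a one-hour cap.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

hook = SageMakerHook(aws_conn_id='aws_default')
final_description = hook.check_status(
    job_name='example-training-job',
    key='TrainingJobStatus',                      # field of the describe response to watch
    describe_function=hook.describe_training_job,
    check_interval=30,
    max_ingestion_time=3600,                      # raise if the job runs past an hour
)
print(final_description['TrainingJobStatus'])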
test
|
SageMakerHook.check_training_status_with_log
|
Display the logs for a given training job, optionally tailing them until the
job is complete.
:param job_name: name of the training job to check status and display logs for
:type job_name: str
:param non_terminal_states: the set of non_terminal states
:type non_terminal_states: set
:param failed_states: the set of failed states
:type failed_states: set
:param wait_for_completion: Whether to keep looking for new log entries
until the job completes
:type wait_for_completion: bool
:param check_interval: The interval in seconds between polling for new log entries and job completion
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: None
|
airflow/contrib/hooks/sagemaker_hook.py
|
def check_training_status_with_log(self, job_name, non_terminal_states, failed_states,
wait_for_completion, check_interval, max_ingestion_time):
"""
Display the logs for a given training job, optionally tailing them until the
job is complete.
:param job_name: name of the training job to check status and display logs for
:type job_name: str
:param non_terminal_states: the set of non_terminal states
:type non_terminal_states: set
:param failed_states: the set of failed states
:type failed_states: set
:param wait_for_completion: Whether to keep looking for new log entries
until the job completes
:type wait_for_completion: bool
:param check_interval: The interval in seconds between polling for new log entries and job completion
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: None
"""
sec = 0
description = self.describe_training_job(job_name)
self.log.info(secondary_training_status_message(description, None))
instance_count = description['ResourceConfig']['InstanceCount']
status = description['TrainingJobStatus']
stream_names = [] # The list of log streams
positions = {} # The current position in each stream, map of stream name -> position
job_already_completed = status not in non_terminal_states
state = LogState.TAILING if wait_for_completion and not job_already_completed else LogState.COMPLETE
# The loop below implements a state machine that alternates between checking the job status and
# reading whatever is available in the logs at this point. Note, that if we were called with
# wait_for_completion == False, we never check the job status.
#
# If wait_for_completion == TRUE and job is not completed, the initial state is TAILING
# If wait_for_completion == FALSE, the initial state is COMPLETE
# (doesn't matter if the job really is complete).
#
# The state table:
#
# STATE ACTIONS CONDITION NEW STATE
# ---------------- ---------------- ----------------- ----------------
# TAILING Read logs, Pause, Get status Job complete JOB_COMPLETE
# Else TAILING
# JOB_COMPLETE Read logs, Pause Any COMPLETE
# COMPLETE Read logs, Exit N/A
#
# Notes:
# - The JOB_COMPLETE state forces us to do an extra pause and read any items that
# got to Cloudwatch after the job was marked complete.
last_describe_job_call = time.time()
last_description = description
while True:
time.sleep(check_interval)
sec = sec + check_interval
state, last_description, last_describe_job_call = \
self.describe_training_job_with_log(job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call)
if state == LogState.COMPLETE:
break
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
raise AirflowException('SageMaker job took more than %s seconds', max_ingestion_time)
if wait_for_completion:
status = last_description['TrainingJobStatus']
if status in failed_states:
reason = last_description.get('FailureReason', '(No reason provided)')
raise AirflowException('Error training {}: {} Reason: {}'.format(job_name, status, reason))
billable_time = (last_description['TrainingEndTime'] - last_description['TrainingStartTime']) \
* instance_count
self.log.info('Billable seconds:{}'.format(int(billable_time.total_seconds()) + 1))
|
def check_training_status_with_log(self, job_name, non_terminal_states, failed_states,
wait_for_completion, check_interval, max_ingestion_time):
"""
Display the logs for a given training job, optionally tailing them until the
job is complete.
:param job_name: name of the training job to check status and display logs for
:type job_name: str
:param non_terminal_states: the set of non_terminal states
:type non_terminal_states: set
:param failed_states: the set of failed states
:type failed_states: set
:param wait_for_completion: Whether to keep looking for new log entries
until the job completes
:type wait_for_completion: bool
:param check_interval: The interval in seconds between polling for new log entries and job completion
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: None
"""
sec = 0
description = self.describe_training_job(job_name)
self.log.info(secondary_training_status_message(description, None))
instance_count = description['ResourceConfig']['InstanceCount']
status = description['TrainingJobStatus']
stream_names = [] # The list of log streams
positions = {} # The current position in each stream, map of stream name -> position
job_already_completed = status not in non_terminal_states
state = LogState.TAILING if wait_for_completion and not job_already_completed else LogState.COMPLETE
# The loop below implements a state machine that alternates between checking the job status and
# reading whatever is available in the logs at this point. Note, that if we were called with
# wait_for_completion == False, we never check the job status.
#
# If wait_for_completion == TRUE and job is not completed, the initial state is TAILING
# If wait_for_completion == FALSE, the initial state is COMPLETE
# (doesn't matter if the job really is complete).
#
# The state table:
#
# STATE ACTIONS CONDITION NEW STATE
# ---------------- ---------------- ----------------- ----------------
# TAILING Read logs, Pause, Get status Job complete JOB_COMPLETE
# Else TAILING
# JOB_COMPLETE Read logs, Pause Any COMPLETE
# COMPLETE Read logs, Exit N/A
#
# Notes:
# - The JOB_COMPLETE state forces us to do an extra pause and read any items that
# got to Cloudwatch after the job was marked complete.
last_describe_job_call = time.time()
last_description = description
while True:
time.sleep(check_interval)
sec = sec + check_interval
state, last_description, last_describe_job_call = \
self.describe_training_job_with_log(job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call)
if state == LogState.COMPLETE:
break
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
raise AirflowException('SageMaker job took more than %s seconds', max_ingestion_time)
if wait_for_completion:
status = last_description['TrainingJobStatus']
if status in failed_states:
reason = last_description.get('FailureReason', '(No reason provided)')
raise AirflowException('Error training {}: {} Reason: {}'.format(job_name, status, reason))
billable_time = (last_description['TrainingEndTime'] - last_description['TrainingStartTime']) \
* instance_count
self.log.info('Billable seconds:{}'.format(int(billable_time.total_seconds()) + 1))
|
[
"Display",
"the",
"logs",
"for",
"a",
"given",
"training",
"job",
"optionally",
"tailing",
"them",
"until",
"the",
"job",
"is",
"complete",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L688-L770
|
[
"def",
"check_training_status_with_log",
"(",
"self",
",",
"job_name",
",",
"non_terminal_states",
",",
"failed_states",
",",
"wait_for_completion",
",",
"check_interval",
",",
"max_ingestion_time",
")",
":",
"sec",
"=",
"0",
"description",
"=",
"self",
".",
"describe_training_job",
"(",
"job_name",
")",
"self",
".",
"log",
".",
"info",
"(",
"secondary_training_status_message",
"(",
"description",
",",
"None",
")",
")",
"instance_count",
"=",
"description",
"[",
"'ResourceConfig'",
"]",
"[",
"'InstanceCount'",
"]",
"status",
"=",
"description",
"[",
"'TrainingJobStatus'",
"]",
"stream_names",
"=",
"[",
"]",
"# The list of log streams",
"positions",
"=",
"{",
"}",
"# The current position in each stream, map of stream name -> position",
"job_already_completed",
"=",
"status",
"not",
"in",
"non_terminal_states",
"state",
"=",
"LogState",
".",
"TAILING",
"if",
"wait_for_completion",
"and",
"not",
"job_already_completed",
"else",
"LogState",
".",
"COMPLETE",
"# The loop below implements a state machine that alternates between checking the job status and",
"# reading whatever is available in the logs at this point. Note, that if we were called with",
"# wait_for_completion == False, we never check the job status.",
"#",
"# If wait_for_completion == TRUE and job is not completed, the initial state is TAILING",
"# If wait_for_completion == FALSE, the initial state is COMPLETE",
"# (doesn't matter if the job really is complete).",
"#",
"# The state table:",
"#",
"# STATE ACTIONS CONDITION NEW STATE",
"# ---------------- ---------------- ----------------- ----------------",
"# TAILING Read logs, Pause, Get status Job complete JOB_COMPLETE",
"# Else TAILING",
"# JOB_COMPLETE Read logs, Pause Any COMPLETE",
"# COMPLETE Read logs, Exit N/A",
"#",
"# Notes:",
"# - The JOB_COMPLETE state forces us to do an extra pause and read any items that",
"# got to Cloudwatch after the job was marked complete.",
"last_describe_job_call",
"=",
"time",
".",
"time",
"(",
")",
"last_description",
"=",
"description",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"check_interval",
")",
"sec",
"=",
"sec",
"+",
"check_interval",
"state",
",",
"last_description",
",",
"last_describe_job_call",
"=",
"self",
".",
"describe_training_job_with_log",
"(",
"job_name",
",",
"positions",
",",
"stream_names",
",",
"instance_count",
",",
"state",
",",
"last_description",
",",
"last_describe_job_call",
")",
"if",
"state",
"==",
"LogState",
".",
"COMPLETE",
":",
"break",
"if",
"max_ingestion_time",
"and",
"sec",
">",
"max_ingestion_time",
":",
"# ensure that the job gets killed if the max ingestion time is exceeded",
"raise",
"AirflowException",
"(",
"'SageMaker job took more than %s seconds'",
",",
"max_ingestion_time",
")",
"if",
"wait_for_completion",
":",
"status",
"=",
"last_description",
"[",
"'TrainingJobStatus'",
"]",
"if",
"status",
"in",
"failed_states",
":",
"reason",
"=",
"last_description",
".",
"get",
"(",
"'FailureReason'",
",",
"'(No reason provided)'",
")",
"raise",
"AirflowException",
"(",
"'Error training {}: {} Reason: {}'",
".",
"format",
"(",
"job_name",
",",
"status",
",",
"reason",
")",
")",
"billable_time",
"=",
"(",
"last_description",
"[",
"'TrainingEndTime'",
"]",
"-",
"last_description",
"[",
"'TrainingStartTime'",
"]",
")",
"*",
"instance_count",
"self",
".",
"log",
".",
"info",
"(",
"'Billable seconds:{}'",
".",
"format",
"(",
"int",
"(",
"billable_time",
".",
"total_seconds",
"(",
")",
")",
"+",
"1",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
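A hedged usage sketch for check_training_status_with_log above: the import path and signature come from this record, while the connection id, job name, state sets, and polling settings are placeholders to adjust for a real environment.

from airflow.contrib.hooks.sagemaker_hook import SageMakerHook

# Placeholder connection id; SageMakerHook is assumed to accept the usual aws_conn_id.
hook = SageMakerHook(aws_conn_id='aws_default')
hook.check_training_status_with_log(
    job_name='my-training-job',                      # placeholder job name
    non_terminal_states={'InProgress', 'Stopping'},  # illustrative non-terminal states
    failed_states={'Failed'},                        # illustrative failed states
    wait_for_completion=True,                        # tail logs until the job finishes
    check_interval=30,                               # poll every 30 seconds
    max_ingestion_time=None,                         # no overall timeout
)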
|
test
|
DataFlowPythonOperator.execute
|
Execute the python dataflow job.
|
airflow/contrib/operators/dataflow_operator.py
|
def execute(self, context):
"""Execute the python dataflow job."""
bucket_helper = GoogleCloudBucketHelper(
self.gcp_conn_id, self.delegate_to)
self.py_file = bucket_helper.google_cloud_to_local(self.py_file)
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep)
dataflow_options = self.dataflow_default_options.copy()
dataflow_options.update(self.options)
# Convert argument names from lowerCamelCase to snake case.
camel_to_snake = lambda name: re.sub(
r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)
formatted_options = {camel_to_snake(key): dataflow_options[key]
for key in dataflow_options}
hook.start_python_dataflow(
self.job_name, formatted_options,
self.py_file, self.py_options)
|
def execute(self, context):
"""Execute the python dataflow job."""
bucket_helper = GoogleCloudBucketHelper(
self.gcp_conn_id, self.delegate_to)
self.py_file = bucket_helper.google_cloud_to_local(self.py_file)
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep)
dataflow_options = self.dataflow_default_options.copy()
dataflow_options.update(self.options)
# Convert argument names from lowerCamelCase to snake case.
camel_to_snake = lambda name: re.sub(
r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)
formatted_options = {camel_to_snake(key): dataflow_options[key]
for key in dataflow_options}
hook.start_python_dataflow(
self.job_name, formatted_options,
self.py_file, self.py_options)
|
[
"Execute",
"the",
"python",
"dataflow",
"job",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/dataflow_operator.py#L363-L380
|
[
"def",
"execute",
"(",
"self",
",",
"context",
")",
":",
"bucket_helper",
"=",
"GoogleCloudBucketHelper",
"(",
"self",
".",
"gcp_conn_id",
",",
"self",
".",
"delegate_to",
")",
"self",
".",
"py_file",
"=",
"bucket_helper",
".",
"google_cloud_to_local",
"(",
"self",
".",
"py_file",
")",
"hook",
"=",
"DataFlowHook",
"(",
"gcp_conn_id",
"=",
"self",
".",
"gcp_conn_id",
",",
"delegate_to",
"=",
"self",
".",
"delegate_to",
",",
"poll_sleep",
"=",
"self",
".",
"poll_sleep",
")",
"dataflow_options",
"=",
"self",
".",
"dataflow_default_options",
".",
"copy",
"(",
")",
"dataflow_options",
".",
"update",
"(",
"self",
".",
"options",
")",
"# Convert argument names from lowerCamelCase to snake case.",
"camel_to_snake",
"=",
"lambda",
"name",
":",
"re",
".",
"sub",
"(",
"r'[A-Z]'",
",",
"lambda",
"x",
":",
"'_'",
"+",
"x",
".",
"group",
"(",
"0",
")",
".",
"lower",
"(",
")",
",",
"name",
")",
"formatted_options",
"=",
"{",
"camel_to_snake",
"(",
"key",
")",
":",
"dataflow_options",
"[",
"key",
"]",
"for",
"key",
"in",
"dataflow_options",
"}",
"hook",
".",
"start_python_dataflow",
"(",
"self",
".",
"job_name",
",",
"formatted_options",
",",
"self",
".",
"py_file",
",",
"self",
".",
"py_options",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
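The lowerCamelCase-to-snake_case conversion inside execute above can be tried in isolation; this small sketch reuses the same regex with made-up Dataflow option names.

import re

# Same substitution used in DataFlowPythonOperator.execute above.
camel_to_snake = lambda name: re.sub(
    r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)

options = {'tempLocation': 'gs://my-bucket/tmp', 'maxNumWorkers': 5}  # hypothetical options
formatted = {camel_to_snake(key): options[key] for key in options}
print(formatted)  # {'temp_location': 'gs://my-bucket/tmp', 'max_num_workers': 5}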
|
test
|
GoogleCloudBucketHelper.google_cloud_to_local
|
Checks whether the file specified by file_name is stored in Google Cloud
Storage (GCS), if so, downloads the file and saves it locally. The full
path of the saved file will be returned. Otherwise the local file_name
will be returned immediately.
:param file_name: The full path of input file.
:type file_name: str
:return: The full path of local file.
:rtype: str
|
airflow/contrib/operators/dataflow_operator.py
|
def google_cloud_to_local(self, file_name):
"""
Checks whether the file specified by file_name is stored in Google Cloud
Storage (GCS), if so, downloads the file and saves it locally. The full
path of the saved file will be returned. Otherwise the local file_name
will be returned immediately.
:param file_name: The full path of input file.
:type file_name: str
:return: The full path of local file.
:rtype: str
"""
if not file_name.startswith('gs://'):
return file_name
# Extracts bucket_id and object_id by first removing 'gs://' prefix and
# then split the remaining by path delimiter '/'.
path_components = file_name[self.GCS_PREFIX_LENGTH:].split('/')
if len(path_components) < 2:
raise Exception(
'Invalid Google Cloud Storage (GCS) object path: {}'
.format(file_name))
bucket_id = path_components[0]
object_id = '/'.join(path_components[1:])
local_file = '/tmp/dataflow{}-{}'.format(str(uuid.uuid4())[:8],
path_components[-1])
self._gcs_hook.download(bucket_id, object_id, local_file)
if os.stat(local_file).st_size > 0:
return local_file
raise Exception(
'Failed to download Google Cloud Storage (GCS) object: {}'
.format(file_name))
|
def google_cloud_to_local(self, file_name):
"""
Checks whether the file specified by file_name is stored in Google Cloud
Storage (GCS), if so, downloads the file and saves it locally. The full
path of the saved file will be returned. Otherwise the local file_name
will be returned immediately.
:param file_name: The full path of input file.
:type file_name: str
:return: The full path of local file.
:rtype: str
"""
if not file_name.startswith('gs://'):
return file_name
# Extracts bucket_id and object_id by first removing 'gs://' prefix and
# then split the remaining by path delimiter '/'.
path_components = file_name[self.GCS_PREFIX_LENGTH:].split('/')
if len(path_components) < 2:
raise Exception(
'Invalid Google Cloud Storage (GCS) object path: {}'
.format(file_name))
bucket_id = path_components[0]
object_id = '/'.join(path_components[1:])
local_file = '/tmp/dataflow{}-{}'.format(str(uuid.uuid4())[:8],
path_components[-1])
self._gcs_hook.download(bucket_id, object_id, local_file)
if os.stat(local_file).st_size > 0:
return local_file
raise Exception(
'Failed to download Google Cloud Storage (GCS) object: {}'
.format(file_name))
|
[
"Checks",
"whether",
"the",
"file",
"specified",
"by",
"file_name",
"is",
"stored",
"in",
"Google",
"Cloud",
"Storage",
"(",
"GCS",
")",
"if",
"so",
"downloads",
"the",
"file",
"and",
"saves",
"it",
"locally",
".",
"The",
"full",
"path",
"of",
"the",
"saved",
"file",
"will",
"be",
"returned",
".",
"Otherwise",
"the",
"local",
"file_name",
"will",
"be",
"returned",
"immediately",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/dataflow_operator.py#L392-L425
|
[
"def",
"google_cloud_to_local",
"(",
"self",
",",
"file_name",
")",
":",
"if",
"not",
"file_name",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"return",
"file_name",
"# Extracts bucket_id and object_id by first removing 'gs://' prefix and",
"# then split the remaining by path delimiter '/'.",
"path_components",
"=",
"file_name",
"[",
"self",
".",
"GCS_PREFIX_LENGTH",
":",
"]",
".",
"split",
"(",
"'/'",
")",
"if",
"len",
"(",
"path_components",
")",
"<",
"2",
":",
"raise",
"Exception",
"(",
"'Invalid Google Cloud Storage (GCS) object path: {}'",
".",
"format",
"(",
"file_name",
")",
")",
"bucket_id",
"=",
"path_components",
"[",
"0",
"]",
"object_id",
"=",
"'/'",
".",
"join",
"(",
"path_components",
"[",
"1",
":",
"]",
")",
"local_file",
"=",
"'/tmp/dataflow{}-{}'",
".",
"format",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"[",
":",
"8",
"]",
",",
"path_components",
"[",
"-",
"1",
"]",
")",
"self",
".",
"_gcs_hook",
".",
"download",
"(",
"bucket_id",
",",
"object_id",
",",
"local_file",
")",
"if",
"os",
".",
"stat",
"(",
"local_file",
")",
".",
"st_size",
">",
"0",
":",
"return",
"local_file",
"raise",
"Exception",
"(",
"'Failed to download Google Cloud Storage (GCS) object: {}'",
".",
"format",
"(",
"file_name",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
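A hedged usage sketch for google_cloud_to_local: the constructor arguments mirror how DataFlowPythonOperator.execute builds the helper earlier in this file, and the connection id and paths are placeholders.

from airflow.contrib.operators.dataflow_operator import GoogleCloudBucketHelper

# Conventional GCP connection id and no delegation; adjust as needed.
helper = GoogleCloudBucketHelper('google_cloud_default', None)

# A gs:// path is downloaded to a /tmp/dataflow<uuid>-<name> file and that local path is returned.
local_py = helper.google_cloud_to_local('gs://my-bucket/pipelines/wordcount.py')

# A plain local path is returned unchanged.
same_path = helper.google_cloud_to_local('/opt/pipelines/wordcount.py')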
|
test
|
run_migrations_offline
|
Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
|
airflow/migrations/env.py
|
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=settings.SQL_ALCHEMY_CONN, target_metadata=target_metadata,
literal_binds=True, compare_type=COMPARE_TYPE)
with context.begin_transaction():
context.run_migrations()
|
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=settings.SQL_ALCHEMY_CONN, target_metadata=target_metadata,
literal_binds=True, compare_type=COMPARE_TYPE)
with context.begin_transaction():
context.run_migrations()
|
[
"Run",
"migrations",
"in",
"offline",
"mode",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/migrations/env.py#L48-L65
|
[
"def",
"run_migrations_offline",
"(",
")",
":",
"context",
".",
"configure",
"(",
"url",
"=",
"settings",
".",
"SQL_ALCHEMY_CONN",
",",
"target_metadata",
"=",
"target_metadata",
",",
"literal_binds",
"=",
"True",
",",
"compare_type",
"=",
"COMPARE_TYPE",
")",
"with",
"context",
".",
"begin_transaction",
"(",
")",
":",
"context",
".",
"run_migrations",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
run_migrations_online
|
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
|
airflow/migrations/env.py
|
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = settings.engine
with connectable.connect() as connection:
context.configure(
connection=connection,
transaction_per_migration=True,
target_metadata=target_metadata,
compare_type=COMPARE_TYPE,
)
with context.begin_transaction():
context.run_migrations()
|
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = settings.engine
with connectable.connect() as connection:
context.configure(
connection=connection,
transaction_per_migration=True,
target_metadata=target_metadata,
compare_type=COMPARE_TYPE,
)
with context.begin_transaction():
context.run_migrations()
|
[
"Run",
"migrations",
"in",
"online",
"mode",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/migrations/env.py#L68-L86
|
[
"def",
"run_migrations_online",
"(",
")",
":",
"connectable",
"=",
"settings",
".",
"engine",
"with",
"connectable",
".",
"connect",
"(",
")",
"as",
"connection",
":",
"context",
".",
"configure",
"(",
"connection",
"=",
"connection",
",",
"transaction_per_migration",
"=",
"True",
",",
"target_metadata",
"=",
"target_metadata",
",",
"compare_type",
"=",
"COMPARE_TYPE",
",",
")",
"with",
"context",
".",
"begin_transaction",
"(",
")",
":",
"context",
".",
"run_migrations",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
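For context, an Alembic env.py conventionally dispatches between the two functions above depending on how alembic was invoked; this is the standard pattern rather than text taken from this file.

from alembic import context

# Standard Alembic dispatch between offline (SQL script) and online (live engine) runs.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()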
|
test
|
BigtableHook.delete_instance
|
Deletes the specified Cloud Bigtable instance.
Raises google.api_core.exceptions.NotFound if the Cloud Bigtable instance does
not exist.
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type instance_id: str
|
airflow/contrib/hooks/gcp_bigtable_hook.py
|
def delete_instance(self, instance_id, project_id=None):
"""
Deletes the specified Cloud Bigtable instance.
Raises google.api_core.exceptions.NotFound if the Cloud Bigtable instance does
not exist.
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type instance_id: str
"""
instance = self.get_instance(instance_id=instance_id, project_id=project_id)
if instance:
instance.delete()
else:
self.log.info("The instance '%s' does not exist in project '%s'. Exiting", instance_id,
project_id)
|
def delete_instance(self, instance_id, project_id=None):
"""
Deletes the specified Cloud Bigtable instance.
Raises google.api_core.exceptions.NotFound if the Cloud Bigtable instance does
not exist.
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type instance_id: str
"""
instance = self.get_instance(instance_id=instance_id, project_id=project_id)
if instance:
instance.delete()
else:
self.log.info("The instance '%s' does not exist in project '%s'. Exiting", instance_id,
project_id)
|
[
"Deletes",
"the",
"specified",
"Cloud",
"Bigtable",
"instance",
".",
"Raises",
"google",
".",
"api_core",
".",
"exceptions",
".",
"NotFound",
"if",
"the",
"Cloud",
"Bigtable",
"instance",
"does",
"not",
"exist",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_bigtable_hook.py#L69-L87
|
[
"def",
"delete_instance",
"(",
"self",
",",
"instance_id",
",",
"project_id",
"=",
"None",
")",
":",
"instance",
"=",
"self",
".",
"get_instance",
"(",
"instance_id",
"=",
"instance_id",
",",
"project_id",
"=",
"project_id",
")",
"if",
"instance",
":",
"instance",
".",
"delete",
"(",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"The instance '%s' does not exist in project '%s'. Exiting\"",
",",
"instance_id",
",",
"project_id",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigtableHook.create_instance
|
Creates new instance.
:type instance_id: str
:param instance_id: The ID for the new instance.
:type main_cluster_id: str
:param main_cluster_id: The ID for main cluster for the new instance.
:type main_cluster_zone: str
:param main_cluster_zone: The zone for main cluster.
See https://cloud.google.com/bigtable/docs/locations for more details.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type replica_cluster_id: str
:param replica_cluster_id: (optional) The ID for replica cluster for the new
instance.
:type replica_cluster_zone: str
:param replica_cluster_zone: (optional) The zone for replica cluster.
:type instance_type: enums.Instance.Type
:param instance_type: (optional) The type of the instance.
:type instance_display_name: str
:param instance_display_name: (optional) Human-readable name of the instance.
Defaults to ``instance_id``.
:type instance_labels: dict
:param instance_labels: (optional) Dictionary of labels to associate with the
instance.
:type cluster_nodes: int
:param cluster_nodes: (optional) Number of nodes for cluster.
:type cluster_storage_type: enums.StorageType
:param cluster_storage_type: (optional) The type of storage.
:type timeout: int
:param timeout: (optional) timeout (in seconds) for instance creation.
        If None or not specified, the operator will wait indefinitely.
|
airflow/contrib/hooks/gcp_bigtable_hook.py
|
def create_instance(self,
instance_id,
main_cluster_id,
main_cluster_zone,
project_id=None,
replica_cluster_id=None,
replica_cluster_zone=None,
instance_display_name=None,
instance_type=enums.Instance.Type.TYPE_UNSPECIFIED,
instance_labels=None,
cluster_nodes=None,
cluster_storage_type=enums.StorageType.STORAGE_TYPE_UNSPECIFIED,
timeout=None):
"""
Creates new instance.
:type instance_id: str
:param instance_id: The ID for the new instance.
:type main_cluster_id: str
:param main_cluster_id: The ID for main cluster for the new instance.
:type main_cluster_zone: str
:param main_cluster_zone: The zone for main cluster.
See https://cloud.google.com/bigtable/docs/locations for more details.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type replica_cluster_id: str
:param replica_cluster_id: (optional) The ID for replica cluster for the new
instance.
:type replica_cluster_zone: str
:param replica_cluster_zone: (optional) The zone for replica cluster.
:type instance_type: enums.Instance.Type
:param instance_type: (optional) The type of the instance.
:type instance_display_name: str
:param instance_display_name: (optional) Human-readable name of the instance.
Defaults to ``instance_id``.
:type instance_labels: dict
:param instance_labels: (optional) Dictionary of labels to associate with the
instance.
:type cluster_nodes: int
:param cluster_nodes: (optional) Number of nodes for cluster.
:type cluster_storage_type: enums.StorageType
:param cluster_storage_type: (optional) The type of storage.
:type timeout: int
:param timeout: (optional) timeout (in seconds) for instance creation.
            If None or not specified, the operator will wait indefinitely.
"""
cluster_storage_type = enums.StorageType(cluster_storage_type)
instance_type = enums.Instance.Type(instance_type)
instance = Instance(
instance_id,
self._get_client(project_id=project_id),
instance_display_name,
instance_type,
instance_labels,
)
clusters = [
instance.cluster(
main_cluster_id,
main_cluster_zone,
cluster_nodes,
cluster_storage_type
)
]
if replica_cluster_id and replica_cluster_zone:
clusters.append(instance.cluster(
replica_cluster_id,
replica_cluster_zone,
cluster_nodes,
cluster_storage_type
))
operation = instance.create(
clusters=clusters
)
operation.result(timeout)
return instance
|
def create_instance(self,
instance_id,
main_cluster_id,
main_cluster_zone,
project_id=None,
replica_cluster_id=None,
replica_cluster_zone=None,
instance_display_name=None,
instance_type=enums.Instance.Type.TYPE_UNSPECIFIED,
instance_labels=None,
cluster_nodes=None,
cluster_storage_type=enums.StorageType.STORAGE_TYPE_UNSPECIFIED,
timeout=None):
"""
Creates new instance.
:type instance_id: str
:param instance_id: The ID for the new instance.
:type main_cluster_id: str
:param main_cluster_id: The ID for main cluster for the new instance.
:type main_cluster_zone: str
:param main_cluster_zone: The zone for main cluster.
See https://cloud.google.com/bigtable/docs/locations for more details.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
:type replica_cluster_id: str
:param replica_cluster_id: (optional) The ID for replica cluster for the new
instance.
:type replica_cluster_zone: str
:param replica_cluster_zone: (optional) The zone for replica cluster.
:type instance_type: enums.Instance.Type
:param instance_type: (optional) The type of the instance.
:type instance_display_name: str
:param instance_display_name: (optional) Human-readable name of the instance.
Defaults to ``instance_id``.
:type instance_labels: dict
:param instance_labels: (optional) Dictionary of labels to associate with the
instance.
:type cluster_nodes: int
:param cluster_nodes: (optional) Number of nodes for cluster.
:type cluster_storage_type: enums.StorageType
:param cluster_storage_type: (optional) The type of storage.
:type timeout: int
:param timeout: (optional) timeout (in seconds) for instance creation.
            If None or not specified, the operator will wait indefinitely.
"""
cluster_storage_type = enums.StorageType(cluster_storage_type)
instance_type = enums.Instance.Type(instance_type)
instance = Instance(
instance_id,
self._get_client(project_id=project_id),
instance_display_name,
instance_type,
instance_labels,
)
clusters = [
instance.cluster(
main_cluster_id,
main_cluster_zone,
cluster_nodes,
cluster_storage_type
)
]
if replica_cluster_id and replica_cluster_zone:
clusters.append(instance.cluster(
replica_cluster_id,
replica_cluster_zone,
cluster_nodes,
cluster_storage_type
))
operation = instance.create(
clusters=clusters
)
operation.result(timeout)
return instance
|
[
"Creates",
"new",
"instance",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_bigtable_hook.py#L90-L168
|
[
"def",
"create_instance",
"(",
"self",
",",
"instance_id",
",",
"main_cluster_id",
",",
"main_cluster_zone",
",",
"project_id",
"=",
"None",
",",
"replica_cluster_id",
"=",
"None",
",",
"replica_cluster_zone",
"=",
"None",
",",
"instance_display_name",
"=",
"None",
",",
"instance_type",
"=",
"enums",
".",
"Instance",
".",
"Type",
".",
"TYPE_UNSPECIFIED",
",",
"instance_labels",
"=",
"None",
",",
"cluster_nodes",
"=",
"None",
",",
"cluster_storage_type",
"=",
"enums",
".",
"StorageType",
".",
"STORAGE_TYPE_UNSPECIFIED",
",",
"timeout",
"=",
"None",
")",
":",
"cluster_storage_type",
"=",
"enums",
".",
"StorageType",
"(",
"cluster_storage_type",
")",
"instance_type",
"=",
"enums",
".",
"Instance",
".",
"Type",
"(",
"instance_type",
")",
"instance",
"=",
"Instance",
"(",
"instance_id",
",",
"self",
".",
"_get_client",
"(",
"project_id",
"=",
"project_id",
")",
",",
"instance_display_name",
",",
"instance_type",
",",
"instance_labels",
",",
")",
"clusters",
"=",
"[",
"instance",
".",
"cluster",
"(",
"main_cluster_id",
",",
"main_cluster_zone",
",",
"cluster_nodes",
",",
"cluster_storage_type",
")",
"]",
"if",
"replica_cluster_id",
"and",
"replica_cluster_zone",
":",
"clusters",
".",
"append",
"(",
"instance",
".",
"cluster",
"(",
"replica_cluster_id",
",",
"replica_cluster_zone",
",",
"cluster_nodes",
",",
"cluster_storage_type",
")",
")",
"operation",
"=",
"instance",
".",
"create",
"(",
"clusters",
"=",
"clusters",
")",
"operation",
".",
"result",
"(",
"timeout",
")",
"return",
"instance"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
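A hedged sketch of create_instance above, building a production instance with an optional replica cluster; every id, zone, and the connection id are placeholders, and BigtableHook is assumed to take the usual gcp_conn_id argument.

from google.cloud.bigtable import enums
from airflow.contrib.hooks.gcp_bigtable_hook import BigtableHook

hook = BigtableHook(gcp_conn_id='google_cloud_default')  # assumed connection id
instance = hook.create_instance(
    instance_id='my-instance',
    main_cluster_id='my-instance-c1',
    main_cluster_zone='europe-west1-b',
    project_id='my-project',
    replica_cluster_id='my-instance-c2',            # optional replica cluster
    replica_cluster_zone='europe-west1-c',
    instance_type=enums.Instance.Type.PRODUCTION,
    cluster_nodes=3,
    cluster_storage_type=enums.StorageType.SSD,
    timeout=600,                                     # wait up to ten minutes
)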
|
test
|
BigtableHook.create_table
|
Creates the specified Cloud Bigtable table.
Raises ``google.api_core.exceptions.AlreadyExists`` if the table exists.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the table.
:type table_id: str
:param table_id: The ID of the table to create in Cloud Bigtable.
:type initial_split_keys: list
:param initial_split_keys: (Optional) A list of row keys in bytes to use to
initially split the table.
:type column_families: dict
:param column_families: (Optional) A map of columns to create. The key is the
column_id str, and the value is a
:class:`google.cloud.bigtable.column_family.GarbageCollectionRule`.
|
airflow/contrib/hooks/gcp_bigtable_hook.py
|
def create_table(instance,
table_id,
initial_split_keys=None,
column_families=None):
"""
Creates the specified Cloud Bigtable table.
Raises ``google.api_core.exceptions.AlreadyExists`` if the table exists.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the table.
:type table_id: str
:param table_id: The ID of the table to create in Cloud Bigtable.
:type initial_split_keys: list
:param initial_split_keys: (Optional) A list of row keys in bytes to use to
initially split the table.
:type column_families: dict
:param column_families: (Optional) A map of columns to create. The key is the
column_id str, and the value is a
:class:`google.cloud.bigtable.column_family.GarbageCollectionRule`.
"""
if column_families is None:
column_families = {}
if initial_split_keys is None:
initial_split_keys = []
table = Table(table_id, instance)
table.create(initial_split_keys, column_families)
|
def create_table(instance,
table_id,
initial_split_keys=None,
column_families=None):
"""
Creates the specified Cloud Bigtable table.
Raises ``google.api_core.exceptions.AlreadyExists`` if the table exists.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the table.
:type table_id: str
:param table_id: The ID of the table to create in Cloud Bigtable.
:type initial_split_keys: list
:param initial_split_keys: (Optional) A list of row keys in bytes to use to
initially split the table.
:type column_families: dict
:param column_families: (Optional) A map of columns to create. The key is the
column_id str, and the value is a
:class:`google.cloud.bigtable.column_family.GarbageCollectionRule`.
"""
if column_families is None:
column_families = {}
if initial_split_keys is None:
initial_split_keys = []
table = Table(table_id, instance)
table.create(initial_split_keys, column_families)
|
[
"Creates",
"the",
"specified",
"Cloud",
"Bigtable",
"table",
".",
"Raises",
"google",
".",
"api_core",
".",
"exceptions",
".",
"AlreadyExists",
"if",
"the",
"table",
"exists",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_bigtable_hook.py#L171-L196
|
[
"def",
"create_table",
"(",
"instance",
",",
"table_id",
",",
"initial_split_keys",
"=",
"None",
",",
"column_families",
"=",
"None",
")",
":",
"if",
"column_families",
"is",
"None",
":",
"column_families",
"=",
"{",
"}",
"if",
"initial_split_keys",
"is",
"None",
":",
"initial_split_keys",
"=",
"[",
"]",
"table",
"=",
"Table",
"(",
"table_id",
",",
"instance",
")",
"table",
".",
"create",
"(",
"initial_split_keys",
",",
"column_families",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
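A minimal sketch of create_table above; since it is defined without self it is presumably a static method, so it is called on the class here with an Instance obtained via get_instance. All identifiers are placeholders.

from airflow.contrib.hooks.gcp_bigtable_hook import BigtableHook

hook = BigtableHook(gcp_conn_id='google_cloud_default')       # assumed connection id
instance = hook.get_instance(instance_id='my-instance',
                             project_id='my-project')
BigtableHook.create_table(instance=instance,
                          table_id='my-table',
                          initial_split_keys=[b'row-1000'],   # optional split point (bytes)
                          column_families={})                 # no garbage-collection rules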
|
test
|
BigtableHook.delete_table
|
Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type table_id: str
:param table_id: The ID of the table in Cloud Bigtable.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
|
airflow/contrib/hooks/gcp_bigtable_hook.py
|
def delete_table(self, instance_id, table_id, project_id=None):
"""
Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type table_id: str
:param table_id: The ID of the table in Cloud Bigtable.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
"""
table = self.get_instance(instance_id=instance_id, project_id=project_id).table(table_id=table_id)
table.delete()
|
def delete_table(self, instance_id, table_id, project_id=None):
"""
Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance.
:type table_id: str
:param table_id: The ID of the table in Cloud Bigtable.
:type project_id: str
:param project_id: Optional, Google Cloud Platform project ID where the
BigTable exists. If set to None or missing,
the default project_id from the GCP connection is used.
"""
table = self.get_instance(instance_id=instance_id, project_id=project_id).table(table_id=table_id)
table.delete()
|
[
"Deletes",
"the",
"specified",
"table",
"in",
"Cloud",
"Bigtable",
".",
"Raises",
"google",
".",
"api_core",
".",
"exceptions",
".",
"NotFound",
"if",
"the",
"table",
"does",
"not",
"exist",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_bigtable_hook.py#L199-L214
|
[
"def",
"delete_table",
"(",
"self",
",",
"instance_id",
",",
"table_id",
",",
"project_id",
"=",
"None",
")",
":",
"table",
"=",
"self",
".",
"get_instance",
"(",
"instance_id",
"=",
"instance_id",
",",
"project_id",
"=",
"project_id",
")",
".",
"table",
"(",
"table_id",
"=",
"table_id",
")",
"table",
".",
"delete",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigtableHook.update_cluster
|
Updates number of nodes in the specified Cloud Bigtable cluster.
Raises google.api_core.exceptions.NotFound if the cluster does not exist.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the cluster.
:type cluster_id: str
:param cluster_id: The ID of the cluster.
:type nodes: int
:param nodes: The desired number of nodes.
|
airflow/contrib/hooks/gcp_bigtable_hook.py
|
def update_cluster(instance, cluster_id, nodes):
"""
Updates number of nodes in the specified Cloud Bigtable cluster.
Raises google.api_core.exceptions.NotFound if the cluster does not exist.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the cluster.
:type cluster_id: str
:param cluster_id: The ID of the cluster.
:type nodes: int
:param nodes: The desired number of nodes.
"""
cluster = Cluster(cluster_id, instance)
cluster.serve_nodes = nodes
cluster.update()
|
def update_cluster(instance, cluster_id, nodes):
"""
Updates number of nodes in the specified Cloud Bigtable cluster.
Raises google.api_core.exceptions.NotFound if the cluster does not exist.
:type instance: Instance
:param instance: The Cloud Bigtable instance that owns the cluster.
:type cluster_id: str
:param cluster_id: The ID of the cluster.
:type nodes: int
:param nodes: The desired number of nodes.
"""
cluster = Cluster(cluster_id, instance)
cluster.serve_nodes = nodes
cluster.update()
|
[
"Updates",
"number",
"of",
"nodes",
"in",
"the",
"specified",
"Cloud",
"Bigtable",
"cluster",
".",
"Raises",
"google",
".",
"api_core",
".",
"exceptions",
".",
"NotFound",
"if",
"the",
"cluster",
"does",
"not",
"exist",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_bigtable_hook.py#L217-L231
|
[
"def",
"update_cluster",
"(",
"instance",
",",
"cluster_id",
",",
"nodes",
")",
":",
"cluster",
"=",
"Cluster",
"(",
"cluster_id",
",",
"instance",
")",
"cluster",
".",
"serve_nodes",
"=",
"nodes",
"cluster",
".",
"update",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
HiveCliHook._prepare_cli_cmd
|
This function creates the command list from available information
|
airflow/hooks/hive_hooks.py
|
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
host=conn.host, port=conn.port, schema=conn.schema)
if configuration.conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}".format(
template=template, proxy_user=proxy_user)
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
|
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
host=conn.host, port=conn.port, schema=conn.schema)
if configuration.conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}".format(
template=template, proxy_user=proxy_user)
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
|
[
"This",
"function",
"creates",
"the",
"command",
"list",
"from",
"available",
"information"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L106-L146
|
[
"def",
"_prepare_cli_cmd",
"(",
"self",
")",
":",
"conn",
"=",
"self",
".",
"conn",
"hive_bin",
"=",
"'hive'",
"cmd_extra",
"=",
"[",
"]",
"if",
"self",
".",
"use_beeline",
":",
"hive_bin",
"=",
"'beeline'",
"jdbc_url",
"=",
"\"jdbc:hive2://{host}:{port}/{schema}\"",
".",
"format",
"(",
"host",
"=",
"conn",
".",
"host",
",",
"port",
"=",
"conn",
".",
"port",
",",
"schema",
"=",
"conn",
".",
"schema",
")",
"if",
"configuration",
".",
"conf",
".",
"get",
"(",
"'core'",
",",
"'security'",
")",
"==",
"'kerberos'",
":",
"template",
"=",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'principal'",
",",
"\"hive/_HOST@EXAMPLE.COM\"",
")",
"if",
"\"_HOST\"",
"in",
"template",
":",
"template",
"=",
"utils",
".",
"replace_hostname_pattern",
"(",
"utils",
".",
"get_components",
"(",
"template",
")",
")",
"proxy_user",
"=",
"\"\"",
"# noqa",
"if",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'proxy_user'",
")",
"==",
"\"login\"",
"and",
"conn",
".",
"login",
":",
"proxy_user",
"=",
"\"hive.server2.proxy.user={0}\"",
".",
"format",
"(",
"conn",
".",
"login",
")",
"elif",
"conn",
".",
"extra_dejson",
".",
"get",
"(",
"'proxy_user'",
")",
"==",
"\"owner\"",
"and",
"self",
".",
"run_as",
":",
"proxy_user",
"=",
"\"hive.server2.proxy.user={0}\"",
".",
"format",
"(",
"self",
".",
"run_as",
")",
"jdbc_url",
"+=",
"\";principal={template};{proxy_user}\"",
".",
"format",
"(",
"template",
"=",
"template",
",",
"proxy_user",
"=",
"proxy_user",
")",
"elif",
"self",
".",
"auth",
":",
"jdbc_url",
"+=",
"\";auth=\"",
"+",
"self",
".",
"auth",
"jdbc_url",
"=",
"'\"{}\"'",
".",
"format",
"(",
"jdbc_url",
")",
"cmd_extra",
"+=",
"[",
"'-u'",
",",
"jdbc_url",
"]",
"if",
"conn",
".",
"login",
":",
"cmd_extra",
"+=",
"[",
"'-n'",
",",
"conn",
".",
"login",
"]",
"if",
"conn",
".",
"password",
":",
"cmd_extra",
"+=",
"[",
"'-p'",
",",
"conn",
".",
"password",
"]",
"hive_params_list",
"=",
"self",
".",
"hive_cli_params",
".",
"split",
"(",
")",
"return",
"[",
"hive_bin",
"]",
"+",
"cmd_extra",
"+",
"hive_params_list"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
HiveCliHook._prepare_hiveconf
|
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
|
airflow/hooks/hive_hooks.py
|
def _prepare_hiveconf(d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
|
def _prepare_hiveconf(d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
|
[
"This",
"function",
"prepares",
"a",
"list",
"of",
"hiveconf",
"params",
"from",
"a",
"dictionary",
"of",
"key",
"value",
"pairs",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L149-L169
|
[
"def",
"_prepare_hiveconf",
"(",
"d",
")",
":",
"if",
"not",
"d",
":",
"return",
"[",
"]",
"return",
"as_flattened_list",
"(",
"zip",
"(",
"[",
"\"-hiveconf\"",
"]",
"*",
"len",
"(",
"d",
")",
",",
"[",
"\"{}={}\"",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"]",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
HiveCliHook.run_cli
|
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
|
airflow/hooks/hive_hooks.py
|
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(schema=schema, hql=hql)
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql = hql + '\n'
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue),
'-hiveconf',
'mapred.job.queue.name={}'
.format(self.mapred_queue),
'-hiveconf',
'tez.job.queue.name={}'
.format(self.mapred_queue)
])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info("%s", " ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
|
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(schema=schema, hql=hql)
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql = hql + '\n'
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue),
'-hiveconf',
'mapred.job.queue.name={}'
.format(self.mapred_queue),
'-hiveconf',
'tez.job.queue.name={}'
.format(self.mapred_queue)
])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info("%s", " ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
|
[
"Run",
"an",
"hql",
"statement",
"using",
"the",
"hive",
"cli",
".",
"If",
"hive_conf",
"is",
"specified",
"it",
"should",
"be",
"a",
"dict",
"and",
"the",
"entries",
"will",
"be",
"set",
"as",
"key",
"/",
"value",
"pairs",
"in",
"HiveConf"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L171-L255
|
[
"def",
"run_cli",
"(",
"self",
",",
"hql",
",",
"schema",
"=",
"None",
",",
"verbose",
"=",
"True",
",",
"hive_conf",
"=",
"None",
")",
":",
"conn",
"=",
"self",
".",
"conn",
"schema",
"=",
"schema",
"or",
"conn",
".",
"schema",
"if",
"schema",
":",
"hql",
"=",
"\"USE {schema};\\n{hql}\"",
".",
"format",
"(",
"schema",
"=",
"schema",
",",
"hql",
"=",
"hql",
")",
"with",
"TemporaryDirectory",
"(",
"prefix",
"=",
"'airflow_hiveop_'",
")",
"as",
"tmp_dir",
":",
"with",
"NamedTemporaryFile",
"(",
"dir",
"=",
"tmp_dir",
")",
"as",
"f",
":",
"hql",
"=",
"hql",
"+",
"'\\n'",
"f",
".",
"write",
"(",
"hql",
".",
"encode",
"(",
"'UTF-8'",
")",
")",
"f",
".",
"flush",
"(",
")",
"hive_cmd",
"=",
"self",
".",
"_prepare_cli_cmd",
"(",
")",
"env_context",
"=",
"get_context_from_env_var",
"(",
")",
"# Only extend the hive_conf if it is defined.",
"if",
"hive_conf",
":",
"env_context",
".",
"update",
"(",
"hive_conf",
")",
"hive_conf_params",
"=",
"self",
".",
"_prepare_hiveconf",
"(",
"env_context",
")",
"if",
"self",
".",
"mapred_queue",
":",
"hive_conf_params",
".",
"extend",
"(",
"[",
"'-hiveconf'",
",",
"'mapreduce.job.queuename={}'",
".",
"format",
"(",
"self",
".",
"mapred_queue",
")",
",",
"'-hiveconf'",
",",
"'mapred.job.queue.name={}'",
".",
"format",
"(",
"self",
".",
"mapred_queue",
")",
",",
"'-hiveconf'",
",",
"'tez.job.queue.name={}'",
".",
"format",
"(",
"self",
".",
"mapred_queue",
")",
"]",
")",
"if",
"self",
".",
"mapred_queue_priority",
":",
"hive_conf_params",
".",
"extend",
"(",
"[",
"'-hiveconf'",
",",
"'mapreduce.job.priority={}'",
".",
"format",
"(",
"self",
".",
"mapred_queue_priority",
")",
"]",
")",
"if",
"self",
".",
"mapred_job_name",
":",
"hive_conf_params",
".",
"extend",
"(",
"[",
"'-hiveconf'",
",",
"'mapred.job.name={}'",
".",
"format",
"(",
"self",
".",
"mapred_job_name",
")",
"]",
")",
"hive_cmd",
".",
"extend",
"(",
"hive_conf_params",
")",
"hive_cmd",
".",
"extend",
"(",
"[",
"'-f'",
",",
"f",
".",
"name",
"]",
")",
"if",
"verbose",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"%s\"",
",",
"\" \"",
".",
"join",
"(",
"hive_cmd",
")",
")",
"sp",
"=",
"subprocess",
".",
"Popen",
"(",
"hive_cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"cwd",
"=",
"tmp_dir",
",",
"close_fds",
"=",
"True",
")",
"self",
".",
"sp",
"=",
"sp",
"stdout",
"=",
"''",
"while",
"True",
":",
"line",
"=",
"sp",
".",
"stdout",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"break",
"stdout",
"+=",
"line",
".",
"decode",
"(",
"'UTF-8'",
")",
"if",
"verbose",
":",
"self",
".",
"log",
".",
"info",
"(",
"line",
".",
"decode",
"(",
"'UTF-8'",
")",
".",
"strip",
"(",
")",
")",
"sp",
".",
"wait",
"(",
")",
"if",
"sp",
".",
"returncode",
":",
"raise",
"AirflowException",
"(",
"stdout",
")",
"return",
"stdout"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
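A hedged sketch of run_cli showing the hive_conf parameter described above; the connection id, statement, and settings are placeholders.

from airflow.hooks.hive_hooks import HiveCliHook

hook = HiveCliHook(hive_cli_conn_id='hive_cli_default')   # assumed connection id
# hive_conf entries are appended after hive_cli_params as -hiveconf key=value,
# so they override whatever is configured on the connection.
stdout = hook.run_cli(
    hql="SELECT COUNT(*) FROM my_table;",                 # placeholder statement
    schema='default',
    hive_conf={'hive.exec.dynamic.partition': 'true'},
)
print(stdout)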
|
test
|
HiveCliHook.load_df
|
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
|
airflow/hooks/hive_hooks.py
|
def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
|
def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
|
[
"Loads",
"a",
"pandas",
"DataFrame",
"into",
"hive",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L300-L372
|
[
"def",
"load_df",
"(",
"self",
",",
"df",
",",
"table",
",",
"field_dict",
"=",
"None",
",",
"delimiter",
"=",
"','",
",",
"encoding",
"=",
"'utf8'",
",",
"pandas_kwargs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"_infer_field_types_from_df",
"(",
"df",
")",
":",
"DTYPE_KIND_HIVE_TYPE",
"=",
"{",
"'b'",
":",
"'BOOLEAN'",
",",
"# boolean",
"'i'",
":",
"'BIGINT'",
",",
"# signed integer",
"'u'",
":",
"'BIGINT'",
",",
"# unsigned integer",
"'f'",
":",
"'DOUBLE'",
",",
"# floating-point",
"'c'",
":",
"'STRING'",
",",
"# complex floating-point",
"'M'",
":",
"'TIMESTAMP'",
",",
"# datetime",
"'O'",
":",
"'STRING'",
",",
"# object",
"'S'",
":",
"'STRING'",
",",
"# (byte-)string",
"'U'",
":",
"'STRING'",
",",
"# Unicode",
"'V'",
":",
"'STRING'",
"# void",
"}",
"d",
"=",
"OrderedDict",
"(",
")",
"for",
"col",
",",
"dtype",
"in",
"df",
".",
"dtypes",
".",
"iteritems",
"(",
")",
":",
"d",
"[",
"col",
"]",
"=",
"DTYPE_KIND_HIVE_TYPE",
"[",
"dtype",
".",
"kind",
"]",
"return",
"d",
"if",
"pandas_kwargs",
"is",
"None",
":",
"pandas_kwargs",
"=",
"{",
"}",
"with",
"TemporaryDirectory",
"(",
"prefix",
"=",
"'airflow_hiveop_'",
")",
"as",
"tmp_dir",
":",
"with",
"NamedTemporaryFile",
"(",
"dir",
"=",
"tmp_dir",
",",
"mode",
"=",
"\"w\"",
")",
"as",
"f",
":",
"if",
"field_dict",
"is",
"None",
":",
"field_dict",
"=",
"_infer_field_types_from_df",
"(",
"df",
")",
"df",
".",
"to_csv",
"(",
"path_or_buf",
"=",
"f",
",",
"sep",
"=",
"delimiter",
",",
"header",
"=",
"False",
",",
"index",
"=",
"False",
",",
"encoding",
"=",
"encoding",
",",
"date_format",
"=",
"\"%Y-%m-%d %H:%M:%S\"",
",",
"*",
"*",
"pandas_kwargs",
")",
"f",
".",
"flush",
"(",
")",
"return",
"self",
".",
"load_file",
"(",
"filepath",
"=",
"f",
".",
"name",
",",
"table",
"=",
"table",
",",
"delimiter",
"=",
"delimiter",
",",
"field_dict",
"=",
"field_dict",
",",
"*",
"*",
"kwargs",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
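A hedged usage sketch for the HiveCliHook.load_df entry above. The DataFrame contents and the target table name are made up, and a working Hive CLI connection in Airflow is assumed; extra keyword arguments (here `recreate`) are forwarded to load_file.

import pandas as pd
from airflow.hooks.hive_hooks import HiveCliHook

# Hive column types are inferred from the DataFrame dtypes because field_dict is omitted.
df = pd.DataFrame({"state": ["CA", "NY"], "num": [10, 20]})
hook = HiveCliHook()
hook.load_df(df=df, table="airflow.example_staging", recreate=True)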
|
test
|
HiveCliHook.load_file
|
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
|
airflow/hooks/hive_hooks.py
|
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n".format(table=table)
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n".format(
table=table, fields=fields)
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n".format(pfields=pfields)
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n".format(delimiter=delimiter)
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n".format(tprops=tprops)
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' ".format(filepath=filepath)
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} ".format(table=table)
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals})".format(pvals=pvals)
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ';\n'
self.log.info(hql)
self.run_cli(hql)
|
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n".format(table=table)
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n".format(
table=table, fields=fields)
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n".format(pfields=pfields)
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n".format(delimiter=delimiter)
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n".format(tprops=tprops)
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' ".format(filepath=filepath)
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} ".format(table=table)
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals})".format(pvals=pvals)
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ';\n'
self.log.info(hql)
self.run_cli(hql)
|
[
"Loads",
"a",
"local",
"file",
"into",
"Hive"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L374-L457
|
[
"def",
"load_file",
"(",
"self",
",",
"filepath",
",",
"table",
",",
"delimiter",
"=",
"\",\"",
",",
"field_dict",
"=",
"None",
",",
"create",
"=",
"True",
",",
"overwrite",
"=",
"True",
",",
"partition",
"=",
"None",
",",
"recreate",
"=",
"False",
",",
"tblproperties",
"=",
"None",
")",
":",
"hql",
"=",
"''",
"if",
"recreate",
":",
"hql",
"+=",
"\"DROP TABLE IF EXISTS {table};\\n\"",
".",
"format",
"(",
"table",
"=",
"table",
")",
"if",
"create",
"or",
"recreate",
":",
"if",
"field_dict",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Must provide a field dict when creating a table\"",
")",
"fields",
"=",
"\",\\n \"",
".",
"join",
"(",
"[",
"k",
"+",
"' '",
"+",
"v",
"for",
"k",
",",
"v",
"in",
"field_dict",
".",
"items",
"(",
")",
"]",
")",
"hql",
"+=",
"\"CREATE TABLE IF NOT EXISTS {table} (\\n{fields})\\n\"",
".",
"format",
"(",
"table",
"=",
"table",
",",
"fields",
"=",
"fields",
")",
"if",
"partition",
":",
"pfields",
"=",
"\",\\n \"",
".",
"join",
"(",
"[",
"p",
"+",
"\" STRING\"",
"for",
"p",
"in",
"partition",
"]",
")",
"hql",
"+=",
"\"PARTITIONED BY ({pfields})\\n\"",
".",
"format",
"(",
"pfields",
"=",
"pfields",
")",
"hql",
"+=",
"\"ROW FORMAT DELIMITED\\n\"",
"hql",
"+=",
"\"FIELDS TERMINATED BY '{delimiter}'\\n\"",
".",
"format",
"(",
"delimiter",
"=",
"delimiter",
")",
"hql",
"+=",
"\"STORED AS textfile\\n\"",
"if",
"tblproperties",
"is",
"not",
"None",
":",
"tprops",
"=",
"\", \"",
".",
"join",
"(",
"[",
"\"'{0}'='{1}'\"",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"tblproperties",
".",
"items",
"(",
")",
"]",
")",
"hql",
"+=",
"\"TBLPROPERTIES({tprops})\\n\"",
".",
"format",
"(",
"tprops",
"=",
"tprops",
")",
"hql",
"+=",
"\";\"",
"self",
".",
"log",
".",
"info",
"(",
"hql",
")",
"self",
".",
"run_cli",
"(",
"hql",
")",
"hql",
"=",
"\"LOAD DATA LOCAL INPATH '{filepath}' \"",
".",
"format",
"(",
"filepath",
"=",
"filepath",
")",
"if",
"overwrite",
":",
"hql",
"+=",
"\"OVERWRITE \"",
"hql",
"+=",
"\"INTO TABLE {table} \"",
".",
"format",
"(",
"table",
"=",
"table",
")",
"if",
"partition",
":",
"pvals",
"=",
"\", \"",
".",
"join",
"(",
"[",
"\"{0}='{1}'\"",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"partition",
".",
"items",
"(",
")",
"]",
")",
"hql",
"+=",
"\"PARTITION ({pvals})\"",
".",
"format",
"(",
"pvals",
"=",
"pvals",
")",
"# As a workaround for HIVE-10541, add a newline character",
"# at the end of hql (AIRFLOW-2412).",
"hql",
"+=",
"';\\n'",
"self",
".",
"log",
".",
"info",
"(",
"hql",
")",
"self",
".",
"run_cli",
"(",
"hql",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
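A usage sketch for the HiveCliHook.load_file entry above, reusing the static_babynames columns that appear in the doctests elsewhere in this file; the local CSV path is hypothetical and an Airflow Hive CLI connection is assumed.

from collections import OrderedDict
from airflow.hooks.hive_hooks import HiveCliHook

# field_dict must be ordered because it drives the column order of the created table.
field_dict = OrderedDict([("state", "STRING"), ("year", "INT"),
                          ("name", "STRING"), ("gender", "STRING"), ("num", "INT")])
hook = HiveCliHook()
hook.load_file(filepath="/tmp/baby_names.csv",   # hypothetical local file
               table="airflow.static_babynames",
               delimiter=",",
               field_dict=field_dict,
               create=True,
               overwrite=True)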
|
test
|
HiveMetastoreHook.get_metastore_client
|
Returns a Hive thrift client.
|
airflow/hooks/hive_hooks.py
|
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
import hmsclient
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.conf.get('core', 'security') == 'kerberos' \
and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
|
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
import hmsclient
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.conf.get('core', 'security') == 'kerberos' \
and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
|
[
"Returns",
"a",
"Hive",
"thrift",
"client",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L489-L524
|
[
"def",
"get_metastore_client",
"(",
"self",
")",
":",
"import",
"hmsclient",
"from",
"thrift",
".",
"transport",
"import",
"TSocket",
",",
"TTransport",
"from",
"thrift",
".",
"protocol",
"import",
"TBinaryProtocol",
"ms",
"=",
"self",
".",
"metastore_conn",
"auth_mechanism",
"=",
"ms",
".",
"extra_dejson",
".",
"get",
"(",
"'authMechanism'",
",",
"'NOSASL'",
")",
"if",
"configuration",
".",
"conf",
".",
"get",
"(",
"'core'",
",",
"'security'",
")",
"==",
"'kerberos'",
":",
"auth_mechanism",
"=",
"ms",
".",
"extra_dejson",
".",
"get",
"(",
"'authMechanism'",
",",
"'GSSAPI'",
")",
"kerberos_service_name",
"=",
"ms",
".",
"extra_dejson",
".",
"get",
"(",
"'kerberos_service_name'",
",",
"'hive'",
")",
"socket",
"=",
"TSocket",
".",
"TSocket",
"(",
"ms",
".",
"host",
",",
"ms",
".",
"port",
")",
"if",
"configuration",
".",
"conf",
".",
"get",
"(",
"'core'",
",",
"'security'",
")",
"==",
"'kerberos'",
"and",
"auth_mechanism",
"==",
"'GSSAPI'",
":",
"try",
":",
"import",
"saslwrapper",
"as",
"sasl",
"except",
"ImportError",
":",
"import",
"sasl",
"def",
"sasl_factory",
"(",
")",
":",
"sasl_client",
"=",
"sasl",
".",
"Client",
"(",
")",
"sasl_client",
".",
"setAttr",
"(",
"\"host\"",
",",
"ms",
".",
"host",
")",
"sasl_client",
".",
"setAttr",
"(",
"\"service\"",
",",
"kerberos_service_name",
")",
"sasl_client",
".",
"init",
"(",
")",
"return",
"sasl_client",
"from",
"thrift_sasl",
"import",
"TSaslClientTransport",
"transport",
"=",
"TSaslClientTransport",
"(",
"sasl_factory",
",",
"\"GSSAPI\"",
",",
"socket",
")",
"else",
":",
"transport",
"=",
"TTransport",
".",
"TBufferedTransport",
"(",
"socket",
")",
"protocol",
"=",
"TBinaryProtocol",
".",
"TBinaryProtocol",
"(",
"transport",
")",
"return",
"hmsclient",
".",
"HMSClient",
"(",
"iprot",
"=",
"protocol",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
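A sketch for the HiveMetastoreHook.get_metastore_client entry above. The returned HMSClient is used as a context manager, mirroring the `with self.metastore as client` pattern in the hook's own methods; the database and table names come from the doctests in this file and a reachable metastore is assumed.

from airflow.hooks.hive_hooks import HiveMetastoreHook

hook = HiveMetastoreHook()
# The client opens the thrift transport on __enter__ and closes it on __exit__.
with hook.get_metastore_client() as client:
    t = client.get_table(dbname='airflow', tbl_name='static_babynames')
    print(t.tableName)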
|
test
|
HiveMetastoreHook.check_for_partition
|
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition: Expression that matches the partitions to check for
    (e.g. `a = 'b' AND c = 'd'`)
:type partition: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
|
airflow/hooks/hive_hooks.py
|
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition: Expression that matches the partitions to check for
    (e.g. `a = 'b' AND c = 'd'`)
:type partition: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(
schema, table, partition, 1)
if partitions:
return True
else:
return False
|
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition: Expression that matches the partitions to check for
    (e.g. `a = 'b' AND c = 'd'`)
:type partition: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(
schema, table, partition, 1)
if partitions:
return True
else:
return False
|
[
"Checks",
"whether",
"a",
"partition",
"exists"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L529-L554
|
[
"def",
"check_for_partition",
"(",
"self",
",",
"schema",
",",
"table",
",",
"partition",
")",
":",
"with",
"self",
".",
"metastore",
"as",
"client",
":",
"partitions",
"=",
"client",
".",
"get_partitions_by_filter",
"(",
"schema",
",",
"table",
",",
"partition",
",",
"1",
")",
"if",
"partitions",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
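A short sketch for the check_for_partition entry above, reusing the doctest's table and filter; a configured metastore connection is assumed.

from airflow.hooks.hive_hooks import HiveMetastoreHook

hook = HiveMetastoreHook()
# The third argument is a metastore filter expression, not a partition spec.
if hook.check_for_partition('airflow', 'static_babynames_partitioned', "ds='2015-01-01'"):
    print("partition present")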
|
test
|
HiveMetastoreHook.check_for_named_partition
|
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition_name: Name of the partitions to check for (e.g. `a=b/c=d`)
:type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
|
airflow/hooks/hive_hooks.py
|
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition_name: Name of the partitions to check for (e.g. `a=b/c=d`)
:type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
|
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition_name: Name of the partitions to check for (e.g. `a=b/c=d`)
:type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
|
[
"Checks",
"whether",
"a",
"partition",
"with",
"a",
"given",
"name",
"exists"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L556-L576
|
[
"def",
"check_for_named_partition",
"(",
"self",
",",
"schema",
",",
"table",
",",
"partition_name",
")",
":",
"with",
"self",
".",
"metastore",
"as",
"client",
":",
"return",
"client",
".",
"check_for_named_partition",
"(",
"schema",
",",
"table",
",",
"partition_name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
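A sketch for the check_for_named_partition entry above; unlike check_for_partition, it takes a key=value partition name rather than a filter expression. Table and partition values come from the doctest, and a configured metastore connection is assumed.

from airflow.hooks.hive_hooks import HiveMetastoreHook

hook = HiveMetastoreHook()
exists = hook.check_for_named_partition('airflow', 'static_babynames_partitioned', 'ds=2015-01-01')
print(exists)   # True in the doctest above, False for e.g. 'ds=xxx'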
|
test
|
HiveMetastoreHook.get_table
|
Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
|
airflow/hooks/hive_hooks.py
|
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
|
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
|
[
"Get",
"a",
"metastore",
"table",
"object"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L578-L591
|
[
"def",
"get_table",
"(",
"self",
",",
"table_name",
",",
"db",
"=",
"'default'",
")",
":",
"if",
"db",
"==",
"'default'",
"and",
"'.'",
"in",
"table_name",
":",
"db",
",",
"table_name",
"=",
"table_name",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"2",
"]",
"with",
"self",
".",
"metastore",
"as",
"client",
":",
"return",
"client",
".",
"get_table",
"(",
"dbname",
"=",
"db",
",",
"tbl_name",
"=",
"table_name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
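A sketch for the get_table entry above; a dotted table name is split into database and table when db is left at 'default'. Names come from the doctest.

from airflow.hooks.hive_hooks import HiveMetastoreHook

hook = HiveMetastoreHook()
table = hook.get_table('airflow.static_babynames')
print(table.tableName)                      # 'static_babynames'
print([col.name for col in table.sd.cols])  # column names, as in the doctest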
|
test
|
HiveMetastoreHook.get_tables
|
Get a metastore table object
|
airflow/hooks/hive_hooks.py
|
def get_tables(self, db, pattern='*'):
"""
Get a metastore table object
"""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
|
def get_tables(self, db, pattern='*'):
"""
Get a metastore table object
"""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
|
[
"Get",
"a",
"metastore",
"table",
"object"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L593-L599
|
[
"def",
"get_tables",
"(",
"self",
",",
"db",
",",
"pattern",
"=",
"'*'",
")",
":",
"with",
"self",
".",
"metastore",
"as",
"client",
":",
"tables",
"=",
"client",
".",
"get_tables",
"(",
"db_name",
"=",
"db",
",",
"pattern",
"=",
"pattern",
")",
"return",
"client",
".",
"get_table_objects_by_name",
"(",
"db",
",",
"tables",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
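A sketch for the get_tables entry above; the pattern is a metastore glob, and the value used here is illustrative.

from airflow.hooks.hive_hooks import HiveMetastoreHook

hook = HiveMetastoreHook()
# Returns full metastore table objects, not just the matching names.
for table in hook.get_tables(db='airflow', pattern='static_*'):
    print(table.tableName)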
|
test
|
HiveMetastoreHook.get_partitions
|
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
For subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
|
airflow/hooks/hive_hooks.py
|
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
For subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = client.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
|
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
For subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = client.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
|
[
"Returns",
"a",
"list",
"of",
"all",
"partitions",
"in",
"a",
"table",
".",
"Works",
"only",
"for",
"tables",
"with",
"less",
"than",
"32767",
"(",
"java",
"short",
"max",
"val",
")",
".",
"For",
"subpartitioned",
"table",
"the",
"number",
"might",
"easily",
"exceed",
"this",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L608-L638
|
[
"def",
"get_partitions",
"(",
"self",
",",
"schema",
",",
"table_name",
",",
"filter",
"=",
"None",
")",
":",
"with",
"self",
".",
"metastore",
"as",
"client",
":",
"table",
"=",
"client",
".",
"get_table",
"(",
"dbname",
"=",
"schema",
",",
"tbl_name",
"=",
"table_name",
")",
"if",
"len",
"(",
"table",
".",
"partitionKeys",
")",
"==",
"0",
":",
"raise",
"AirflowException",
"(",
"\"The table isn't partitioned\"",
")",
"else",
":",
"if",
"filter",
":",
"parts",
"=",
"client",
".",
"get_partitions_by_filter",
"(",
"db_name",
"=",
"schema",
",",
"tbl_name",
"=",
"table_name",
",",
"filter",
"=",
"filter",
",",
"max_parts",
"=",
"HiveMetastoreHook",
".",
"MAX_PART_COUNT",
")",
"else",
":",
"parts",
"=",
"client",
".",
"get_partitions",
"(",
"db_name",
"=",
"schema",
",",
"tbl_name",
"=",
"table_name",
",",
"max_parts",
"=",
"HiveMetastoreHook",
".",
"MAX_PART_COUNT",
")",
"pnames",
"=",
"[",
"p",
".",
"name",
"for",
"p",
"in",
"table",
".",
"partitionKeys",
"]",
"return",
"[",
"dict",
"(",
"zip",
"(",
"pnames",
",",
"p",
".",
"values",
")",
")",
"for",
"p",
"in",
"parts",
"]"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
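A sketch for the get_partitions entry above, using the partitioned doctest table; the optional filter narrows the listing with a metastore filter expression.

from airflow.hooks.hive_hooks import HiveMetastoreHook

hook = HiveMetastoreHook()
parts = hook.get_partitions(schema='airflow',
                            table_name='static_babynames_partitioned',
                            filter="ds='2015-01-01'")
print(parts)   # e.g. [{'ds': '2015-01-01'}]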
|
test
|
HiveMetastoreHook._get_max_partition_from_part_specs
|
Helper method to get max partition of partitions with partition_key
from part specs. key:value pair in filter_map will be used to
filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
|
airflow/hooks/hive_hooks.py
|
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map):
"""
Helper method to get max partition of partitions with partition_key
from part specs. key:value pair in filter_map will be used to
filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException("Provided partition_key {} "
"is not in part_specs.".format(partition_key))
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException("Keys in provided filter_map {} "
"are not subset of part_spec keys: {}"
.format(', '.join(filter_map.keys()),
', '.join(part_specs[0].keys())))
candidates = [p_dict[partition_key] for p_dict in part_specs
if filter_map is None or
all(item in p_dict.items() for item in filter_map.items())]
if not candidates:
return None
else:
return max(candidates).encode('utf-8')
|
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map):
"""
Helper method to get max partition of partitions with partition_key
from part specs. key:value pair in filter_map will be used to
filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException("Provided partition_key {} "
"is not in part_specs.".format(partition_key))
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException("Keys in provided filter_map {} "
"are not subset of part_spec keys: {}"
.format(', '.join(filter_map.keys()),
', '.join(part_specs[0].keys())))
candidates = [p_dict[partition_key] for p_dict in part_specs
if filter_map is None or
all(item in p_dict.items() for item in filter_map.items())]
if not candidates:
return None
else:
return max(candidates).encode('utf-8')
|
[
"Helper",
"method",
"to",
"get",
"max",
"partition",
"of",
"partitions",
"with",
"partition_key",
"from",
"part",
"specs",
".",
"key",
":",
"value",
"pair",
"in",
"filter_map",
"will",
"be",
"used",
"to",
"filter",
"out",
"partitions",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L641-L680
|
[
"def",
"_get_max_partition_from_part_specs",
"(",
"part_specs",
",",
"partition_key",
",",
"filter_map",
")",
":",
"if",
"not",
"part_specs",
":",
"return",
"None",
"# Assuming all specs have the same keys.",
"if",
"partition_key",
"not",
"in",
"part_specs",
"[",
"0",
"]",
".",
"keys",
"(",
")",
":",
"raise",
"AirflowException",
"(",
"\"Provided partition_key {} \"",
"\"is not in part_specs.\"",
".",
"format",
"(",
"partition_key",
")",
")",
"if",
"filter_map",
":",
"is_subset",
"=",
"set",
"(",
"filter_map",
".",
"keys",
"(",
")",
")",
".",
"issubset",
"(",
"set",
"(",
"part_specs",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
")",
"if",
"filter_map",
"and",
"not",
"is_subset",
":",
"raise",
"AirflowException",
"(",
"\"Keys in provided filter_map {} \"",
"\"are not subset of part_spec keys: {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"filter_map",
".",
"keys",
"(",
")",
")",
",",
"', '",
".",
"join",
"(",
"part_specs",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
")",
")",
"candidates",
"=",
"[",
"p_dict",
"[",
"partition_key",
"]",
"for",
"p_dict",
"in",
"part_specs",
"if",
"filter_map",
"is",
"None",
"or",
"all",
"(",
"item",
"in",
"p_dict",
".",
"items",
"(",
")",
"for",
"item",
"in",
"filter_map",
".",
"items",
"(",
")",
")",
"]",
"if",
"not",
"candidates",
":",
"return",
"None",
"else",
":",
"return",
"max",
"(",
"candidates",
")",
".",
"encode",
"(",
"'utf-8'",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
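Because _get_max_partition_from_part_specs is a pure helper, it can be exercised without a metastore; the part specs below are invented to show the filtering and the utf-8 encoded return value.

from airflow.hooks.hive_hooks import HiveMetastoreHook

part_specs = [{'ds': '2015-01-01', 'hour': '00'},
              {'ds': '2015-01-02', 'hour': '01'}]
# Only specs matching every filter_map pair are candidates; the max value is returned.
result = HiveMetastoreHook._get_max_partition_from_part_specs(
    part_specs, partition_key='ds', filter_map={'hour': '00'})
print(result)   # b'2015-01-01' (utf-8 encoded, as in the implementation above)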
|
test
|
HiveMetastoreHook.max_partition
|
Returns the maximum value for all partitions with given field in a table.
If only one partition key exist in the table, the key will be used as field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: str
:param table_name: table name.
:type table_name: str
:param field: partition key to get max partition from.
:type field: str
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
>>> filter_map = {'ds': '2015-01-01', 'ds': '2014-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
|
airflow/hooks/hive_hooks.py
|
def max_partition(self, schema, table_name, field=None, filter_map=None):
"""
Returns the maximum value for all partitions with given field in a table.
If only one partition key exist in the table, the key will be used as field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: str
:param table_name: table name.
:type table_name: str
:param field: partition key to get max partition from.
:type field: str
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
>>> filter_map = {'ds': '2015-01-01', 'ds': '2014-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = set(key.name for key in table.partitionKeys)
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max "
"value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys "
"that are not partition key.")
part_names = \
client.get_partition_names(schema,
table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
part_specs = [client.partition_name_to_spec(part_name)
for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs,
field,
filter_map)
|
def max_partition(self, schema, table_name, field=None, filter_map=None):
"""
Returns the maximum value for all partitions with given field in a table.
If only one partition key exist in the table, the key will be used as field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: str
:param table_name: table name.
:type table_name: str
:param field: partition key to get max partition from.
:type field: str
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
>>> filter_map = {'ds': '2015-01-01', 'ds': '2014-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = set(key.name for key in table.partitionKeys)
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max "
"value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys "
"that are not partition key.")
part_names = \
client.get_partition_names(schema,
table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
part_specs = [client.partition_name_to_spec(part_name)
for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs,
field,
filter_map)
|
[
"Returns",
"the",
"maximum",
"value",
"for",
"all",
"partitions",
"with",
"given",
"field",
"in",
"a",
"table",
".",
"If",
"only",
"one",
"partition",
"key",
"exist",
"in",
"the",
"table",
"the",
"key",
"will",
"be",
"used",
"as",
"field",
".",
"filter_map",
"should",
"be",
"a",
"partition_key",
":",
"partition_value",
"map",
"and",
"will",
"be",
"used",
"to",
"filter",
"out",
"partitions",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L682-L729
|
[
"def",
"max_partition",
"(",
"self",
",",
"schema",
",",
"table_name",
",",
"field",
"=",
"None",
",",
"filter_map",
"=",
"None",
")",
":",
"with",
"self",
".",
"metastore",
"as",
"client",
":",
"table",
"=",
"client",
".",
"get_table",
"(",
"dbname",
"=",
"schema",
",",
"tbl_name",
"=",
"table_name",
")",
"key_name_set",
"=",
"set",
"(",
"key",
".",
"name",
"for",
"key",
"in",
"table",
".",
"partitionKeys",
")",
"if",
"len",
"(",
"table",
".",
"partitionKeys",
")",
"==",
"1",
":",
"field",
"=",
"table",
".",
"partitionKeys",
"[",
"0",
"]",
".",
"name",
"elif",
"not",
"field",
":",
"raise",
"AirflowException",
"(",
"\"Please specify the field you want the max \"",
"\"value for.\"",
")",
"elif",
"field",
"not",
"in",
"key_name_set",
":",
"raise",
"AirflowException",
"(",
"\"Provided field is not a partition key.\"",
")",
"if",
"filter_map",
"and",
"not",
"set",
"(",
"filter_map",
".",
"keys",
"(",
")",
")",
".",
"issubset",
"(",
"key_name_set",
")",
":",
"raise",
"AirflowException",
"(",
"\"Provided filter_map contains keys \"",
"\"that are not partition key.\"",
")",
"part_names",
"=",
"client",
".",
"get_partition_names",
"(",
"schema",
",",
"table_name",
",",
"max_parts",
"=",
"HiveMetastoreHook",
".",
"MAX_PART_COUNT",
")",
"part_specs",
"=",
"[",
"client",
".",
"partition_name_to_spec",
"(",
"part_name",
")",
"for",
"part_name",
"in",
"part_names",
"]",
"return",
"HiveMetastoreHook",
".",
"_get_max_partition_from_part_specs",
"(",
"part_specs",
",",
"field",
",",
"filter_map",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
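A sketch for the max_partition entry above; with a single partition key the field argument could be omitted, and filter_map is optional. The table name follows the doctest and a metastore connection is assumed.

from airflow.hooks.hive_hooks import HiveMetastoreHook

hook = HiveMetastoreHook()
latest = hook.max_partition(schema='airflow',
                            table_name='static_babynames_partitioned',
                            field='ds')
print(latest)   # the newest 'ds' partition value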
|
test
|
HiveMetastoreHook.table_exists
|
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
|
airflow/hooks/hive_hooks.py
|
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
|
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
|
[
"Check",
"if",
"table",
"exists"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L731-L745
|
[
"def",
"table_exists",
"(",
"self",
",",
"table_name",
",",
"db",
"=",
"'default'",
")",
":",
"try",
":",
"self",
".",
"get_table",
"(",
"table_name",
",",
"db",
")",
"return",
"True",
"except",
"Exception",
":",
"return",
"False"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
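A sketch for the table_exists entry above; note that any exception during the metastore lookup is swallowed and reported as False.

from airflow.hooks.hive_hooks import HiveMetastoreHook

hook = HiveMetastoreHook()
if not hook.table_exists('static_babynames', db='airflow'):
    print("table is missing")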
|
test
|
HiveServer2Hook.get_conn
|
Returns a Hive connection object.
|
airflow/hooks/hive_hooks.py
|
def get_conn(self, schema=None):
"""
Returns a Hive connection object.
"""
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
if auth_mechanism == 'NONE' and db.login is None:
# we need to give a username
username = 'airflow'
kerberos_service_name = None
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# pyhive uses GSSAPI instead of KERBEROS as a auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism "
"for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'KERBEROS'
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=db.password,
database=schema or db.schema or 'default')
|
def get_conn(self, schema=None):
"""
Returns a Hive connection object.
"""
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
if auth_mechanism == 'NONE' and db.login is None:
# we need to give a username
username = 'airflow'
kerberos_service_name = None
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# pyhive uses GSSAPI instead of KERBEROS as a auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism "
"for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'KERBEROS'
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=db.password,
database=schema or db.schema or 'default')
|
[
"Returns",
"a",
"Hive",
"connection",
"object",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L758-L789
|
[
"def",
"get_conn",
"(",
"self",
",",
"schema",
"=",
"None",
")",
":",
"db",
"=",
"self",
".",
"get_connection",
"(",
"self",
".",
"hiveserver2_conn_id",
")",
"auth_mechanism",
"=",
"db",
".",
"extra_dejson",
".",
"get",
"(",
"'authMechanism'",
",",
"'NONE'",
")",
"if",
"auth_mechanism",
"==",
"'NONE'",
"and",
"db",
".",
"login",
"is",
"None",
":",
"# we need to give a username",
"username",
"=",
"'airflow'",
"kerberos_service_name",
"=",
"None",
"if",
"configuration",
".",
"conf",
".",
"get",
"(",
"'core'",
",",
"'security'",
")",
"==",
"'kerberos'",
":",
"auth_mechanism",
"=",
"db",
".",
"extra_dejson",
".",
"get",
"(",
"'authMechanism'",
",",
"'KERBEROS'",
")",
"kerberos_service_name",
"=",
"db",
".",
"extra_dejson",
".",
"get",
"(",
"'kerberos_service_name'",
",",
"'hive'",
")",
"# pyhive uses GSSAPI instead of KERBEROS as a auth_mechanism identifier",
"if",
"auth_mechanism",
"==",
"'GSSAPI'",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Detected deprecated 'GSSAPI' for authMechanism \"",
"\"for %s. Please use 'KERBEROS' instead\"",
",",
"self",
".",
"hiveserver2_conn_id",
")",
"auth_mechanism",
"=",
"'KERBEROS'",
"from",
"pyhive",
".",
"hive",
"import",
"connect",
"return",
"connect",
"(",
"host",
"=",
"db",
".",
"host",
",",
"port",
"=",
"db",
".",
"port",
",",
"auth",
"=",
"auth_mechanism",
",",
"kerberos_service_name",
"=",
"kerberos_service_name",
",",
"username",
"=",
"db",
".",
"login",
"or",
"username",
",",
"password",
"=",
"db",
".",
"password",
",",
"database",
"=",
"schema",
"or",
"db",
".",
"schema",
"or",
"'default'",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
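A sketch for the HiveServer2Hook.get_conn entry above. The returned object is a pyhive connection, so the usual DB-API cursor calls apply; a configured hiveserver2 connection in Airflow is assumed and the query is illustrative.

from airflow.hooks.hive_hooks import HiveServer2Hook

hook = HiveServer2Hook()
conn = hook.get_conn(schema='airflow')
cur = conn.cursor()
cur.execute("SELECT 1")
print(cur.fetchall())
cur.close()
conn.close()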
|
test
|
HiveServer2Hook.get_results
|
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
|
airflow/hooks/hive_hooks.py
|
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results
|
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results
|
[
"Get",
"results",
"of",
"the",
"provided",
"hql",
"in",
"target",
"schema",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L834-L856
|
[
"def",
"get_results",
"(",
"self",
",",
"hql",
",",
"schema",
"=",
"'default'",
",",
"fetch_size",
"=",
"None",
",",
"hive_conf",
"=",
"None",
")",
":",
"results_iter",
"=",
"self",
".",
"_get_results",
"(",
"hql",
",",
"schema",
",",
"fetch_size",
"=",
"fetch_size",
",",
"hive_conf",
"=",
"hive_conf",
")",
"header",
"=",
"next",
"(",
"results_iter",
")",
"results",
"=",
"{",
"'data'",
":",
"list",
"(",
"results_iter",
")",
",",
"'header'",
":",
"header",
"}",
"return",
"results"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
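A sketch for the get_results entry above; 'header' carries the cursor description and 'data' the fetched rows. The query reuses the doctest table and a hiveserver2 connection is assumed.

from airflow.hooks.hive_hooks import HiveServer2Hook

hook = HiveServer2Hook()
res = hook.get_results("SELECT * FROM airflow.static_babynames LIMIT 10")
print([c[0] for c in res['header']])   # column names from the cursor description
print(len(res['data']))                # number of fetched rows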
|
test
|
HiveServer2Hook.to_csv
|
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
:param schema: target schema, default to 'default'.
:type schema: str
:param delimiter: delimiter of the csv file, default to ','.
:type delimiter: str
:param lineterminator: lineterminator of the csv file.
:type lineterminator: str
:param output_header: header of the csv file, default to True.
:type output_header: bool
:param fetch_size: number of result rows to write into the csv file, default to 1000.
:type fetch_size: int
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
|
airflow/hooks/hive_hooks.py
|
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
:param schema: target schema, default to 'default'.
:type schema: str
:param delimiter: delimiter of the csv file, default to ','.
:type delimiter: str
:param lineterminator: lineterminator of the csv file.
:type lineterminator: str
:param output_header: header of the csv file, default to True.
:type output_header: bool
:param fetch_size: number of result rows to write into the csv file, default to 1000.
:type fetch_size: int
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
|
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
:param schema: target schema, default to 'default'.
:type schema: str
:param delimiter: delimiter of the csv file, default to ','.
:type delimiter: str
:param lineterminator: lineterminator of the csv file.
:type lineterminator: str
:param output_header: header of the csv file, default to True.
:type output_header: bool
:param fetch_size: number of result rows to write into the csv file, default to 1000.
:type fetch_size: int
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
|
[
"Execute",
"hql",
"in",
"target",
"schema",
"and",
"write",
"results",
"to",
"a",
"csv",
"file",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L858-L918
|
[
"def",
"to_csv",
"(",
"self",
",",
"hql",
",",
"csv_filepath",
",",
"schema",
"=",
"'default'",
",",
"delimiter",
"=",
"','",
",",
"lineterminator",
"=",
"'\\r\\n'",
",",
"output_header",
"=",
"True",
",",
"fetch_size",
"=",
"1000",
",",
"hive_conf",
"=",
"None",
")",
":",
"results_iter",
"=",
"self",
".",
"_get_results",
"(",
"hql",
",",
"schema",
",",
"fetch_size",
"=",
"fetch_size",
",",
"hive_conf",
"=",
"hive_conf",
")",
"header",
"=",
"next",
"(",
"results_iter",
")",
"message",
"=",
"None",
"i",
"=",
"0",
"with",
"open",
"(",
"csv_filepath",
",",
"'wb'",
")",
"as",
"f",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"f",
",",
"delimiter",
"=",
"delimiter",
",",
"lineterminator",
"=",
"lineterminator",
",",
"encoding",
"=",
"'utf-8'",
")",
"try",
":",
"if",
"output_header",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Cursor description is %s'",
",",
"header",
")",
"writer",
".",
"writerow",
"(",
"[",
"c",
"[",
"0",
"]",
"for",
"c",
"in",
"header",
"]",
")",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"results_iter",
",",
"1",
")",
":",
"writer",
".",
"writerow",
"(",
"row",
")",
"if",
"i",
"%",
"fetch_size",
"==",
"0",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Written %s rows so far.\"",
",",
"i",
")",
"except",
"ValueError",
"as",
"exception",
":",
"message",
"=",
"str",
"(",
"exception",
")",
"if",
"message",
":",
"# need to clean up the file first",
"os",
".",
"remove",
"(",
"csv_filepath",
")",
"raise",
"ValueError",
"(",
"message",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Done. Loaded a total of %s rows.\"",
",",
"i",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
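A sketch for the to_csv entry above; the output path is hypothetical, the query follows the doctest table, and the remaining arguments show the documented defaults being overridden.

from airflow.hooks.hive_hooks import HiveServer2Hook

hook = HiveServer2Hook()
hook.to_csv(hql="SELECT * FROM airflow.static_babynames LIMIT 100",
            csv_filepath="/tmp/babynames.csv",   # hypothetical output file
            delimiter=",",
            output_header=True,
            fetch_size=50)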
|
test
|
HiveServer2Hook.get_records
|
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
|
airflow/hooks/hive_hooks.py
|
def get_records(self, hql, schema='default', hive_conf=None):
"""
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data']
|
def get_records(self, hql, schema='default', hive_conf=None):
"""
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
        :param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data']
|
[
"Get",
"a",
"set",
"of",
"records",
"from",
"a",
"Hive",
"query",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L920-L938
|
[
"def",
"get_records",
"(",
"self",
",",
"hql",
",",
"schema",
"=",
"'default'",
",",
"hive_conf",
"=",
"None",
")",
":",
"return",
"self",
".",
"get_results",
"(",
"hql",
",",
"schema",
"=",
"schema",
",",
"hive_conf",
"=",
"hive_conf",
")",
"[",
"'data'",
"]"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
HiveServer2Hook.get_pandas_df
|
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:return: result of hql execution
:rtype: DataFrame
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
:return: pandas.DataFrame
|
airflow/hooks/hive_hooks.py
|
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:return: result of hql execution
:rtype: DataFrame
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
        :return: pandas.DataFrame
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
|
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:return: result of hql execution
:rtype: DataFrame
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
        :return: pandas.DataFrame
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
|
[
"Get",
"a",
"pandas",
"dataframe",
"from",
"a",
"Hive",
"query"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L940-L963
|
[
"def",
"get_pandas_df",
"(",
"self",
",",
"hql",
",",
"schema",
"=",
"'default'",
")",
":",
"import",
"pandas",
"as",
"pd",
"res",
"=",
"self",
".",
"get_results",
"(",
"hql",
",",
"schema",
"=",
"schema",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"res",
"[",
"'data'",
"]",
")",
"df",
".",
"columns",
"=",
"[",
"c",
"[",
"0",
"]",
"for",
"c",
"in",
"res",
"[",
"'header'",
"]",
"]",
"return",
"df"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.get_conn
|
Retrieves connection to Cloud Vision.
:return: Google Cloud Vision client object.
:rtype: google.cloud.vision_v1.ProductSearchClient
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def get_conn(self):
"""
Retrieves connection to Cloud Vision.
:return: Google Cloud Vision client object.
:rtype: google.cloud.vision_v1.ProductSearchClient
"""
if not self._client:
self._client = ProductSearchClient(credentials=self._get_credentials())
return self._client
|
def get_conn(self):
"""
Retrieves connection to Cloud Vision.
:return: Google Cloud Vision client object.
:rtype: google.cloud.vision_v1.ProductSearchClient
"""
if not self._client:
self._client = ProductSearchClient(credentials=self._get_credentials())
return self._client
|
[
"Retrieves",
"connection",
"to",
"Cloud",
"Vision",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L106-L115
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_client",
":",
"self",
".",
"_client",
"=",
"ProductSearchClient",
"(",
"credentials",
"=",
"self",
".",
"_get_credentials",
"(",
")",
")",
"return",
"self",
".",
"_client"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.create_product_set
|
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def create_product_set(
self,
location,
product_set,
project_id=None,
product_set_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator`
"""
client = self.get_conn()
parent = ProductSearchClient.location_path(project_id, location)
self.log.info('Creating a new ProductSet under the parent: %s', parent)
response = client.create_product_set(
parent=parent,
product_set=product_set,
product_set_id=product_set_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ProductSet created: %s', response.name if response else '')
self.log.debug('ProductSet created:\n%s', response)
if not product_set_id:
# Product set id was generated by the API
product_set_id = self._get_autogenerated_id(response)
self.log.info('Extracted autogenerated ProductSet ID from the response: %s', product_set_id)
return product_set_id
|
def create_product_set(
self,
location,
product_set,
project_id=None,
product_set_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator`
"""
client = self.get_conn()
parent = ProductSearchClient.location_path(project_id, location)
self.log.info('Creating a new ProductSet under the parent: %s', parent)
response = client.create_product_set(
parent=parent,
product_set=product_set,
product_set_id=product_set_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ProductSet created: %s', response.name if response else '')
self.log.debug('ProductSet created:\n%s', response)
if not product_set_id:
# Product set id was generated by the API
product_set_id = self._get_autogenerated_id(response)
self.log.info('Extracted autogenerated ProductSet ID from the response: %s', product_set_id)
return product_set_id
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionProductSetCreateOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L128-L161
|
[
"def",
"create_product_set",
"(",
"self",
",",
"location",
",",
"product_set",
",",
"project_id",
"=",
"None",
",",
"product_set_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"parent",
"=",
"ProductSearchClient",
".",
"location_path",
"(",
"project_id",
",",
"location",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Creating a new ProductSet under the parent: %s'",
",",
"parent",
")",
"response",
"=",
"client",
".",
"create_product_set",
"(",
"parent",
"=",
"parent",
",",
"product_set",
"=",
"product_set",
",",
"product_set_id",
"=",
"product_set_id",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")",
"self",
".",
"log",
".",
"info",
"(",
"'ProductSet created: %s'",
",",
"response",
".",
"name",
"if",
"response",
"else",
"''",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'ProductSet created:\\n%s'",
",",
"response",
")",
"if",
"not",
"product_set_id",
":",
"# Product set id was generated by the API",
"product_set_id",
"=",
"self",
".",
"_get_autogenerated_id",
"(",
"response",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Extracted autogenerated ProductSet ID from the response: %s'",
",",
"product_set_id",
")",
"return",
"product_set_id"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
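A hedged usage sketch for the create_product_set method in the row above; the project id, location, and display name are made-up values for illustration:

from google.cloud.vision_v1.types import ProductSet

from airflow.contrib.hooks.gcp_vision_hook import CloudVisionHook

# Hypothetical example: create a ProductSet and keep its (possibly autogenerated) id.
hook = CloudVisionHook()
product_set_id = hook.create_product_set(
    location='europe-west1',              # hypothetical location
    product_set=ProductSet(display_name='my-product-set'),
    project_id='my-gcp-project',          # hypothetical project
)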
|
test
|
CloudVisionHook.get_product_set
|
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetGetOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def get_product_set(
self, location, product_set_id, project_id=None, retry=None, timeout=None, metadata=None
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetGetOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Retrieving ProductSet: %s', name)
response = client.get_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('ProductSet retrieved.')
self.log.debug('ProductSet retrieved:\n%s', response)
return MessageToDict(response)
|
def get_product_set(
self, location, product_set_id, project_id=None, retry=None, timeout=None, metadata=None
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetGetOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Retrieving ProductSet: %s', name)
response = client.get_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('ProductSet retrieved.')
self.log.debug('ProductSet retrieved:\n%s', response)
return MessageToDict(response)
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionProductSetGetOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L165-L178
|
[
"def",
"get_product_set",
"(",
"self",
",",
"location",
",",
"product_set_id",
",",
"project_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"name",
"=",
"ProductSearchClient",
".",
"product_set_path",
"(",
"project_id",
",",
"location",
",",
"product_set_id",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Retrieving ProductSet: %s'",
",",
"name",
")",
"response",
"=",
"client",
".",
"get_product_set",
"(",
"name",
"=",
"name",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")",
"self",
".",
"log",
".",
"info",
"(",
"'ProductSet retrieved.'",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'ProductSet retrieved:\\n%s'",
",",
"response",
")",
"return",
"MessageToDict",
"(",
"response",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.update_product_set
|
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetUpdateOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def update_product_set(
self,
product_set,
location=None,
product_set_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetUpdateOperator`
"""
client = self.get_conn()
product_set = self.product_set_name_determiner.get_entity_with_name(
product_set, product_set_id, location, project_id
)
self.log.info('Updating ProductSet: %s', product_set.name)
response = client.update_product_set(
product_set=product_set, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('ProductSet updated: %s', response.name if response else '')
self.log.debug('ProductSet updated:\n%s', response)
return MessageToDict(response)
|
def update_product_set(
self,
product_set,
location=None,
product_set_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetUpdateOperator`
"""
client = self.get_conn()
product_set = self.product_set_name_determiner.get_entity_with_name(
product_set, product_set_id, location, project_id
)
self.log.info('Updating ProductSet: %s', product_set.name)
response = client.update_product_set(
product_set=product_set, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('ProductSet updated: %s', response.name if response else '')
self.log.debug('ProductSet updated:\n%s', response)
return MessageToDict(response)
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionProductSetUpdateOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L182-L207
|
[
"def",
"update_product_set",
"(",
"self",
",",
"product_set",
",",
"location",
"=",
"None",
",",
"product_set_id",
"=",
"None",
",",
"update_mask",
"=",
"None",
",",
"project_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"product_set",
"=",
"self",
".",
"product_set_name_determiner",
".",
"get_entity_with_name",
"(",
"product_set",
",",
"product_set_id",
",",
"location",
",",
"project_id",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Updating ProductSet: %s'",
",",
"product_set",
".",
"name",
")",
"response",
"=",
"client",
".",
"update_product_set",
"(",
"product_set",
"=",
"product_set",
",",
"update_mask",
"=",
"update_mask",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")",
"self",
".",
"log",
".",
"info",
"(",
"'ProductSet updated: %s'",
",",
"response",
".",
"name",
"if",
"response",
"else",
"''",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'ProductSet updated:\\n%s'",
",",
"response",
")",
"return",
"MessageToDict",
"(",
"response",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.delete_product_set
|
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetDeleteOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def delete_product_set(
self, location, product_set_id, project_id=None, retry=None, timeout=None, metadata=None
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetDeleteOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Deleting ProductSet: %s', name)
client.delete_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('ProductSet with the name [%s] deleted.', name)
|
def delete_product_set(
self, location, product_set_id, project_id=None, retry=None, timeout=None, metadata=None
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetDeleteOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Deleting ProductSet: %s', name)
client.delete_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('ProductSet with the name [%s] deleted.', name)
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionProductSetDeleteOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L211-L222
|
[
"def",
"delete_product_set",
"(",
"self",
",",
"location",
",",
"product_set_id",
",",
"project_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"name",
"=",
"ProductSearchClient",
".",
"product_set_path",
"(",
"project_id",
",",
"location",
",",
"product_set_id",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Deleting ProductSet: %s'",
",",
"name",
")",
"client",
".",
"delete_product_set",
"(",
"name",
"=",
"name",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")",
"self",
".",
"log",
".",
"info",
"(",
"'ProductSet with the name [%s] deleted.'",
",",
"name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.create_product
|
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductCreateOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def create_product(
self, location, product, project_id=None, product_id=None, retry=None, timeout=None, metadata=None
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductCreateOperator`
"""
client = self.get_conn()
parent = ProductSearchClient.location_path(project_id, location)
self.log.info('Creating a new Product under the parent: %s', parent)
response = client.create_product(
parent=parent,
product=product,
product_id=product_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('Product created: %s', response.name if response else '')
self.log.debug('Product created:\n%s', response)
if not product_id:
# Product id was generated by the API
product_id = self._get_autogenerated_id(response)
self.log.info('Extracted autogenerated Product ID from the response: %s', product_id)
return product_id
|
def create_product(
self, location, product, project_id=None, product_id=None, retry=None, timeout=None, metadata=None
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductCreateOperator`
"""
client = self.get_conn()
parent = ProductSearchClient.location_path(project_id, location)
self.log.info('Creating a new Product under the parent: %s', parent)
response = client.create_product(
parent=parent,
product=product,
product_id=product_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('Product created: %s', response.name if response else '')
self.log.debug('Product created:\n%s', response)
if not product_id:
# Product id was generated by the API
product_id = self._get_autogenerated_id(response)
self.log.info('Extracted autogenerated Product ID from the response: %s', product_id)
return product_id
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionProductCreateOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L226-L252
|
[
"def",
"create_product",
"(",
"self",
",",
"location",
",",
"product",
",",
"project_id",
"=",
"None",
",",
"product_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"parent",
"=",
"ProductSearchClient",
".",
"location_path",
"(",
"project_id",
",",
"location",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Creating a new Product under the parent: %s'",
",",
"parent",
")",
"response",
"=",
"client",
".",
"create_product",
"(",
"parent",
"=",
"parent",
",",
"product",
"=",
"product",
",",
"product_id",
"=",
"product_id",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Product created: %s'",
",",
"response",
".",
"name",
"if",
"response",
"else",
"''",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Product created:\\n%s'",
",",
"response",
")",
"if",
"not",
"product_id",
":",
"# Product id was generated by the API",
"product_id",
"=",
"self",
".",
"_get_autogenerated_id",
"(",
"response",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Extracted autogenerated Product ID from the response: %s'",
",",
"product_id",
")",
"return",
"product_id"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.get_product
|
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductGetOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def get_product(self, location, product_id, project_id=None, retry=None, timeout=None, metadata=None):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductGetOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
self.log.info('Retrieving Product: %s', name)
response = client.get_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('Product retrieved.')
self.log.debug('Product retrieved:\n%s', response)
return MessageToDict(response)
|
def get_product(self, location, product_id, project_id=None, retry=None, timeout=None, metadata=None):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductGetOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
self.log.info('Retrieving Product: %s', name)
response = client.get_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('Product retrieved.')
self.log.debug('Product retrieved:\n%s', response)
return MessageToDict(response)
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionProductGetOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L256-L267
|
[
"def",
"get_product",
"(",
"self",
",",
"location",
",",
"product_id",
",",
"project_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"name",
"=",
"ProductSearchClient",
".",
"product_path",
"(",
"project_id",
",",
"location",
",",
"product_id",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Retrieving Product: %s'",
",",
"name",
")",
"response",
"=",
"client",
".",
"get_product",
"(",
"name",
"=",
"name",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Product retrieved.'",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Product retrieved:\\n%s'",
",",
"response",
")",
"return",
"MessageToDict",
"(",
"response",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.update_product
|
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def update_product(
self,
product,
location=None,
product_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator`
"""
client = self.get_conn()
product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)
        self.log.info('Updating Product: %s', product.name)
response = client.update_product(
product=product, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product updated: %s', response.name if response else '')
self.log.debug('Product updated:\n%s', response)
return MessageToDict(response)
|
def update_product(
self,
product,
location=None,
product_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator`
"""
client = self.get_conn()
product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)
        self.log.info('Updating Product: %s', product.name)
response = client.update_product(
product=product, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product updated: %s', response.name if response else '')
self.log.debug('Product updated:\n%s', response)
return MessageToDict(response)
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionProductUpdateOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L271-L294
|
[
"def",
"update_product",
"(",
"self",
",",
"product",
",",
"location",
"=",
"None",
",",
"product_id",
"=",
"None",
",",
"update_mask",
"=",
"None",
",",
"project_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"product",
"=",
"self",
".",
"product_name_determiner",
".",
"get_entity_with_name",
"(",
"product",
",",
"product_id",
",",
"location",
",",
"project_id",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Updating ProductSet: %s'",
",",
"product",
".",
"name",
")",
"response",
"=",
"client",
".",
"update_product",
"(",
"product",
"=",
"product",
",",
"update_mask",
"=",
"update_mask",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Product updated: %s'",
",",
"response",
".",
"name",
"if",
"response",
"else",
"''",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Product updated:\\n%s'",
",",
"response",
")",
"return",
"MessageToDict",
"(",
"response",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.delete_product
|
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductDeleteOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def delete_product(self, location, product_id, project_id=None, retry=None, timeout=None, metadata=None):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductDeleteOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
        self.log.info('Deleting Product: %s', name)
client.delete_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('Product with the name [%s] deleted:', name)
|
def delete_product(self, location, product_id, project_id=None, retry=None, timeout=None, metadata=None):
"""
For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductDeleteOperator`
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
        self.log.info('Deleting Product: %s', name)
client.delete_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('Product with the name [%s] deleted:', name)
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionProductDeleteOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L298-L307
|
[
"def",
"delete_product",
"(",
"self",
",",
"location",
",",
"product_id",
",",
"project_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"name",
"=",
"ProductSearchClient",
".",
"product_path",
"(",
"project_id",
",",
"location",
",",
"product_id",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Deleting ProductSet: %s'",
",",
"name",
")",
"client",
".",
"delete_product",
"(",
"name",
"=",
"name",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Product with the name [%s] deleted:'",
",",
"name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.create_reference_image
|
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def create_reference_image(
self,
location,
product_id,
reference_image,
reference_image_id=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`
"""
client = self.get_conn()
self.log.info('Creating ReferenceImage')
parent = ProductSearchClient.product_path(project=project_id, location=location, product=product_id)
response = client.create_reference_image(
parent=parent,
reference_image=reference_image,
reference_image_id=reference_image_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ReferenceImage created: %s', response.name if response else '')
self.log.debug('ReferenceImage created:\n%s', response)
if not reference_image_id:
            # Reference image id was generated by the API
reference_image_id = self._get_autogenerated_id(response)
self.log.info(
'Extracted autogenerated ReferenceImage ID from the response: %s', reference_image_id
)
return reference_image_id
|
def create_reference_image(
self,
location,
product_id,
reference_image,
reference_image_id=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`
"""
client = self.get_conn()
self.log.info('Creating ReferenceImage')
parent = ProductSearchClient.product_path(project=project_id, location=location, product=product_id)
response = client.create_reference_image(
parent=parent,
reference_image=reference_image,
reference_image_id=reference_image_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info('ReferenceImage created: %s', response.name if response else '')
self.log.debug('ReferenceImage created:\n%s', response)
if not reference_image_id:
            # Reference image id was generated by the API
reference_image_id = self._get_autogenerated_id(response)
self.log.info(
'Extracted autogenerated ReferenceImage ID from the response: %s', reference_image_id
)
return reference_image_id
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"py",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionReferenceImageCreateOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L311-L349
|
[
"def",
"create_reference_image",
"(",
"self",
",",
"location",
",",
"product_id",
",",
"reference_image",
",",
"reference_image_id",
"=",
"None",
",",
"project_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Creating ReferenceImage'",
")",
"parent",
"=",
"ProductSearchClient",
".",
"product_path",
"(",
"project",
"=",
"project_id",
",",
"location",
"=",
"location",
",",
"product",
"=",
"product_id",
")",
"response",
"=",
"client",
".",
"create_reference_image",
"(",
"parent",
"=",
"parent",
",",
"reference_image",
"=",
"reference_image",
",",
"reference_image_id",
"=",
"reference_image_id",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")",
"self",
".",
"log",
".",
"info",
"(",
"'ReferenceImage created: %s'",
",",
"response",
".",
"name",
"if",
"response",
"else",
"''",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'ReferenceImage created:\\n%s'",
",",
"response",
")",
"if",
"not",
"reference_image_id",
":",
"# Refernece image id was generated by the API",
"reference_image_id",
"=",
"self",
".",
"_get_autogenerated_id",
"(",
"response",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Extracted autogenerated ReferenceImage ID from the response: %s'",
",",
"reference_image_id",
")",
"return",
"reference_image_id"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.delete_reference_image
|
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def delete_reference_image(
self,
location,
product_id,
reference_image_id,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`
"""
client = self.get_conn()
self.log.info('Deleting ReferenceImage')
name = ProductSearchClient.reference_image_path(
project=project_id, location=location, product=product_id, reference_image=reference_image_id
)
response = client.delete_reference_image(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('ReferenceImage with the name [%s] deleted.', name)
return MessageToDict(response)
|
def delete_reference_image(
self,
location,
product_id,
reference_image_id,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`
"""
client = self.get_conn()
self.log.info('Deleting ReferenceImage')
name = ProductSearchClient.reference_image_path(
project=project_id, location=location, product=product_id, reference_image=reference_image_id
)
response = client.delete_reference_image(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('ReferenceImage with the name [%s] deleted.', name)
return MessageToDict(response)
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"py",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionReferenceImageCreateOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L353-L375
|
[
"def",
"delete_reference_image",
"(",
"self",
",",
"location",
",",
"product_id",
",",
"reference_image_id",
",",
"project_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Deleting ReferenceImage'",
")",
"name",
"=",
"ProductSearchClient",
".",
"reference_image_path",
"(",
"project",
"=",
"project_id",
",",
"location",
"=",
"location",
",",
"product",
"=",
"product_id",
",",
"reference_image",
"=",
"reference_image_id",
")",
"response",
"=",
"client",
".",
"delete_reference_image",
"(",
"name",
"=",
"name",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")",
"self",
".",
"log",
".",
"info",
"(",
"'ReferenceImage with the name [%s] deleted.'",
",",
"name",
")",
"return",
"MessageToDict",
"(",
"response",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.add_product_to_product_set
|
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionAddProductToProductSetOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def add_product_to_product_set(
self,
product_set_id,
product_id,
location=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionAddProductToProductSetOperator`
"""
client = self.get_conn()
product_name = ProductSearchClient.product_path(project_id, location, product_id)
product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Add Product[name=%s] to Product Set[name=%s]', product_name, product_set_name)
client.add_product_to_product_set(
name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product added to Product Set')
|
def add_product_to_product_set(
self,
product_set_id,
product_id,
location=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionAddProductToProductSetOperator`
"""
client = self.get_conn()
product_name = ProductSearchClient.product_path(project_id, location, product_id)
product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Add Product[name=%s] to Product Set[name=%s]', product_name, product_set_name)
client.add_product_to_product_set(
name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product added to Product Set')
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"py",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionAddProductToProductSetOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L379-L404
|
[
"def",
"add_product_to_product_set",
"(",
"self",
",",
"product_set_id",
",",
"product_id",
",",
"location",
"=",
"None",
",",
"project_id",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
")",
":",
"client",
"=",
"self",
".",
"get_conn",
"(",
")",
"product_name",
"=",
"ProductSearchClient",
".",
"product_path",
"(",
"project_id",
",",
"location",
",",
"product_id",
")",
"product_set_name",
"=",
"ProductSearchClient",
".",
"product_set_path",
"(",
"project_id",
",",
"location",
",",
"product_set_id",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Add Product[name=%s] to Product Set[name=%s]'",
",",
"product_name",
",",
"product_set_name",
")",
"client",
".",
"add_product_to_product_set",
"(",
"name",
"=",
"product_set_name",
",",
"product",
"=",
"product_name",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Product added to Product Set'",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
CloudVisionHook.annotate_image
|
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_image_annotator_operator.CloudVisionAnnotateImage`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def annotate_image(self, request, retry=None, timeout=None):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_image_annotator_operator.CloudVisionAnnotateImage`
"""
client = self.annotator_client
self.log.info('Annotating image')
response = client.annotate_image(request=request, retry=retry, timeout=timeout)
self.log.info('Image annotated')
return MessageToDict(response)
|
def annotate_image(self, request, retry=None, timeout=None):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_image_annotator_operator.CloudVisionAnnotateImage`
"""
client = self.annotator_client
self.log.info('Annotating image')
response = client.annotate_image(request=request, retry=retry, timeout=timeout)
self.log.info('Image annotated')
return MessageToDict(response)
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"py",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_image_annotator_operator",
".",
"CloudVisionAnnotateImage"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L436-L449
|
[
"def",
"annotate_image",
"(",
"self",
",",
"request",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"annotator_client",
"self",
".",
"log",
".",
"info",
"(",
"'Annotating image'",
")",
"response",
"=",
"client",
".",
"annotate_image",
"(",
"request",
"=",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Image annotated'",
")",
"return",
"MessageToDict",
"(",
"response",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
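A short, illustrative sketch of calling the annotate_image method in the row above; the image URI is an assumption, and features are left out on the assumption that the underlying client then requests all feature types:

from airflow.contrib.hooks.gcp_vision_hook import CloudVisionHook

# Hypothetical example: annotate an image stored in GCS.
hook = CloudVisionHook()
annotations = hook.annotate_image(
    request={
        'image': {'source': {'image_uri': 'gs://my-bucket/example.jpg'}},  # hypothetical bucket
    }
)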
|
test
|
CloudVisionHook.safe_search_detection
|
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectImageSafeSearchOperator`
|
airflow/contrib/hooks/gcp_vision_hook.py
|
def safe_search_detection(
self, image, max_results=None, retry=None, timeout=None, additional_properties=None
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectImageSafeSearchOperator`
"""
client = self.annotator_client
self.log.info("Detecting safe search")
if additional_properties is None:
additional_properties = {}
response = client.safe_search_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response)
self._check_for_error(response)
self.log.info("Safe search detection finished")
return response
|
def safe_search_detection(
self, image, max_results=None, retry=None, timeout=None, additional_properties=None
):
"""
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectImageSafeSearchOperator`
"""
client = self.annotator_client
self.log.info("Detecting safe search")
if additional_properties is None:
additional_properties = {}
response = client.safe_search_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response)
self._check_for_error(response)
self.log.info("Safe search detection finished")
return response
|
[
"For",
"the",
"documentation",
"see",
":",
":",
"py",
":",
"class",
":",
"~airflow",
".",
"contrib",
".",
"operators",
".",
"gcp_vision_operator",
".",
"CloudVisionDetectImageSafeSearchOperator"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L527-L548
|
[
"def",
"safe_search_detection",
"(",
"self",
",",
"image",
",",
"max_results",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"additional_properties",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"annotator_client",
"self",
".",
"log",
".",
"info",
"(",
"\"Detecting safe search\"",
")",
"if",
"additional_properties",
"is",
"None",
":",
"additional_properties",
"=",
"{",
"}",
"response",
"=",
"client",
".",
"safe_search_detection",
"(",
"image",
"=",
"image",
",",
"max_results",
"=",
"max_results",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"*",
"*",
"additional_properties",
")",
"response",
"=",
"MessageToDict",
"(",
"response",
")",
"self",
".",
"_check_for_error",
"(",
"response",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Safe search detection finished\"",
")",
"return",
"response"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
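An illustrative call of the safe_search_detection method in the row above; the image URI and the camelCase result key are assumptions:

from airflow.contrib.hooks.gcp_vision_hook import CloudVisionHook

# Hypothetical example: run safe-search detection on a GCS-hosted image.
hook = CloudVisionHook()
result = hook.safe_search_detection(
    image={'source': {'image_uri': 'gs://my-bucket/example.jpg'}},  # hypothetical bucket
)
print(result.get('safeSearchAnnotation'))  # key name assumed from MessageToDict camelCase output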
|
test
|
DingdingHook._get_endpoint
|
Get Dingding endpoint for sending message.
|
airflow/contrib/hooks/dingding_hook.py
|
def _get_endpoint(self):
"""
Get Dingding endpoint for sending message.
"""
conn = self.get_connection(self.http_conn_id)
token = conn.password
if not token:
            raise AirflowException('Dingding token is requested but got nothing, '
                                   'check your conn_id configuration.')
return 'robot/send?access_token={}'.format(token)
|
def _get_endpoint(self):
"""
Get Dingding endpoint for sending message.
"""
conn = self.get_connection(self.http_conn_id)
token = conn.password
if not token:
            raise AirflowException('Dingding token is requested but got nothing, '
                                   'check your conn_id configuration.')
return 'robot/send?access_token={}'.format(token)
|
[
"Get",
"Dingding",
"endpoint",
"for",
"sending",
"message",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/dingding_hook.py#L65-L74
|
[
"def",
"_get_endpoint",
"(",
"self",
")",
":",
"conn",
"=",
"self",
".",
"get_connection",
"(",
"self",
".",
"http_conn_id",
")",
"token",
"=",
"conn",
".",
"password",
"if",
"not",
"token",
":",
"raise",
"AirflowException",
"(",
"'Dingding token is requests but get nothing, '",
"'check you conn_id configuration.'",
")",
"return",
"'robot/send?access_token={}'",
".",
"format",
"(",
"token",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
DingdingHook._build_message
|
Build different types of Dingding message.
As the most commonly used type, a text message just needs to post the message content
rather than a dict like ``{'content': 'message'}``
|
airflow/contrib/hooks/dingding_hook.py
|
def _build_message(self):
"""
        Build different types of Dingding message.
        As the most commonly used type, a text message just needs to post the message content
        rather than a dict like ``{'content': 'message'}``
"""
if self.message_type in ['text', 'markdown']:
data = {
'msgtype': self.message_type,
self.message_type: {
'content': self.message
} if self.message_type == 'text' else self.message,
'at': {
'atMobiles': self.at_mobiles,
'isAtAll': self.at_all
}
}
else:
data = {
'msgtype': self.message_type,
self.message_type: self.message
}
return json.dumps(data)
|
def _build_message(self):
"""
        Build different types of Dingding message.
        As the most commonly used type, a text message just needs to post the message content
        rather than a dict like ``{'content': 'message'}``
"""
if self.message_type in ['text', 'markdown']:
data = {
'msgtype': self.message_type,
self.message_type: {
'content': self.message
} if self.message_type == 'text' else self.message,
'at': {
'atMobiles': self.at_mobiles,
'isAtAll': self.at_all
}
}
else:
data = {
'msgtype': self.message_type,
self.message_type: self.message
}
return json.dumps(data)
|
[
"Build",
"different",
"type",
"of",
"Dingding",
"message",
"As",
"most",
"commonly",
"used",
"type",
"text",
"message",
"just",
"need",
"post",
"message",
"content",
"rather",
"than",
"a",
"dict",
"like",
"{",
"content",
":",
"message",
"}"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/dingding_hook.py#L76-L98
|
[
"def",
"_build_message",
"(",
"self",
")",
":",
"if",
"self",
".",
"message_type",
"in",
"[",
"'text'",
",",
"'markdown'",
"]",
":",
"data",
"=",
"{",
"'msgtype'",
":",
"self",
".",
"message_type",
",",
"self",
".",
"message_type",
":",
"{",
"'content'",
":",
"self",
".",
"message",
"}",
"if",
"self",
".",
"message_type",
"==",
"'text'",
"else",
"self",
".",
"message",
",",
"'at'",
":",
"{",
"'atMobiles'",
":",
"self",
".",
"at_mobiles",
",",
"'isAtAll'",
":",
"self",
".",
"at_all",
"}",
"}",
"else",
":",
"data",
"=",
"{",
"'msgtype'",
":",
"self",
".",
"message_type",
",",
"self",
".",
"message_type",
":",
"self",
".",
"message",
"}",
"return",
"json",
".",
"dumps",
"(",
"data",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
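For clarity, a sketch of the JSON payload the _build_message method in the row above produces for a text message; the concrete values are made up:

import json

# Assuming message_type='text', message='Hello from Airflow',
# at_mobiles=['13800000000'] and at_all=False, _build_message() returns
# a string equivalent to:
payload = json.dumps({
    'msgtype': 'text',
    'text': {'content': 'Hello from Airflow'},
    'at': {'atMobiles': ['13800000000'], 'isAtAll': False},
})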
|
test
|
DingdingHook.send
|
Send Dingding message
|
airflow/contrib/hooks/dingding_hook.py
|
def send(self):
"""
Send Dingding message
"""
support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard']
if self.message_type not in support_type:
            raise ValueError('DingdingWebhookHook only supports {} '
                             'so far, but received {}'.format(support_type, self.message_type))
data = self._build_message()
self.log.info('Sending Dingding type %s message %s', self.message_type, data)
resp = self.run(endpoint=self._get_endpoint(),
data=data,
headers={'Content-Type': 'application/json'})
        # A successful Dingding message send will return errcode equal to 0
if int(resp.json().get('errcode')) != 0:
raise AirflowException('Send Dingding message failed, receive error '
'message %s', resp.text)
self.log.info('Success Send Dingding message')
|
def send(self):
"""
Send Dingding message
"""
support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard']
if self.message_type not in support_type:
            raise ValueError('DingdingWebhookHook only supports {} '
                             'so far, but received {}'.format(support_type, self.message_type))
data = self._build_message()
self.log.info('Sending Dingding type %s message %s', self.message_type, data)
resp = self.run(endpoint=self._get_endpoint(),
data=data,
headers={'Content-Type': 'application/json'})
        # A successful Dingding message send will return errcode equal to 0
if int(resp.json().get('errcode')) != 0:
raise AirflowException('Send Dingding message failed, receive error '
'message %s', resp.text)
self.log.info('Success Send Dingding message')
|
[
"Send",
"Dingding",
"message"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/dingding_hook.py#L115-L134
|
[
"def",
"send",
"(",
"self",
")",
":",
"support_type",
"=",
"[",
"'text'",
",",
"'link'",
",",
"'markdown'",
",",
"'actionCard'",
",",
"'feedCard'",
"]",
"if",
"self",
".",
"message_type",
"not",
"in",
"support_type",
":",
"raise",
"ValueError",
"(",
"'DingdingWebhookHook only support {} '",
"'so far, but receive {}'",
".",
"format",
"(",
"support_type",
",",
"self",
".",
"message_type",
")",
")",
"data",
"=",
"self",
".",
"_build_message",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Sending Dingding type %s message %s'",
",",
"self",
".",
"message_type",
",",
"data",
")",
"resp",
"=",
"self",
".",
"run",
"(",
"endpoint",
"=",
"self",
".",
"_get_endpoint",
"(",
")",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
")",
"# Dingding success send message will with errcode equal to 0",
"if",
"int",
"(",
"resp",
".",
"json",
"(",
")",
".",
"get",
"(",
"'errcode'",
")",
")",
"!=",
"0",
":",
"raise",
"AirflowException",
"(",
"'Send Dingding message failed, receive error '",
"'message %s'",
",",
"resp",
".",
"text",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Success Send Dingding message'",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
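A minimal usage sketch for the send method in the row above; the connection id and message text are assumptions:

from airflow.contrib.hooks.dingding_hook import DingdingHook

# Hypothetical example: send a plain text notification through a Dingding robot.
hook = DingdingHook(
    dingding_conn_id='dingding_default',   # hypothetical connection id
    message_type='text',
    message='Airflow task finished successfully',
    at_all=False,
)
hook.send()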
|
test
|
ElasticsearchTaskHandler._read
|
Endpoint for streaming log.
:param ti: task instance object
:param try_number: try_number of the task instance
:param metadata: log metadata,
    can be used for streaming log reading and auto-tailing.
:return: a list of log documents and metadata.
|
airflow/utils/log/es_task_handler.py
|
def _read(self, ti, try_number, metadata=None):
"""
Endpoint for streaming log.
:param ti: task instance object
:param try_number: try_number of the task instance
:param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
:return: a list of log documents and metadata.
"""
if not metadata:
metadata = {'offset': 0}
if 'offset' not in metadata:
metadata['offset'] = 0
offset = metadata['offset']
log_id = self._render_log_id(ti, try_number)
logs = self.es_read(log_id, offset)
next_offset = offset if not logs else logs[-1].offset
metadata['offset'] = next_offset
# end_of_log_mark may contain characters like '\n' which is needed to
# have the log uploaded but will not be stored in elasticsearch.
metadata['end_of_log'] = False if not logs \
else logs[-1].message == self.end_of_log_mark.strip()
cur_ts = pendulum.now()
# Assume end of log after not receiving new log for 5 min,
# as executor heartbeat is 1 min and there might be some
# delay before Elasticsearch makes the log available.
if 'last_log_timestamp' in metadata:
last_log_ts = timezone.parse(metadata['last_log_timestamp'])
if cur_ts.diff(last_log_ts).in_minutes() >= 5:
metadata['end_of_log'] = True
if offset != next_offset or 'last_log_timestamp' not in metadata:
metadata['last_log_timestamp'] = str(cur_ts)
message = '\n'.join([log.message for log in logs])
return message, metadata
|
def _read(self, ti, try_number, metadata=None):
"""
Endpoint for streaming log.
:param ti: task instance object
:param try_number: try_number of the task instance
:param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
:return: a list of log documents and metadata.
"""
if not metadata:
metadata = {'offset': 0}
if 'offset' not in metadata:
metadata['offset'] = 0
offset = metadata['offset']
log_id = self._render_log_id(ti, try_number)
logs = self.es_read(log_id, offset)
next_offset = offset if not logs else logs[-1].offset
metadata['offset'] = next_offset
# end_of_log_mark may contain characters like '\n' which is needed to
# have the log uploaded but will not be stored in elasticsearch.
metadata['end_of_log'] = False if not logs \
else logs[-1].message == self.end_of_log_mark.strip()
cur_ts = pendulum.now()
# Assume end of log after not receiving new log for 5 min,
# as executor heartbeat is 1 min and there might be some
# delay before Elasticsearch makes the log available.
if 'last_log_timestamp' in metadata:
last_log_ts = timezone.parse(metadata['last_log_timestamp'])
if cur_ts.diff(last_log_ts).in_minutes() >= 5:
metadata['end_of_log'] = True
if offset != next_offset or 'last_log_timestamp' not in metadata:
metadata['last_log_timestamp'] = str(cur_ts)
message = '\n'.join([log.message for log in logs])
return message, metadata
|
[
"Endpoint",
"for",
"streaming",
"log",
".",
":",
"param",
"ti",
":",
"task",
"instance",
"object",
":",
"param",
"try_number",
":",
"try_number",
"of",
"the",
"task",
"instance",
":",
"param",
"metadata",
":",
"log",
"metadata",
"can",
"be",
"used",
"for",
"steaming",
"log",
"reading",
"and",
"auto",
"-",
"tailing",
".",
":",
"return",
":",
"a",
"list",
"of",
"log",
"documents",
"and",
"metadata",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/es_task_handler.py#L83-L124
|
[
"def",
"_read",
"(",
"self",
",",
"ti",
",",
"try_number",
",",
"metadata",
"=",
"None",
")",
":",
"if",
"not",
"metadata",
":",
"metadata",
"=",
"{",
"'offset'",
":",
"0",
"}",
"if",
"'offset'",
"not",
"in",
"metadata",
":",
"metadata",
"[",
"'offset'",
"]",
"=",
"0",
"offset",
"=",
"metadata",
"[",
"'offset'",
"]",
"log_id",
"=",
"self",
".",
"_render_log_id",
"(",
"ti",
",",
"try_number",
")",
"logs",
"=",
"self",
".",
"es_read",
"(",
"log_id",
",",
"offset",
")",
"next_offset",
"=",
"offset",
"if",
"not",
"logs",
"else",
"logs",
"[",
"-",
"1",
"]",
".",
"offset",
"metadata",
"[",
"'offset'",
"]",
"=",
"next_offset",
"# end_of_log_mark may contain characters like '\\n' which is needed to",
"# have the log uploaded but will not be stored in elasticsearch.",
"metadata",
"[",
"'end_of_log'",
"]",
"=",
"False",
"if",
"not",
"logs",
"else",
"logs",
"[",
"-",
"1",
"]",
".",
"message",
"==",
"self",
".",
"end_of_log_mark",
".",
"strip",
"(",
")",
"cur_ts",
"=",
"pendulum",
".",
"now",
"(",
")",
"# Assume end of log after not receiving new log for 5 min,",
"# as executor heartbeat is 1 min and there might be some",
"# delay before Elasticsearch makes the log available.",
"if",
"'last_log_timestamp'",
"in",
"metadata",
":",
"last_log_ts",
"=",
"timezone",
".",
"parse",
"(",
"metadata",
"[",
"'last_log_timestamp'",
"]",
")",
"if",
"cur_ts",
".",
"diff",
"(",
"last_log_ts",
")",
".",
"in_minutes",
"(",
")",
">=",
"5",
":",
"metadata",
"[",
"'end_of_log'",
"]",
"=",
"True",
"if",
"offset",
"!=",
"next_offset",
"or",
"'last_log_timestamp'",
"not",
"in",
"metadata",
":",
"metadata",
"[",
"'last_log_timestamp'",
"]",
"=",
"str",
"(",
"cur_ts",
")",
"message",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"log",
".",
"message",
"for",
"log",
"in",
"logs",
"]",
")",
"return",
"message",
",",
"metadata"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
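A small sketch of how a caller could page through logs with the _read method in the row above; the handler and task-instance arguments are assumed to be created elsewhere:

# Hypothetical helper: keep reading until the handler marks end_of_log.
def stream_task_logs(handler, ti, try_number=1):
    metadata = {'offset': 0}
    while True:
        message, metadata = handler._read(ti, try_number, metadata=metadata)
        if message:
            print(message)
        if metadata.get('end_of_log'):
            break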
|
test
|
ElasticsearchTaskHandler.es_read
|
Returns the logs matching log_id in Elasticsearch and next offset.
Returns '' if no log is found or there was an error.
:param log_id: the log_id of the log to read.
:type log_id: str
:param offset: the offset start to read log from.
:type offset: str
|
airflow/utils/log/es_task_handler.py
|
def es_read(self, log_id, offset):
"""
Returns the logs matching log_id in Elasticsearch and next offset.
Returns '' if no log is found or there was an error.
:param log_id: the log_id of the log to read.
:type log_id: str
:param offset: the offset start to read log from.
:type offset: str
"""
# Offset is the unique key for sorting logs given log_id.
s = Search(using=self.client) \
.query('match_phrase', log_id=log_id) \
.sort('offset')
s = s.filter('range', offset={'gt': offset})
logs = []
if s.count() != 0:
try:
logs = s[self.MAX_LINE_PER_PAGE * self.PAGE:self.MAX_LINE_PER_PAGE] \
.execute()
except Exception as e:
self.log.exception('Could not read log with log_id: %s, error: %s', log_id, str(e))
return logs
|
def es_read(self, log_id, offset):
"""
Returns the logs matching log_id in Elasticsearch and next offset.
Returns an empty list if no log is found or there was an error.
:param log_id: the log_id of the log to read.
:type log_id: str
:param offset: the offset start to read log from.
:type offset: str
"""
# Offset is the unique key for sorting logs given log_id.
s = Search(using=self.client) \
.query('match_phrase', log_id=log_id) \
.sort('offset')
s = s.filter('range', offset={'gt': offset})
logs = []
if s.count() != 0:
try:
logs = s[self.MAX_LINE_PER_PAGE * self.PAGE:self.MAX_LINE_PER_PAGE] \
.execute()
except Exception as e:
self.log.exception('Could not read log with log_id: %s, error: %s', log_id, str(e))
return logs
|
[
"Returns",
"the",
"logs",
"matching",
"log_id",
"in",
"Elasticsearch",
"and",
"next",
"offset",
".",
"Returns",
"if",
"no",
"log",
"is",
"found",
"or",
"there",
"was",
"an",
"error",
".",
":",
"param",
"log_id",
":",
"the",
"log_id",
"of",
"the",
"log",
"to",
"read",
".",
":",
"type",
"log_id",
":",
"str",
":",
"param",
"offset",
":",
"the",
"offset",
"start",
"to",
"read",
"log",
"from",
".",
":",
"type",
"offset",
":",
"str"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/es_task_handler.py#L126-L152
|
[
"def",
"es_read",
"(",
"self",
",",
"log_id",
",",
"offset",
")",
":",
"# Offset is the unique key for sorting logs given log_id.",
"s",
"=",
"Search",
"(",
"using",
"=",
"self",
".",
"client",
")",
".",
"query",
"(",
"'match_phrase'",
",",
"log_id",
"=",
"log_id",
")",
".",
"sort",
"(",
"'offset'",
")",
"s",
"=",
"s",
".",
"filter",
"(",
"'range'",
",",
"offset",
"=",
"{",
"'gt'",
":",
"offset",
"}",
")",
"logs",
"=",
"[",
"]",
"if",
"s",
".",
"count",
"(",
")",
"!=",
"0",
":",
"try",
":",
"logs",
"=",
"s",
"[",
"self",
".",
"MAX_LINE_PER_PAGE",
"*",
"self",
".",
"PAGE",
":",
"self",
".",
"MAX_LINE_PER_PAGE",
"]",
".",
"execute",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
".",
"exception",
"(",
"'Could not read log with log_id: %s, error: %s'",
",",
"log_id",
",",
"str",
"(",
"e",
")",
")",
"return",
"logs"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
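A hedged standalone equivalent of the query that es_read builds above, assuming the elasticsearch and elasticsearch_dsl packages and a local Elasticsearch node; the index name, log_id value and document fields (offset, message) are illustrative only.

from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search

client = Elasticsearch(['http://localhost:9200'])
s = Search(using=client, index='airflow-logs') \
    .query('match_phrase', log_id='example_dag-example_task-2019-01-01T00:00:00-1') \
    .sort('offset') \
    .filter('range', offset={'gt': 0})   # only lines after the last offset read
if s.count() != 0:
    for hit in s[0:100].execute():       # page through at most 100 hits
        print(hit.offset, hit.message)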
test
|
_bind_parameters
|
Helper method that binds parameters to a SQL query.
|
airflow/contrib/hooks/bigquery_hook.py
|
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
|
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
|
[
"Helper",
"method",
"that",
"binds",
"parameters",
"to",
"a",
"SQL",
"query",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1948-L1959
|
[
"def",
"_bind_parameters",
"(",
"operation",
",",
"parameters",
")",
":",
"# inspired by MySQL Python Connector (conversion.py)",
"string_parameters",
"=",
"{",
"}",
"for",
"(",
"name",
",",
"value",
")",
"in",
"iteritems",
"(",
"parameters",
")",
":",
"if",
"value",
"is",
"None",
":",
"string_parameters",
"[",
"name",
"]",
"=",
"'NULL'",
"elif",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"string_parameters",
"[",
"name",
"]",
"=",
"\"'\"",
"+",
"_escape",
"(",
"value",
")",
"+",
"\"'\"",
"else",
":",
"string_parameters",
"[",
"name",
"]",
"=",
"str",
"(",
"value",
")",
"return",
"operation",
"%",
"string_parameters"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
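A quick usage sketch for the _bind_parameters record above; the import path assumes the Airflow 1.x contrib layout this dataset was extracted from.

from airflow.contrib.hooks.bigquery_hook import _bind_parameters

sql = "SELECT * FROM dataset.users WHERE name = %(name)s AND age > %(age)s"
print(_bind_parameters(sql, {'name': "O'Brien", 'age': 30}))
# SELECT * FROM dataset.users WHERE name = 'O\'Brien' AND age > 30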
test
|
_escape
|
Helper method that escapes parameters to a SQL query.
|
airflow/contrib/hooks/bigquery_hook.py
|
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
|
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
|
[
"Helper",
"method",
"that",
"escapes",
"parameters",
"to",
"a",
"SQL",
"query",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1962-L1970
|
[
"def",
"_escape",
"(",
"s",
")",
":",
"e",
"=",
"s",
"e",
"=",
"e",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
"e",
"=",
"e",
".",
"replace",
"(",
"'\\n'",
",",
"'\\\\n'",
")",
"e",
"=",
"e",
".",
"replace",
"(",
"'\\r'",
",",
"'\\\\r'",
")",
"e",
"=",
"e",
".",
"replace",
"(",
"\"'\"",
",",
"\"\\\\'\"",
")",
"e",
"=",
"e",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"return",
"e"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
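A short demonstration of what the _escape helper above produces; the import path assumes the same Airflow 1.x contrib module as the record.

from airflow.contrib.hooks.bigquery_hook import _escape

raw = 'He said "hi"\nC:\\tmp'
print(_escape(raw))
# He said \"hi\"\nC:\\tmp   (backslash doubled, newline and quotes escaped)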
test
|
_bq_cast
|
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
|
airflow/contrib/hooks/bigquery_hook.py
|
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
return float(string_field)
elif bq_type == 'BOOLEAN':
if string_field not in ['true', 'false']:
raise ValueError("{} must have value 'true' or 'false'".format(
string_field))
return string_field == 'true'
else:
return string_field
|
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
return float(string_field)
elif bq_type == 'BOOLEAN':
if string_field not in ['true', 'false']:
raise ValueError("{} must have value 'true' or 'false'".format(
string_field))
return string_field == 'true'
else:
return string_field
|
[
"Helper",
"method",
"that",
"casts",
"a",
"BigQuery",
"row",
"to",
"the",
"appropriate",
"data",
"types",
".",
"This",
"is",
"useful",
"because",
"BigQuery",
"returns",
"all",
"fields",
"as",
"strings",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1973-L1990
|
[
"def",
"_bq_cast",
"(",
"string_field",
",",
"bq_type",
")",
":",
"if",
"string_field",
"is",
"None",
":",
"return",
"None",
"elif",
"bq_type",
"==",
"'INTEGER'",
":",
"return",
"int",
"(",
"string_field",
")",
"elif",
"bq_type",
"==",
"'FLOAT'",
"or",
"bq_type",
"==",
"'TIMESTAMP'",
":",
"return",
"float",
"(",
"string_field",
")",
"elif",
"bq_type",
"==",
"'BOOLEAN'",
":",
"if",
"string_field",
"not",
"in",
"[",
"'true'",
",",
"'false'",
"]",
":",
"raise",
"ValueError",
"(",
"\"{} must have value 'true' or 'false'\"",
".",
"format",
"(",
"string_field",
")",
")",
"return",
"string_field",
"==",
"'true'",
"else",
":",
"return",
"string_field"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
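A worked example for the _bq_cast helper above, since BigQuery returns every field as a string; the import path assumes the Airflow 1.x contrib module shown in this record.

from airflow.contrib.hooks.bigquery_hook import _bq_cast

row = ['42', '3.14', 'true', None, 'hello']
types = ['INTEGER', 'FLOAT', 'BOOLEAN', 'STRING', 'STRING']
print([_bq_cast(value, bq_type) for value, bq_type in zip(row, types)])
# [42, 3.14, True, None, 'hello']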
test
|
_validate_value
|
function to check expected type and raise
error if type is not correct
|
airflow/contrib/hooks/bigquery_hook.py
|
def _validate_value(key, value, expected_type):
""" function to check expected type and raise
error if type is not correct """
if not isinstance(value, expected_type):
raise TypeError("{} argument must have a type {} not {}".format(
key, expected_type, type(value)))
|
def _validate_value(key, value, expected_type):
""" function to check expected type and raise
error if type is not correct """
if not isinstance(value, expected_type):
raise TypeError("{} argument must have a type {} not {}".format(
key, expected_type, type(value)))
|
[
"function",
"to",
"check",
"expected",
"type",
"and",
"raise",
"error",
"if",
"type",
"is",
"not",
"correct"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L2071-L2076
|
[
"def",
"_validate_value",
"(",
"key",
",",
"value",
",",
"expected_type",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"expected_type",
")",
":",
"raise",
"TypeError",
"(",
"\"{} argument must have a type {} not {}\"",
".",
"format",
"(",
"key",
",",
"expected_type",
",",
"type",
"(",
"value",
")",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
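A minimal demonstration of _validate_value above; the import path assumes the Airflow 1.x contrib module shown in this record.

from airflow.contrib.hooks.bigquery_hook import _validate_value

_validate_value('labels', {'team': 'data'}, dict)   # passes silently
_validate_value('labels', 'not-a-dict', dict)       # raises TypeError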
test
|
BigQueryHook.get_conn
|
Returns a BigQuery PEP 249 connection object.
|
airflow/contrib/hooks/bigquery_hook.py
|
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(
service=service,
project_id=project,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
num_retries=self.num_retries
)
|
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(
service=service,
project_id=project,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
num_retries=self.num_retries
)
|
[
"Returns",
"a",
"BigQuery",
"PEP",
"249",
"connection",
"object",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L65-L77
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"service",
"=",
"self",
".",
"get_service",
"(",
")",
"project",
"=",
"self",
".",
"_get_field",
"(",
"'project'",
")",
"return",
"BigQueryConnection",
"(",
"service",
"=",
"service",
",",
"project_id",
"=",
"project",
",",
"use_legacy_sql",
"=",
"self",
".",
"use_legacy_sql",
",",
"location",
"=",
"self",
".",
"location",
",",
"num_retries",
"=",
"self",
".",
"num_retries",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryHook.get_service
|
Returns a BigQuery service object.
|
airflow/contrib/hooks/bigquery_hook.py
|
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False)
|
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False)
|
[
"Returns",
"a",
"BigQuery",
"service",
"object",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L79-L85
|
[
"def",
"get_service",
"(",
"self",
")",
":",
"http_authorized",
"=",
"self",
".",
"_authorize",
"(",
")",
"return",
"build",
"(",
"'bigquery'",
",",
"'v2'",
",",
"http",
"=",
"http_authorized",
",",
"cache_discovery",
"=",
"False",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryHook.get_pandas_df
|
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: str
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
defaults to use `self.use_legacy_sql` if not specified
:type dialect: str in {'legacy', 'standard'}
|
airflow/contrib/hooks/bigquery_hook.py
|
def get_pandas_df(self, sql, parameters=None, dialect=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: str
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
defaults to use `self.use_legacy_sql` if not specified
:type dialect: str in {'legacy', 'standard'}
"""
private_key = self._get_field('key_path', None) or self._get_field('keyfile_dict', None)
if dialect is None:
dialect = 'legacy' if self.use_legacy_sql else 'standard'
return read_gbq(sql,
project_id=self._get_field('project'),
dialect=dialect,
verbose=False,
private_key=private_key)
|
def get_pandas_df(self, sql, parameters=None, dialect=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: str
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
defaults to use `self.use_legacy_sql` if not specified
:type dialect: str in {'legacy', 'standard'}
"""
private_key = self._get_field('key_path', None) or self._get_field('keyfile_dict', None)
if dialect is None:
dialect = 'legacy' if self.use_legacy_sql else 'standard'
return read_gbq(sql,
project_id=self._get_field('project'),
dialect=dialect,
verbose=False,
private_key=private_key)
|
[
"Returns",
"a",
"Pandas",
"DataFrame",
"for",
"the",
"results",
"produced",
"by",
"a",
"BigQuery",
"query",
".",
"The",
"DbApiHook",
"method",
"must",
"be",
"overridden",
"because",
"Pandas",
"doesn",
"t",
"support",
"PEP",
"249",
"connections",
"except",
"for",
"SQLite",
".",
"See",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L95-L122
|
[
"def",
"get_pandas_df",
"(",
"self",
",",
"sql",
",",
"parameters",
"=",
"None",
",",
"dialect",
"=",
"None",
")",
":",
"private_key",
"=",
"self",
".",
"_get_field",
"(",
"'key_path'",
",",
"None",
")",
"or",
"self",
".",
"_get_field",
"(",
"'keyfile_dict'",
",",
"None",
")",
"if",
"dialect",
"is",
"None",
":",
"dialect",
"=",
"'legacy'",
"if",
"self",
".",
"use_legacy_sql",
"else",
"'standard'",
"return",
"read_gbq",
"(",
"sql",
",",
"project_id",
"=",
"self",
".",
"_get_field",
"(",
"'project'",
")",
",",
"dialect",
"=",
"dialect",
",",
"verbose",
"=",
"False",
",",
"private_key",
"=",
"private_key",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
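A hedged usage sketch for BigQueryHook.get_pandas_df above; it assumes an Airflow GCP connection (the default 'bigquery_default') with access to a project that can query the public dataset referenced below.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

hook = BigQueryHook(bigquery_conn_id='bigquery_default', use_legacy_sql=False)
df = hook.get_pandas_df(
    'SELECT name, SUM(number) AS total '
    'FROM `bigquery-public-data.usa_names.usa_1910_current` '
    'GROUP BY name ORDER BY total DESC LIMIT 5',
    dialect='standard')
print(df)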
test
|
BigQueryHook.table_exists
|
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
|
airflow/contrib/hooks/bigquery_hook.py
|
def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id, datasetId=dataset_id,
tableId=table_id).execute(num_retries=self.num_retries)
return True
except HttpError as e:
if e.resp['status'] == '404':
return False
raise
|
def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id, datasetId=dataset_id,
tableId=table_id).execute(num_retries=self.num_retries)
return True
except HttpError as e:
if e.resp['status'] == '404':
return False
raise
|
[
"Checks",
"for",
"the",
"existence",
"of",
"a",
"table",
"in",
"Google",
"BigQuery",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L124-L147
|
[
"def",
"table_exists",
"(",
"self",
",",
"project_id",
",",
"dataset_id",
",",
"table_id",
")",
":",
"service",
"=",
"self",
".",
"get_service",
"(",
")",
"try",
":",
"service",
".",
"tables",
"(",
")",
".",
"get",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"tableId",
"=",
"table_id",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"return",
"True",
"except",
"HttpError",
"as",
"e",
":",
"if",
"e",
".",
"resp",
"[",
"'status'",
"]",
"==",
"'404'",
":",
"return",
"False",
"raise"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
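A hedged usage sketch for BigQueryHook.table_exists above; the project, dataset and table names are placeholders and a 'bigquery_default' Airflow connection is assumed.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

hook = BigQueryHook(bigquery_conn_id='bigquery_default')
if not hook.table_exists(project_id='my-project',
                         dataset_id='analytics',
                         table_id='events'):
    print('table is missing, create it first')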
test
|
BigQueryBaseCursor.create_empty_table
|
Creates a new, empty table in the dataset.
To create a view, which is defined by a SQL query, pass a dictionary to the 'view' kwarg
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
:type schema_fields: list
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:param cluster_fields: [Optional] The fields used for clustering.
Must be specified with time_partitioning, data in the table will be first
partitioned and subsequently clustered.
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
:type cluster_fields: list
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
:type view: dict
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000",
"useLegacySql": False
}
:return: None
|
airflow/contrib/hooks/bigquery_hook.py
|
def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning=None,
cluster_fields=None,
labels=None,
view=None,
num_retries=None):
"""
Creates a new, empty table in the dataset.
To create a view, which is defined by a SQL query, pass a dictionary to the 'view' kwarg
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
:type schema_fields: list
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:param cluster_fields: [Optional] The fields used for clustering.
Must be specified with time_partitioning, data in the table will be first
partitioned and subsequently clustered.
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
:type cluster_fields: list
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
:type view: dict
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000",
"useLegacySql": False
}
:return: None
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {
'tableReference': {
'tableId': table_id
}
}
if schema_fields:
table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if cluster_fields:
table_resource['clustering'] = {
'fields': cluster_fields
}
if labels:
table_resource['labels'] = labels
if view:
table_resource['view'] = view
num_retries = num_retries if num_retries else self.num_retries
self.log.info('Creating Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute(num_retries=num_retries)
self.log.info('Table created successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
|
def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning=None,
cluster_fields=None,
labels=None,
view=None,
num_retries=None):
"""
Creates a new, empty table in the dataset.
To create a view, which is defined by a SQL query, pass a dictionary to the 'view' kwarg
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
:type schema_fields: list
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:param cluster_fields: [Optional] The fields used for clustering.
Must be specified with time_partitioning, data in the table will be first
partitioned and subsequently clustered.
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
:type cluster_fields: list
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
:type view: dict
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000",
"useLegacySql": False
}
:return: None
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {
'tableReference': {
'tableId': table_id
}
}
if schema_fields:
table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if cluster_fields:
table_resource['clustering'] = {
'fields': cluster_fields
}
if labels:
table_resource['labels'] = labels
if view:
table_resource['view'] = view
num_retries = num_retries if num_retries else self.num_retries
self.log.info('Creating Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute(num_retries=num_retries)
self.log.info('Table created successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
|
[
"Creates",
"a",
"new",
"empty",
"table",
"in",
"the",
"dataset",
".",
"To",
"create",
"a",
"view",
"which",
"is",
"defined",
"by",
"a",
"SQL",
"query",
"parse",
"a",
"dictionary",
"to",
"view",
"kwarg"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L229-L328
|
[
"def",
"create_empty_table",
"(",
"self",
",",
"project_id",
",",
"dataset_id",
",",
"table_id",
",",
"schema_fields",
"=",
"None",
",",
"time_partitioning",
"=",
"None",
",",
"cluster_fields",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"view",
"=",
"None",
",",
"num_retries",
"=",
"None",
")",
":",
"project_id",
"=",
"project_id",
"if",
"project_id",
"is",
"not",
"None",
"else",
"self",
".",
"project_id",
"table_resource",
"=",
"{",
"'tableReference'",
":",
"{",
"'tableId'",
":",
"table_id",
"}",
"}",
"if",
"schema_fields",
":",
"table_resource",
"[",
"'schema'",
"]",
"=",
"{",
"'fields'",
":",
"schema_fields",
"}",
"if",
"time_partitioning",
":",
"table_resource",
"[",
"'timePartitioning'",
"]",
"=",
"time_partitioning",
"if",
"cluster_fields",
":",
"table_resource",
"[",
"'clustering'",
"]",
"=",
"{",
"'fields'",
":",
"cluster_fields",
"}",
"if",
"labels",
":",
"table_resource",
"[",
"'labels'",
"]",
"=",
"labels",
"if",
"view",
":",
"table_resource",
"[",
"'view'",
"]",
"=",
"view",
"num_retries",
"=",
"num_retries",
"if",
"num_retries",
"else",
"self",
".",
"num_retries",
"self",
".",
"log",
".",
"info",
"(",
"'Creating Table %s:%s.%s'",
",",
"project_id",
",",
"dataset_id",
",",
"table_id",
")",
"try",
":",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"insert",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"body",
"=",
"table_resource",
")",
".",
"execute",
"(",
"num_retries",
"=",
"num_retries",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Table created successfully: %s:%s.%s'",
",",
"project_id",
",",
"dataset_id",
",",
"table_id",
")",
"except",
"HttpError",
"as",
"err",
":",
"raise",
"AirflowException",
"(",
"'BigQuery job failed. Error was: {}'",
".",
"format",
"(",
"err",
".",
"content",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
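A hedged sketch of calling create_empty_table above; in this Airflow line the BigQueryBaseCursor is normally reached through hook.get_conn().cursor(), and the connection id, project, dataset and schema below are placeholders.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()
cursor.create_empty_table(
    project_id='my-project',
    dataset_id='analytics',
    table_id='events',
    schema_fields=[{'name': 'event_name', 'type': 'STRING', 'mode': 'REQUIRED'},
                   {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}],
    time_partitioning={'type': 'DAY', 'field': 'ts'})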
test
|
BigQueryBaseCursor.create_external_table
|
Creates a new external table in the dataset with the data in Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table name to create external table.
If ``<project>`` is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: str
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
|
airflow/contrib/hooks/bigquery_hook.py
|
def create_external_table(self,
external_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
autodetect=False,
compression='NONE',
ignore_unknown_values=False,
max_bad_records=0,
skip_leading_rows=0,
field_delimiter=',',
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
src_fmt_configs=None,
labels=None
):
"""
Creates a new external table in the dataset with the data in Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table name to create external table.
If ``<project>`` is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: str
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
"""
if src_fmt_configs is None:
src_fmt_configs = {}
project_id, dataset_id, external_table_id = \
_split_tablename(table_input=external_project_dataset_table,
default_project_id=self.project_id,
var_name='external_project_dataset_table')
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
compression = compression.upper()
allowed_compressions = ['NONE', 'GZIP']
if compression not in allowed_compressions:
raise ValueError("{0} is not a valid compression format. "
"Please use one of the following types: {1}"
.format(compression, allowed_compressions))
table_resource = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values
},
'tableReference': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': external_table_id,
}
}
if schema_fields:
table_resource['externalDataConfiguration'].update({
'schema': {
'fields': schema_fields
}
})
self.log.info('Creating external table: %s', external_project_dataset_table)
if max_bad_records:
table_resource['externalDataConfiguration']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'quote_character' not in src_fmt_configs:
src_fmt_configs['quote'] = quote_character
if 'allowQuotedNewlines' not in src_fmt_configs:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
if 'allowJaggedRows' not in src_fmt_configs:
src_fmt_configs['allowJaggedRows'] = allow_jagged_rows
src_fmt_to_param_mapping = {
'CSV': 'csvOptions',
'GOOGLE_SHEETS': 'googleSheetsOptions'
}
src_fmt_to_configs_mapping = {
'csvOptions': [
'allowJaggedRows', 'allowQuotedNewlines',
'fieldDelimiter', 'skipLeadingRows',
'quote'
],
'googleSheetsOptions': ['skipLeadingRows']
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[
src_fmt_to_param_mapping[source_format]
]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
table_resource['externalDataConfiguration'][src_fmt_to_param_mapping[
source_format]] = src_fmt_configs
if labels:
table_resource['labels'] = labels
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource
).execute(num_retries=self.num_retries)
self.log.info('External table created successfully: %s',
external_project_dataset_table)
except HttpError as err:
raise Exception(
'BigQuery job failed. Error was: {}'.format(err.content)
)
|
def create_external_table(self,
external_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
autodetect=False,
compression='NONE',
ignore_unknown_values=False,
max_bad_records=0,
skip_leading_rows=0,
field_delimiter=',',
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
src_fmt_configs=None,
labels=None
):
"""
Creates a new external table in the dataset with the data in Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table name to create external table.
If ``<project>`` is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: str
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
"""
if src_fmt_configs is None:
src_fmt_configs = {}
project_id, dataset_id, external_table_id = \
_split_tablename(table_input=external_project_dataset_table,
default_project_id=self.project_id,
var_name='external_project_dataset_table')
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
compression = compression.upper()
allowed_compressions = ['NONE', 'GZIP']
if compression not in allowed_compressions:
raise ValueError("{0} is not a valid compression format. "
"Please use one of the following types: {1}"
.format(compression, allowed_compressions))
table_resource = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values
},
'tableReference': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': external_table_id,
}
}
if schema_fields:
table_resource['externalDataConfiguration'].update({
'schema': {
'fields': schema_fields
}
})
self.log.info('Creating external table: %s', external_project_dataset_table)
if max_bad_records:
table_resource['externalDataConfiguration']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'quote_character' not in src_fmt_configs:
src_fmt_configs['quote'] = quote_character
if 'allowQuotedNewlines' not in src_fmt_configs:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
if 'allowJaggedRows' not in src_fmt_configs:
src_fmt_configs['allowJaggedRows'] = allow_jagged_rows
src_fmt_to_param_mapping = {
'CSV': 'csvOptions',
'GOOGLE_SHEETS': 'googleSheetsOptions'
}
src_fmt_to_configs_mapping = {
'csvOptions': [
'allowJaggedRows', 'allowQuotedNewlines',
'fieldDelimiter', 'skipLeadingRows',
'quote'
],
'googleSheetsOptions': ['skipLeadingRows']
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[
src_fmt_to_param_mapping[source_format]
]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
table_resource['externalDataConfiguration'][src_fmt_to_param_mapping[
source_format]] = src_fmt_configs
if labels:
table_resource['labels'] = labels
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource
).execute(num_retries=self.num_retries)
self.log.info('External table created successfully: %s',
external_project_dataset_table)
except HttpError as err:
raise Exception(
'BigQuery job failed. Error was: {}'.format(err.content)
)
|
[
"Creates",
"a",
"new",
"external",
"table",
"in",
"the",
"dataset",
"with",
"the",
"data",
"in",
"Google",
"Cloud",
"Storage",
".",
"See",
"here",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L330-L524
|
[
"def",
"create_external_table",
"(",
"self",
",",
"external_project_dataset_table",
",",
"schema_fields",
",",
"source_uris",
",",
"source_format",
"=",
"'CSV'",
",",
"autodetect",
"=",
"False",
",",
"compression",
"=",
"'NONE'",
",",
"ignore_unknown_values",
"=",
"False",
",",
"max_bad_records",
"=",
"0",
",",
"skip_leading_rows",
"=",
"0",
",",
"field_delimiter",
"=",
"','",
",",
"quote_character",
"=",
"None",
",",
"allow_quoted_newlines",
"=",
"False",
",",
"allow_jagged_rows",
"=",
"False",
",",
"src_fmt_configs",
"=",
"None",
",",
"labels",
"=",
"None",
")",
":",
"if",
"src_fmt_configs",
"is",
"None",
":",
"src_fmt_configs",
"=",
"{",
"}",
"project_id",
",",
"dataset_id",
",",
"external_table_id",
"=",
"_split_tablename",
"(",
"table_input",
"=",
"external_project_dataset_table",
",",
"default_project_id",
"=",
"self",
".",
"project_id",
",",
"var_name",
"=",
"'external_project_dataset_table'",
")",
"# bigquery only allows certain source formats",
"# we check to make sure the passed source format is valid",
"# if it's not, we raise a ValueError",
"# Refer to this link for more details:",
"# https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat",
"source_format",
"=",
"source_format",
".",
"upper",
"(",
")",
"allowed_formats",
"=",
"[",
"\"CSV\"",
",",
"\"NEWLINE_DELIMITED_JSON\"",
",",
"\"AVRO\"",
",",
"\"GOOGLE_SHEETS\"",
",",
"\"DATASTORE_BACKUP\"",
",",
"\"PARQUET\"",
"]",
"if",
"source_format",
"not",
"in",
"allowed_formats",
":",
"raise",
"ValueError",
"(",
"\"{0} is not a valid source format. \"",
"\"Please use one of the following types: {1}\"",
".",
"format",
"(",
"source_format",
",",
"allowed_formats",
")",
")",
"compression",
"=",
"compression",
".",
"upper",
"(",
")",
"allowed_compressions",
"=",
"[",
"'NONE'",
",",
"'GZIP'",
"]",
"if",
"compression",
"not",
"in",
"allowed_compressions",
":",
"raise",
"ValueError",
"(",
"\"{0} is not a valid compression format. \"",
"\"Please use one of the following types: {1}\"",
".",
"format",
"(",
"compression",
",",
"allowed_compressions",
")",
")",
"table_resource",
"=",
"{",
"'externalDataConfiguration'",
":",
"{",
"'autodetect'",
":",
"autodetect",
",",
"'sourceFormat'",
":",
"source_format",
",",
"'sourceUris'",
":",
"source_uris",
",",
"'compression'",
":",
"compression",
",",
"'ignoreUnknownValues'",
":",
"ignore_unknown_values",
"}",
",",
"'tableReference'",
":",
"{",
"'projectId'",
":",
"project_id",
",",
"'datasetId'",
":",
"dataset_id",
",",
"'tableId'",
":",
"external_table_id",
",",
"}",
"}",
"if",
"schema_fields",
":",
"table_resource",
"[",
"'externalDataConfiguration'",
"]",
".",
"update",
"(",
"{",
"'schema'",
":",
"{",
"'fields'",
":",
"schema_fields",
"}",
"}",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Creating external table: %s'",
",",
"external_project_dataset_table",
")",
"if",
"max_bad_records",
":",
"table_resource",
"[",
"'externalDataConfiguration'",
"]",
"[",
"'maxBadRecords'",
"]",
"=",
"max_bad_records",
"# if following fields are not specified in src_fmt_configs,",
"# honor the top-level params for backward-compatibility",
"if",
"'skipLeadingRows'",
"not",
"in",
"src_fmt_configs",
":",
"src_fmt_configs",
"[",
"'skipLeadingRows'",
"]",
"=",
"skip_leading_rows",
"if",
"'fieldDelimiter'",
"not",
"in",
"src_fmt_configs",
":",
"src_fmt_configs",
"[",
"'fieldDelimiter'",
"]",
"=",
"field_delimiter",
"if",
"'quote_character'",
"not",
"in",
"src_fmt_configs",
":",
"src_fmt_configs",
"[",
"'quote'",
"]",
"=",
"quote_character",
"if",
"'allowQuotedNewlines'",
"not",
"in",
"src_fmt_configs",
":",
"src_fmt_configs",
"[",
"'allowQuotedNewlines'",
"]",
"=",
"allow_quoted_newlines",
"if",
"'allowJaggedRows'",
"not",
"in",
"src_fmt_configs",
":",
"src_fmt_configs",
"[",
"'allowJaggedRows'",
"]",
"=",
"allow_jagged_rows",
"src_fmt_to_param_mapping",
"=",
"{",
"'CSV'",
":",
"'csvOptions'",
",",
"'GOOGLE_SHEETS'",
":",
"'googleSheetsOptions'",
"}",
"src_fmt_to_configs_mapping",
"=",
"{",
"'csvOptions'",
":",
"[",
"'allowJaggedRows'",
",",
"'allowQuotedNewlines'",
",",
"'fieldDelimiter'",
",",
"'skipLeadingRows'",
",",
"'quote'",
"]",
",",
"'googleSheetsOptions'",
":",
"[",
"'skipLeadingRows'",
"]",
"}",
"if",
"source_format",
"in",
"src_fmt_to_param_mapping",
".",
"keys",
"(",
")",
":",
"valid_configs",
"=",
"src_fmt_to_configs_mapping",
"[",
"src_fmt_to_param_mapping",
"[",
"source_format",
"]",
"]",
"src_fmt_configs",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"src_fmt_configs",
".",
"items",
"(",
")",
"if",
"k",
"in",
"valid_configs",
"}",
"table_resource",
"[",
"'externalDataConfiguration'",
"]",
"[",
"src_fmt_to_param_mapping",
"[",
"source_format",
"]",
"]",
"=",
"src_fmt_configs",
"if",
"labels",
":",
"table_resource",
"[",
"'labels'",
"]",
"=",
"labels",
"try",
":",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"insert",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"body",
"=",
"table_resource",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"self",
".",
"log",
".",
"info",
"(",
"'External table created successfully: %s'",
",",
"external_project_dataset_table",
")",
"except",
"HttpError",
"as",
"err",
":",
"raise",
"Exception",
"(",
"'BigQuery job failed. Error was: {}'",
".",
"format",
"(",
"err",
".",
"content",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
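A hedged sketch for create_external_table above, reached through the same cursor pattern as the previous example; the bucket, dataset and connection id are placeholders.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()
cursor.create_external_table(
    external_project_dataset_table='my-project.analytics.raw_events',
    schema_fields=[{'name': 'event_name', 'type': 'STRING', 'mode': 'NULLABLE'},
                   {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}],
    source_uris=['gs://my-bucket/events/*.csv'],
    source_format='CSV',
    skip_leading_rows=1,
    field_delimiter=',')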
test
|
BigQueryBaseCursor.patch_table
|
Patch information in an existing table.
It only updates fields that are provided in the request object.
Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch
:param dataset_id: The dataset containing the table to be patched.
:type dataset_id: str
:param table_id: The Name of the table to be patched.
:type table_id: str
:param project_id: The project containing the table to be patched.
:type project_id: str
:param description: [Optional] A user-friendly description of this table.
:type description: str
:param expiration_time: [Optional] The time when this table expires,
in milliseconds since the epoch.
:type expiration_time: int
:param external_data_configuration: [Optional] A dictionary containing
properties of a table stored outside of BigQuery.
:type external_data_configuration: dict
:param friendly_name: [Optional] A descriptive name for this table.
:type friendly_name: str
:param labels: [Optional] A dictionary containing labels associated with this table.
:type labels: dict
:param schema: [Optional] If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
The supported schema modifications and unsupported schema modification are listed here:
https://cloud.google.com/bigquery/docs/managing-table-schemas
**Example**: ::
schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema: list
:param time_partitioning: [Optional] A dictionary containing time-based partitioning
definition for the table.
:type time_partitioning: dict
:param view: [Optional] A dictionary containing definition for the view.
If set, it will patch a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
"useLegacySql": False
}
:type view: dict
:param require_partition_filter: [Optional] If true, queries over this table require a
partition filter. If false, queries over the table
:type require_partition_filter: bool
|
airflow/contrib/hooks/bigquery_hook.py
|
def patch_table(self,
dataset_id,
table_id,
project_id=None,
description=None,
expiration_time=None,
external_data_configuration=None,
friendly_name=None,
labels=None,
schema=None,
time_partitioning=None,
view=None,
require_partition_filter=None):
"""
Patch information in an existing table.
It only updates fields that are provided in the request object.
Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch
:param dataset_id: The dataset containing the table to be patched.
:type dataset_id: str
:param table_id: The Name of the table to be patched.
:type table_id: str
:param project_id: The project containing the table to be patched.
:type project_id: str
:param description: [Optional] A user-friendly description of this table.
:type description: str
:param expiration_time: [Optional] The time when this table expires,
in milliseconds since the epoch.
:type expiration_time: int
:param external_data_configuration: [Optional] A dictionary containing
properties of a table stored outside of BigQuery.
:type external_data_configuration: dict
:param friendly_name: [Optional] A descriptive name for this table.
:type friendly_name: str
:param labels: [Optional] A dictionary containing labels associated with this table.
:type labels: dict
:param schema: [Optional] If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
The supported schema modifications and unsupported schema modification are listed here:
https://cloud.google.com/bigquery/docs/managing-table-schemas
**Example**: ::
schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema: list
:param time_partitioning: [Optional] A dictionary containing time-based partitioning
definition for the table.
:type time_partitioning: dict
:param view: [Optional] A dictionary containing definition for the view.
If set, it will patch a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
"useLegacySql": False
}
:type view: dict
:param require_partition_filter: [Optional] If true, queries over this table require a
partition filter. If false, queries over the table
:type require_partition_filter: bool
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {}
if description is not None:
table_resource['description'] = description
if expiration_time is not None:
table_resource['expirationTime'] = expiration_time
if external_data_configuration:
table_resource['externalDataConfiguration'] = external_data_configuration
if friendly_name is not None:
table_resource['friendlyName'] = friendly_name
if labels:
table_resource['labels'] = labels
if schema:
table_resource['schema'] = {'fields': schema}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if view:
table_resource['view'] = view
if require_partition_filter is not None:
table_resource['requirePartitionFilter'] = require_partition_filter
self.log.info('Patching Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().patch(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute(num_retries=self.num_retries)
self.log.info('Table patched successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
|
def patch_table(self,
dataset_id,
table_id,
project_id=None,
description=None,
expiration_time=None,
external_data_configuration=None,
friendly_name=None,
labels=None,
schema=None,
time_partitioning=None,
view=None,
require_partition_filter=None):
"""
Patch information in an existing table.
It only updates fields that are provided in the request object.
Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch
:param dataset_id: The dataset containing the table to be patched.
:type dataset_id: str
:param table_id: The Name of the table to be patched.
:type table_id: str
:param project_id: The project containing the table to be patched.
:type project_id: str
:param description: [Optional] A user-friendly description of this table.
:type description: str
:param expiration_time: [Optional] The time when this table expires,
in milliseconds since the epoch.
:type expiration_time: int
:param external_data_configuration: [Optional] A dictionary containing
properties of a table stored outside of BigQuery.
:type external_data_configuration: dict
:param friendly_name: [Optional] A descriptive name for this table.
:type friendly_name: str
:param labels: [Optional] A dictionary containing labels associated with this table.
:type labels: dict
:param schema: [Optional] If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
The supported and unsupported schema modifications are listed here:
https://cloud.google.com/bigquery/docs/managing-table-schemas
**Example**: ::
schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema: list
:param time_partitioning: [Optional] A dictionary containing time-based partitioning
definition for the table.
:type time_partitioning: dict
:param view: [Optional] A dictionary containing definition for the view.
If set, it will patch a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
"useLegacySql": False
}
:type view: dict
:param require_partition_filter: [Optional] If true, queries over this table require a
partition filter; if false, queries can be run without a partition filter.
:type require_partition_filter: bool
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {}
if description is not None:
table_resource['description'] = description
if expiration_time is not None:
table_resource['expirationTime'] = expiration_time
if external_data_configuration:
table_resource['externalDataConfiguration'] = external_data_configuration
if friendly_name is not None:
table_resource['friendlyName'] = friendly_name
if labels:
table_resource['labels'] = labels
if schema:
table_resource['schema'] = {'fields': schema}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if view:
table_resource['view'] = view
if require_partition_filter is not None:
table_resource['requirePartitionFilter'] = require_partition_filter
self.log.info('Patching Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().patch(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute(num_retries=self.num_retries)
self.log.info('Table patched successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
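**Example** (a minimal usage sketch, assuming an existing hook cursor instance named ``cursor`` of the same class as the other methods in this file; the dataset, table, description and label values are hypothetical placeholders — only the fields passed in are patched): ::

    cursor.patch_table(
        dataset_id='my_dataset',                        # hypothetical dataset
        table_id='my_table',                            # hypothetical table
        description='Employee facts, refreshed nightly',  # replaces the table description
        labels={'team': 'analytics'},                   # sent as table_resource['labels']
        require_partition_filter=True,                  # included because it is not None
    )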
|
[
"Patch",
"information",
"in",
"an",
"existing",
"table",
".",
"It",
"only",
"updates",
"fileds",
"that",
"are",
"provided",
"in",
"the",
"request",
"object",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L526-L632
|
[
"def",
"patch_table",
"(",
"self",
",",
"dataset_id",
",",
"table_id",
",",
"project_id",
"=",
"None",
",",
"description",
"=",
"None",
",",
"expiration_time",
"=",
"None",
",",
"external_data_configuration",
"=",
"None",
",",
"friendly_name",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"schema",
"=",
"None",
",",
"time_partitioning",
"=",
"None",
",",
"view",
"=",
"None",
",",
"require_partition_filter",
"=",
"None",
")",
":",
"project_id",
"=",
"project_id",
"if",
"project_id",
"is",
"not",
"None",
"else",
"self",
".",
"project_id",
"table_resource",
"=",
"{",
"}",
"if",
"description",
"is",
"not",
"None",
":",
"table_resource",
"[",
"'description'",
"]",
"=",
"description",
"if",
"expiration_time",
"is",
"not",
"None",
":",
"table_resource",
"[",
"'expirationTime'",
"]",
"=",
"expiration_time",
"if",
"external_data_configuration",
":",
"table_resource",
"[",
"'externalDataConfiguration'",
"]",
"=",
"external_data_configuration",
"if",
"friendly_name",
"is",
"not",
"None",
":",
"table_resource",
"[",
"'friendlyName'",
"]",
"=",
"friendly_name",
"if",
"labels",
":",
"table_resource",
"[",
"'labels'",
"]",
"=",
"labels",
"if",
"schema",
":",
"table_resource",
"[",
"'schema'",
"]",
"=",
"{",
"'fields'",
":",
"schema",
"}",
"if",
"time_partitioning",
":",
"table_resource",
"[",
"'timePartitioning'",
"]",
"=",
"time_partitioning",
"if",
"view",
":",
"table_resource",
"[",
"'view'",
"]",
"=",
"view",
"if",
"require_partition_filter",
"is",
"not",
"None",
":",
"table_resource",
"[",
"'requirePartitionFilter'",
"]",
"=",
"require_partition_filter",
"self",
".",
"log",
".",
"info",
"(",
"'Patching Table %s:%s.%s'",
",",
"project_id",
",",
"dataset_id",
",",
"table_id",
")",
"try",
":",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"patch",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"tableId",
"=",
"table_id",
",",
"body",
"=",
"table_resource",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Table patched successfully: %s:%s.%s'",
",",
"project_id",
",",
"dataset_id",
",",
"table_id",
")",
"except",
"HttpError",
"as",
"err",
":",
"raise",
"AirflowException",
"(",
"'BigQuery job failed. Error was: {}'",
".",
"format",
"(",
"err",
".",
"content",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.run_query
|
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param sql: The BigQuery SQL to execute.
:type sql: str
:param destination_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to save the query results.
:type destination_dataset_table: str
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: str
:param allow_large_results: Whether to allow large results.
:type allow_large_results: bool
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: bool
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:type use_legacy_sql: bool
:param api_resource_configs: a dictionary containing parameters for the
'configuration' section of the Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
for example, {'query': {'useQueryCache': False}}. Use it to provide
parameters that are not exposed directly as BigQueryHook arguments.
:type api_resource_configs: dict
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: int
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: str
:param query_params: a list of dictionaries containing query parameter types and
values, passed to BigQuery
:type query_params: list
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: tuple
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: str
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
|
airflow/contrib/hooks/bigquery_hook.py
|
def run_query(self,
sql,
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
use_legacy_sql=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
query_params=None,
labels=None,
schema_update_options=(),
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
location=None):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param sql: The BigQuery SQL to execute.
:type sql: str
:param destination_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to save the query results.
:type destination_dataset_table: str
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: str
:param allow_large_results: Whether to allow large results.
:type allow_large_results: bool
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: bool
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:type use_legacy_sql: bool
:param api_resource_configs: a dictionary containing parameters for the
'configuration' section of the Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
for example, {'query': {'useQueryCache': False}}. Use it to provide
parameters that are not exposed directly as BigQueryHook arguments.
:type api_resource_configs: dict
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: int
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: str
:param query_params: a list of dictionaries containing query parameter types and
values, passed to BigQuery
:type query_params: list
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: tuple
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: str
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
"""
if time_partitioning is None:
time_partitioning = {}
if location:
self.location = location
if not api_resource_configs:
api_resource_configs = self.api_resource_configs
else:
_validate_value('api_resource_configs',
api_resource_configs, dict)
configuration = deepcopy(api_resource_configs)
if 'query' not in configuration:
configuration['query'] = {}
else:
_validate_value("api_resource_configs['query']",
configuration['query'], dict)
if sql is None and not configuration['query'].get('query', None):
raise TypeError('`BigQueryBaseCursor.run_query` '
'missing 1 required positional argument: `sql`')
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options
).issuperset(set(schema_update_options)):
raise ValueError("{0} contains invalid schema update options. "
"Please only use one or more of the following "
"options: {1}"
.format(schema_update_options,
allowed_schema_update_options))
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
if destination_dataset_table:
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
destination_dataset_table = {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
if cluster_fields:
cluster_fields = {'fields': cluster_fields}
query_param_list = [
(sql, 'query', None, six.string_types),
(priority, 'priority', 'INTERACTIVE', six.string_types),
(use_legacy_sql, 'useLegacySql', self.use_legacy_sql, bool),
(query_params, 'queryParameters', None, list),
(udf_config, 'userDefinedFunctionResources', None, list),
(maximum_billing_tier, 'maximumBillingTier', None, int),
(maximum_bytes_billed, 'maximumBytesBilled', None, float),
(time_partitioning, 'timePartitioning', {}, dict),
(schema_update_options, 'schemaUpdateOptions', None, tuple),
(destination_dataset_table, 'destinationTable', None, dict),
(cluster_fields, 'clustering', None, dict),
]
for param_tuple in query_param_list:
param, param_name, param_default, param_type = param_tuple
if param_name not in configuration['query'] and param in [None, {}, ()]:
if param_name == 'timePartitioning':
param_default = _cleanse_time_partitioning(
destination_dataset_table, time_partitioning)
param = param_default
if param not in [None, {}, ()]:
_api_resource_configs_duplication_check(
param_name, param, configuration['query'])
configuration['query'][param_name] = param
# check valid type of provided param,
# it last step because we can get param from 2 sources,
# and first of all need to find it
_validate_value(param_name, configuration['query'][param_name],
param_type)
if param_name == 'schemaUpdateOptions' and param:
self.log.info("Adding experimental 'schemaUpdateOptions': "
"%s", schema_update_options)
if param_name == 'destinationTable':
for key in ['projectId', 'datasetId', 'tableId']:
if key not in configuration['query']['destinationTable']:
raise ValueError(
"Not correct 'destinationTable' in "
"api_resource_configs. 'destinationTable' "
"must be a dict with {'projectId':'', "
"'datasetId':'', 'tableId':''}")
configuration['query'].update({
'allowLargeResults': allow_large_results,
'flattenResults': flatten_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
})
if 'useLegacySql' in configuration['query'] and configuration['query']['useLegacySql'] and\
'queryParameters' in configuration['query']:
raise ValueError("Query parameters are not allowed "
"when using legacy SQL")
if labels:
_api_resource_configs_duplication_check(
'labels', labels, configuration)
configuration['labels'] = labels
return self.run_with_configuration(configuration)
|
def run_query(self,
sql,
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
use_legacy_sql=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
query_params=None,
labels=None,
schema_update_options=(),
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
location=None):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param sql: The BigQuery SQL to execute.
:type sql: str
:param destination_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to save the query results.
:type destination_dataset_table: str
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: str
:param allow_large_results: Whether to allow large results.
:type allow_large_results: bool
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: bool
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:type use_legacy_sql: bool
:param api_resource_configs: a dictionary containing parameters for the
'configuration' section of the Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
for example, {'query': {'useQueryCache': False}}. Use it to provide
parameters that are not exposed directly as BigQueryHook arguments.
:type api_resource_configs: dict
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: int
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: str
:param query_params: a list of dictionaries containing query parameter types and
values, passed to BigQuery
:type query_params: list
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: tuple
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: str
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
"""
if time_partitioning is None:
time_partitioning = {}
if location:
self.location = location
if not api_resource_configs:
api_resource_configs = self.api_resource_configs
else:
_validate_value('api_resource_configs',
api_resource_configs, dict)
configuration = deepcopy(api_resource_configs)
if 'query' not in configuration:
configuration['query'] = {}
else:
_validate_value("api_resource_configs['query']",
configuration['query'], dict)
if sql is None and not configuration['query'].get('query', None):
raise TypeError('`BigQueryBaseCursor.run_query` '
'missing 1 required positional argument: `sql`')
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options
).issuperset(set(schema_update_options)):
raise ValueError("{0} contains invalid schema update options. "
"Please only use one or more of the following "
"options: {1}"
.format(schema_update_options,
allowed_schema_update_options))
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
if destination_dataset_table:
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
destination_dataset_table = {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
if cluster_fields:
cluster_fields = {'fields': cluster_fields}
query_param_list = [
(sql, 'query', None, six.string_types),
(priority, 'priority', 'INTERACTIVE', six.string_types),
(use_legacy_sql, 'useLegacySql', self.use_legacy_sql, bool),
(query_params, 'queryParameters', None, list),
(udf_config, 'userDefinedFunctionResources', None, list),
(maximum_billing_tier, 'maximumBillingTier', None, int),
(maximum_bytes_billed, 'maximumBytesBilled', None, float),
(time_partitioning, 'timePartitioning', {}, dict),
(schema_update_options, 'schemaUpdateOptions', None, tuple),
(destination_dataset_table, 'destinationTable', None, dict),
(cluster_fields, 'clustering', None, dict),
]
for param_tuple in query_param_list:
param, param_name, param_default, param_type = param_tuple
if param_name not in configuration['query'] and param in [None, {}, ()]:
if param_name == 'timePartitioning':
param_default = _cleanse_time_partitioning(
destination_dataset_table, time_partitioning)
param = param_default
if param not in [None, {}, ()]:
_api_resource_configs_duplication_check(
param_name, param, configuration['query'])
configuration['query'][param_name] = param
# check valid type of provided param,
# it last step because we can get param from 2 sources,
# and first of all need to find it
_validate_value(param_name, configuration['query'][param_name],
param_type)
if param_name == 'schemaUpdateOptions' and param:
self.log.info("Adding experimental 'schemaUpdateOptions': "
"%s", schema_update_options)
if param_name == 'destinationTable':
for key in ['projectId', 'datasetId', 'tableId']:
if key not in configuration['query']['destinationTable']:
raise ValueError(
"Not correct 'destinationTable' in "
"api_resource_configs. 'destinationTable' "
"must be a dict with {'projectId':'', "
"'datasetId':'', 'tableId':''}")
configuration['query'].update({
'allowLargeResults': allow_large_results,
'flattenResults': flatten_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
})
if 'useLegacySql' in configuration['query'] and configuration['query']['useLegacySql'] and\
'queryParameters' in configuration['query']:
raise ValueError("Query parameters are not allowed "
"when using legacy SQL")
if labels:
_api_resource_configs_duplication_check(
'labels', labels, configuration)
configuration['labels'] = labels
return self.run_with_configuration(configuration)
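**Example** (a minimal usage sketch, assuming an existing ``BigQueryBaseCursor`` instance named ``cursor``; the project, dataset and table names are hypothetical placeholders — the assembled job configuration is handed to ``run_with_configuration`` and its result is returned): ::

    result = cursor.run_query(
        sql='SELECT emp_name, salary FROM `my-project.my_dataset.employees`',  # hypothetical table
        destination_dataset_table='my_dataset.salary_snapshot',  # hypothetical destination
        write_disposition='WRITE_TRUNCATE',  # overwrite the destination table if it exists
        use_legacy_sql=False,                # standard SQL, so query parameters would be allowed
        labels={'owner': 'airflow'},         # attached to the top-level job configuration
    )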
|
[
"Executes",
"a",
"BigQuery",
"SQL",
"query",
".",
"Optionally",
"persists",
"results",
"in",
"a",
"BigQuery",
"table",
".",
"See",
"here",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L634-L853
|
[
"def",
"run_query",
"(",
"self",
",",
"sql",
",",
"destination_dataset_table",
"=",
"None",
",",
"write_disposition",
"=",
"'WRITE_EMPTY'",
",",
"allow_large_results",
"=",
"False",
",",
"flatten_results",
"=",
"None",
",",
"udf_config",
"=",
"None",
",",
"use_legacy_sql",
"=",
"None",
",",
"maximum_billing_tier",
"=",
"None",
",",
"maximum_bytes_billed",
"=",
"None",
",",
"create_disposition",
"=",
"'CREATE_IF_NEEDED'",
",",
"query_params",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"schema_update_options",
"=",
"(",
")",
",",
"priority",
"=",
"'INTERACTIVE'",
",",
"time_partitioning",
"=",
"None",
",",
"api_resource_configs",
"=",
"None",
",",
"cluster_fields",
"=",
"None",
",",
"location",
"=",
"None",
")",
":",
"if",
"time_partitioning",
"is",
"None",
":",
"time_partitioning",
"=",
"{",
"}",
"if",
"location",
":",
"self",
".",
"location",
"=",
"location",
"if",
"not",
"api_resource_configs",
":",
"api_resource_configs",
"=",
"self",
".",
"api_resource_configs",
"else",
":",
"_validate_value",
"(",
"'api_resource_configs'",
",",
"api_resource_configs",
",",
"dict",
")",
"configuration",
"=",
"deepcopy",
"(",
"api_resource_configs",
")",
"if",
"'query'",
"not",
"in",
"configuration",
":",
"configuration",
"[",
"'query'",
"]",
"=",
"{",
"}",
"else",
":",
"_validate_value",
"(",
"\"api_resource_configs['query']\"",
",",
"configuration",
"[",
"'query'",
"]",
",",
"dict",
")",
"if",
"sql",
"is",
"None",
"and",
"not",
"configuration",
"[",
"'query'",
"]",
".",
"get",
"(",
"'query'",
",",
"None",
")",
":",
"raise",
"TypeError",
"(",
"'`BigQueryBaseCursor.run_query` '",
"'missing 1 required positional argument: `sql`'",
")",
"# BigQuery also allows you to define how you want a table's schema to change",
"# as a side effect of a query job",
"# for more details:",
"# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions",
"allowed_schema_update_options",
"=",
"[",
"'ALLOW_FIELD_ADDITION'",
",",
"\"ALLOW_FIELD_RELAXATION\"",
"]",
"if",
"not",
"set",
"(",
"allowed_schema_update_options",
")",
".",
"issuperset",
"(",
"set",
"(",
"schema_update_options",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"{0} contains invalid schema update options. \"",
"\"Please only use one or more of the following \"",
"\"options: {1}\"",
".",
"format",
"(",
"schema_update_options",
",",
"allowed_schema_update_options",
")",
")",
"if",
"schema_update_options",
":",
"if",
"write_disposition",
"not",
"in",
"[",
"\"WRITE_APPEND\"",
",",
"\"WRITE_TRUNCATE\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"schema_update_options is only \"",
"\"allowed if write_disposition is \"",
"\"'WRITE_APPEND' or 'WRITE_TRUNCATE'.\"",
")",
"if",
"destination_dataset_table",
":",
"destination_project",
",",
"destination_dataset",
",",
"destination_table",
"=",
"_split_tablename",
"(",
"table_input",
"=",
"destination_dataset_table",
",",
"default_project_id",
"=",
"self",
".",
"project_id",
")",
"destination_dataset_table",
"=",
"{",
"'projectId'",
":",
"destination_project",
",",
"'datasetId'",
":",
"destination_dataset",
",",
"'tableId'",
":",
"destination_table",
",",
"}",
"if",
"cluster_fields",
":",
"cluster_fields",
"=",
"{",
"'fields'",
":",
"cluster_fields",
"}",
"query_param_list",
"=",
"[",
"(",
"sql",
",",
"'query'",
",",
"None",
",",
"six",
".",
"string_types",
")",
",",
"(",
"priority",
",",
"'priority'",
",",
"'INTERACTIVE'",
",",
"six",
".",
"string_types",
")",
",",
"(",
"use_legacy_sql",
",",
"'useLegacySql'",
",",
"self",
".",
"use_legacy_sql",
",",
"bool",
")",
",",
"(",
"query_params",
",",
"'queryParameters'",
",",
"None",
",",
"list",
")",
",",
"(",
"udf_config",
",",
"'userDefinedFunctionResources'",
",",
"None",
",",
"list",
")",
",",
"(",
"maximum_billing_tier",
",",
"'maximumBillingTier'",
",",
"None",
",",
"int",
")",
",",
"(",
"maximum_bytes_billed",
",",
"'maximumBytesBilled'",
",",
"None",
",",
"float",
")",
",",
"(",
"time_partitioning",
",",
"'timePartitioning'",
",",
"{",
"}",
",",
"dict",
")",
",",
"(",
"schema_update_options",
",",
"'schemaUpdateOptions'",
",",
"None",
",",
"tuple",
")",
",",
"(",
"destination_dataset_table",
",",
"'destinationTable'",
",",
"None",
",",
"dict",
")",
",",
"(",
"cluster_fields",
",",
"'clustering'",
",",
"None",
",",
"dict",
")",
",",
"]",
"for",
"param_tuple",
"in",
"query_param_list",
":",
"param",
",",
"param_name",
",",
"param_default",
",",
"param_type",
"=",
"param_tuple",
"if",
"param_name",
"not",
"in",
"configuration",
"[",
"'query'",
"]",
"and",
"param",
"in",
"[",
"None",
",",
"{",
"}",
",",
"(",
")",
"]",
":",
"if",
"param_name",
"==",
"'timePartitioning'",
":",
"param_default",
"=",
"_cleanse_time_partitioning",
"(",
"destination_dataset_table",
",",
"time_partitioning",
")",
"param",
"=",
"param_default",
"if",
"param",
"not",
"in",
"[",
"None",
",",
"{",
"}",
",",
"(",
")",
"]",
":",
"_api_resource_configs_duplication_check",
"(",
"param_name",
",",
"param",
",",
"configuration",
"[",
"'query'",
"]",
")",
"configuration",
"[",
"'query'",
"]",
"[",
"param_name",
"]",
"=",
"param",
"# check valid type of provided param,",
"# it last step because we can get param from 2 sources,",
"# and first of all need to find it",
"_validate_value",
"(",
"param_name",
",",
"configuration",
"[",
"'query'",
"]",
"[",
"param_name",
"]",
",",
"param_type",
")",
"if",
"param_name",
"==",
"'schemaUpdateOptions'",
"and",
"param",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Adding experimental 'schemaUpdateOptions': \"",
"\"%s\"",
",",
"schema_update_options",
")",
"if",
"param_name",
"==",
"'destinationTable'",
":",
"for",
"key",
"in",
"[",
"'projectId'",
",",
"'datasetId'",
",",
"'tableId'",
"]",
":",
"if",
"key",
"not",
"in",
"configuration",
"[",
"'query'",
"]",
"[",
"'destinationTable'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Not correct 'destinationTable' in \"",
"\"api_resource_configs. 'destinationTable' \"",
"\"must be a dict with {'projectId':'', \"",
"\"'datasetId':'', 'tableId':''}\"",
")",
"configuration",
"[",
"'query'",
"]",
".",
"update",
"(",
"{",
"'allowLargeResults'",
":",
"allow_large_results",
",",
"'flattenResults'",
":",
"flatten_results",
",",
"'writeDisposition'",
":",
"write_disposition",
",",
"'createDisposition'",
":",
"create_disposition",
",",
"}",
")",
"if",
"'useLegacySql'",
"in",
"configuration",
"[",
"'query'",
"]",
"and",
"configuration",
"[",
"'query'",
"]",
"[",
"'useLegacySql'",
"]",
"and",
"'queryParameters'",
"in",
"configuration",
"[",
"'query'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Query parameters are not allowed \"",
"\"when using legacy SQL\"",
")",
"if",
"labels",
":",
"_api_resource_configs_duplication_check",
"(",
"'labels'",
",",
"labels",
",",
"configuration",
")",
"configuration",
"[",
"'labels'",
"]",
"=",
"labels",
"return",
"self",
".",
"run_with_configuration",
"(",
"configuration",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.run_extract
|
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
|
airflow/contrib/hooks/bigquery_hook.py
|
def run_extract( # noqa
self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True,
labels=None):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
|
def run_extract( # noqa
self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True,
labels=None):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
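**Example** (a minimal usage sketch, assuming an existing ``BigQueryBaseCursor`` instance named ``cursor``; the source table and bucket are hypothetical placeholders — because ``export_format`` is CSV, the delimiter and header options are included in the job configuration): ::

    cursor.run_extract(
        source_project_dataset_table='my_dataset.employees',  # hypothetical source table
        destination_cloud_storage_uris=['gs://my-bucket/exports/employees-*.csv'],  # hypothetical bucket
        export_format='CSV',
        field_delimiter=',',
        print_header=True,
    )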
|
[
"Executes",
"a",
"BigQuery",
"extract",
"command",
"to",
"copy",
"data",
"from",
"BigQuery",
"to",
"Google",
"Cloud",
"Storage",
".",
"See",
"here",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L855-L921
|
[
"def",
"run_extract",
"(",
"# noqa",
"self",
",",
"source_project_dataset_table",
",",
"destination_cloud_storage_uris",
",",
"compression",
"=",
"'NONE'",
",",
"export_format",
"=",
"'CSV'",
",",
"field_delimiter",
"=",
"','",
",",
"print_header",
"=",
"True",
",",
"labels",
"=",
"None",
")",
":",
"source_project",
",",
"source_dataset",
",",
"source_table",
"=",
"_split_tablename",
"(",
"table_input",
"=",
"source_project_dataset_table",
",",
"default_project_id",
"=",
"self",
".",
"project_id",
",",
"var_name",
"=",
"'source_project_dataset_table'",
")",
"configuration",
"=",
"{",
"'extract'",
":",
"{",
"'sourceTable'",
":",
"{",
"'projectId'",
":",
"source_project",
",",
"'datasetId'",
":",
"source_dataset",
",",
"'tableId'",
":",
"source_table",
",",
"}",
",",
"'compression'",
":",
"compression",
",",
"'destinationUris'",
":",
"destination_cloud_storage_uris",
",",
"'destinationFormat'",
":",
"export_format",
",",
"}",
"}",
"if",
"labels",
":",
"configuration",
"[",
"'labels'",
"]",
"=",
"labels",
"if",
"export_format",
"==",
"'CSV'",
":",
"# Only set fieldDelimiter and printHeader fields if using CSV.",
"# Google does not like it if you set these fields for other export",
"# formats.",
"configuration",
"[",
"'extract'",
"]",
"[",
"'fieldDelimiter'",
"]",
"=",
"field_delimiter",
"configuration",
"[",
"'extract'",
"]",
"[",
"'printHeader'",
"]",
"=",
"print_header",
"return",
"self",
".",
"run_with_configuration",
"(",
"configuration",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.run_copy
|
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
|
airflow/contrib/hooks/bigquery_hook.py
|
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration)
|
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration)
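**Example** (a minimal usage sketch, assuming an existing ``BigQueryBaseCursor`` instance named ``cursor``; the dataset and table names are hypothetical placeholders — a plain string source is also accepted and is wrapped into a single-element list internally): ::

    cursor.run_copy(
        source_project_dataset_tables=['my_dataset.sales_2023', 'my_dataset.sales_2024'],  # hypothetical sources
        destination_project_dataset_table='my_dataset.sales_all',  # hypothetical destination
        write_disposition='WRITE_APPEND',  # append to the destination if it already exists
    )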
|
[
"Executes",
"a",
"BigQuery",
"copy",
"command",
"to",
"copy",
"data",
"from",
"one",
"BigQuery",
"table",
"to",
"another",
".",
"See",
"here",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L923-L994
|
[
"def",
"run_copy",
"(",
"self",
",",
"source_project_dataset_tables",
",",
"destination_project_dataset_table",
",",
"write_disposition",
"=",
"'WRITE_EMPTY'",
",",
"create_disposition",
"=",
"'CREATE_IF_NEEDED'",
",",
"labels",
"=",
"None",
")",
":",
"source_project_dataset_tables",
"=",
"(",
"[",
"source_project_dataset_tables",
"]",
"if",
"not",
"isinstance",
"(",
"source_project_dataset_tables",
",",
"list",
")",
"else",
"source_project_dataset_tables",
")",
"source_project_dataset_tables_fixup",
"=",
"[",
"]",
"for",
"source_project_dataset_table",
"in",
"source_project_dataset_tables",
":",
"source_project",
",",
"source_dataset",
",",
"source_table",
"=",
"_split_tablename",
"(",
"table_input",
"=",
"source_project_dataset_table",
",",
"default_project_id",
"=",
"self",
".",
"project_id",
",",
"var_name",
"=",
"'source_project_dataset_table'",
")",
"source_project_dataset_tables_fixup",
".",
"append",
"(",
"{",
"'projectId'",
":",
"source_project",
",",
"'datasetId'",
":",
"source_dataset",
",",
"'tableId'",
":",
"source_table",
"}",
")",
"destination_project",
",",
"destination_dataset",
",",
"destination_table",
"=",
"_split_tablename",
"(",
"table_input",
"=",
"destination_project_dataset_table",
",",
"default_project_id",
"=",
"self",
".",
"project_id",
")",
"configuration",
"=",
"{",
"'copy'",
":",
"{",
"'createDisposition'",
":",
"create_disposition",
",",
"'writeDisposition'",
":",
"write_disposition",
",",
"'sourceTables'",
":",
"source_project_dataset_tables_fixup",
",",
"'destinationTable'",
":",
"{",
"'projectId'",
":",
"destination_project",
",",
"'datasetId'",
":",
"destination_dataset",
",",
"'tableId'",
":",
"destination_table",
"}",
"}",
"}",
"if",
"labels",
":",
"configuration",
"[",
"'labels'",
"]",
"=",
"labels",
"return",
"self",
".",
"run_with_configuration",
"(",
"configuration",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.run_load
|
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table to load data into. If ``<project>`` is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Required if autodetect=False; optional if autodetect=True.
:type schema_fields: list
:param autodetect: Attempt to autodetect the schema for CSV and JSON
source files.
:type autodetect: bool
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format of the data to load.
:type source_format: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: tuple
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list[str]
|
airflow/contrib/hooks/bigquery_hook.py
|
def run_load(self,
destination_project_dataset_table,
source_uris,
schema_fields=None,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
schema_update_options=(),
src_fmt_configs=None,
time_partitioning=None,
cluster_fields=None,
autodetect=False):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table to load data into. If ``<project>`` is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Required if autodetect=False; optional if autodetect=True.
:type schema_fields: list
:param autodetect: Attempt to autodetect the schema for CSV and JSON
source files.
:type autodetect: bool
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format of the data to load.
:type source_format: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: tuple
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list[str]
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
if schema_fields is None and not autodetect:
raise ValueError(
'You must either pass a schema or autodetect=True.')
if src_fmt_configs is None:
src_fmt_configs = {}
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options."
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'autodetect': autodetect,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values
}
}
time_partitioning = _cleanse_time_partitioning(
destination_project_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['load'].update({
'timePartitioning': time_partitioning
})
if cluster_fields:
configuration['load'].update({'clustering': {'fields': cluster_fields}})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental 'schemaUpdateOptions': %s",
schema_update_options
)
configuration['load'][
'schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'ignoreUnknownValues' not in src_fmt_configs:
src_fmt_configs['ignoreUnknownValues'] = ignore_unknown_values
if quote_character is not None:
src_fmt_configs['quote'] = quote_character
if allow_quoted_newlines:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
'nullMarker', 'quote'
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': ['useAvroLogicalTypes'],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
return self.run_with_configuration(configuration)
|
def run_load(self,
destination_project_dataset_table,
source_uris,
schema_fields=None,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
schema_update_options=(),
src_fmt_configs=None,
time_partitioning=None,
cluster_fields=None,
autodetect=False):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table to load data into. If ``<project>`` is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Required if autodetect=False; optional if autodetect=True.
:type schema_fields: list
:param autodetect: Attempt to autodetect the schema for CSV and JSON
source files.
:type autodetect: bool
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single
            wildcard per object name can be used.
:type source_uris: list
        :param source_format: File format of the data to load.
:type source_format: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
            source_format is CSV.
:type allow_jagged_rows: bool
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: tuple
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list[str]
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
if schema_fields is None and not autodetect:
raise ValueError(
'You must either pass a schema or autodetect=True.')
if src_fmt_configs is None:
src_fmt_configs = {}
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
            raise ValueError(
                "{0} contains invalid schema update options. "
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'autodetect': autodetect,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values
}
}
time_partitioning = _cleanse_time_partitioning(
destination_project_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['load'].update({
'timePartitioning': time_partitioning
})
if cluster_fields:
configuration['load'].update({'clustering': {'fields': cluster_fields}})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental 'schemaUpdateOptions': %s",
schema_update_options
)
configuration['load'][
'schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'ignoreUnknownValues' not in src_fmt_configs:
src_fmt_configs['ignoreUnknownValues'] = ignore_unknown_values
if quote_character is not None:
src_fmt_configs['quote'] = quote_character
if allow_quoted_newlines:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
'nullMarker', 'quote'
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': ['useAvroLogicalTypes'],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
return self.run_with_configuration(configuration)
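A minimal usage sketch for run_load, added purely for illustration (it is not part of the source record). It assumes an Airflow environment with a configured GCP connection (here the default bigquery_default) and that BigQueryHook from the same module exposes this cursor via get_conn().cursor(); the project, dataset, table and GCS URI are placeholders.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

# Obtain a BigQueryBaseCursor-compatible cursor from the hook.
cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()

# Load CSV files from GCS into a table, letting BigQuery infer the schema.
cursor.run_load(
    destination_project_dataset_table='my_project.my_dataset.my_table',
    source_uris=['gs://my-bucket/exports/*.csv'],
    source_format='CSV',
    autodetect=True,               # or pass schema_fields=[...] instead
    skip_leading_rows=1,
    write_disposition='WRITE_TRUNCATE',
)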
|
[
"Executes",
"a",
"BigQuery",
"load",
"command",
"to",
"load",
"data",
"from",
"Google",
"Cloud",
"Storage",
"to",
"BigQuery",
".",
"See",
"here",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L996-L1209
|
[
"def",
"run_load",
"(",
"self",
",",
"destination_project_dataset_table",
",",
"source_uris",
",",
"schema_fields",
"=",
"None",
",",
"source_format",
"=",
"'CSV'",
",",
"create_disposition",
"=",
"'CREATE_IF_NEEDED'",
",",
"skip_leading_rows",
"=",
"0",
",",
"write_disposition",
"=",
"'WRITE_EMPTY'",
",",
"field_delimiter",
"=",
"','",
",",
"max_bad_records",
"=",
"0",
",",
"quote_character",
"=",
"None",
",",
"ignore_unknown_values",
"=",
"False",
",",
"allow_quoted_newlines",
"=",
"False",
",",
"allow_jagged_rows",
"=",
"False",
",",
"schema_update_options",
"=",
"(",
")",
",",
"src_fmt_configs",
"=",
"None",
",",
"time_partitioning",
"=",
"None",
",",
"cluster_fields",
"=",
"None",
",",
"autodetect",
"=",
"False",
")",
":",
"# bigquery only allows certain source formats",
"# we check to make sure the passed source format is valid",
"# if it's not, we raise a ValueError",
"# Refer to this link for more details:",
"# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat",
"if",
"schema_fields",
"is",
"None",
"and",
"not",
"autodetect",
":",
"raise",
"ValueError",
"(",
"'You must either pass a schema or autodetect=True.'",
")",
"if",
"src_fmt_configs",
"is",
"None",
":",
"src_fmt_configs",
"=",
"{",
"}",
"source_format",
"=",
"source_format",
".",
"upper",
"(",
")",
"allowed_formats",
"=",
"[",
"\"CSV\"",
",",
"\"NEWLINE_DELIMITED_JSON\"",
",",
"\"AVRO\"",
",",
"\"GOOGLE_SHEETS\"",
",",
"\"DATASTORE_BACKUP\"",
",",
"\"PARQUET\"",
"]",
"if",
"source_format",
"not",
"in",
"allowed_formats",
":",
"raise",
"ValueError",
"(",
"\"{0} is not a valid source format. \"",
"\"Please use one of the following types: {1}\"",
".",
"format",
"(",
"source_format",
",",
"allowed_formats",
")",
")",
"# bigquery also allows you to define how you want a table's schema to change",
"# as a side effect of a load",
"# for more details:",
"# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions",
"allowed_schema_update_options",
"=",
"[",
"'ALLOW_FIELD_ADDITION'",
",",
"\"ALLOW_FIELD_RELAXATION\"",
"]",
"if",
"not",
"set",
"(",
"allowed_schema_update_options",
")",
".",
"issuperset",
"(",
"set",
"(",
"schema_update_options",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"{0} contains invalid schema update options.\"",
"\"Please only use one or more of the following options: {1}\"",
".",
"format",
"(",
"schema_update_options",
",",
"allowed_schema_update_options",
")",
")",
"destination_project",
",",
"destination_dataset",
",",
"destination_table",
"=",
"_split_tablename",
"(",
"table_input",
"=",
"destination_project_dataset_table",
",",
"default_project_id",
"=",
"self",
".",
"project_id",
",",
"var_name",
"=",
"'destination_project_dataset_table'",
")",
"configuration",
"=",
"{",
"'load'",
":",
"{",
"'autodetect'",
":",
"autodetect",
",",
"'createDisposition'",
":",
"create_disposition",
",",
"'destinationTable'",
":",
"{",
"'projectId'",
":",
"destination_project",
",",
"'datasetId'",
":",
"destination_dataset",
",",
"'tableId'",
":",
"destination_table",
",",
"}",
",",
"'sourceFormat'",
":",
"source_format",
",",
"'sourceUris'",
":",
"source_uris",
",",
"'writeDisposition'",
":",
"write_disposition",
",",
"'ignoreUnknownValues'",
":",
"ignore_unknown_values",
"}",
"}",
"time_partitioning",
"=",
"_cleanse_time_partitioning",
"(",
"destination_project_dataset_table",
",",
"time_partitioning",
")",
"if",
"time_partitioning",
":",
"configuration",
"[",
"'load'",
"]",
".",
"update",
"(",
"{",
"'timePartitioning'",
":",
"time_partitioning",
"}",
")",
"if",
"cluster_fields",
":",
"configuration",
"[",
"'load'",
"]",
".",
"update",
"(",
"{",
"'clustering'",
":",
"{",
"'fields'",
":",
"cluster_fields",
"}",
"}",
")",
"if",
"schema_fields",
":",
"configuration",
"[",
"'load'",
"]",
"[",
"'schema'",
"]",
"=",
"{",
"'fields'",
":",
"schema_fields",
"}",
"if",
"schema_update_options",
":",
"if",
"write_disposition",
"not",
"in",
"[",
"\"WRITE_APPEND\"",
",",
"\"WRITE_TRUNCATE\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"schema_update_options is only \"",
"\"allowed if write_disposition is \"",
"\"'WRITE_APPEND' or 'WRITE_TRUNCATE'.\"",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Adding experimental 'schemaUpdateOptions': %s\"",
",",
"schema_update_options",
")",
"configuration",
"[",
"'load'",
"]",
"[",
"'schemaUpdateOptions'",
"]",
"=",
"schema_update_options",
"if",
"max_bad_records",
":",
"configuration",
"[",
"'load'",
"]",
"[",
"'maxBadRecords'",
"]",
"=",
"max_bad_records",
"# if following fields are not specified in src_fmt_configs,",
"# honor the top-level params for backward-compatibility",
"if",
"'skipLeadingRows'",
"not",
"in",
"src_fmt_configs",
":",
"src_fmt_configs",
"[",
"'skipLeadingRows'",
"]",
"=",
"skip_leading_rows",
"if",
"'fieldDelimiter'",
"not",
"in",
"src_fmt_configs",
":",
"src_fmt_configs",
"[",
"'fieldDelimiter'",
"]",
"=",
"field_delimiter",
"if",
"'ignoreUnknownValues'",
"not",
"in",
"src_fmt_configs",
":",
"src_fmt_configs",
"[",
"'ignoreUnknownValues'",
"]",
"=",
"ignore_unknown_values",
"if",
"quote_character",
"is",
"not",
"None",
":",
"src_fmt_configs",
"[",
"'quote'",
"]",
"=",
"quote_character",
"if",
"allow_quoted_newlines",
":",
"src_fmt_configs",
"[",
"'allowQuotedNewlines'",
"]",
"=",
"allow_quoted_newlines",
"src_fmt_to_configs_mapping",
"=",
"{",
"'CSV'",
":",
"[",
"'allowJaggedRows'",
",",
"'allowQuotedNewlines'",
",",
"'autodetect'",
",",
"'fieldDelimiter'",
",",
"'skipLeadingRows'",
",",
"'ignoreUnknownValues'",
",",
"'nullMarker'",
",",
"'quote'",
"]",
",",
"'DATASTORE_BACKUP'",
":",
"[",
"'projectionFields'",
"]",
",",
"'NEWLINE_DELIMITED_JSON'",
":",
"[",
"'autodetect'",
",",
"'ignoreUnknownValues'",
"]",
",",
"'PARQUET'",
":",
"[",
"'autodetect'",
",",
"'ignoreUnknownValues'",
"]",
",",
"'AVRO'",
":",
"[",
"'useAvroLogicalTypes'",
"]",
",",
"}",
"valid_configs",
"=",
"src_fmt_to_configs_mapping",
"[",
"source_format",
"]",
"src_fmt_configs",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"src_fmt_configs",
".",
"items",
"(",
")",
"if",
"k",
"in",
"valid_configs",
"}",
"configuration",
"[",
"'load'",
"]",
".",
"update",
"(",
"src_fmt_configs",
")",
"if",
"allow_jagged_rows",
":",
"configuration",
"[",
"'load'",
"]",
"[",
"'allowJaggedRows'",
"]",
"=",
"allow_jagged_rows",
"return",
"self",
".",
"run_with_configuration",
"(",
"configuration",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.run_with_configuration
|
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
|
airflow/contrib/hooks/bigquery_hook.py
|
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {'configuration': configuration}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute(num_retries=self.num_retries)
self.running_job_id = query_reply['jobReference']['jobId']
if 'location' in query_reply['jobReference']:
location = query_reply['jobReference']['location']
else:
location = self.location
# Wait for query to finish.
keep_polling_job = True
while keep_polling_job:
try:
if location:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id,
location=location).execute(num_retries=self.num_retries)
else:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id).execute(num_retries=self.num_retries)
if job['status']['state'] == 'DONE':
keep_polling_job = False
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.
format(job['status']['errorResult'], job))
else:
self.log.info('Waiting for job to complete : %s, %s',
self.project_id, self.running_job_id)
time.sleep(5)
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error, waiting for job to complete: %s',
err.resp.status, self.running_job_id)
time.sleep(5)
else:
raise Exception(
'BigQuery job status check failed. Final error was: {}'.
format(err.resp.status))
return self.running_job_id
|
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {'configuration': configuration}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute(num_retries=self.num_retries)
self.running_job_id = query_reply['jobReference']['jobId']
if 'location' in query_reply['jobReference']:
location = query_reply['jobReference']['location']
else:
location = self.location
# Wait for query to finish.
keep_polling_job = True
while keep_polling_job:
try:
if location:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id,
location=location).execute(num_retries=self.num_retries)
else:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id).execute(num_retries=self.num_retries)
if job['status']['state'] == 'DONE':
keep_polling_job = False
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.
format(job['status']['errorResult'], job))
else:
self.log.info('Waiting for job to complete : %s, %s',
self.project_id, self.running_job_id)
time.sleep(5)
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error, waiting for job to complete: %s',
err.resp.status, self.running_job_id)
time.sleep(5)
else:
raise Exception(
'BigQuery job status check failed. Final error was: {}'.
format(err.resp.status))
return self.running_job_id
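An illustrative sketch of calling run_with_configuration directly with a hand-built job configuration (not part of the source record); the connection id and SQL are placeholders.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()

# The dict maps directly onto the 'configuration' field of a BigQuery job resource.
job_id = cursor.run_with_configuration({
    'query': {
        'query': 'SELECT 1 AS ok',
        'useLegacySql': False,
    }
})
print('Finished job:', job_id)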
|
[
"Executes",
"a",
"BigQuery",
"SQL",
"query",
".",
"See",
"here",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1211-L1273
|
[
"def",
"run_with_configuration",
"(",
"self",
",",
"configuration",
")",
":",
"jobs",
"=",
"self",
".",
"service",
".",
"jobs",
"(",
")",
"job_data",
"=",
"{",
"'configuration'",
":",
"configuration",
"}",
"# Send query and wait for reply.",
"query_reply",
"=",
"jobs",
".",
"insert",
"(",
"projectId",
"=",
"self",
".",
"project_id",
",",
"body",
"=",
"job_data",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"self",
".",
"running_job_id",
"=",
"query_reply",
"[",
"'jobReference'",
"]",
"[",
"'jobId'",
"]",
"if",
"'location'",
"in",
"query_reply",
"[",
"'jobReference'",
"]",
":",
"location",
"=",
"query_reply",
"[",
"'jobReference'",
"]",
"[",
"'location'",
"]",
"else",
":",
"location",
"=",
"self",
".",
"location",
"# Wait for query to finish.",
"keep_polling_job",
"=",
"True",
"while",
"keep_polling_job",
":",
"try",
":",
"if",
"location",
":",
"job",
"=",
"jobs",
".",
"get",
"(",
"projectId",
"=",
"self",
".",
"project_id",
",",
"jobId",
"=",
"self",
".",
"running_job_id",
",",
"location",
"=",
"location",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"else",
":",
"job",
"=",
"jobs",
".",
"get",
"(",
"projectId",
"=",
"self",
".",
"project_id",
",",
"jobId",
"=",
"self",
".",
"running_job_id",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"if",
"job",
"[",
"'status'",
"]",
"[",
"'state'",
"]",
"==",
"'DONE'",
":",
"keep_polling_job",
"=",
"False",
"# Check if job had errors.",
"if",
"'errorResult'",
"in",
"job",
"[",
"'status'",
"]",
":",
"raise",
"Exception",
"(",
"'BigQuery job failed. Final error was: {}. The job was: {}'",
".",
"format",
"(",
"job",
"[",
"'status'",
"]",
"[",
"'errorResult'",
"]",
",",
"job",
")",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Waiting for job to complete : %s, %s'",
",",
"self",
".",
"project_id",
",",
"self",
".",
"running_job_id",
")",
"time",
".",
"sleep",
"(",
"5",
")",
"except",
"HttpError",
"as",
"err",
":",
"if",
"err",
".",
"resp",
".",
"status",
"in",
"[",
"500",
",",
"503",
"]",
":",
"self",
".",
"log",
".",
"info",
"(",
"'%s: Retryable error, waiting for job to complete: %s'",
",",
"err",
".",
"resp",
".",
"status",
",",
"self",
".",
"running_job_id",
")",
"time",
".",
"sleep",
"(",
"5",
")",
"else",
":",
"raise",
"Exception",
"(",
"'BigQuery job status check failed. Final error was: {}'",
".",
"format",
"(",
"err",
".",
"resp",
".",
"status",
")",
")",
"return",
"self",
".",
"running_job_id"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.cancel_query
|
Cancel all started queries that have not yet completed
|
airflow/contrib/hooks/bigquery_hook.py
|
def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id,
self.running_job_id)
if self.location:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id,
location=self.location).execute(num_retries=self.num_retries)
else:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id).execute(num_retries=self.num_retries)
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while polling_attempts < max_polling_attempts and not job_complete:
polling_attempts = polling_attempts + 1
job_complete = self.poll_job_complete(self.running_job_id)
if job_complete:
self.log.info('Job successfully canceled: %s, %s',
self.project_id, self.running_job_id)
elif polling_attempts == max_polling_attempts:
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
self.running_job_id)
else:
self.log.info('Waiting for canceled job with id %s to finish.',
self.running_job_id)
time.sleep(5)
|
def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id,
self.running_job_id)
if self.location:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id,
location=self.location).execute(num_retries=self.num_retries)
else:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id).execute(num_retries=self.num_retries)
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while polling_attempts < max_polling_attempts and not job_complete:
polling_attempts = polling_attempts + 1
job_complete = self.poll_job_complete(self.running_job_id)
if job_complete:
self.log.info('Job successfully canceled: %s, %s',
self.project_id, self.running_job_id)
elif polling_attempts == max_polling_attempts:
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
self.running_job_id)
else:
self.log.info('Waiting for canceled job with id %s to finish.',
self.running_job_id)
time.sleep(5)
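A hedged sketch of how cancel_query is typically used, for example from an operator's on_kill handler, to stop a job started earlier on the same cursor (illustrative only; connection id and SQL are placeholders).

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()
try:
    # Any run_with_configuration/run_query call sets cursor.running_job_id,
    # which cancel_query() then polls and cancels.
    cursor.run_with_configuration(
        {'query': {'query': 'SELECT 1', 'useLegacySql': False}})
except KeyboardInterrupt:
    cursor.cancel_query()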
|
[
"Cancel",
"all",
"started",
"queries",
"that",
"have",
"not",
"yet",
"completed"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1298-L1339
|
[
"def",
"cancel_query",
"(",
"self",
")",
":",
"jobs",
"=",
"self",
".",
"service",
".",
"jobs",
"(",
")",
"if",
"(",
"self",
".",
"running_job_id",
"and",
"not",
"self",
".",
"poll_job_complete",
"(",
"self",
".",
"running_job_id",
")",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Attempting to cancel job : %s, %s'",
",",
"self",
".",
"project_id",
",",
"self",
".",
"running_job_id",
")",
"if",
"self",
".",
"location",
":",
"jobs",
".",
"cancel",
"(",
"projectId",
"=",
"self",
".",
"project_id",
",",
"jobId",
"=",
"self",
".",
"running_job_id",
",",
"location",
"=",
"self",
".",
"location",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"else",
":",
"jobs",
".",
"cancel",
"(",
"projectId",
"=",
"self",
".",
"project_id",
",",
"jobId",
"=",
"self",
".",
"running_job_id",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"'No running BigQuery jobs to cancel.'",
")",
"return",
"# Wait for all the calls to cancel to finish",
"max_polling_attempts",
"=",
"12",
"polling_attempts",
"=",
"0",
"job_complete",
"=",
"False",
"while",
"polling_attempts",
"<",
"max_polling_attempts",
"and",
"not",
"job_complete",
":",
"polling_attempts",
"=",
"polling_attempts",
"+",
"1",
"job_complete",
"=",
"self",
".",
"poll_job_complete",
"(",
"self",
".",
"running_job_id",
")",
"if",
"job_complete",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Job successfully canceled: %s, %s'",
",",
"self",
".",
"project_id",
",",
"self",
".",
"running_job_id",
")",
"elif",
"polling_attempts",
"==",
"max_polling_attempts",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Stopping polling due to timeout. Job with id %s \"",
"\"has not completed cancel and may or may not finish.\"",
",",
"self",
".",
"running_job_id",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Waiting for canceled job with id %s to finish.'",
",",
"self",
".",
"running_job_id",
")",
"time",
".",
"sleep",
"(",
"5",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.get_schema
|
Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
|
airflow/contrib/hooks/bigquery_hook.py
|
def get_schema(self, dataset_id, table_id):
"""
    Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute(num_retries=self.num_retries)
return tables_resource['schema']
|
def get_schema(self, dataset_id, table_id):
"""
    Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute(num_retries=self.num_retries)
return tables_resource['schema']
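A small illustrative call to get_schema (not part of the source record); dataset and table names are placeholders.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()

schema = cursor.get_schema(dataset_id='my_dataset', table_id='my_table')
# The returned dict mirrors the 'schema' portion of the tables resource.
for field in schema.get('fields', []):
    print(field['name'], field['type'])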
|
[
"Get",
"the",
"schema",
"for",
"a",
"given",
"datset",
".",
"table",
".",
"see",
"https",
":",
"//",
"cloud",
".",
"google",
".",
"com",
"/",
"bigquery",
"/",
"docs",
"/",
"reference",
"/",
"v2",
"/",
"tables#resource"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1341-L1353
|
[
"def",
"get_schema",
"(",
"self",
",",
"dataset_id",
",",
"table_id",
")",
":",
"tables_resource",
"=",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"get",
"(",
"projectId",
"=",
"self",
".",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"tableId",
"=",
"table_id",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"return",
"tables_resource",
"[",
"'schema'",
"]"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.get_tabledata
|
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
|
airflow/contrib/hooks/bigquery_hook.py
|
def get_tabledata(self, dataset_id, table_id,
max_results=None, selected_fields=None, page_token=None,
start_index=None):
"""
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if selected_fields:
optional_params['selectedFields'] = selected_fields
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (self.service.tabledata().list(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
**optional_params).execute(num_retries=self.num_retries))
|
def get_tabledata(self, dataset_id, table_id,
max_results=None, selected_fields=None, page_token=None,
start_index=None):
"""
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if selected_fields:
optional_params['selectedFields'] = selected_fields
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (self.service.tabledata().list(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
**optional_params).execute(num_retries=self.num_retries))
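An illustrative sketch of fetching a few rows with get_tabledata (placeholders throughout). Rows come back in the raw tabledata.list shape: each row is a dict with an 'f' list of {'v': value} cells.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()

response = cursor.get_tabledata(
    dataset_id='my_dataset',
    table_id='my_table',
    max_results=10,
    selected_fields='id,name',
)
for row in response.get('rows', []):
    print([cell.get('v') for cell in row['f']])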
|
[
"Get",
"the",
"data",
"of",
"a",
"given",
"dataset",
".",
"table",
"and",
"optionally",
"with",
"selected",
"columns",
".",
"see",
"https",
":",
"//",
"cloud",
".",
"google",
".",
"com",
"/",
"bigquery",
"/",
"docs",
"/",
"reference",
"/",
"v2",
"/",
"tabledata",
"/",
"list"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1355-L1385
|
[
"def",
"get_tabledata",
"(",
"self",
",",
"dataset_id",
",",
"table_id",
",",
"max_results",
"=",
"None",
",",
"selected_fields",
"=",
"None",
",",
"page_token",
"=",
"None",
",",
"start_index",
"=",
"None",
")",
":",
"optional_params",
"=",
"{",
"}",
"if",
"max_results",
":",
"optional_params",
"[",
"'maxResults'",
"]",
"=",
"max_results",
"if",
"selected_fields",
":",
"optional_params",
"[",
"'selectedFields'",
"]",
"=",
"selected_fields",
"if",
"page_token",
":",
"optional_params",
"[",
"'pageToken'",
"]",
"=",
"page_token",
"if",
"start_index",
":",
"optional_params",
"[",
"'startIndex'",
"]",
"=",
"start_index",
"return",
"(",
"self",
".",
"service",
".",
"tabledata",
"(",
")",
".",
"list",
"(",
"projectId",
"=",
"self",
".",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"tableId",
"=",
"table_id",
",",
"*",
"*",
"optional_params",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.run_table_delete
|
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: bool
:return:
|
airflow/contrib/hooks/bigquery_hook.py
|
def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: bool
:return:
"""
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute(num_retries=self.num_retries)
self.log.info('Deleted table %s:%s.%s.', deletion_project,
deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception('Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.')
|
def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: bool
:return:
"""
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute(num_retries=self.num_retries)
self.log.info('Deleted table %s:%s.%s.', deletion_project,
deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception('Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.')
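A one-call sketch of run_table_delete (illustrative; the table name is a placeholder). With ignore_if_missing=True a missing table is logged and skipped instead of raising.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()

cursor.run_table_delete(
    deletion_dataset_table='my_project.my_dataset.obsolete_table',
    ignore_if_missing=True,
)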
|
[
"Delete",
"an",
"existing",
"table",
"from",
"the",
"dataset",
";",
"If",
"the",
"table",
"does",
"not",
"exist",
"return",
"an",
"error",
"unless",
"ignore_if_missing",
"is",
"set",
"to",
"True",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1387-L1419
|
[
"def",
"run_table_delete",
"(",
"self",
",",
"deletion_dataset_table",
",",
"ignore_if_missing",
"=",
"False",
")",
":",
"deletion_project",
",",
"deletion_dataset",
",",
"deletion_table",
"=",
"_split_tablename",
"(",
"table_input",
"=",
"deletion_dataset_table",
",",
"default_project_id",
"=",
"self",
".",
"project_id",
")",
"try",
":",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"delete",
"(",
"projectId",
"=",
"deletion_project",
",",
"datasetId",
"=",
"deletion_dataset",
",",
"tableId",
"=",
"deletion_table",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Deleted table %s:%s.%s.'",
",",
"deletion_project",
",",
"deletion_dataset",
",",
"deletion_table",
")",
"except",
"HttpError",
":",
"if",
"not",
"ignore_if_missing",
":",
"raise",
"Exception",
"(",
"'Table deletion failed. Table does not exist.'",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Table does not exist. Skipping.'",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.run_table_upsert
|
creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
|
airflow/contrib/hooks/bigquery_hook.py
|
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(
projectId=project_id, datasetId=dataset_id).execute(num_retries=self.num_retries)
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
self.log.info('Table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute(num_retries=self.num_retries)
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute(num_retries=self.num_retries)
# If there is no next page, then the table doesn't exist.
else:
# do insert
self.log.info('Table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute(num_retries=self.num_retries)
|
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(
projectId=project_id, datasetId=dataset_id).execute(num_retries=self.num_retries)
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
self.log.info('Table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute(num_retries=self.num_retries)
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute(num_retries=self.num_retries)
# If there is no next page, then the table doesn't exist.
else:
# do insert
self.log.info('Table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute(num_retries=self.num_retries)
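An illustrative table resource for run_table_upsert (not part of the source record; project, dataset and field names are placeholders). The method itself only reads tableReference.tableId; the rest of the resource is passed through to the API.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()

table_resource = {
    'tableReference': {
        'projectId': 'my_project',
        'datasetId': 'my_dataset',
        'tableId': 'events',
    },
    'schema': {'fields': [
        {'name': 'event_id', 'type': 'STRING', 'mode': 'REQUIRED'},
        {'name': 'ts', 'type': 'TIMESTAMP'},
    ]},
}
cursor.run_table_upsert(dataset_id='my_dataset', table_resource=table_resource)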
|
[
"creates",
"a",
"new",
"empty",
"table",
"in",
"the",
"dataset",
";",
"If",
"the",
"table",
"already",
"exists",
"update",
"the",
"existing",
"table",
".",
"Since",
"BigQuery",
"does",
"not",
"natively",
"allow",
"table",
"upserts",
"this",
"is",
"not",
"an",
"atomic",
"operation",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1421-L1468
|
[
"def",
"run_table_upsert",
"(",
"self",
",",
"dataset_id",
",",
"table_resource",
",",
"project_id",
"=",
"None",
")",
":",
"# check to see if the table exists",
"table_id",
"=",
"table_resource",
"[",
"'tableReference'",
"]",
"[",
"'tableId'",
"]",
"project_id",
"=",
"project_id",
"if",
"project_id",
"is",
"not",
"None",
"else",
"self",
".",
"project_id",
"tables_list_resp",
"=",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"list",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"while",
"True",
":",
"for",
"table",
"in",
"tables_list_resp",
".",
"get",
"(",
"'tables'",
",",
"[",
"]",
")",
":",
"if",
"table",
"[",
"'tableReference'",
"]",
"[",
"'tableId'",
"]",
"==",
"table_id",
":",
"# found the table, do update",
"self",
".",
"log",
".",
"info",
"(",
"'Table %s:%s.%s exists, updating.'",
",",
"project_id",
",",
"dataset_id",
",",
"table_id",
")",
"return",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"update",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"tableId",
"=",
"table_id",
",",
"body",
"=",
"table_resource",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"# If there is a next page, we need to check the next page.",
"if",
"'nextPageToken'",
"in",
"tables_list_resp",
":",
"tables_list_resp",
"=",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"list",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"pageToken",
"=",
"tables_list_resp",
"[",
"'nextPageToken'",
"]",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"# If there is no next page, then the table doesn't exist.",
"else",
":",
"# do insert",
"self",
".",
"log",
".",
"info",
"(",
"'Table %s:%s.%s does not exist. creating.'",
",",
"project_id",
",",
"dataset_id",
",",
"table_id",
")",
"return",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"insert",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"body",
"=",
"table_resource",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.run_grant_dataset_view_access
|
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
|
airflow/contrib/hooks/bigquery_hook.py
|
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project=None,
view_project=None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(
projectId=source_project, datasetId=source_dataset).execute(num_retries=self.num_retries)
access = source_dataset_resource[
'access'] if 'access' in source_dataset_resource else []
view_access = {
'view': {
'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table
}
}
# check to see if the view we want to add already exists.
if view_access not in access:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
access.append(view_access)
return self.service.datasets().patch(
projectId=source_project,
datasetId=source_dataset,
body={
'access': access
}).execute(num_retries=self.num_retries)
else:
# if view is already in access, do nothing.
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project, source_dataset)
return source_dataset_resource
|
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project=None,
view_project=None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(
projectId=source_project, datasetId=source_dataset).execute(num_retries=self.num_retries)
access = source_dataset_resource[
'access'] if 'access' in source_dataset_resource else []
view_access = {
'view': {
'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table
}
}
# check to see if the view we want to add already exists.
if view_access not in access:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
access.append(view_access)
return self.service.datasets().patch(
projectId=source_project,
datasetId=source_dataset,
body={
'access': access
}).execute(num_retries=self.num_retries)
else:
# if view is already in access, do nothing.
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project, source_dataset)
return source_dataset_resource
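A hedged sketch of granting a view authorized access to a source dataset (illustrative only; dataset and view names are placeholders, and the view table is assumed to exist already).

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()

cursor.run_grant_dataset_view_access(
    source_dataset='raw_data',
    view_dataset='reporting',
    view_table='daily_summary_view',
)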
|
[
"Grant",
"authorized",
"view",
"access",
"of",
"a",
"dataset",
"to",
"a",
"view",
"table",
".",
"If",
"this",
"view",
"has",
"already",
"been",
"granted",
"access",
"to",
"the",
"dataset",
"do",
"nothing",
".",
"This",
"method",
"is",
"not",
"atomic",
".",
"Running",
"it",
"may",
"clobber",
"a",
"simultaneous",
"update",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1470-L1531
|
[
"def",
"run_grant_dataset_view_access",
"(",
"self",
",",
"source_dataset",
",",
"view_dataset",
",",
"view_table",
",",
"source_project",
"=",
"None",
",",
"view_project",
"=",
"None",
")",
":",
"# Apply default values to projects",
"source_project",
"=",
"source_project",
"if",
"source_project",
"else",
"self",
".",
"project_id",
"view_project",
"=",
"view_project",
"if",
"view_project",
"else",
"self",
".",
"project_id",
"# we don't want to clobber any existing accesses, so we have to get",
"# info on the dataset before we can add view access",
"source_dataset_resource",
"=",
"self",
".",
"service",
".",
"datasets",
"(",
")",
".",
"get",
"(",
"projectId",
"=",
"source_project",
",",
"datasetId",
"=",
"source_dataset",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"access",
"=",
"source_dataset_resource",
"[",
"'access'",
"]",
"if",
"'access'",
"in",
"source_dataset_resource",
"else",
"[",
"]",
"view_access",
"=",
"{",
"'view'",
":",
"{",
"'projectId'",
":",
"view_project",
",",
"'datasetId'",
":",
"view_dataset",
",",
"'tableId'",
":",
"view_table",
"}",
"}",
"# check to see if the view we want to add already exists.",
"if",
"view_access",
"not",
"in",
"access",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Granting table %s:%s.%s authorized view access to %s:%s dataset.'",
",",
"view_project",
",",
"view_dataset",
",",
"view_table",
",",
"source_project",
",",
"source_dataset",
")",
"access",
".",
"append",
"(",
"view_access",
")",
"return",
"self",
".",
"service",
".",
"datasets",
"(",
")",
".",
"patch",
"(",
"projectId",
"=",
"source_project",
",",
"datasetId",
"=",
"source_dataset",
",",
"body",
"=",
"{",
"'access'",
":",
"access",
"}",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"else",
":",
"# if view is already in access, do nothing.",
"self",
".",
"log",
".",
"info",
"(",
"'Table %s:%s.%s already has authorized view access to %s:%s dataset.'",
",",
"view_project",
",",
"view_dataset",
",",
"view_table",
",",
"source_project",
",",
"source_dataset",
")",
"return",
"source_dataset_resource"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
BigQueryBaseCursor.create_empty_dataset
|
Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
    an empty dataset. Not needed if projectId is provided in dataset_reference.
:type project_id: str
    :param dataset_id: The id of the dataset. Not needed if datasetId is
    provided in dataset_reference.
:type dataset_id: str
:param dataset_reference: Dataset reference that could be provided
with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict
|
airflow/contrib/hooks/bigquery_hook.py
|
def create_empty_dataset(self, dataset_id="", project_id="",
dataset_reference=None):
"""
Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
        an empty dataset. Not needed if projectId is provided in dataset_reference.
:type project_id: str
        :param dataset_id: The id of the dataset. Not needed if datasetId is
        provided in dataset_reference.
:type dataset_id: str
:param dataset_reference: Dataset reference that could be provided
with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict
"""
if dataset_reference:
_validate_value('dataset_reference', dataset_reference, dict)
else:
dataset_reference = {}
if "datasetReference" not in dataset_reference:
dataset_reference["datasetReference"] = {}
if not dataset_reference["datasetReference"].get("datasetId") and not dataset_id:
            raise ValueError(
                "dataset_id not provided and datasetId not found in dataset_reference. Impossible to create dataset.")
dataset_required_params = [(dataset_id, "datasetId", ""),
(project_id, "projectId", self.project_id)]
for param_tuple in dataset_required_params:
param, param_name, param_default = param_tuple
if param_name not in dataset_reference['datasetReference']:
if param_default and not param:
self.log.info(
"%s was not specified. Will be used default value %s.",
param_name, param_default
)
param = param_default
dataset_reference['datasetReference'].update(
{param_name: param})
elif param:
_api_resource_configs_duplication_check(
param_name, param,
dataset_reference['datasetReference'], 'dataset_reference')
dataset_id = dataset_reference.get("datasetReference").get("datasetId")
dataset_project_id = dataset_reference.get("datasetReference").get(
"projectId")
self.log.info('Creating Dataset: %s in project: %s ', dataset_id,
dataset_project_id)
try:
self.service.datasets().insert(
projectId=dataset_project_id,
body=dataset_reference).execute(num_retries=self.num_retries)
self.log.info('Dataset created successfully: In project %s '
'Dataset %s', dataset_project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
|
def create_empty_dataset(self, dataset_id="", project_id="",
dataset_reference=None):
"""
Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
    an empty dataset. Not needed if projectId is provided in dataset_reference.
:type project_id: str
    :param dataset_id: The id of the dataset. Not needed if datasetId is
    provided in dataset_reference.
:type dataset_id: str
:param dataset_reference: Dataset reference that could be provided
with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict
"""
if dataset_reference:
_validate_value('dataset_reference', dataset_reference, dict)
else:
dataset_reference = {}
if "datasetReference" not in dataset_reference:
dataset_reference["datasetReference"] = {}
if not dataset_reference["datasetReference"].get("datasetId") and not dataset_id:
            raise ValueError(
                "dataset_id not provided and datasetId not found in dataset_reference. Impossible to create dataset.")
dataset_required_params = [(dataset_id, "datasetId", ""),
(project_id, "projectId", self.project_id)]
for param_tuple in dataset_required_params:
param, param_name, param_default = param_tuple
if param_name not in dataset_reference['datasetReference']:
if param_default and not param:
self.log.info(
"%s was not specified. Will be used default value %s.",
param_name, param_default
)
param = param_default
dataset_reference['datasetReference'].update(
{param_name: param})
elif param:
_api_resource_configs_duplication_check(
param_name, param,
dataset_reference['datasetReference'], 'dataset_reference')
dataset_id = dataset_reference.get("datasetReference").get("datasetId")
dataset_project_id = dataset_reference.get("datasetReference").get(
"projectId")
self.log.info('Creating Dataset: %s in project: %s ', dataset_id,
dataset_project_id)
try:
self.service.datasets().insert(
projectId=dataset_project_id,
body=dataset_reference).execute(num_retries=self.num_retries)
self.log.info('Dataset created successfully: In project %s '
'Dataset %s', dataset_project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
|
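A minimal usage sketch, assuming a configured BigQuery connection named bigquery_default and a project called my-project (both hypothetical, not taken from this file):

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

# Assumed connection id; any configured BigQuery connection would work here.
hook = BigQueryHook(bigquery_conn_id='bigquery_default')

# Simplest form: datasetId comes from dataset_id and projectId falls back to
# the hook's default project_id.
hook.create_empty_dataset(dataset_id='staging_dataset')

# Equivalent call passing the full request body instead of separate arguments.
hook.create_empty_dataset(dataset_reference={
    'datasetReference': {
        'datasetId': 'staging_dataset',
        'projectId': 'my-project',
    }
})

Note that a value passed both as an argument and inside dataset_reference['datasetReference'] is routed through _api_resource_configs_duplication_check rather than silently merged, so conflicting duplicates are rejected.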
[
"Create",
"a",
"new",
"empty",
"dataset",
":",
"https",
":",
"//",
"cloud",
".",
"google",
".",
"com",
"/",
"bigquery",
"/",
"docs",
"/",
"reference",
"/",
"rest",
"/",
"v2",
"/",
"datasets",
"/",
"insert"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1533-L1598
|
[
"def",
"create_empty_dataset",
"(",
"self",
",",
"dataset_id",
"=",
"\"\"",
",",
"project_id",
"=",
"\"\"",
",",
"dataset_reference",
"=",
"None",
")",
":",
"if",
"dataset_reference",
":",
"_validate_value",
"(",
"'dataset_reference'",
",",
"dataset_reference",
",",
"dict",
")",
"else",
":",
"dataset_reference",
"=",
"{",
"}",
"if",
"\"datasetReference\"",
"not",
"in",
"dataset_reference",
":",
"dataset_reference",
"[",
"\"datasetReference\"",
"]",
"=",
"{",
"}",
"if",
"not",
"dataset_reference",
"[",
"\"datasetReference\"",
"]",
".",
"get",
"(",
"\"datasetId\"",
")",
"and",
"not",
"dataset_id",
":",
"raise",
"ValueError",
"(",
"\"{} not provided datasetId. Impossible to create dataset\"",
")",
"dataset_required_params",
"=",
"[",
"(",
"dataset_id",
",",
"\"datasetId\"",
",",
"\"\"",
")",
",",
"(",
"project_id",
",",
"\"projectId\"",
",",
"self",
".",
"project_id",
")",
"]",
"for",
"param_tuple",
"in",
"dataset_required_params",
":",
"param",
",",
"param_name",
",",
"param_default",
"=",
"param_tuple",
"if",
"param_name",
"not",
"in",
"dataset_reference",
"[",
"'datasetReference'",
"]",
":",
"if",
"param_default",
"and",
"not",
"param",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"%s was not specified. Will be used default value %s.\"",
",",
"param_name",
",",
"param_default",
")",
"param",
"=",
"param_default",
"dataset_reference",
"[",
"'datasetReference'",
"]",
".",
"update",
"(",
"{",
"param_name",
":",
"param",
"}",
")",
"elif",
"param",
":",
"_api_resource_configs_duplication_check",
"(",
"param_name",
",",
"param",
",",
"dataset_reference",
"[",
"'datasetReference'",
"]",
",",
"'dataset_reference'",
")",
"dataset_id",
"=",
"dataset_reference",
".",
"get",
"(",
"\"datasetReference\"",
")",
".",
"get",
"(",
"\"datasetId\"",
")",
"dataset_project_id",
"=",
"dataset_reference",
".",
"get",
"(",
"\"datasetReference\"",
")",
".",
"get",
"(",
"\"projectId\"",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Creating Dataset: %s in project: %s '",
",",
"dataset_id",
",",
"dataset_project_id",
")",
"try",
":",
"self",
".",
"service",
".",
"datasets",
"(",
")",
".",
"insert",
"(",
"projectId",
"=",
"dataset_project_id",
",",
"body",
"=",
"dataset_reference",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Dataset created successfully: In project %s '",
"'Dataset %s'",
",",
"dataset_project_id",
",",
"dataset_id",
")",
"except",
"HttpError",
"as",
"err",
":",
"raise",
"AirflowException",
"(",
"'BigQuery job failed. Error was: {}'",
".",
"format",
"(",
"err",
".",
"content",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|