update secrets will update metadata needed for pull and search
|
def _update_secrets(self):
'''update secrets will update metadata needed for pull and search
'''
self.token = self._required_get_and_update('SREGISTRY_GITLAB_TOKEN')
self.headers["Private-Token"] = self.token
|
since the user needs a job id and other parameters, save this for them.
|
def _get_metadata(self):
'''since the user needs a job id and other parameters, save this
for them.
'''
metadata = {'SREGISTRY_GITLAB_FOLDER': self.artifacts,
'api_base': self.api_base,
'SREGISTRY_GITLAB_BASE': self.base,
'SREGISTRY_GITLAB_JOB': self.job }
return metadata
|
starting with an image string in either of the following formats: job_id,collection or job_id,collection,job_name, parse the job_name, job_id, and collection uri from it. If the user provides the first option, we use the job_name set by the client (default is build). Parameters: image, the string to parse, with values separated by commas; retry, the client can call itself recursively once, providing the default job_name if the user doesn't.
|
def _parse_image_name(self, image, retry=True):
'''starting with an image string in either of the following formats:
job_id,collection
job_id,collection,job_name
Parse the job_name, job_id, and collection uri from it. If the user
provides the first option, we use the job_name set by the client
(default is build).
Parameters
==========
image: the string to parse, with values separated by commas
retry: the client can call itself recursively once, providing the
default job_name if the user doesn't.
'''
try:
job_id, collection, job_name = image.split(',')
except ValueError:
# Retry and add job_name
if retry:
return self._parse_image_name("%s,%s" %(image, self.job),
retry=False)
# Or fail
bot.exit('''Malformed image string! Please provide:
job_id,collection (or)
job_id,collection,job_name''')
return job_id, collection, job_name
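# Usage sketch (hedged; the job id and collection are illustrative values):
# self._parse_image_name("12345,vsoch/hello-world")
#   -> ('12345', 'vsoch/hello-world', 'build')   # client default job name
# self._parse_image_name("12345,vsoch/hello-world,test")
#   -> ('12345', 'vsoch/hello-world', 'test')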
|
get all settings, either for a particular client if a name is provided, or across clients.
|
def get_settings(self, client_name=None):
'''get all settings, either for a particular client if a name is provided,
or across clients.
Parameters
==========
client_name: the client name to return settings for (optional)
'''
settings = read_client_secrets()
if client_name is not None and client_name in settings:
return settings[client_name]
return settings
|
return a setting from the environment (first priority) and then secrets (second priority) if one can be found. If not, return None.
|
def get_setting(self, name, default=None):
'''return a setting from the environment (first priority) and then
secrets (second priority) if one can be found. If not, return None.
Parameters
==========
name: the key (index) of the setting to look up
default: (optional) if not found, return default instead.
'''
# First priority is the environment
setting = os.environ.get(name)
# Second priority is the secrets file
if setting is None:
secrets = read_client_secrets()
if self.client_name in secrets:
secrets = secrets[self.client_name]
if name in secrets:
setting = secrets[name]
if setting is None and default is not None:
setting = default
return setting
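# Priority sketch (hedged; assumes an initialized client, illustrative values):
# os.environ['SREGISTRY_GITLAB_BASE'] = 'https://gitlab.example.com'
# self.get_setting('SREGISTRY_GITLAB_BASE')        # environment wins
# self.get_setting('NOT_SET_ANYWHERE')             # -> None
# self.get_setting('NOT_SET_ANYWHERE', 'backup')   # -> 'backup'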
|
Look for a setting in the environment (first priority) and then the settings file (second). If something is found, the settings file is updated. The order of operations is described in the docstring below.
|
def get_and_update_setting(self, name, default=None):
'''Look for a setting in the environment (first priority) and then
the settings file (second). If something is found, the settings
file is updated. The order of operations works as follows:
1. The .sregistry settings file is used as a cache for the variable
2. the environment variable always takes priority over the cache, and if
found, will update the cache.
3. If the variable is not found and the cache is set, we are good
4. If the variable is not found and the cache isn't set, return
default (default is None)
So the user of the function can assume a return of None equates to
not set anywhere, and take the appropriate action.
'''
setting = self._get_setting(name)
if setting is None and default is not None:
setting = default
# If the setting is found, update the client secrets
if setting is not None:
updates = {name : setting}
update_client_secrets(backend=self.client_name,
updates=updates)
return setting
|
a wrapper to get_and_update_setting that prints an error and exits if the setting is not found.
|
def required_get_and_update(self, name, default=None):
'''a wrapper to get_and_update_setting; if not successful, prints an
error and exits.
'''
setting = self._get_and_update_setting(name, default=default)
if setting in [None, ""]:
bot.exit('You must export %s' % name)
return setting
|
Just update a setting; nothing needs to be returned.
|
def update_setting(self, name, value):
'''Just update a setting, doesn't need to be returned.
'''
if value is not None:
updates = {name : value}
update_client_secrets(backend=self.client_name,
updates=updates)
|
use a parsed names dictionary from parse_image_name (above) to return the path in storage based on the user's preferences.
|
def get_storage_name(self, names, remove_dir=False):
'''use a parsed names dictionary from parse_image_name (above) to return
the path in storage based on the user's preferences
Parameters
==========
names: the output from parse_image_name
remove_dir: if True, return only the file name, without the storage folder
'''
storage_folder = os.path.dirname(names['storage'])
# If the client doesn't have a database, default to PWD
if not hasattr(self, 'storage'):
return os.path.basename(names['storage'])
storage_folder = "%s/%s" %(self.storage, storage_folder)
mkdir_p(storage_folder)
file_name = names['storage'].replace('/','-')
storage_path = "%s/%s" %(self.storage, file_name)
if remove_dir is True:
return file_name
return storage_path
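# Example sketch (hypothetical names dictionary): with
# names = {'storage': 'vsoch/hello-world:latest.simg'} and a client
# storage of '/home/user/.singularity/shub', the returned path is
# '/home/user/.singularity/shub/vsoch-hello-world:latest.simg',
# since '/' in the storage name is flattened to '-'.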
|
Authorize a client by signing the payload with the client token, which should be matched on the receiving server.
|
def authorize(self, names, payload=None, request_type="push"):
'''Authorize a client by signing the payload with the client
token, which should be matched on the receiving server'''
if self.secrets is not None:
if "registry" in self.secrets:
# Use the payload to generate a digest type|collection|timestamp|image|tag
timestamp = generate_timestamp()
credential = generate_credential(self.secrets['registry']['username'])
credential = "%s/%s/%s" %(request_type,credential,timestamp)
if payload is None:
payload = "%s|%s|%s|%s|%s|" %(request_type,
names['collection'],
timestamp,
names['image'],
names['tag'])
signature = generate_signature(payload,self.secrets['registry']['token'])
return "SREGISTRY-HMAC-SHA256 Credential=%s,Signature=%s" %(credential,signature)
|
push an image to your Storage. If the collection doesn't exist, it is created. Parameters: path, which should correspond to an absolute image path (or derive it); name, the complete uri that the user has requested to push; tag, which should correspond with an image tag (provided to mirror Docker).
|
def push(self, path, name, tag=None):
'''push an image to your Storage. If the collection doesn't exist,
it is created.
Parameters
==========
path: should correspond to an absolute image path (or derive it)
name: should be the complete uri that the user has requested to push.
tag: should correspond with an image tag. This is provided to mirror Docker
'''
path = os.path.abspath(path)
bot.debug("PUSH %s" % path)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# Parse image names
names = parse_image_name(remove_uri(name), tag=tag)
# Get the size of the file
file_size = os.path.getsize(path)
chunk_size = 4 * 1024 * 1024  # note: unused; the upload below reads the file in full
storage_path = "/%s" %names['storage']
# Create / get the collection
collection = self._get_or_create_collection(names['collection'])
# The image name is the name followed by tag
image_name = os.path.basename(names['storage'])
# prepare the progress bar
progress = 0
bot.show_progress(progress, file_size, length=35)
# Put the (actual) container into the collection
with open(path, 'rb') as F:
self.conn.put_object(names['collection'], image_name,
contents=F.read(),
content_type='application/octet-stream')
# Finish up
bot.show_progress(iteration=file_size,
total=file_size,
length=35,
carriage_return=True)
# Newline to finish download
sys.stdout.write('\n')
|
trigger a build on Google Cloud (storage then compute) given a name, recipe, and Github URI where the recipe can be found. Parameters: name, the complete uri that the user has requested to push; commit, a commit to use (not required, and can be parsed from the URI); repo, a Github URL (if undefined, the local repo is used); tag, a user specified tag that takes preference over the tag in name; config, the local config file to use (if the file doesn't exist, we attempt looking up the config based on the name); recipe, which if defined limits the builder to a single recipe.
|
def build(self, repo,
config=None,
name=None,
commit=None,
tag="latest",
recipe="Singularity",
preview=False):
'''trigger a build on Google Cloud (storage then compute) given a name
recipe, and Github URI where the recipe can be found.
Parameters
==========
name: should be the complete uri that the user has requested to push.
commit: a commit to use, not required, and can be parsed from URI
repo: should correspond to a Github URL or (if undefined) used local repo.
tag: a user specified tag, to take preference over tag in name
config: The local config file to use. If the file doesn't exist, then
we attempt looking up the config based on the name.
recipe: If defined, limit builder to build a single recipe
'''
bot.debug("BUILD %s" % repo)
# Ensure that repo exists (200 response)
if not self._healthy(repo):
sys.exit(1)
config = self._load_build_config(config)
# If name not provided, parse name based on repository
if name is None:
name = '/'.join(repo.split('/')[-2:])
# This returns a data structure with collection, container, based on uri
names = parse_image_name(remove_uri(name))
# First priority - user has provided a tag
names['tag'] = tag or names['tag']
# If we still don't have custom tag, check the recipe
if names['tag'] == "latest" and recipe != "Singularity":
tag = get_recipe_tag(recipe)
names = parse_image_name(remove_uri(name), tag=tag)
# The commit is the version (after the @)
commit = commit or names['version']
# Setup the build
config = self._setup_build(name=names['url'], recipe=recipe,
repo=repo, config=config,
tag=tag, commit=commit)
# The user only wants to preview the configuration
if preview is True:
return config
# Otherwise, run the build!
return self._run_build(config)
|
list builders, or instances for the project. They should start with sregistry-builder.
|
def list_builders(self, project=None, zone='us-west1-a'):
'''list builders, or instances for the project. They should start with
sregistry-builder
Parameters
==========
project: specify a project, will default to environment first
zone: the zone to use, defaults to us-west1-a if environment not set
'''
builders = []
instances = self._get_instances(project, zone)
for instance in instances.get('items', []):
builders.append([instance['name'], instance['status']])
bot.info("[google-compute] Found %s instances" %(len(builders)))
bot.table(builders)
bot.newline()
|
list templates in the builder bundle library. If a name is provided, look it up.
|
def list_templates(self, name=None):
'''list templates in the builder bundle library. If a name is provided,
look it up
Parameters
==========
name: the name of a template to look up
'''
configs = self._get_templates()
rows = []
# DETAIL: The user wants to retrieve a particular configuration
if name:
matches = self._load_templates(name)
bot.debug('Found %s matches for %s' %(len(matches), name))
for match in matches:
print(json.dumps(match, indent=4, sort_keys=True))
# LISTING: If we don't have a specific name, just show all
else:
for config in configs['data']:
rows.append([config['name']])
bot.table(rows)
|
return the full listing of templates in the builder bundle library.
|
def get_templates(self):
'''return the full listing of templates in the builder bundle library.
'''
base = 'https://singularityhub.github.io/builders'
base = self._get_and_update_setting('SREGISTRY_BUILDER_REPO', base)
base = "%s/configs.json" %base
return self._get(base)
|
load a particular template based on a name. We look for a name IN data, so the query name can be a partial string of the full name.
|
def load_templates(self, name):
'''load a particular template based on a name. We look for a name IN data,
so the query name can be a partial string of the full name.
Parameters
==========
name: the name of a template to look up
'''
configs = self._get_templates()
templates = []
# The user wants to retrieve a particular configuration
matches = [x for x in configs['data'] if name in x['name']]
if len(matches) > 0:
for match in matches:
response = self._get(match['id'])
templates.append(response)
return templates
bot.info('No matches found for %s' %name)
|
get instances will return the (unparsed) list of instances for the user. This is primarily used by get_builders to print a list of builder instances.
|
def get_instances(self, project=None, zone='us-west1-a'):
'''get instances will return the (unparsed) list of instances for
the user. This is primarily used by get_builders
to print a list of builder instances.
Parameters
==========
project: specify a project, will default to environment first
zone: the zone to use, defaults to us-west1-a if environment not set
'''
project = self._get_project(project)
zone = self._get_zone(zone)
return self._compute_service.instances().list(project=project,
zone=zone).execute()
|
get the ip address of an inserted instance. Will try three times with a delay to give the instance time to start up.
|
def get_ipaddress(self, name, retries=3, delay=3):
'''get the ip_address of an inserted instance. Will try three times with
delay to give the instance time to start up.
Parameters
==========
name: the name of the instance to get the ip address for.
retries: the number of retries before giving up
delay: the delay between retry
Note from @vsoch: this function is pretty nasty.
'''
for rr in range(retries):
# Retrieve list of instances
instances = self._get_instances()
for instance in instances['items']:
if instance['name'] == name:
# Iterate through network interfaces
for network in instance['networkInterfaces']:
if network['name'] == 'nic0':
# Access configurations
for subnet in network['accessConfigs']:
if subnet['name'] == 'External NAT':
if 'natIP' in subnet:
return subnet['natIP']
sleep(delay)
bot.warning('Did not find IP address, check Cloud Console!')
|
load a google compute config, meaning that we handle the cases enumerated in the docstring below.
|
def load_build_config(self, config=None):
'''load a google compute config, meaning that we have the following cases:
1. the user has not provided a config file directly, we look in env.
2. the environment is not set, so we use a reasonable default
3. if the final string is not found as a file, we look for it in library
4. we load the library name, or the user file, else error
Parameters
==========
config: the config file the user has provided, or the library URI
'''
# If the config is already a dictionary, it's loaded
if isinstance(config, dict):
bot.debug('Config is already loaded.')
return config
# if the config is not defined, look in environment, then choose a default
if config is None:
config = self._get_and_update_setting('SREGISTRY_COMPUTE_CONFIG',
'google/compute/ubuntu/securebuild-2.4.3')
# If the config is a file, we read it
elif os.path.exists(config):
return read_json(config)
# otherwise, try to look it up in library
configs = self._load_templates(config)
if configs is None:
bot.error('%s is not a valid config.' % config)
sys.exit(1)
bot.info('Found config %s in library!' %config)
config = configs[0]
return config
|
setup the build based on the selected configuration file, meaning producing the configuration file filled in based on the user's input. Parameters: config, the complete configuration file provided by the client; startup_script, an optional custom startup script (if not defined, defaults to the apt (or manager) base in main/templates/build); tag, a user specified tag for the build, derived from uri or manual; recipe, which if defined overrides the recipe set in config; commit, a commit to check out if needed.
|
def setup_build(self, name, repo, config, tag=None, commit=None,
recipe="Singularity", startup_script=None):
'''setup the build based on the selected configuration file, meaning
producing the configuration file filled in based on the user's input
Parameters
==========
name: the container uri name for the build
repo: the user repository with the build recipe
config: the complete configuration file provided by the client
tag: a user specified tag for the build, derived from uri or manual
recipe: a recipe, if defined, overrides recipe set in config.
commit: a commit to check out, if needed
startup_script: an optional custom startup script; if not defined,
defaults to apt (or manager) base in main/templates/build
'''
manager = self._get_and_update_setting('SREGISTRY_BUILDER_MANAGER', 'apt')
startup_script = get_build_template(startup_script, manager)
# Read in the config to know what we can edit
config = self._load_build_config(config)
if not config:
bot.error('Cannot find config, check path or URI.')
sys.exit(1)
# Ensure that the builder config is intended for the client
self._client_tagged(config['data']['tags'])
# Compute settings that are parsed into runscript via metadata
defaults = config['data']['metadata']
selfLink = config['links']['self']
# Make sure the builder repository and folder is passed forward
builder_repo = config['data']['repo']
builder_bundle = config['data']['path']
builder_id = config['data']['id']
config = config['data']['config']
# Config settings from the environment, fall back to defaults
image_project = defaults.get('GOOGLE_COMPUTE_PROJECT', 'debian-cloud')
image_family = defaults.get('GOOGLE_COMPUTE_IMAGE_FAMILY', 'debian-8')
# Generate names, description is for repo, name is random
instance_name = "%s-builder %s" %(name.replace('/','-'), selfLink)
robot_name = RobotNamer().generate()
project = self._get_project()
zone = self._get_zone()
# Machine Type
machine_type = defaults.get('SREGISTRY_BUILDER_machine_type', 'n1-standard-1')
machine_type = "zones/%s/machineTypes/%s" %(zone, machine_type)
# Disk Size
disk_size = defaults.get('SREGISTRY_BUILDER_disk_size', '100')
# Get the image type
image_response = self._compute_service.images().getFromFamily(
project=image_project,
family=image_family).execute()
source_disk_image = image_response['selfLink']
storage_bucket = self._bucket_name
# Add the machine parameters to the config
config['name'] = robot_name
config['description'] = instance_name
config['machineType'] = machine_type
config['disks'].append({
"autoDelete": True,
"boot": True,
"initializeParams": { 'sourceImage': source_disk_image,
'diskSizeGb': disk_size }
})
# Metadata base
metadata = {'items':
[{ 'key': 'startup-script',
'value': startup_script },
# Storage Settings from Host
{ 'key':'SREGISTRY_BUILDER_STORAGE_BUCKET',
'value':self._bucket_name }]}
# Runtime variables take priority over defaults from config
# and so here we update the defaults with runtime
# ([defaults], [config-key], [runtime-setting])
# User Repository
defaults = setconfig(defaults, 'SREGISTRY_USER_REPO', repo)
# Container Namespace (without tag/version)
defaults = setconfig(defaults, 'SREGISTRY_CONTAINER_NAME', name)
# User Repository Commit
defaults = setconfig(defaults, 'SREGISTRY_USER_COMMIT', commit)
# User Repository Branch
defaults = setconfig(defaults, 'SREGISTRY_USER_BRANCH', "master")
# User Repository Tag
defaults = setconfig(defaults, 'SREGISTRY_USER_TAG', tag)
# Builder repository url
defaults = setconfig(defaults, 'SREGISTRY_BUILDER_REPO', builder_repo)
# Builder commit
defaults = setconfig(defaults, 'SREGISTRY_BUILDER_COMMIT')
# Builder default runscript
defaults = setconfig(defaults, 'SREGISTRY_BUILDER_RUNSCRIPT', "run.sh")
# Builder repository url
defaults = setconfig(defaults, 'SREGISTRY_BUILDER_BRANCH', "master")
# Builder id in repository
defaults = setconfig(defaults, 'SREGISTRY_BUILDER_ID', builder_id)
# Builder repository relative folder path
defaults = setconfig(defaults, 'SREGISTRY_BUILDER_BUNDLE', builder_bundle)
# Number of extra hours to debug
defaults = setconfig(defaults, 'SREGISTRY_BUILDER_DEBUGHOURS', "4")
# Hours to kill running job
defaults = setconfig(defaults, 'SREGISTRY_BUILDER_KILLHOURS', "10")
# Recipe set at runtime
defaults = setconfig(defaults, 'SINGULARITY_RECIPE', recipe)
# Branch of Singularity to install
defaults = setconfig(defaults, 'SINGULARITY_BRANCH')
# Singularity commit to use (if needed)
defaults = setconfig(defaults, 'SINGULARITY_COMMIT')
# Singularity Repo to Use
defaults = setconfig(defaults,'SINGULARITY_REPO',
'https://github.com/cclerget/singularity.git')
# Update metadata config object
seen = ['SREGISTRY_BUILDER_STORAGE_BUCKET', 'startup-script']
for key, value in defaults.items():
# This also appends empty values, they are meaningful
if value not in seen:
entry = { "key": key, 'value': value }
metadata['items'].append(entry)
seen.append(key)
config['metadata'] = metadata
return config
|
setconfig will update a lookup to give priority based on the following: 1. If both values are None, we set the value to None. 2. If the currently set (the config.json) is set but not runtime, use config. 3. If the runtime is set but not config.json, we use runtime. 4. If both are set, we use runtime.
|
def setconfig(lookup, key, value=None):
'''setconfig will update a lookup to give priority based on the following:
1. If both values are None, we set the value to None
2. If the currently set (the config.json) is set but not runtime, use config
3. If the runtime is set but not config.json, we use runtime
4. If both are set, we use runtime
'''
lookup[key] = value or lookup.get(key)
return lookup
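# A minimal, self-contained demonstration of the four cases above:
lookup = {'SREGISTRY_USER_BRANCH': 'master'}
setconfig(lookup, 'SREGISTRY_USER_BRANCH', 'devel')  # both set: runtime wins -> 'devel'
setconfig(lookup, 'SREGISTRY_USER_BRANCH', None)     # only config set: kept -> 'devel'
setconfig(lookup, 'SREGISTRY_USER_TAG', 'latest')    # only runtime set -> 'latest'
setconfig(lookup, 'SINGULARITY_COMMIT')              # neither set -> None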
|
run a build, meaning inserting an instance. Retry if there is failure.
|
def run_build(self, config):
'''run a build, meaning inserting an instance. Retry if there is failure
Parameters
==========
config: the configuration dictionary generated by setup_build
'''
project = self._get_project()
zone = self._get_zone()
bot.custom(prefix='INSTANCE', message=config['name'], color="CYAN")
bot.info(config['description'])
response = self._compute_service.instances().insert(project=project,
zone=zone,
body=config).execute()
# Direct the user to the web portal with log
ipaddress = self._get_ipaddress(config['name'])
bot.info('Robot Logger: http://%s' %ipaddress)
bot.info('Allow a few minutes for web server install, beepboop!')
return response
|
a "show all" search that doesn't require a query.
|
def search_all(self):
'''a "show all" search that doesn't require a query'''
results = set()
# Here we get names of collections, and then look up containers
for container in self.conn.get_account()[1]:
# The result here is just the name
for result in self.conn.get_container(container['name'])[1]:
results.add('%s/%s' %(container['name'], result['name']))
if len(results) == 0:
bot.info("No container collections found.")
sys.exit(1)
bot.info("Collections")
bot.table([[x] for x in list(results)])
return list(results)
|
search for a specific container. This function would likely be similar to the above, but have different filter criteria from the user (based on the query).
|
def container_query(self, query):
'''search for a specific container.
This function would likely be similar to the above, but have different
filter criteria from the user (based on the query)
'''
results = set()
query = remove_uri(query)
# Here we get names of collections, and then look up containers
for container in self.conn.get_account()[1]:
# The result here is just the name
for result in self.conn.get_container(container['name'])[1]:
name = '%s/%s' % (container['name'], result['name'])
if query in name:
results.add(name)
if len(results) == 0:
bot.info("No container collections found.")
sys.exit(1)
bot.info("Collections")
bot.table([[x] for x in list(results)])
return list(results)
|
return a list of containers, determined by finding the metadata field "type" with value "container". We alert the user if no containers are found.
|
def list_containers(self):
'''return a list of containers, determined by finding the metadata field
"type" with value "container." We alert the user to no containers
if results is empty, and exit
{'metadata': {'items':
[
{'key': 'type', 'value': 'container'}, ...
]
}
}
'''
results = []
for image in self._bucket.list_blobs():
if image.metadata is not None:
if "type" in image.metadata:
if image.metadata['type'] == "container":
results.append(image)
if len(results) == 0:
bot.info("No containers found, based on metadata type:container")
return results
|
a "list all" search that doesn't require a query. Here we return to the user all objects that have custom metadata value of "container".
|
def search_all(self):
'''a "list all" search that doesn't require a query. Here we return to
the user all objects that have custom metadata value of "container"
IMPORTANT: the upload function adds this metadata. For a container to
be found by the client, it must have the type as container in metadata.
'''
results = self._list_containers()
bot.info("[gs://%s] Containers" %self._bucket_name)
rows = []
for i in results:
size = round(i.size / (1024*1024.0))
size = ("%s MB" %size).rjust(10)
rows.append([size, i.metadata['name']])
bot.table(rows)
return rows
|
search for a specific container. This function would likely be similar to the above, but have different filter criteria from the user (based on the query).
|
def container_query(self, query, quiet=False):
'''search for a specific container.
This function would likely be similar to the above, but have different
filter criteria from the user (based on the query)
'''
results = self._list_containers()
matches = []
for result in results:
for key,val in result.metadata.items():
if query in val and result not in matches:
matches.append(result)
if not quiet:
bot.info("[gs://%s] Found %s containers" %(self._bucket_name,len(matches)))
for image in matches:
size = round(image.size / (1024*1024.0))
bot.custom(prefix=image.name, color="CYAN")
bot.custom(prefix='id: ', message=image.id)
bot.custom(prefix='uri: ', message=image.metadata['name'])
bot.custom(prefix='updated:', message=image.updated)
bot.custom(prefix='size: ', message=' %s MB' %(size))
bot.custom(prefix='md5: ', message=image.md5_hash)
if "public_url" in image.metadata:
public_url = image.metadata['public_url']
bot.custom(prefix='url: ', message=public_url)
bot.newline()
return matches
|
a "show all" search that doesn't require a query.
|
def search_all(self):
'''a "show all" search that doesn't require a query'''
results = []
# Parse through folders (collections):
for entry in self.dbx.files_list_folder('').entries:
# Parse through containers
for item in self.dbx.files_list_folder(entry.path_lower).entries:
name = item.name.replace('.simg','')
results.append([ "%s/%s" % (entry.name, name) ])
if len(results) == 0:
bot.info("No container collections found.")
sys.exit(1)
bot.info("Collections")
bot.table(results)
return results
|
the list command corresponds with listing images for an external resource. This is different from listing images that are local to the database, which should be done with "images".
|
def main(args,parser,subparser):
'''the list command corresponds with listing images for an external
resource. This is different from listing images that are local to the
database, which should be done with "images"
'''
from sregistry.main import get_client
cli = get_client(quiet=args.quiet)
for query in args.query:
if query in ['','*']:
query = None
cli.ls(query=query)
|
pull an image from google storage, based on the identifier. Parameters: images, the uri given by the user to pull, in the format <collection>/<namespace> (you should have an API that is able to retrieve a container based on parsing this uri); file_name, the user's requested name for the file (optionally None if the user wants a default); save, if True, save the container to the database using self.add(). Returns finished: a single container path, or list of paths.
|
def pull(self, images, file_name=None, save=True, **kwargs):
'''pull an image from google storage, based on the identifier
Parameters
==========
images: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
'''
if not isinstance(images,list):
images = [images]
bot.debug('Execution of PULL for %s images' %len(images))
# If used internally we want to return a list to the user.
finished = []
for image in images:
q = parse_image_name(remove_uri(image))
# Use container search to find the container based on uri
bot.info('Searching for %s in gs://%s' %(q['tag_uri'],self._bucket_name))
matches = self._container_query(q['tag_uri'], quiet=True)
if len(matches) == 0:
bot.info('No matching containers found.')
sys.exit(0)
# If the user didn't provide a file, make one based on the names
if file_name is None:
file_name = q['storage_version'].replace('/','-')
# We give the first match, the uri should be unique and known
image = matches[0]
image_file = self.download(url=image.media_link,
file_name=file_name,
show_progress=True)
# If the user is saving to local storage, we need to assemble the uri
# here in the expected format <collection>/<namespace>:<tag>@<version>
if save is True:
image_uri = q['tag_uri']
# Update metadata with selfLink
metadata = image.metadata
# Rename public URL to URL so it's found by add client
if "public_url" in metadata:
metadata['url'] = metadata['public_url']
metadata['selfLink'] = image.self_link
container = self.add(image_path=image_file,
image_uri=image_uri,
metadata=metadata,
url=image.media_link)
# When the container is created, this is the path to the image
image_file = container.image
if os.path.exists(image_file):
bot.debug('Retrieved image file %s' % image_file)
bot.custom(prefix="Success!", message=image_file)
finished.append(image_file)
if len(finished) == 1:
finished = finished[0]
return finished
|
sharing an image means sending a remote share from an image you control to a contact, usually an email.
|
def main(args, parser, subparser):
'''sharing an image means sending a remote share from an image you
control to a contact, usually an email.
'''
from sregistry.main import get_client
images = args.image
if not isinstance(images,list):
images = [images]
for image in images:
print(image)
# Detect any uri, and refresh client if necessary
cli = get_client(image, quiet=args.quiet)
cli.announce(args.command)
cli.share(image, share_to=args.share_to)
|
initialize the database, with the default database path or a custom one of the format sqlite:////scif/data/expfactory.db.
|
def init_db(self, db_path):
'''initialize the database, with the default database path or custom of
the format sqlite:////scif/data/expfactory.db
The custom path can be set with the environment variable SREGISTRY_DATABASE
when a user creates the client, we must initialize this db
the database should use the .singularity cache folder to cache
layers and images, and .singularity/sregistry.db as a database
'''
# Database Setup, use default if uri not provided
self.database = 'sqlite:///%s' % db_path
self.storage = SREGISTRY_STORAGE
bot.debug("Database located at %s" % self.database)
self.engine = create_engine(self.database, convert_unicode=True)
self.session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=self.engine))
Base.query = self.session.query_property()
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
Base.metadata.create_all(bind=self.engine)
self.Base = Base
|
generate a uri on the fly from database parameters if one is not saved with the initial model (it should be, but might not be possible).
|
def get_uri(self):
'''generate a uri on the fly from database parameters if one is not
saved with the initial model (it should be, but might not be possible)
'''
uri = "%s/%s:%s" %(self.collection.name, self.name, self.tag)
if self.version not in [None,'']:
uri = "%s@%s" %(uri, self.version)
return uri
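# Example sketch (hypothetical values): collection "vsoch", name
# "hello-world", tag "latest", version "abc123" yields:
# vsoch/hello-world:latest@abc123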
|
get default build template.
|
def get_build_template():
'''get default build template.
'''
base = get_installdir()
name = "%s/main/templates/build/singularity-cloudbuild.json" % base
if os.path.exists(name):
bot.debug("Found template %s" %name)
return read_json(name)
bot.warning("Template %s not found." % name)
|
query will show images determined by the extension of img or simg.
|
def search(self, query=None, args=None):
'''query will show images determined by the extension of img
or simg.
Parameters
==========
query: the container name (path) or uri to search for
args.endpoint: can be an endpoint id and optional path, e.g.:
--endpoint 6881ae2e-db26-11e5-9772-22000b9da45e:.singularity
--endpoint 6881ae2e-db26-11e5-9772-22000b9da45e
if not defined, we show the user endpoints to choose from
Usage
=====
If endpoint is defined with a query, then we search the given endpoint
for a container of interest (designated by ending in .img or .simg).
If no endpoint is provided but instead just a query, we use the query
to search endpoints.
'''
# No query is defined
if query is None:
# Option 1: No query or endpoints lists all shared and personal
if args.endpoint is None:
bot.info('Listing shared endpoints. Add query to expand search.')
return self._list_endpoints()
# Option 2: An endpoint without query will just list containers there
else:
return self._list_endpoint(args.endpoint)
# Option 3: A query without an endpoint will search endpoints for it
if args.endpoint is None:
bot.info('You must specify an endpoint id to query!')
return self._list_endpoints(query)
# Option 4: A query with an endpoint will search the endpoint for pattern
return self._list_endpoint(endpoint=args.endpoint,
query=query)
|
list all endpoints, providing a list of endpoints to the user to better filter the search. This function takes no arguments, as the user has not provided an endpoint id or query.
|
def list_endpoints(self, query=None):
'''list all endpoints, providing a list of endpoints to the user to
better filter the search. This function takes no arguments,
as the user has not provided an endpoint id or query.
'''
bot.info('Please select an endpoint id to query from')
endpoints = self._get_endpoints(query)
# Iterate through endpoints to provide user a list
bot.custom(prefix="Globus", message="Endpoints", color="CYAN")
rows = []
for kind,eps in endpoints.items():
for epid,epmeta in eps.items():
rows.append([epid, '[%s]' %kind, epmeta['name']])
bot.table(rows)
return rows
|
An endpoint is required here to list files within. Optionally, we can take a path relative to the endpoint root.
|
def list_endpoint(self, endpoint, query=None):
'''An endpoint is required here to list files within. Optionally, we can
take a path relative to the endpoint root.
Parameters
==========
endpoint: a single endpoint ID or an endpoint id and relative path.
If no path is provided, we use '', which defaults to scratch.
query: if defined, limit files to those that have query match
'''
if not hasattr(self, 'transfer_client'):
self._init_transfer_client()
# Separate endpoint id from the desired path
endpoint, path = self._parse_endpoint_name(endpoint)
# Get a list of files at endpoint, under specific path
try:
result = self.transfer_client.operation_ls(endpoint, path=path)
except TransferAPIError as err:
# Tell the user what went wrong!
bot.custom(prefix='ERROR', message=err, color='RED')
sys.exit(1)
rows = []
for filey in result:
# Highlight container contenders with purple
name = filey['name']
if query is None or query in name:
if name.endswith('img'):
name = bot.addColor('PURPLE',name)
rows.append([filey['type'],
filey['permissions'],
str(filey['size']),
name ])
if len(rows) > 0:
rows = [["type","[perm]","[size]","[name]"]] + rows
bot.custom(prefix="Endpoint Listing %s" %path, message='', color="CYAN")
bot.table(rows)
else:
bot.info('No content was found at the selected endpoint.')
return rows
|
share will use the client to get a shareable link for an image of choice. The function returns a url to send to a recipient.
|
def share(self, query, share_to=None):
'''share will use the client to get a shareable link for an image of choice.
The function returns a url to send to a recipient.
'''
names = parse_image_name(remove_uri(query))
# Dropbox path is the path in storage with a slash
dropbox_path = '/%s' % names['storage']
# First ensure that exists
if self.exists(dropbox_path) is True:
# Create new shared link
try:
share = self.dbx.sharing_create_shared_link_with_settings(dropbox_path)
# Already exists!
except ApiError as err:
share = self.dbx.sharing_create_shared_link(dropbox_path)
bot.info(share.url)
return share.url
|
set the API base or default to use Docker Hub. The user is able to set the base, api version, and protocol via a settings file or environment variables: SREGISTRY_NVIDIA_BASE (defaults to nvcr.io), SREGISTRY_NVIDIA_TOKEN (defaults to $oauthtoken), SREGISTRY_NVIDIA_VERSION (defaults to v2), SREGISTRY_NVIDIA_NOHTTPS (defaults to not set, so https).
|
def _set_base(self):
'''set the API base or default to use Docker Hub. The user is able
to set the base, api version, and protocol via a settings file
or environment variables:
SREGISTRY_NVIDIA_BASE: defaults to nvcr.io
SREGISTRY_NVIDIA_TOKEN: defaults to $oauthtoken
SREGISTRY_NVIDIA_VERSION: defaults to v2
SREGISTRY_NVIDIA_NOHTTPS: defaults to not set (so https)
'''
base = self._get_setting('SREGISTRY_NVIDIA_BASE')
version = self._get_setting('SREGISTRY_NVIDIA_VERSION')
if base is None:
base = "nvcr.io"
if version is None:
version = "v2"
nohttps = self._get_setting('SREGISTRY_NVIDIA_NOHTTPS')
if nohttps is None:
nohttps = "https://"
else:
nohttps = "http://"
# <protocol>://<base>/<version>
self.base = "%s%s/%s" %(nohttps, base.strip('/'), version)
|
update secrets will take a secrets credential file, either located at .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS, and update the current client secrets as well as the associated API base. For the case of using Docker Hub, if we find a .docker secrets file, we update from there.
|
def _update_secrets(self):
'''update secrets will take a secrets credential file
either located at .sregistry or the environment variable
SREGISTRY_CLIENT_SECRETS and update the current client
secrets as well as the associated API base. For the case of
using Docker Hub, if we find a .docker secrets file, we update
from there.
'''
# If the user has defined secrets, use them
token = self._required_get_and_update('SREGISTRY_NVIDIA_TOKEN')
username = self._get_and_update_setting('SREGISTRY_NVIDIA_USERNAME')
if username is None:
username = "$oauthtoken"
# Option 1: the user exports username and password
if token is not None:
auth = basic_auth_header(username, token)
self.headers.update(auth)
|
pull an image from a Globus endpoint. The user must have the default local endpoint set up; an example is shown in the docstring below.
|
def pull(self, images, file_name=None, save=True, **kwargs):
'''pull an image from a Globus endpoint. The user must have the default
local endpoint set up. For example:
6881ae2e-db26-11e5-9772-22000b9da45e:.singularity/shub/sherlock_vep.simg
Parameters
==========
images: refers to the globus endpoint id and image path.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add(). For globus this is the only option, and
we don't have control over when this happens.
Returns
=======
finished: a single container path, or list of paths
'''
# Ensure we have a transfer client
if not hasattr(self, 'transfer_client'):
self._init_transfer_client()
if not isinstance(images,list):
images = [images]
bot.debug('Execution of PULL for %s images' %len(images))
finished = []
for image in images:
# Split the name into endpoint and rest
endpoint, remote = self._parse_endpoint_name(image)
source = self.transfer_client.get_endpoint(endpoint)
name = os.path.basename(remote)
q = parse_image_name(name, default_collection=source['name'])
# The user must have a personal endpoint
endpoints = self._get_endpoints()
if len(endpoints['my-endpoints']) == 0:
bot.error('You must have a personal endpoint to transfer the container')
sys.exit(1)
# Take the first endpoint that is active
dest = None
for eid,contender in endpoints['my-endpoints'].items():
if contender['gcp_connected'] is True:
dest = contender
break
# Exit if none are active, required!
if dest is None:
bot.error('No activated local endpoints online! Start one to transfer.')
sys.exit(1)
# We need to know the full path of the endpoint
base = self._get_endpoint_path(dest['id'])
storage_folder = '%s/%s' %(base, q['collection'])
self._create_endpoint_folder(dest['id'], storage_folder)
label = "Singularity Registry Pull"
tdata = globus_sdk.TransferData(self.transfer_client,
source['id'],
dest['id'],
label=label,
sync_level="checksum")
image = os.path.join(base, q['storage'])
tdata.add_item(remote, image)
bot.info('Requesting transfer to %s' %q['storage'])
transfer_result = self.transfer_client.submit_transfer(tdata)
bot.info(transfer_result['message'])
finished.append(transfer_result)
if len(finished) == 1:
finished = finished[0]
return finished
|
if the user has specified settings to provide a cache for credentials files, initialize it. The root for the folder is created if it doesn't exist. The path for the specific client is returned, and it's not assumed to be either a folder or a file (this is up to the developer of the client).
|
def get_credential_cache():
'''if the user has specified settings to provide a cache for credentials
files, initialize it. The root for the folder is created if it doesn't
exist. The path for the specific client is returned, and it's
not assumed to be either a folder or a file (this is up to the
developer of the client).
'''
from sregistry.defaults import ( CREDENTIAL_CACHE, SREGISTRY_CLIENT )
client_credential_cache = None
# Check 1: user can disable a credential cache on the client level
if CREDENTIAL_CACHE is not None:
env = 'SREGISTRY_DISABLE_CREDENTIAL_%s' %SREGISTRY_CLIENT.upper()
if os.environ.get(env) is not None:
bot.debug('[%s] cache disabled' %SREGISTRY_CLIENT)
CREDENTIAL_CACHE = None
# Check 2: create the cache folder for the client if it doesn't exist
if CREDENTIAL_CACHE is not None:
if not os.path.exists(CREDENTIAL_CACHE):
mkdir_p(CREDENTIAL_CACHE)
client_credential_cache = '%s/%s' %(CREDENTIAL_CACHE, SREGISTRY_CLIENT)
if client_credential_cache is not None:
bot.debug('credentials cache')
return client_credential_cache
|
update client secrets will update the data structure for a particular authentication. This should only be used for a (quasi permanent) token or similar. The secrets file, if found, is updated and saved by default.
|
def update_client_secrets(backend, updates, secrets=None, save=True):
'''update client secrets will update the data structure for a particular
authentication. This should only be used for a (quasi permanent) token
or similar. The secrets file, if found, is updated and saved by default.
'''
if secrets is None:
secrets = read_client_secrets()
if backend not in secrets:
secrets[backend] = {}
secrets[backend].update(updates)
# The update typically includes a save
if save is True:
secrets_file = get_secrets_file()
if secrets_file is not None:
write_json(secrets,secrets_file)
return secrets
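# Usage sketch (hypothetical backend and token): cache a token for a
# backend and write it back to the secrets file in one call:
# update_client_secrets(backend='gitlab',
#                       updates={'SREGISTRY_GITLAB_TOKEN': 'xxxxx'})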
|
for private or protected registries, a client secrets file is required to be located at .sregistry. If no secrets are found, we use the default of Singularity Hub, and return dummy secrets.
|
def read_client_secrets():
'''for private or protected registries, a client secrets file is required
to be located at .sregistry. If no secrets are found, we use default
of Singularity Hub, and return a dummy secrets.
'''
client_secrets = _default_client_secrets()
# If token file not provided, check environment
secrets = get_secrets_file()
# If exists, load
if secrets is not None:
client_secrets = read_json(secrets)
# Otherwise, initialize
else:
from sregistry.defaults import SREGISTRY_CLIENT_SECRETS
write_json(client_secrets, SREGISTRY_CLIENT_SECRETS)
return client_secrets
|
init client will check if the user has defined a bucket that differs from the default, use the application credentials to get the bucket, and then instantiate the client.
|
def _init_client(self):
'''init client will check if the user has defined a bucket that
differs from the default, use the application credentials to
get the bucket, and then instantiate the client.
'''
# Get storage and compute services
self._get_services()
env = 'SREGISTRY_GOOGLE_STORAGE_BUCKET'
self._bucket_name = self._get_and_update_setting(env)
# If the user didn't set in environment, use default
if self._bucket_name is None:
self._bucket_name = 'sregistry-%s' %os.environ['USER']
self._get_bucket()
|
get version 1 of the google compute and storage service
|
def _get_services(self, version='v1'):
'''get version 1 of the google compute and storage service
Parameters
==========
version: version to use (default is v1)
'''
self._bucket_service = storage.Client()
creds = GoogleCredentials.get_application_default()
self._storage_service = discovery_build('storage', version, credentials=creds)
self._compute_service = discovery_build('compute', version, credentials=creds)
|
get a bucket based on a bucket name. If it doesn't exist, create it.
|
def _get_bucket(self):
'''get a bucket based on a bucket name. If it doesn't exist, create it.
'''
# Case 1: The bucket already exists
try:
self._bucket = self._bucket_service.get_bucket(self._bucket_name)
# Case 2: The bucket needs to be created
except google.cloud.exceptions.NotFound:
self._bucket = self._bucket_service.create_bucket(self._bucket_name)
# Case 3: The bucket name is already taken
except:
bot.error('Cannot get or create %s' %self._bucket_name)
sys.exit(1)
return self._bucket
|
delete object will delete a file from a bucket
|
def delete_object(service, bucket_name, object_name):
'''delete object will delete a file from a bucket
Parameters
==========
storage_service: the service obtained with get_storage_service
bucket_name: the name of the bucket
object_name: the "name" parameter of the object.
'''
try:
operation = service.objects().delete(bucket=bucket_name,
object=object_name).execute()
except HttpError as e:
pass
operation = e
return operation
|
delete an image from Google Storage.
|
def delete(self, name):
'''delete an image from Google Storage.
Parameters
==========
name: the name of the file (or image) to delete
'''
bot.debug("DELETE %s" % name)
for file_object in files:
if isinstance(file_object, dict):
if "kind" in file_object:
if file_object['kind'] == "storage#object":
object_name = "/".join(file_object['id'].split('/')[:-1])
object_name = re.sub('%s/' %self._bucket['name'],'', object_name,1)
delete_object(service=self._bucket_service,
bucket_name=bucket['name'],
object_name=object_name)
|
destroy an instance, meaning take down the instance and stop the build.
|
def destroy(self, name):
'''destroy an instance, meaning take down the instance and stop the build.
Parameters
==========
name: the name of the instance to stop building.
'''
instances = self._get_instances()
project = self._get_project()
zone = self._get_zone()
found = False
if 'items' in instances:
for instance in instances['items']:
if instance['name'] == name:
found = True
break
if found:
bot.info('Killing instance %s' %name)
return self._compute_service.instances().delete(project=project,
zone=zone,
instance=name).execute()
|
get_subparsers will get a dictionary of subparsers, to help with printing help.
|
def get_subparsers(parser):
'''get_subparsers will get a dictionary of subparsers, to help with printing help
'''
actions = [action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
subparsers = dict()
for action in actions:
# get all subparsers and print help
for choice, subparser in action.choices.items():
subparsers[choice] = subparser
return subparsers
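# A self-contained sketch with a toy parser (command names here are
# illustrative, not the real sregistry commands):
import argparse

toy = argparse.ArgumentParser(prog='toy')
subs = toy.add_subparsers(dest='command')
subs.add_parser('pull')
subs.add_parser('push')
print(sorted(get_subparsers(toy)))  # -> ['pull', 'push']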
|
main is the entrypoint to the sregistry client. The flow works first to determine the subparser in use based on the command. The command then imports the correct main (files imported in this folder) associated with the action of choice. When the client is imported, it is actually importing a return of the function get_client() under sregistry/main, which plays the job of "sniffing" the environment to determine what flavor of client the user wants to activate. Installed within a singularity image, this startup style maps well to Standard Container Integration Format (SCIF) apps, where each client is a different entrypoint activated based on the environment variables.
|
def main():
'''main is the entrypoint to the sregistry client. The flow works first
to determine the subparser in use based on the command. The command then
imports the correct main (files imported in this folder) associated with
the action of choice. When the client is imported, it is actually importing
a return of the function get_client() under sregistry/main, which plays
the job of "sniffing" the environment to determine what flavor of client
the user wants to activate. Installed within a singularity image, this
start up style maps well to Standard Container Integration Format (SCIF)
apps, where each client is a different entrypoint activated based on the
environment variables.
'''
from sregistry.main import Client as cli
parser = get_parser()
subparsers = get_subparsers(parser)
def help(return_code=0):
'''print help, including the software version and active client
and exit with return code.
'''
version = sregistry.__version__
name = cli.client_name
print("\nSingularity Registry Global Client v%s [%s]" %(version, name))
parser.print_help()
sys.exit(return_code)
# If the user didn't provide any arguments, show the full help
if len(sys.argv) == 1:
help()
try:
args = parser.parse_args()
except:
sys.exit(0)
if args.debug is True:
os.environ['MESSAGELEVEL'] = "DEBUG"
# Show the version and exit
if args.command == "version":
print(sregistry.__version__)
sys.exit(0)
from sregistry.logger import bot
# Does the user want a shell?
if args.command == "add": from .add import main
elif args.command == "backend": from .backend import main
elif args.command == "build": from .build import main
elif args.command == "get": from .get import main
elif args.command == "delete": from .delete import main
elif args.command == "inspect": from .inspect import main
elif args.command == "images": from .images import main
elif args.command == "labels": from .labels import main
elif args.command == "mv": from .mv import main
elif args.command == "push": from .push import main
elif args.command == "pull": from .pull import main
elif args.command == "rename": from .rename import main
elif args.command == "rm": from .rm import main
elif args.command == "rmi": from .rmi import main
elif args.command == "search": from .search import main
elif args.command == "share": from .share import main
elif args.command == "shell": from .shell import main
# Pass on to the correct parser
return_code = 0
try:
main(args=args,
parser=parser,
subparser=subparsers[args.command])
sys.exit(return_code)
except UnboundLocalError:
# An unrecognized command means "main" was never imported
return_code = 1
help(return_code)
|
Generate a robot name. Inspiration from Haikunator, but much more poorly implemented ;)
|
def generate(self, delim='-', length=4, chars='0123456789'):
'''
Generate a robot name. Inspiration from Haikunator, but much more
poorly implemented ;)
Parameters
==========
delim: Delimiter
length: TokenLength
chars: TokenChars
'''
descriptor = self._select(self._descriptors)
noun = self._select(self._nouns)
numbers = ''.join((self._select(chars) for _ in range(length)))
return delim.join([descriptor, noun, numbers])
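# Usage sketch (output is random; the names below are illustrative only):
# RobotNamer().generate()          # -> e.g. 'doopy-noodle-1234'
# RobotNamer().generate('.', 2)    # -> e.g. 'fuzzy.pancake.42'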
|
mkdir_p attempts to get the same functionality as mkdir -p. :param path: the path to create.
|
def mkdir_p(path):
'''mkdir_p attempts to get the same functionality as mkdir -p
:param path: the path to create.
'''
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
bot.error("Error creating path %s, exiting." % path)
sys.exit(1)
|
get a temporary file with an optional prefix. By default it will be created in /tmp unless SREGISTRY_TMPDIR is set. By default, the file is closed (and just a name returned).
|
def get_tmpfile(requested_tmpdir=None, prefix=""):
'''get a temporary file with an optional prefix. By default will be
created in /tmp unless SREGISTRY_TMPDIR is set. By default, the file
is closed (and just a name returned).
Parameters
==========
requested_tmpdir: an optional requested temporary directory, first
priority as is coming from calling function.
prefix: Given a need for a sandbox (or similar), prefix the file
with this string.
'''
# First priority for the base goes to the user requested.
tmpdir = get_tmpdir(requested_tmpdir)
# If tmpdir is set, add to prefix
if tmpdir is not None:
prefix = os.path.join(tmpdir, os.path.basename(prefix))
fd, tmp_file = tempfile.mkstemp(prefix=prefix)
os.close(fd)
return tmp_file
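# Usage sketch: each call returns a unique, already-closed file path under
# a fresh temporary folder (from the get_tmpdir helper shown next).
# get_tmpfile()                   # -> e.g. '/tmp/sregistry-tmp.XXXX/XXXX'
# get_tmpfile(prefix='sandbox-')  # -> e.g. '/tmp/sregistry-tmp.XXXX/sandbox-XXXX'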
|
get a temporary directory for an operation. If SREGISTRY_TMPDIR is set, return that. Otherwise, return the output of tempfile.mkdtemp.
|
def get_tmpdir(requested_tmpdir=None, prefix="", create=True):
'''get a temporary directory for an operation. If SREGISTRY_TMPDIR
is set, return that. Otherwise, return the output of tempfile.mkdtemp
Parameters
==========
requested_tmpdir: an optional requested temporary directory, first
priority as is coming from calling function.
prefix: Given a need for a sandbox (or similar), we will need to
create a subfolder *within* the SREGISTRY_TMPDIR.
create: boolean to determine if we should create folder (True)
'''
from sregistry.defaults import SREGISTRY_TMPDIR
# First priority for the base goes to the user requested.
tmpdir = requested_tmpdir or SREGISTRY_TMPDIR
prefix = prefix or "sregistry-tmp"
prefix = "%s.%s" %(prefix, next(tempfile._get_candidate_names()))
tmpdir = os.path.join(tmpdir, prefix)
if not os.path.exists(tmpdir) and create is True:
os.mkdir(tmpdir)
return tmpdir
|
extract a tar archive to a specified output folder
|
def extract_tar(archive, output_folder, handle_whiteout=False):
'''extract a tar archive to a specified output folder
Parameters
==========
archive: the archive file to extract
output_folder: the output folder to extract to
handle_whiteout: use docker2oci variation to handle whiteout files
'''
from .terminal import run_command
# Do we want to remove whiteout files?
if handle_whiteout is True:
return _extract_tar(archive, output_folder)
# If extension is .tar.gz, use -xzf
args = '-xf'
if archive.endswith(".tar.gz"):
args = '-xzf'
# Just use command line, more succinct.
command = ["tar", args, archive, "-C", output_folder, "--exclude=dev/*"]
if not bot.is_quiet():
print("Extracting %s" % archive)
return run_command(command)
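# Usage sketch (paths illustrative): the tar flag is chosen by extension.
# extract_tar('/tmp/layer.tar.gz', '/tmp/sandbox')  # runs tar -xzf
# extract_tar('/tmp/layer.tar', '/tmp/sandbox')     # runs tar -xf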
|
use blob2oci to handle whiteout files for extraction. Credit for this script goes to docker2oci by Olivier Freyermouth; see the script folder for license.
|
def _extract_tar(archive, output_folder):
'''use blob2oci to handle whiteout files for extraction. Credit for this
script goes to docker2oci by Olivier Freyermouth, and see script
folder for license.
Parameters
==========
archive: the archive to extract
output_folder: the output folder (sandbox) to extract to
'''
from .terminal import ( run_command, which )
result = which('blob2oci')
if result['return_code'] != 0:
bot.error('Cannot find blob2oci script on path, exiting.')
sys.exit(1)
script = result['message']
command = ['exec', script, '--layer', archive, '--extract', output_folder]
if not bot.is_quiet():
print("Extracting %s" % archive)
return run_command(command)
|
create_tar will take a list of files (each a dictionary with name, permission, and content) and write the tarfile (a sha256 sum name is used) to the output_folder. If there is no output folder specified, the tar is written to a temporary folder.
|
def create_tar(files, output_folder=None):
'''create_tar will take a list of files (each a dictionary
with name, permission, and content) and write the tarfile
(a sha256 sum name is used) to the output_folder.
If there is no output folder specified, the
tar is written to a temporary folder.
'''
if output_folder is None:
output_folder = tempfile.mkdtemp()
finished_tar = None
additions = []
contents = []
for entity in files:
info = tarfile.TarInfo(name=entity['name'])
info.mode = entity['mode']
info.mtime = int(datetime.datetime.now().strftime('%s'))
info.uid = entity["uid"]
info.gid = entity["gid"]
info.uname = entity["uname"]
info.gname = entity["gname"]
        # Encode the content so the recorded size matches the bytes
        # actually written to the archive (a character count from
        # StringIO would be wrong for non-ascii content)
        content = entity['content']
        if not isinstance(content, bytes):
            content = content.encode('utf8')
        info.size = len(content)
        content = io.BytesIO(content)
if content is not None:
addition = {'content': content,
'info': info}
additions.append(addition)
contents.append(content)
# Now generate the sha256 name based on content
if len(additions) > 0:
hashy = get_content_hash(contents)
finished_tar = "%s/sha256:%s.tar.gz" % (output_folder, hashy)
# Warn the user if it already exists
if os.path.exists(finished_tar):
msg = "metadata file %s already exists " % finished_tar
msg += "will over-write."
bot.debug(msg)
# Add all content objects to file
tar = tarfile.open(finished_tar, "w:gz")
for a in additions:
tar.addfile(a["info"], a["content"])
tar.close()
else:
msg = "No contents, environment or labels"
msg += " for tarfile, will not generate."
bot.debug(msg)
return finished_tar
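A sketch of the entity structure create_tar expects; all values below are illustrative only:

files = [{'name': 'labels.json',                    # path inside the archive
          'content': '{"maintainer": "dinosaur"}',  # string (or bytes) payload
          'mode': 0o644,
          'uid': 0, 'gid': 0,
          'uname': 'root', 'gname': 'root'}]

# Writes <output_folder>/sha256:<hash>.tar.gz and returns its path
archive = create_tar(files, output_folder='/tmp')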
|
get_content_hash will return a hash for a list of content (bytes/other).
|
def get_content_hash(contents):
'''get_content_hash will return a hash for a list of content (bytes/other)
'''
hasher = hashlib.sha256()
for content in contents:
if isinstance(content, io.BytesIO):
content = content.getvalue()
if not isinstance(content, bytes):
content = bytes(content)
hasher.update(content)
return hasher.hexdigest()
|
find the SHA256 hash string of a file
|
def get_file_hash(filename):
'''find the SHA256 hash string of a file
'''
hasher = hashlib.sha256()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest()
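Both helpers return a sha256 hexdigest; a quick sketch (the file path is hypothetical):

# Hash in-memory content (bytes or io.BytesIO objects both work)
digest = get_content_hash([b'hello', b'world'])

# Hash a file on disk, streamed in 4096-byte chunks
digest = get_file_hash('/tmp/container.sif')
print('sha256:%s' % digest)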
|
write_json will (optionally) pretty print a json object to file.
|
def write_json(json_obj, filename, mode="w", print_pretty=True):
    '''write_json will (optionally) pretty print a json object to file
    Parameters
    ==========
    json_obj: the dict to print to json
    filename: the output file to write to
    print_pretty: if True, will use nicer formatting
'''
with open(filename, mode) as filey:
if print_pretty:
filey.writelines(print_json(json_obj))
else:
filey.writelines(json.dumps(json_obj))
return filename
|
read_file will open a file, "filename", read its content, and properly close the file.
|
def read_file(filename, mode="r", readlines=True):
    '''read_file will open a file, "filename", read its content,
    and properly close the file
'''
with open(filename, mode) as filey:
if readlines is True:
content = filey.readlines()
else:
content = filey.read()
return content
|
read_json reads in a json file and returns the data structure as dict.
|
def read_json(filename, mode='r'):
'''read_json reads in a json file and returns
the data structure as dict.
'''
with open(filename, mode) as filey:
data = json.load(filey)
return data
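A round-trip sketch using the three helpers above (the path is hypothetical):

write_json({'name': 'vsoch/hello-world'}, '/tmp/meta.json')
data = read_json('/tmp/meta.json')    # {'name': 'vsoch/hello-world'}
lines = read_file('/tmp/meta.json')   # list of lines (readlines=True is the default)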
|
clean up will delete a list of files only if they exist
|
def clean_up(files):
'''clean up will delete a list of files, only if they exist
'''
if not isinstance(files, list):
files = [files]
for f in files:
if os.path.exists(f):
bot.verbose3("Cleaning up %s" % f)
os.remove(f)
|
pull an image from a Docker registry. This is a (less than ideal) workaround that actually does the following:
|
def pull(self, images, file_name=None, save=True, force=False, **kwargs):
    '''pull an image from a Docker registry. This is a (less than ideal) workaround
that actually does the following:
- creates a sandbox folder
- adds docker layers, metadata folder, and custom metadata to it
- converts to a squashfs image with build
the docker manifests are stored with registry metadata.
Parameters
==========
images: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
'''
if not isinstance(images,list):
images = [images]
bot.debug('Execution of PULL for %s images' %len(images))
# If used internally we want to return a list to the user.
finished = []
for image in images:
q = parse_image_name( remove_uri(image),
default_collection='aws' )
image_file = self._pull(file_name=file_name,
save=save,
force=force,
names=q,
kwargs=kwargs)
finished.append(image_file)
if len(finished) == 1:
finished = finished[0]
return finished
|
pull an image from a singularity registry. images: refers to the uri given by the user to pull in the format <collection>/<namespace>; you should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file; it can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add(). Returns finished: a single container path, or list of paths.
|
def pull(self, images, file_name=None, save=True, **kwargs):
'''pull an image from a singularity registry
Parameters
==========
images: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
'''
# Here we take an entire list or a single image by ensuring we have a list
# This makes the client flexible to command line or internal function use,
# for one or more images.
if not isinstance(images,list):
images = [images]
bot.debug('Execution of PULL for %s images' %len(images))
# If used internally we want to return a list to the user.
finished = []
for image in images:
q = parse_image_name(remove_uri(image))
# Verify image existence, and obtain id
url = "..." # write your custom endpoint URL here
bot.debug('Retrieving manifest at %s' %url)
# You can use the client get function to retrieve a url manifest
manifest = self._get(url)
# it's good practice to add the url as a `selfLink`
manifest['selfLink'] = url
# Make sure to parse the response (manifest) in case it's not what
# you expect!
# If the user didn't provide a file, make one based on the names
if file_name is None:
file_name = q['storage'].replace('/','-')
# You can then use the client download function to get the url
# for some image in your manifest. In this example, it's in the `image`
# field and we want to show the progress bar.
image_file = self.download(url=manifest['image'],
file_name=file_name,
show_progress=True)
        # If the user is saving to local storage, you need to assemble the
        # uri here in the expected format <collection>/<namespace>:<tag>@<version>
if save is True:
image_uri = "%s/%s:%s" %(manifest['collection'],
manifest['name'],
manifest['tag'])
# Importantly, the client add function will take the image file, the
# uri, the download link, and any relevant metadata (dictionary)
# for the database
container = self.add(image_path = image_file, # the file path
image_uri = image_uri, # the full uri
image_name = file_name, # a custom name?
metadata = manifest,
url = manifest['image'])
# When the container is created, this is the path to the image
image_file = container.image
if os.path.exists(image_file):
bot.debug('Retrieved image file %s' %image_file)
bot.custom(prefix="Success!", message=image_file)
finished.append(image_file)
if len(finished) == 1:
finished = finished[0]
return finished
|
push an image to an S3 endpoint
|
def push(self, path, name, tag=None):
'''push an image to an S3 endpoint'''
path = os.path.abspath(path)
image = os.path.basename(path)
bot.debug("PUSH %s" % path)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# Extract the metadata
names = parse_image_name(remove_uri(name), tag=tag)
image_size = os.path.getsize(path) >> 20
# Create extra metadata, this is how we identify the image later
# *important* bug in boto3 will return these capitalized
# see https://github.com/boto/boto3/issues/1709
metadata = {'sizemb': "%s" % image_size,
'client': 'sregistry' }
self.bucket.upload_file(path, names['storage_uri'], {"Metadata": metadata })
|
get a collection if it exists. If it doesn't exist, create it first.
|
def get_or_create_collection(self, name):
'''get a collection if it exists. If it doesn't exist, create it first.
Parameters
==========
name: the collection name, usually parsed from get_image_names()['name']
'''
from sregistry.database.models import Collection
collection = self.get_collection(name)
# If it doesn't exist, create it
if collection is None:
collection = Collection(name=name)
self.session.add(collection)
self.session.commit()
return collection
|
get a collection if it exists otherwise return None.
|
def get_collection(self, name):
'''get a collection, if it exists, otherwise return None.
'''
from sregistry.database.models import Collection
return Collection.query.filter(Collection.name == name).first()
|
get a container otherwise return None.
|
def get_container(self, name, collection_id, tag="latest", version=None):
'''get a container, otherwise return None.
'''
from sregistry.database.models import Container
if version is None:
container = Container.query.filter_by(collection_id = collection_id,
name = name,
tag = tag).first()
else:
container = Container.query.filter_by(collection_id = collection_id,
name = name,
tag = tag,
version = version).first()
return container
|
Do a get for a container, and then a collection, and then return None if no result is found. name: should coincide with either the collection name, or the container name with the collection. A query is done first for the collection, and then the container, and the path to the image file returned.
|
def get(self, name, quiet=False):
'''Do a get for a container, and then a collection, and then return None
if no result is found.
Parameters
==========
name: should coincide with either the collection name, or the container
name with the collection. A query is done first for the collection,
and then the container, and the path to the image file returned.
'''
from sregistry.database.models import Collection, Container
names = parse_image_name( remove_uri (name) )
# First look for a collection (required)
collection = self.get_collection(name=names['collection'])
container = None
if collection is not None:
container = self.get_container(collection_id=collection.id,
name=names['image'],
tag=names['tag'],
version=names['version'])
if container is not None and quiet is False:
# The container image file exists [local]
if container.image is not None:
print(container.image)
# The container has a url (but not local file)
elif container.url is not None:
print(container.url)
else:
bot.info('No storage file found for %s' %name)
return container
|
List local images in the database optionally with a query.
|
def images(self, query=None):
'''List local images in the database, optionally with a query.
    Parameters
    ==========
query: a string to search for in the container or collection name|tag|uri
'''
from sregistry.database.models import Collection, Container
rows = []
if query is not None:
like = "%" + query + "%"
containers = Container.query.filter(or_(Container.name == query,
Container.tag.like(like),
Container.uri.like(like),
Container.name.like(like))).all()
else:
containers = Container.query.all()
if len(containers) > 0:
message = " [date] [client]\t[uri]"
bot.custom(prefix='Containers:', message=message, color="RED")
for c in containers:
uri = c.get_uri()
created_at = c.created_at.strftime('%B %d, %Y')
rows.append([created_at, " [%s]" %c.client, uri])
bot.table(rows)
return containers
|
Inspect a local image in the database which typically includes the basic fields in the model.
|
def inspect(self, name):
'''Inspect a local image in the database, which typically includes the
basic fields in the model.
'''
print(name)
container = self.get(name)
if container is not None:
collection = container.collection.name
fields = container.__dict__.copy()
fields['collection'] = collection
fields['metrics'] = json.loads(fields['metrics'])
del fields['_sa_instance_state']
fields['created_at'] = str(fields['created_at'])
print(json.dumps(fields, indent=4, sort_keys=True))
return fields
|
rename performs a move but ensures the path is maintained in storage
|
def rename(self, image_name, path):
'''rename performs a move, but ensures the path is maintained in storage
Parameters
==========
image_name: the image name (uri) to rename to.
path: the name to rename (basename is taken)
'''
container = self.get(image_name, quiet=True)
if container is not None:
if container.image is not None:
# The original directory for the container stays the same
dirname = os.path.dirname(container.image)
# But we derive a new filename and uri
names = parse_image_name( remove_uri (path) )
storage = os.path.join( self.storage,
os.path.dirname(names['storage']) )
# This is the collection folder
if not os.path.exists(storage):
os.mkdir(storage)
# Here we get the new full path, rename the container file
fullpath = os.path.abspath(os.path.join(dirname, names['storage']))
container = self.cp(move_to=fullpath,
container=container,
command="rename")
# On successful rename of file, update the uri
if container is not None:
container.uri = names['uri']
self.session.commit()
return container
bot.warning('%s not found' %(image_name))
|
Move an image from its current location to a new path. Removing the image from organized storage is not the recommended approach; however, it is still a function wanted by some.
|
def mv(self, image_name, path):
    '''Move an image from its current location to a new path.
    Removing the image from organized storage is not the recommended approach;
    however, it is still a function wanted by some.
Parameters
==========
image_name: the parsed image name.
path: the location to move the image to
'''
container = self.get(image_name, quiet=True)
if container is not None:
name = container.uri or container.get_uri()
image = container.image or ''
# Only continue if image file exists
if os.path.exists(image):
# Default assume directory, use image name and path fully
filename = os.path.basename(image)
filedir = os.path.abspath(path)
# If it's a file, use filename provided
if not os.path.isdir(path):
filename = os.path.basename(path)
filedir = os.path.dirname(path)
# If directory is empty, assume $PWD
if filedir == '':
filedir = os.getcwd()
# Copy to the fullpath from the storage
fullpath = os.path.abspath(os.path.join(filedir,filename))
return self.cp(move_to=fullpath,
container=container,
command="move")
bot.warning('%s not found' %(image_name))
|
cp is the shared function between mv (move) and rename; it performs the move and returns the updated container. image_name: an image_uri to look up a container in the database. container: the container object to move (must have a container.image). move_to: the full path to move it to.
|
def cp(self, move_to, image_name=None, container=None, command="copy"):
    '''cp is the shared function between mv (move) and rename, and performs
    the move, and returns the updated container
    Parameters
    ==========
    image_name: an image_uri to look up a container in the database
    container: the container object to move (must have a container.image)
    move_to: the full path to move it to
'''
if container is None and image_name is None:
bot.error('A container or image_name must be provided to %s' %command)
sys.exit(1)
# If a container isn't provided, look for it from image_uri
if container is None:
container = self.get(image_name, quiet=True)
image = container.image or ''
if os.path.exists(image):
filedir = os.path.dirname(move_to)
# If the two are the same, doesn't make sense
if move_to == image:
bot.warning('%s is already the name.' %image)
sys.exit(1)
# Ensure directory exists
if not os.path.exists(filedir):
bot.error('%s does not exist. Ensure exists first.' %filedir)
sys.exit(1)
# Ensure writable for user
if not os.access(filedir, os.W_OK):
bot.error('%s is not writable' %filedir)
sys.exit(1)
original = os.path.basename(image)
try:
shutil.move(image, move_to)
container.image = move_to
self.session.commit()
bot.info('[%s] %s => %s' %(command, original, move_to))
return container
        except Exception:
bot.error('Cannot %s %s to %s' %(command, original, move_to))
sys.exit(1)
bot.warning('''Not found! Please pull %s and then %s to the appropriate
location.''' %(container.uri, command))
|
Remove an image from the database and filesystem.
|
def rmi(self, image_name):
'''Remove an image from the database and filesystem.
'''
container = self.rm(image_name, delete=True)
if container is not None:
bot.info("[rmi] %s" % container)
|
Remove an image from the database, akin to untagging the image. This does not delete the file from the cache, unless delete is set to True (as called by rmi).
|
def rm(self, image_name, delete=False):
'''Remove an image from the database, akin to untagging the image. This
does not delete the file from the cache, unless delete is set to True
(as called by rmi).
'''
container = self.get(image_name)
if container is not None:
name = container.uri or container.get_uri()
image = container.image
self.session.delete(container)
self.session.commit()
if image is not None:
if os.path.exists(image) and delete is True:
os.remove(container.image)
return image
bot.info("[rm] %s" % name)
|
get or create a container, including the collection to add it to. This function can be used from a file on the local system, or via a URL that has been downloaded. Either way, if one of url, version, or image_file is not provided, the model is created without it. If a version is not provided but a file path is, then the file hash is used.
|
def add(self, image_path=None,
image_uri=None,
image_name=None,
url=None,
metadata=None,
save=True,
copy=False):
'''get or create a container, including the collection to add it to.
This function can be used from a file on the local system, or via a URL
that has been downloaded. Either way, if one of url, version, or image_file
is not provided, the model is created without it. If a version is not
provided but a file path is, then the file hash is used.
Parameters
==========
image_path: full path to image file
image_name: if defined, the user wants a custom name (and not based on uri)
metadata: any extra metadata to keep for the image (dict)
save: if True, move the image to the cache if it's not there
copy: If True, copy the image instead of moving it.
image_name: a uri that gets parsed into a names object that looks like:
{'collection': 'vsoch',
'image': 'hello-world',
'storage': 'vsoch/hello-world-latest.img',
'tag': 'latest',
'version': '12345'
'uri': 'vsoch/hello-world:latest@12345'}
After running add, the user will take some image in a working
directory, add it to the database, and have it available for search
and use under SREGISTRY_STORAGE/<collection>/<container>
If the container was retrieved from a webby place, it should have version
If no version is found, the file hash is used.
'''
from sregistry.database.models import (
Container,
Collection
)
# We can only save if the image is provided
if image_path is not None:
if not os.path.exists(image_path) and save is True:
bot.error('Cannot find %s' %image_path)
sys.exit(1)
# An image uri is required for version, tag, etc.
if image_uri is None:
bot.error('You must provide an image uri <collection>/<namespace>')
sys.exit(1)
names = parse_image_name( remove_uri(image_uri) )
bot.debug('Adding %s to registry' % names['uri'])
# If Singularity is installed, inspect image for metadata
metadata = self.get_metadata(image_path, names=names)
collection = self.get_or_create_collection(names['collection'])
# Get a hash of the file for the version, or use provided
version = names.get('version')
    if version is None:
        if image_path is not None:
version = get_image_hash(image_path)
else:
version = '' # we can't determine a version, not in API/no file
names = parse_image_name( remove_uri(image_uri), version=version )
# If save, move to registry storage first
if save is True and image_path is not None:
# If the user hasn't defined a custom name
if image_name is None:
image_name = self._get_storage_name(names)
if copy is True:
copyfile(image_path, image_name)
else:
shutil.move(image_path, image_name)
image_path = image_name
# Just in case the client didn't provide it, see if we have in metadata
if url is None and "url" in metadata:
url = metadata['url']
# First check that we don't have one already!
container = self.get_container(name=names['image'],
collection_id=collection.id,
tag=names['tag'],
version=version)
# The container did not exist, create it
if container is None:
action = "new"
container = Container(metrics=json.dumps(metadata),
name=names['image'],
image=image_path,
client=self.client_name,
tag=names['tag'],
version=version,
url=url,
uri=names['uri'],
collection_id=collection.id)
self.session.add(container)
collection.containers.append(container)
# The container existed, update it.
    else:
        action = "update"
        metrics = json.loads(container.metrics)
        metrics.update(metadata)
        container.url = url
        container.client = self.client_name
        if image_path is not None:
            container.image = image_path
        container.metrics = json.dumps(metrics)
self.session.commit()
bot.info("[container][%s] %s" % (action,names['uri']))
return container
|
push an image to Singularity Registry
|
def push(self, path, name, tag=None):
'''push an image to Singularity Registry'''
path = os.path.abspath(path)
image = os.path.basename(path)
bot.debug("PUSH %s" % path)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# Interaction with a registry requires secrets
self.require_secrets()
# Extract the metadata
names = parse_image_name(remove_uri(name), tag=tag)
image_size = os.path.getsize(path) >> 20
# COLLECTION ###################################################################
# If the registry is provided in the uri, use it
    if names['registry'] is None:
names['registry'] = self.base
# If the base doesn't start with http or https, add it
names = self._add_https(names)
# Prepare push request, this will return a collection ID if permission
url = '%s/push/' % names['registry']
auth_url = '%s/upload/chunked_upload' % names['registry']
SREGISTRY_EVENT = self.authorize(request_type="push",
names=names)
# Data fields for collection
fields = { 'collection': names['collection'],
'name':names['image'],
'tag': names['tag']}
headers = { 'Authorization': SREGISTRY_EVENT }
r = requests.post(auth_url, json=fields, headers=headers)
# Always tell the user what's going on!
message = self._read_response(r)
print('\n[1. Collection return status {0} {1}]'.format(r.status_code, message))
# Get the collection id, if created, and continue with upload
if r.status_code != 200:
sys.exit(1)
# UPLOAD #######################################################################
url = '%s/upload' % names['registry'].replace('/api','')
    bot.debug('Setting upload URL to {0}'.format(url))
cid = r.json()['cid']
upload_to = os.path.basename(names['storage'])
SREGISTRY_EVENT = self.authorize(request_type="upload",
names=names)
encoder = MultipartEncoder(fields={'SREGISTRY_EVENT': SREGISTRY_EVENT,
'name': names['image'],
'collection': str(cid),
'tag': names['tag'],
'file1': (upload_to, open(path, 'rb'), 'text/plain')})
progress_callback = create_callback(encoder, self.quiet)
monitor = MultipartEncoderMonitor(encoder, progress_callback)
headers = {'Content-Type': monitor.content_type,
'Authorization': SREGISTRY_EVENT }
try:
r = requests.post(url, data=monitor, headers=headers)
r.raise_for_status()
message = r.json()['message']
print('\n[Return status {0} {1}]'.format(r.status_code, message))
except requests.HTTPError as e:
print('\nUpload failed: {0}.'.format(e))
except KeyboardInterrupt:
print('\nUpload cancelled.')
except Exception as e:
print(e)
|
take a recipe, and return the complete header line. If remove_header is True, only return the value.
|
def parse_header(recipe, header="from", remove_header=True):
    '''take a recipe, and return the complete header line. If
    remove_header is True, only return the value.
    Parameters
    ==========
    recipe: the recipe file
    header: the header key to find and parse
    remove_header: if true, remove the key
'''
parsed_header = None
fromline = [x for x in recipe.split('\n') if "%s:" %header in x.lower()]
    # Case 1: We did not find the fromline
    if len(fromline) == 0:
        return ""
    # Case 2: We found it!
    fromline = fromline[0]
    parsed_header = fromline.strip()
    # Does the user want to clean it up?
    if remove_header is True:
        parsed_header = fromline.split(':', 1)[-1].strip()
return parsed_header
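For example, given a minimal recipe string, parse_header behaves like this (a sketch):

recipe = 'Bootstrap: docker\nFrom: ubuntu:18.04\n'
parse_header(recipe, header='from')                       # 'ubuntu:18.04'
parse_header(recipe, header='from', remove_header=False)  # 'From: ubuntu:18.04'
parse_header(recipe, header='bootstrap')                  # 'docker'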
|
find recipes will use a list of base folders, files, or patterns over a subset of content to find recipe files (indicated by starting with "Singularity"). base: if defined, consider folders recursively below this level.
|
def find_recipes(folders, pattern=None, base=None):
'''find recipes will use a list of base folders, files,
or patterns over a subset of content to find recipe files
    (indicated by starting with "Singularity")
Parameters
==========
base: if defined, consider folders recursively below this level.
'''
# If the user doesn't provide a list of folders, use $PWD
if folders is None:
folders = os.getcwd()
if not isinstance(folders,list):
folders = [folders]
manifest = dict()
for base_folder in folders:
# If we find a file, return the one file
custom_pattern = None
if os.path.isfile(base_folder): # updates manifest
manifest = find_single_recipe(filename=base_folder,
pattern=pattern,
manifest=manifest)
continue
# The user likely provided a custom pattern
elif not os.path.isdir(base_folder):
            custom_pattern = os.path.basename(base_folder)
            base_folder = os.path.dirname(base_folder)
# If we don't trigger loop, we have directory
manifest = find_folder_recipes(base_folder=base_folder,
pattern=custom_pattern or pattern,
manifest=manifest,
base=base)
return manifest
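A usage sketch (folders hypothetical); note the pattern is passed explicitly:

# Search $PWD recursively for recipe files named Singularity*
manifest = find_recipes(None, pattern='Singularity*')

# Keys are <parent-folder>/<recipe> uris; values hold the path and mtime
for uri, entry in manifest.items():
    print(uri, entry['path'])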
|
find folder recipes will find recipes based on a particular pattern. base_folder: the base folder to recursively walk. pattern: a default pattern to search for. manifest: an already started manifest. base: if defined, consider folders under this level recursively.
|
def find_folder_recipes(base_folder,
pattern="Singularity",
manifest=None,
base=None):
'''find folder recipes will find recipes based on a particular pattern.
Parameters
==========
base_folder: the base folder to recursively walk
pattern: a default pattern to search for
manifest: an already started manifest
base: if defined, consider folders under this level recursively.
'''
# The user is not appending to an existing manifest
if manifest is None:
manifest = dict()
for root, dirnames, filenames in os.walk(base_folder):
for filename in fnmatch.filter(filenames, pattern):
container_path = os.path.join(root, filename)
if base is not None:
container_base = container_path.replace(base,'').strip('/')
collection = container_base.split('/')[0]
recipe = os.path.basename(container_base)
container_uri = "%s/%s" %(collection,recipe)
else:
container_uri = '/'.join(container_path.strip('/').split('/')[-2:])
add_container = True
# Add the most recently updated container
if container_uri in manifest:
if manifest[container_uri]['modified'] > os.path.getmtime(container_path):
add_container = False
if add_container:
manifest[container_uri] = {'path': os.path.abspath(container_path),
'modified':os.path.getmtime(container_path)}
return manifest
|
find_single_recipe will parse a single file and if valid return an updated manifest
|
def find_single_recipe(filename, pattern="Singularity", manifest=None):
'''find_single_recipe will parse a single file, and if valid,
return an updated manifest
Parameters
==========
filename: the filename to assess for a recipe
pattern: a default pattern to search for
manifest: an already started manifest
'''
if pattern is None:
pattern = "Singularity*"
recipe = None
file_basename = os.path.basename(filename)
if fnmatch.fnmatch(file_basename, pattern):
recipe = {'path': os.path.abspath(filename),
'modified':os.path.getmtime(filename)}
# If we already have the recipe, only add if more recent
if manifest is not None and recipe is not None:
container_uri = '/'.join(filename.split('/')[-2:])
if container_uri in manifest:
if manifest[container_uri]['modified'] < os.path.getmtime(filename):
manifest[container_uri] = recipe
else:
manifest[container_uri] = recipe
return manifest
return recipe
|
trigger a build on Google Cloud (builder then storage) given a name, recipe, and GitHub URI where the recipe can be found. recipe: the local recipe to build. name: should be the complete uri that the user has requested to push. context: the dependency files needed for the build; if not defined, only the recipe is uploaded. preview: if True, preview but don't run the build.
|
def build(self, name,
recipe="Singularity",
context=None,
preview=False):
'''trigger a build on Google Cloud (builder then storage) given a name
recipe, and Github URI where the recipe can be found.
Parameters
==========
recipe: the local recipe to build.
name: should be the complete uri that the user has requested to push.
context: the dependency files needed for the build. If not defined, only
the recipe is uploaded.
preview: if True, preview but don't run the build
Environment
===========
SREGISTRY_GOOGLE_BUILD_SINGULARITY_VERSION: the version of Singularity
to use, defaults to 3.0.2-slim
SREGISTRY_GOOGLE_BUILD_CLEANUP: after build, delete intermediate
dependencies in cloudbuild bucket.
'''
bot.debug("BUILD %s" % recipe)
# This returns a data structure with collection, container, based on uri
names = parse_image_name(remove_uri(name))
# Load the build configuration
config = self._load_build_config(name=names['uri'], recipe=recipe)
build_package = [recipe]
if context not in [None, '', []]:
# If the user gives a ., include recursive $PWD
if '.' in context:
context = glob(os.getcwd() + '/**/*', recursive=True)
build_package = build_package + context
package = create_build_package(build_package)
# Does the package already exist? If the user cached, it might
destination='source/%s' % os.path.basename(package)
blob = self._build_bucket.blob(destination)
# if it doesn't exist, upload it
if not blob.exists() and preview is False:
bot.log('Uploading build package!')
manifest = self._upload(source=package,
bucket=self._build_bucket,
destination=destination)
else:
bot.log('Build package found in %s.' % self._build_bucket.name)
# The source should point to the bucket with the .tar.gz, latest generation
config["source"]["storageSource"]['bucket'] = self._build_bucket.name
config["source"]["storageSource"]['object'] = destination
# If not a preview, run the build and return the response
if preview is False:
config = self._run_build(config, self._bucket, names)
# If the user wants to cache cloudbuild files, this will be set
if not self._get_and_update_setting('SREGISTRY_GOOGLE_BUILD_CACHE'):
blob.delete()
# Clean up either way, return config or response
shutil.rmtree(os.path.dirname(package))
return config
|
given a list of files, copy them to a temporary folder, compress into a .tar.gz, and rename based on the file hash. Return the full path to the .tar.gz in the temporary folder.
|
def create_build_package(package_files):
'''given a list of files, copy them to a temporary folder,
compress into a .tar.gz, and rename based on the file hash.
Return the full path to the .tar.gz in the temporary folder.
Parameters
==========
package_files: a list of files to include in the tar.gz
'''
# Ensure package files all exist
for package_file in package_files:
if not os.path.exists(package_file):
bot.exit('Cannot find %s.' % package_file)
bot.log('Generating build package for %s files...' % len(package_files))
build_dir = get_tmpdir(prefix="sregistry-build")
build_tar = '%s/build.tar.gz' % build_dir
tar = tarfile.open(build_tar, "w:gz")
# Create the tar.gz
for package_file in package_files:
tar.add(package_file)
tar.close()
# Get hash (sha256), and rename file
sha256 = get_file_hash(build_tar)
hash_tar = "%s/%s.tar.gz" %(build_dir, sha256)
shutil.move(build_tar, hash_tar)
return hash_tar
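A sketch with hypothetical file names:

# Bundle a recipe and its dependencies; the result lands in a fresh
# temporary folder named sregistry-build.<random>
package = create_build_package(['Singularity', 'requirements.txt'])
print(package)  # e.g. /tmp/sregistry-build.abc/<sha256>.tar.gz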
|
load a google compute config, meaning that we start with a template and mimic the following example cloudbuild.yaml:
|
def load_build_config(self, name, recipe):
'''load a google compute config, meaning that we start with a template,
and mimic the following example cloudbuild.yaml:
steps:
- name: "singularityware/singularity:${_SINGULARITY_VERSION}"
args: ['build', 'julia-centos-another.sif', 'julia.def']
artifacts:
objects:
location: 'gs://sregistry-gcloud-build-vanessa'
paths: ['julia-centos-another.sif']
Parameters
==========
recipe: the local recipe file for the builder.
name: the name of the container, based on the uri
'''
version_envar = 'SREGISTRY_GOOGLE_BUILD_SINGULARITY_VERSION'
version = self._get_and_update_setting(version_envar, '3.0.2-slim')
config = get_build_template()
# Name is in format 'dinosaur/container-latest'
# The command to give the builder, with image name
container_name = '%s.sif' % name.replace('/','-', 1)
config['steps'][0]['name'] = 'singularityware/singularity:%s' % version
config['steps'][0]['args'] = ['build', container_name, recipe]
config["artifacts"]["objects"]["location"] = "gs://%s" % self._bucket_name
config["artifacts"]["objects"]["paths"] = [container_name]
return config
|
run a build, meaning creating a build and polling its status until it completes or fails.
|
def run_build(self, config, bucket, names):
    '''run a build, meaning creating a build and polling its status
    until it completes or fails.
'''
project = self._get_project()
# prefix, message, color
bot.custom('PROJECT', project, "CYAN")
bot.custom('BUILD ', config['steps'][0]['name'], "CYAN")
response = self._build_service.projects().builds().create(body=config,
projectId=project).execute()
build_id = response['metadata']['build']['id']
status = response['metadata']['build']['status']
bot.log("build %s: %s" % (build_id, status))
start = time.time()
while status not in ['COMPLETE', 'FAILURE', 'SUCCESS']:
time.sleep(15)
response = self._build_service.projects().builds().get(id=build_id,
projectId=project).execute()
build_id = response['id']
status = response['status']
bot.log("build %s: %s" % (build_id, status))
end = time.time()
bot.log('Total build time: %s seconds' % (round(end - start, 2)))
# If successful, update blob metadata and visibility
if status == 'SUCCESS':
# Does the user want to keep the container private?
env = 'SREGISTRY_GOOGLE_STORAGE_PRIVATE'
blob = bucket.blob(response['artifacts']['objects']['paths'][0])
# Make Public, if desired
        if self._get_and_update_setting(env) is None:
blob.make_public()
response['public_url'] = blob.public_url
# Add the metadata directly to the object
update_blob_metadata(blob, response, config, bucket, names)
response['media_link'] = blob.media_link
response['size'] = blob.size
response['file_hash'] = blob.md5_hash
return response
|
a specific function to take a blob, along with a SUCCESS response from Google build and the original config, and update the blob metadata with the artifact file name, dependencies, and image hash.
|
def update_blob_metadata(blob, response, config, bucket, names):
'''a specific function to take a blob, along with a SUCCESS response
from Google build, the original config, and update the blob
metadata with the artifact file name, dependencies, and image hash.
'''
manifest = os.path.basename(response['results']['artifactManifest'])
manifest = json.loads(bucket.blob(manifest).download_as_string())
metadata = {'file_hash': manifest['file_hash'][0]['file_hash'][0]['value'],
'artifactManifest': response['results']['artifactManifest'],
'location': manifest['location'],
'storageSourceBucket': config['source']['storageSource']['bucket'],
'storageSourceObject': config['source']['storageSource']['object'],
'buildCommand': ' '.join(config['steps'][0]['args']),
'builder': config['steps'][0]['name'],
'media_link': blob.media_link,
'self_link': blob.self_link,
'size': blob.size,
'name': names['tag_uri'],
'type': "container"} # identifier that the blob is a container
blob.metadata = metadata
blob._properties['metadata'] = metadata
blob.patch()
|
query a Singularity registry for a list of images. If query is None, collections are listed.
|
def search(self, query=None, args=None):
'''query a Singularity registry for a list of images.
If query is None, collections are listed.
EXAMPLE QUERIES:
'''
# You can optionally better parse the image uri (query), but not
# necessary
# names = parse_image_name(remove_uri(query))
if query is not None:
# Here you might do a function that is a general list
# Note that this means adding the function Client in __init__
return self._container_query(query)
# or default to listing (searching) all things.
return self._search_all()
|
a "show all" search that doesn't require a query.
|
def search_all(self):
'''a "show all" search that doesn't require a query'''
# This should be your apis url for a search
url = '...'
    # paginate get is what it sounds like, and what you want for multiple
# pages of results
results = self._paginate_get(url)
if len(results) == 0:
bot.info("No container collections found.")
sys.exit(1)
bot.info("Collections")
# Here is how to create a simple table. You of course must parse your
# custom result and form the fields in the table to be what you think
# are important!
rows = []
for result in results:
if "containers" in result:
for c in result['containers']:
rows.append([ c['uri'],
c['detail'] ])
bot.table(rows)
return rows
|
return a collection, repo name, and tag for an image file. image_name: a user provided string indicating a collection, image, and optionally a tag. tag: optionally specify tag as its own argument; over-rides parsed image tag. defaults: use defaults "latest" for tag and "library" for collection. base: if defined, remove from image_name; appropriate if the user gave a registry url base that isn't part of namespace. lowercase: turn entire URI to lowercase (default is True).
|
def parse_image_name(image_name,
tag=None,
version=None,
defaults=True,
ext="sif",
default_collection="library",
default_tag="latest",
base=None,
lowercase=True):
'''return a collection and repo name and tag
for an image file.
Parameters
=========
image_name: a user provided string indicating a collection,
image, and optionally a tag.
tag: optionally specify tag as its own argument
over-rides parsed image tag
defaults: use defaults "latest" for tag and "library"
for collection.
base: if defined, remove from image_name, appropriate if the
user gave a registry url base that isn't part of namespace.
lowercase: turn entire URI to lowercase (default is True)
'''
# Save the original string
original = image_name
if base is not None:
image_name = image_name.replace(base,'').strip('/')
# If a file is provided, remove extension
image_name = re.sub('[.](img|simg|sif)','', image_name)
# Parse the provided name
uri_regexes = [ _reduced_uri,
_default_uri,
_docker_uri ]
for r in uri_regexes:
match = r.match(image_name)
if match:
break
if not match:
        bot.exit('Could not parse image "%s"! Exiting.' % image_name)
# Get matches
registry = match.group('registry')
collection = match.group('collection')
repo_name = match.group('repo')
repo_tag = match.group('tag')
version = match.group('version')
# A repo_name is required
assert(repo_name)
# If a collection isn't provided
collection = set_default(collection, default_collection, defaults)
repo_tag = set_default(repo_tag, default_tag, defaults)
# The collection, name must be all lowercase
if lowercase:
collection = collection.lower().rstrip('/')
repo_name = repo_name.lower()
repo_tag = repo_tag.lower()
else:
collection = collection.rstrip('/')
    if version is not None:
        version = version.lower()
    # Piece together the uri base
    if registry is None:
uri = "%s/%s" % (collection, repo_name)
else:
uri = "%s/%s/%s" % (registry, collection, repo_name)
url = uri
# Tag is defined
    if repo_tag is not None:
uri = "%s-%s" % (uri, repo_tag)
tag_uri = "%s:%s" % (url, repo_tag)
# Version is defined
storage_version = None
if version is not None:
uri = "%s@%s" % (uri, version)
tag_uri = "%s@%s" % (tag_uri, version)
storage_version = "%s@%s.%s" % (tag_uri, version, ext)
# A second storage URI honors the tag (:) separator
storage = "%s.%s" %(uri, ext)
storage_uri = "%s.%s" %(tag_uri, ext)
result = {"collection": collection,
"original": original,
"registry": registry,
"image": repo_name,
"url": url,
"tag": repo_tag,
"version": version,
"storage": storage,
"storage_uri": storage_uri,
"storage_version": storage_version or storage_uri,
"tag_uri": tag_uri,
"uri": uri}
return result
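For example, a sketch of the fields returned for a typical uri (assuming the default regexes match a collection/image:tag string):

names = parse_image_name('vsoch/hello-world:v1.0')
# names['collection']  -> 'vsoch'
# names['image']       -> 'hello-world'
# names['tag']         -> 'v1.0'
# names['uri']         -> 'vsoch/hello-world-v1.0'
# names['tag_uri']     -> 'vsoch/hello-world:v1.0'
# names['storage']     -> 'vsoch/hello-world-v1.0.sif'
# names['storage_uri'] -> 'vsoch/hello-world:v1.0.sif'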
|
format_container_name will take a name supplied by the user, remove all special characters (except for those defined by "special_characters"), and return the new image name.
|
def format_container_name(name, special_characters=None):
'''format_container_name will take a name supplied by the user,
remove all special characters (except for those defined by "special-characters"
and return the new image name.
'''
if special_characters is None:
special_characters = []
return ''.join(e.lower()
for e in name if e.isalnum() or e in special_characters)
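For example:

format_container_name('Hello--World!')         # 'helloworld'
format_container_name('Hello--World!', ['-'])  # 'hello--world'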
|
get the uri (transport prefix) for an image, if it is among the accepted uris. image: the image uri, in the format <uri>://<registry>/<namespace>:<tag>.
|
def get_uri(image):
    '''get the uri for an image, if it is among the accepted uris
Parameters
==========
image: the image uri, in the format <uri>://<registry>/<namespace>:<tag>
'''
# Ensure we have a string
image = image or ''
# Find uri prefix, including ://
regexp = re.compile('^.+://')
uri = regexp.match(image)
if uri is not None:
uri = (uri.group().lower()
.replace('_','-')
.replace('://',''))
accepted_uris = ['aws',
'docker',
'http', 'https', # Must be allowed for pull
'dropbox',
'gitlab',
'globus',
'google-build',
'google-storage',
'google-drive',
'hub',
'nvidia',
'registry',
's3',
'swift']
# Allow for Singularity compatability
if "shub" in uri: uri = "hub"
if uri not in accepted_uris:
bot.warning('%s is not a recognized uri.' % uri)
uri = None
return uri
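A few illustrative calls:

get_uri('docker://ubuntu:18.04')     # 'docker'
get_uri('shub://vsoch/hello-world')  # 'hub' (Singularity compatibility)
get_uri('ftp://example.com/image')   # warns, returns None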
|
pull an image from storage using Swift; the image is found based on the storage uri. images: refers to the uri given by the user to pull in the format <collection>/<namespace>; you should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file; it can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add(). Returns finished: a single container path, or list of paths.
|
def pull(self, images, file_name=None, save=True, **kwargs):
'''pull an image from storage using Swift. The image is found based on the
storage uri
Parameters
==========
images: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
'''
force = False
if "force" in kwargs:
force = kwargs['force']
if not isinstance(images, list):
images = [images]
bot.debug('Execution of PULL for %s images' % len(images))
# If used internally we want to return a list to the user.
finished = []
for image in images:
names = parse_image_name(remove_uri(image))
# First try to get the collection
collection = self._get_collection(names['collection'])
if collection is None:
bot.error('Collection %s does not exist.' % names['collection'])
# Show the user collections he/she does have access to
collections = self.get_collections()
if collections:
bot.info('Collections available to you: \n%s' %'\n'.join(collections))
sys.exit(1)
# Determine if the container exists in storage
image_name = os.path.basename(names['storage'])
try:
obj_tuple = self.conn.get_object(names['collection'], image_name)
except ClientException:
bot.exit('%s does not exist.' % names['storage'])
# Give etag as version if version not defined
        if names['version'] is None:
names['version'] = obj_tuple[0]['etag']
# If the user didn't provide a file, make one based on the names
if file_name is None:
file_name = self._get_storage_name(names)
# If the file already exists and force is False
if os.path.exists(file_name) and force is False:
bot.error('Image exists! Remove first, or use --force to overwrite')
sys.exit(1)
# Write to file
with open(file_name, 'wb') as filey:
filey.write(obj_tuple[1])
        # The downloaded file is the image file, unless we save to storage
        image_file = file_name
        # If we save to storage, the client add function returns the container
        if save is True:
            names.update(obj_tuple[0])
            container = self.add(image_path = file_name,
                                 image_uri = names['uri'],
                                 metadata = names)
            # When the container is created, this is the path to the image
            image_file = container.image
        if os.path.exists(image_file):
            bot.debug('Retrieved image file %s' % image_file)
            bot.custom(prefix="Success!", message=image_file)
            finished.append(image_file)
        else:
            bot.error('%s does not exist. Try sregistry search to see images.' % image_file)
if len(finished) == 1:
finished = finished[0]
return finished
|
push an image to Google Drive, meaning uploading it. path: should correspond to an absolute image path (or derive it). name: should be the complete uri that the user has requested to push. tag: should correspond with an image tag; this is provided to mirror Docker.
|
def push(self, path, name, tag=None):
    '''push an image to Google Drive, meaning uploading it
    path: should correspond to an absolute image path (or derive it)
    name: should be the complete uri that the user has requested to push.
    tag: should correspond with an image tag. This is provided to mirror Docker
'''
# The root of the drive for containers (the parent folder)
parent = self._get_or_create_folder(self._base)
image = None
path = os.path.abspath(path)
bot.debug("PUSH %s" % path)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
names = parse_image_name(remove_uri(name),tag=tag)
if names['version'] is None:
version = get_image_hash(path)
names = parse_image_name(remove_uri(name), tag=tag, version=version)
# Update metadata with names, flatten to only include labels
metadata = self.get_metadata(path, names=names)
metadata = metadata['data']
metadata.update(names)
metadata.update(metadata['attributes']['labels'])
del metadata['attributes']
file_metadata = {
'name': names['storage'],
'mimeType' : 'application/octet-stream',
'parents': [parent['id']],
'properties': metadata
}
media = MediaFileUpload(path,resumable=True)
try:
bot.spinner.start()
image = self._service.files().create(body=file_metadata,
media_body=media,
fields='id').execute()
# Add a thumbnail!
thumbnail = get_thumbnail()
with open(thumbnail, "rb") as f:
body = { "contentHints": {
"thumbnail": { "image": base64.urlsafe_b64encode(f.read()).decode('utf8'),
"mimeType": "image/png" }
}}
image = self._service.files().update(fileId=image['id'],
body = body).execute()
bot.spinner.stop()
print(image['name'])
except HttpError:
bot.error('Error uploading %s' %path)
pass
return image
|
set the API base for AWS ECR. The user is able to set the base, api version, and protocol via a settings file or environment variables:
|
def _set_base(self, zone=None):
    '''set the API base for AWS ECR. The user is able
    to set the base, api version, and protocol via a settings file
    or environment variables:
'''
if hasattr(self.aws._client_config, 'region_name'):
zone = self.aws._client_config.region_name
aws_id = self._required_get_and_update('SREGISTRY_AWS_ID')
aws_zone = self._required_get_and_update('SREGISTRY_AWS_ZONE', zone)
version = self._get_setting('SREGISTRY_AWS_VERSION', 'v2')
base = self._get_setting('SREGISTRY_AWS_BASE')
if base is None:
base = "%s.dkr.ecr.%s.amazonaws.com" % (aws_id, aws_zone)
nohttps = self._get_setting('SREGISTRY_AWS_NOHTTPS')
if nohttps is None:
nohttps = "https://"
else:
nohttps = "http://"
# <protocol>://<base>/<version>
self.base = "%s%s/%s" %(nohttps, base.strip('/'), version)
|
update secrets will take a secrets credential file either located at .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS and update the current client secrets as well as the associated API base. For this client, we also create the aws ECR client via awscli.
|
def _update_secrets(self):
'''update secrets will take a secrets credential file
either located at .sregistry or the environment variable
SREGISTRY_CLIENT_SECRETS and update the current client
    secrets as well as the associated API base. For this client,
    we also create the aws ECR client via awscli.
'''
bot.debug('Creating aws client...')
try:
from awscli.clidriver import create_clidriver
except:
        bot.exit('Please install: pip install sregistry[aws]')
driver = create_clidriver()
self.aws = driver.session.create_client('ecr')
|
get_logging_level will configure logging to standard out based on the user's selected level, which should be in an environment variable called MESSAGELEVEL. If MESSAGELEVEL is not set, the maximum level (5) is assumed (all messages).
|
def get_logging_level():
    '''get_logging_level will configure logging to standard out based on the
    user's selected level, which should be in an environment variable called
    MESSAGELEVEL. If MESSAGELEVEL is not set, the maximum level
    (5) is assumed (all messages).
'''
level = os.environ.get("MESSAGELEVEL", INFO)
# User knows logging levels and set one
if isinstance(level, int):
return level
# Otherwise it's a string
if level == "CRITICAL":
return CRITICAL
elif level == "ABORT":
return ABORT
elif level == "ERROR":
return ERROR
elif level == "WARNING":
return WARNING
elif level == "LOG":
return LOG
elif level == "INFO":
return INFO
elif level == "QUIET":
return QUIET
elif level.startswith("VERBOSE"):
return VERBOSE3
elif level == "LOG":
return LOG
elif level == "DEBUG":
return DEBUG
return level
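For example, to quiet the client down to errors only (a sketch; the level constants come from this module, and the variable must be set before the logger is created):

import os
os.environ['MESSAGELEVEL'] = 'ERROR'
level = get_logging_level()  # returns the ERROR constant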
|