download an image layer (.tar.gz) to a specified download folder, using local versions of the core stream/download functions of the parent client.
def download_task(url, headers, destination, download_type='layer'):
    '''download an image layer (.tar.gz) to a specified download folder,
       using local versions of the core stream/download functions of the
       parent client.

       Parameters
       ==========
       url: the url to download the layer from
       headers: headers to add to the request
       destination: the file destination to download to
       download_type: a human friendly name for the download (default "layer")
    '''
    # Update the user on what we are doing
    bot.verbose("Downloading %s from %s" % (download_type, url))

    # Step 1: Download the layer atomically
    file_name = "%s.%s" % (destination,
                           next(tempfile._get_candidate_names()))
    tar_download = download(url, file_name, headers=headers)

    try:
        shutil.move(tar_download, destination)
    except Exception:
        msg = "Cannot move layer %s into place," % tar_download
        msg += " was there a problem with download?"
        bot.error(msg)
        sys.exit(1)
    return destination

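A minimal usage sketch, assuming the worker utilities above are importable; the url and token below are hypothetical placeholders for values the client would normally supply:

# Hypothetical layer url and auth header; real values come from the registry client
url = 'https://registry.example.com/v2/library/ubuntu/blobs/sha256:aaa111'
headers = {'Authorization': 'Bearer <token>'}

# Streams to a temporary file first, then moves it into place
layer = download_task(url, headers, destination='/tmp/layer.tar.gz')
print(layer)  # /tmp/layer.tar.gz
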
post will use requests to post to a particular url
def post(url, data=None, headers=None, return_json=True):
    '''post will use requests to post to a particular url
    '''
    bot.debug("POST %s" % url)
    return call(url,
                headers=headers,
                func=requests.post,
                data=data,
                return_json=return_json)

get will use requests to get a particular url
def get(url, headers=None, token=None, data=None, return_json=True):
    '''get will use requests to get a particular url
    '''
    bot.debug("GET %s" % url)
    return call(url,
                headers=headers,
                func=requests.get,
                data=data,
                return_json=return_json)

call will issue the request, and retry once with updated headers given a 401 response
def call(url, func, data=None, headers=None,
         return_json=True, stream=False, retry=True):
    '''call will issue the request, and retry once with updated headers
       given a 401 response.

       Parameters
       ==========
       func: the function (eg, post, get) to call
       url: the url to send the request to
       headers: headers for the request
       data: additional data to add to the request
       return_json: return json if successful
    '''
    if DISABLE_SSL_CHECK is True:
        bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')

    if data is not None:
        if not isinstance(data, dict):
            data = json.dumps(data)

    response = func(url=url,
                    headers=headers,
                    data=data,
                    verify=not DISABLE_SSL_CHECK,
                    stream=stream)

    # Server error, exit
    if response.status_code in [500, 502]:
        bot.error("Beep boop! %s: %s" % (response.reason,
                                         response.status_code))
        sys.exit(1)

    # Not found, exit
    if response.status_code == 404:
        bot.error("Beep boop! %s: %s" % (response.reason,
                                         response.status_code))
        sys.exit(1)

    # Unauthorized: try again once with updated headers
    if response.status_code == 401:
        if retry is True:
            headers = update_token(response, headers)
            return call(url, func,
                        data=data,
                        headers=headers,
                        return_json=return_json,
                        stream=stream,
                        retry=False)

        bot.error("Your credentials are expired! %s: %s" % (response.reason,
                                                            response.status_code))
        sys.exit(1)

    elif response.status_code == 200:
        if return_json:
            try:
                response = response.json()
            except ValueError:
                bot.error("The server returned a malformed response.")
                sys.exit(1)
    return response

require secrets ensures that the client has the secrets file, and specifically has one or more parameters defined. If params is None, only a check is done for the file.
def require_secrets(self, params=None):
    '''require secrets ensures that the client has the secrets file, and
       specifically has one or more parameters defined. If params is None,
       only a check is done for the file.

       Parameters
       ==========
       params: a list of keys to look up in the client secrets, eg:
               secrets[client_name][param] should not be in [None, ''] or unset
    '''
    name = self.client_name

    # Check 1: the client must have secrets, period
    has_secrets = True

    # The client shouldn't call this function if it didn't init secrets
    if not hasattr(self, 'secrets'):
        has_secrets = False

    # Secrets file was not found, period
    elif self.secrets is None:
        has_secrets = False

    # The client isn't defined in the secrets file
    elif name not in self.secrets:
        has_secrets = False

    # Missing file or client secrets, fail
    if has_secrets is False:
        message = '%s requires client secrets.' % name
        bot.error(message)
        sys.exit(1)

    # Check 2: we have secrets and lookup, do we have all needed params?
    if params is not None:

        # Assume list so we can always parse through
        if not isinstance(params, list):
            params = [params]

        for param in params:

            # The parameter is not a key for the client
            if param not in self.secrets[name]:
                has_secrets = False

            # The parameter is a key, but empty or undefined
            elif self.secrets[name][param] in [None, '']:
                has_secrets = False

            # Missing parameter, exit on fail
            if has_secrets is False:
                message = 'Missing %s in client secrets.' % param
                bot.error(message)
                sys.exit(1)

auth flow is a function to present the user with a url to retrieve some token/code, and then copy-paste it back into the terminal.
def auth_flow(self, url):
    '''auth flow is a function to present the user with a url to retrieve
       some token/code, and then copy-paste it back into the terminal.

       Parameters
       ==========
       url: a url generated for the user to go to and accept getting a
            credential in the browser.
    '''
    print('Please go to this URL and login: {0}'.format(url))
    get_input = getattr(__builtins__, 'raw_input', input)
    message = 'Please enter the code you get after login here: '
    code = get_input(message).strip()
    return code

stream to a temporary file, rename on successful completion
def download(url, file_name, headers=None, show_progress=True):
    '''stream to a temporary file, rename on successful completion

       Parameters
       ==========
       file_name: the file name to stream to
       url: the url to stream from
       headers: additional headers to add
    '''
    fd, tmp_file = tempfile.mkstemp(prefix=("%s.tmp." % file_name))
    os.close(fd)

    if DISABLE_SSL_CHECK is True:
        bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')

    # stream() checks DISABLE_SSL_CHECK itself to set verify on the request
    response = stream(url, headers=headers, stream_to=tmp_file)
    shutil.move(tmp_file, file_name)
    return file_name

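The download-to-temp-then-rename pattern above can be sketched standalone with just requests and the standard library (no sregistry helpers), for any streamable url:

import os
import shutil
import tempfile

import requests

def atomic_download(url, file_name, chunk_size=1 << 20):
    '''stream url to a temporary file, and only rename into place on success.'''
    fd, tmp_file = tempfile.mkstemp(prefix="%s.tmp." % file_name)
    os.close(fd)
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(tmp_file, 'wb') as filey:
        for chunk in response.iter_content(chunk_size=chunk_size):
            filey.write(chunk)
    shutil.move(tmp_file, file_name)
    return file_name
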
stream is a get that will stream to file_name. Since this is a worker task, it differs from the client provided version in that it requires headers.
def stream(url, headers, stream_to=None, retry=True):
    '''stream is a get that will stream to file_name. Since this is a worker
       task, it differs from the client provided version in that it requires
       headers.
    '''
    bot.debug("GET %s" % url)

    if DISABLE_SSL_CHECK is True:
        bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')

    response = requests.get(url,
                            headers=headers,
                            verify=not DISABLE_SSL_CHECK,
                            stream=True)

    # If we get a permissions error, one more try with an updated token
    if response.status_code in [401, 403] and retry is True:
        headers = update_token(headers)
        return stream(url, headers, stream_to, retry=False)

    # Successful Response
    elif response.status_code == 200:

        # Keep user updated with a progress bar, if we know the size
        content_size = None
        if 'Content-Length' in response.headers:
            progress = 0
            content_size = int(response.headers['Content-Length'])
            bot.show_progress(progress, content_size, length=35)

        chunk_size = 1 << 20
        with open(stream_to, 'wb') as filey:
            for chunk in response.iter_content(chunk_size=chunk_size):
                filey.write(chunk)
                if content_size is not None:
                    progress += chunk_size
                    bot.show_progress(iteration=progress,
                                      total=content_size,
                                      length=35,
                                      carriage_return=False)

        # Newline to finish download
        sys.stdout.write('\n')
        return stream_to

    bot.error("Problem with stream, response %s" % response.status_code)
    sys.exit(1)

update_token uses the AWS CLI to request a fresh ECR authorization token, and uses it to update the Authorization header via HTTP basic authentication. We take as input previous headers, and update them.
def update_token(headers):
    '''update_token uses the AWS CLI to request a fresh ECR authorization
       token, and uses it to update the Authorization header via HTTP basic
       authentication. We take as input previous headers, and update them.

       Parameters
       ==========
       headers: the previous request headers to update.
    '''
    try:
        from awscli.clidriver import create_clidriver
    except ImportError:
        bot.exit('Please pip install sregistry[aws]')

    driver = create_clidriver()
    aws = driver.session.create_client('ecr')
    tokens = aws.get_authorization_token()
    token = tokens['authorizationData'][0]['authorizationToken']

    try:
        token = {"Authorization": "Basic %s" % token}
        headers.update(token)
    except Exception:
        bot.error("Error getting token.")
        sys.exit(1)

    return headers

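For context, the token returned by ECR is itself a base64 encoding of 'AWS:<password>', which is why it can be dropped directly into a Basic Authorization header. A standalone check, assuming AWS credentials are already configured in the environment:

import base64

import boto3

ecr = boto3.client('ecr')
data = ecr.get_authorization_token()['authorizationData'][0]
token = data['authorizationToken']

# The decoded token has the form user:password, with user fixed to AWS
user, _, _ = base64.b64decode(token).decode('utf-8').partition(':')
print(user)  # AWS
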
create a folder at the drive root. If the folder already exists, it is simply returned.
def get_or_create_folder(self, folder):
    '''create a folder at the drive root. If the folder already exists,
       it is simply returned.

       folder = self._get_or_create_folder(self._base)
       $ folder
       {'id': '1pXR5S8wufELh9Q-jDkhCoYu-BL1NqN9y'}
    '''
    q = "mimeType='application/vnd.google-apps.folder' and name='%s'" % folder
    response = self._service.files().list(q=q,
                                          spaces='drive').execute().get('files', [])

    # If no folder is found, create it!
    if len(response) == 0:
        folder = self._create_folder(folder)
    else:
        folder = response[0]
    return folder

create a folder with a particular name. Be careful: if the folder already exists, you can still create one (a different one) with the equivalent name!
def create_folder(self, folder):
    '''create a folder with a particular name. Be careful: if the folder
       already exists, you can still create one (a different one) with the
       equivalent name!
    '''
    folder_metadata = {
        'name': os.path.basename(folder),
        'mimeType': 'application/vnd.google-apps.folder'
    }
    created = self._service.files().create(body=folder_metadata,
                                           fields='id').execute()
    return created

attempt to read the detail provided by the response. If none, default to using the reason
def _read_response(self, response, field="detail"):
    '''attempt to read the detail provided by the response. If none,
       default to using the reason'''

    try:
        message = json.loads(response._content.decode('utf-8'))[field]
    except Exception:
        message = response.reason
    return message

get or set the s3 bucket name. If not yet defined via an environment variable or setting, we create a name with the pattern sregistry-<robotnamer>-<1234>
def get_bucket_name(self):
    '''get or set the s3 bucket name. If not yet defined via an environment
       variable or setting, we create a name with the pattern
       sregistry-<robotnamer>-<1234>

       You can use the following environment variables to determine
       interaction with the bucket:

       SREGISTRY_S3_BUCKET: the bucket name (all lowercase, no underscore)
    '''
    # Get bucket name
    bucket_name = 'sregistry-%s' % RobotNamer().generate()
    self.bucket_name = self._get_and_update_setting('SREGISTRY_S3_BUCKET',
                                                    bucket_name)

given a bucket name and a client that is initialized, get or create the bucket.
def get_bucket(self):
    '''given a bucket name and a client that is initialized, get or
       create the bucket.
    '''
    for attr in ['bucket_name', 's3']:
        if not hasattr(self, attr):
            bot.exit('client is missing attribute %s' % attr)

    # See if the bucket already exists
    self.bucket = None
    for bucket in self.s3.buckets.all():
        if bucket.name == self.bucket_name:
            self.bucket = bucket

    # If the bucket doesn't exist, create it
    if self.bucket is None:
        self.bucket = self.s3.create_bucket(Bucket=self.bucket_name)
        bot.info('Created bucket %s' % self.bucket.name)

    return self.bucket

use the user provided endpoint and keys (from environment) to connect to the resource. We can share the aws environment variables:
def get_resource(self):
    '''use the user provided endpoint and keys (from environment) to
       connect to the resource. We can share the aws environment variables:

       AWS_ACCESS_KEY_ID
       AWS_SECRET_ACCESS_KEY

       https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html
    '''
    # If a base is defined, use it as a custom endpoint
    if self.base is not None:

        # s3.ServiceResource()
        self.s3 = boto3.resource('s3',
                                 endpoint_url=self.base,
                                 aws_access_key_id=self._id,
                                 aws_secret_access_key=self._key,
                                 config=boto3.session.Config(signature_version=self._signature))
    else:
        # If base is not defined, assume the aws client defaults
        # (we will need to test options for reading credentials here)
        self.s3 = boto3.client('s3')

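For example, the custom-endpoint branch pointed at a local MinIO server; the endpoint url, keys, and signature below are hypothetical:

import boto3

s3 = boto3.resource('s3',
                    endpoint_url='http://127.0.0.1:9000',
                    aws_access_key_id='minio',
                    aws_secret_access_key='minio123',
                    config=boto3.session.Config(signature_version='s3v4'))
print(s3)  # s3.ServiceResource()
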
update secrets will update/get the base for the server, along with the bucket name, defaulting to sregistry.
def _update_secrets(self, base=None):
    '''update secrets will update/get the base for the server, along
       with the bucket name, defaulting to sregistry.
    '''
    # We are required to have a base, either from environment or terminal
    self.base = self._get_and_update_setting('SREGISTRY_S3_BASE', self.base)
    self._id = self._required_get_and_update('AWS_ACCESS_KEY_ID')
    self._key = self._required_get_and_update('AWS_SECRET_ACCESS_KEY')

    # Get the desired S3 signature. Default is the current "s3v4" signature.
    # If specified, the user can request the old "s3" (v2) signature.
    self._signature = self._get_and_update_setting('SREGISTRY_S3_SIGNATURE')

    if self._signature != 's3':
        # Not set, or not set to s3 (v2): default to s3v4
        self._signature = 's3v4'

    # Define self.bucket_name, self.s3, then self.bucket
    self.get_bucket_name()
    self.get_resource()
    self.get_bucket()

for push, pull, and other api interactions, the user can optionally define a custom registry. If the registry name doesn't include http or https, add it. Parameters ========== q: the parsed image query (names), including the original
def _add_https(self, q):
    '''for push, pull, and other api interactions, the user can optionally
       define a custom registry. If the registry name doesn't include http
       or https, add it.

       Parameters
       ==========
       q: the parsed image query (names), including the original
    '''
    if not q['registry'].startswith('http'):

        # If the original uri used http or https, add it back
        if q['original'].startswith('http:'):
            q['registry'] = 'http://%s' % q['registry']
        elif q['original'].startswith('https:'):
            q['registry'] = 'https://%s' % q['registry']

        # Otherwise, guess from the user's environment
        else:
            prefix = 'https://'

            # The user can set an environment variable to specify nohttps
            nohttps = os.environ.get('SREGISTRY_REGISTRY_NOHTTPS')
            if nohttps is not None:
                prefix = 'http://'
            q['registry'] = '%s%s' % (prefix, q['registry'])

    return q

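A standalone sketch of the same decision logic; the q dict shape here is an assumption based on the docstring (in sregistry it comes from parse_image_name):

import os

def add_https(q):
    if not q['registry'].startswith('http'):
        if q['original'].startswith('http:'):
            q['registry'] = 'http://%s' % q['registry']
        elif q['original'].startswith('https:'):
            q['registry'] = 'https://%s' % q['registry']
        else:
            # No scheme anywhere: default to https unless the user opts out
            prefix = 'https://'
            if os.environ.get('SREGISTRY_REGISTRY_NOHTTPS') is not None:
                prefix = 'http://'
            q['registry'] = '%s%s' % (prefix, q['registry'])
    return q

q = {'registry': 'myregistry.io', 'original': 'myregistry.io/user/image:latest'}
print(add_https(q)['registry'])  # https://myregistry.io
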
update secrets will take a secrets credential file either located at .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS and update the current client secrets as well as the associated API base.
def _update_secrets(self):
    '''update secrets will take a secrets credential file either located
       at .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS
       and update the current client secrets as well as the associated
       API base.
    '''
    self.secrets = read_client_secrets()
    if self.secrets is not None:
        if "registry" in self.secrets:
            if "base" in self.secrets['registry']:
                self.base = self.secrets['registry']['base']
                self._update_base()

_init_clients will obtain the transfer and access tokens, and then use them to create a transfer client.
def _init_clients(self):
    '''_init_clients will obtain the transfer and access tokens, and then
       use them to create a transfer client.
    '''
    self._client = globus_sdk.NativeAppAuthClient(self._client_id)
    self._load_secrets()

load the secrets credentials file with the Globus OAuthTokenResponse
def _load_secrets(self):
    '''load the secrets credentials file with the Globus OAuthTokenResponse
    '''
    # Second priority: load from cache
    self.auth = self._get_and_update_setting('GLOBUS_AUTH_RESPONSE')
    self.transfer = self._get_and_update_setting('GLOBUS_TRANSFER_RESPONSE')

return boolean True or False if the client tokens (self.auth and self.transfer) need updating.
def _tokens_need_update(self):
    '''return boolean True or False if the client tokens (self.auth and
       self.transfer) need updating.
    '''
    # Assumes that auth and transfer have the same refresh time
    needs_update = True
    if self.auth is not None:
        if self.auth['expires_at_seconds'] > time.time():
            needs_update = False
    return needs_update

Present the client with the authentication flow to get tokens from a code. This simply updates the client _response to be used to get tokens for auth and transfer (both use access_token as index). We call this not on client initialization, but when the client is actually needed.
def _update_tokens(self):
    '''Present the client with the authentication flow to get tokens from
       a code. This simply updates the client _response to be used to get
       tokens for auth and transfer (both use access_token as index). We
       call this not on client initialization, but when the client is
       actually needed.
    '''
    self._client.oauth2_start_flow(refresh_tokens=True)
    authorize_url = self._client.oauth2_get_authorize_url()
    print('Please go to this URL and login: {0}'.format(authorize_url))

    # raw_input only exists in Python 2; fall back to input
    get_input = getattr(__builtins__, 'raw_input', input)
    auth_code = get_input(
        'Please enter the code you get after login here: ').strip()

    # Save to client
    self._response = self._client.oauth2_exchange_code_for_tokens(auth_code)
    self.auth = self._response.by_resource_server['auth.globus.org']
    self.transfer = self._response.by_resource_server['transfer.api.globus.org']
    self._update_setting('GLOBUS_TRANSFER_RESPONSE', self.transfer)
    self._update_setting('GLOBUS_AUTH_RESPONSE', self.auth)

return logs for a particular container. The logs file is equivalent to the name, but with extension .log. If there is no name, the most recent log is returned.
def logs(self, name=None):
    '''return logs for a particular container. The logs file is equivalent
       to the name, but with extension .log. If there is no name, the most
       recent log is returned.

       Parameters
       ==========
       name: the container name to print logs for.
    '''
    content = None
    results = self._list_logs()

    # If we are searching for a name
    if name is not None:
        for result in results:
            matches = False

            # Case 1: the name is in the storage path
            if name in result.name:
                matches = True

            # Case 2: match in metadata
            for key, val in result.metadata.items():
                if name in val:
                    matches = True

            if matches is True:
                content = self._print_log(result.name)

    # Otherwise return the most recent
    else:
        if len(results) > 0:
            latest = results[0]
            for result in results:
                if result.time_created >= latest.time_created:
                    latest = result
            content = self._print_log(latest.name)

    return content

return a list of logs. We return any file that ends in .log
def list_logs(self):
    '''return a list of logs. We return any file that ends in .log
    '''
    results = []
    for image in self._bucket.list_blobs():
        if image.name.endswith('log'):
            results.append(image)

    if len(results) == 0:
        bot.info("No containers found, based on extension .log")
    return results

helper function to retrieve a particular log, and print it. Parameters ========== logname: the name of the log to retrieve
def print_log(self, logname):
    '''helper function to retrieve a particular log, and print it.

       Parameters
       ==========
       logname: the name of the log to retrieve
    '''
    content = None

    # Try to retrieve the blob (log), if it exists
    logfile = self._bucket.get_blob(logname)
    if logfile:
        bot.info('[%s]' % logname)
        content = requests.get(logfile.media_link).text
        print(content)

    return content

split an endpoint name by colon, as the user can provide an endpoint name separated from a path:
def parse_endpoint_name(self, endpoint):
    '''split an endpoint name by colon, as the user can provide an endpoint
       name separated from a path:

       Parameters
       ==========
       endpoint: 12345:/path/on/remote
    '''
    parts = [x for x in endpoint.split(':') if x]
    endpoint = parts[0]
    if len(parts) == 1:
        path = ''
    else:
        path = '/'.join(parts[1:])
    return endpoint, path

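A quick standalone demonstration of the split:

def parse_endpoint_name(endpoint):
    parts = [x for x in endpoint.split(':') if x]
    path = '/'.join(parts[1:]) if len(parts) > 1 else ''
    return parts[0], path

print(parse_endpoint_name('12345:/path/on/remote'))  # ('12345', '/path/on/remote')
print(parse_endpoint_name('12345'))                  # ('12345', '')
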
create an endpoint folder, catching the error if it exists.
def create_endpoint_folder(self, endpoint_id, folder):
    '''create an endpoint folder, catching the error if it exists.

       Parameters
       ==========
       endpoint_id: the endpoint id parameters
       folder: the relative path of the folder to create
    '''
    try:
        res = self.transfer_client.operation_mkdir(endpoint_id, folder)
        bot.info("%s --> %s" % (res['message'], folder))
    except TransferAPIError:
        bot.info('%s already exists at endpoint' % folder)

return the first fullpath to a folder in the endpoint based on expanding the user's home from the globus config file. This function is fragile, but I don't see any other way to do it. Parameters ========== endpoint_id: the endpoint id to look up the path for
def get_endpoint_path(self, endpoint_id):
    '''return the first fullpath to a folder in the endpoint based on
       expanding the user's home from the globus config file. This function
       is fragile, but I don't see any other way to do it.

       Parameters
       ==========
       endpoint_id: the endpoint id to look up the path for
    '''
    config = os.path.expanduser("~/.globusonline/lta/config-paths")
    if not os.path.exists(config):
        bot.error('%s not found for a local Globus endpoint.' % config)
        sys.exit(1)

    path = None

    # Read in the config and get the root path
    config = [x.split(',')[0] for x in read_file(config)]
    for config_path in config:
        if os.path.exists(config_path):
            path = config_path
            break

    # If we don't have an existing path, exit
    if path is None:
        bot.error('No path was found for a local Globus endpoint.')
        sys.exit(1)

    return path

return a transfer client for the user
def init_transfer_client(self):
    '''return a transfer client for the user'''
    if self._tokens_need_update():
        self._update_tokens()

    # Create a Refresh Token Authorizer
    authorizer = globus_sdk.RefreshTokenAuthorizer(
        self.transfer['refresh_token'],
        self._client,
        access_token=self.transfer['access_token'],
        expires_at=self.transfer['expires_at_seconds'])

    self.transfer_client = globus_sdk.TransferClient(authorizer=authorizer)

use a transfer client to get a specific endpoint based on an endpoint id. Parameters ========== endpoint_id: the endpoint_id to retrieve
def get_endpoint(self, endpoint_id):
    '''use a transfer client to get a specific endpoint based on an
       endpoint id.

       Parameters
       ==========
       endpoint_id: the endpoint_id to retrieve
    '''
    endpoint = None
    if not hasattr(self, 'transfer_client'):
        self._init_transfer_client()

    try:
        endpoint = self.transfer_client.get_endpoint(endpoint_id).data
    except TransferAPIError:
        bot.info('%s does not exist.' % endpoint_id)
    return endpoint

use a transfer client to get endpoints. If a search term is included, we use it to search a scope of "all" in addition to personal and shared endpoints. Endpoints are organized by type (my-endpoints, shared-with-me, optionally all) and then id.
def get_endpoints(self, query=None):
    '''use a transfer client to get endpoints. If a search term is included,
       we use it to search a scope of "all" in addition to personal and
       shared endpoints. Endpoints are organized by type (my-endpoints,
       shared-with-me, optionally all) and then id.

       Parameters
       ==========
       query: an endpoint search term to add to a scope "all" search. If
              not defined, no searches are done with "all"
    '''
    self.endpoints = {}

    if not hasattr(self, 'transfer_client'):
        self._init_transfer_client()

    # We assume the user always wants to see owned and shared
    scopes = {'my-endpoints': None, 'shared-with-me': None}

    # If the user provides a query, add to search
    if query is not None:
        scopes.update({'all': query})

    for scope, q in scopes.items():
        self.endpoints[scope] = {}
        for ep in self.transfer_client.endpoint_search(q, filter_scope=scope):
            ep = ep.__dict__['_data']
            self.endpoints[scope][ep['id']] = ep

    # Alert the user that transfer isn't possible without a personal endpoint
    if len(self.endpoints['my-endpoints']) == 0:
        bot.warning('No personal endpoint found for local transfer.')
        bot.warning('https://www.globus.org/globus-connect-personal')

    return self.endpoints

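The returned structure is keyed by scope and then endpoint id; a sketch with entirely hypothetical ids and fields:

endpoints = {
    'my-endpoints': {
        'aaaa1111-2222-3333-4444-555566667777': {'display_name': 'my-laptop'},
    },
    'shared-with-me': {},
    # 'all' only appears when a query was provided
    'all': {
        'bbbb1111-2222-3333-4444-555566667777': {'display_name': 'hpc-cluster'},
    },
}
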
return a list of containers. Since Google Drive definitely has other kinds of files, we look for containers in a special sregistry folder (meaning the parent folder is sregistry) and with properties of type as container.
def list_containers(self):
    '''return a list of containers. Since Google Drive definitely has other
       kinds of files, we look for containers in a special sregistry folder
       (meaning the parent folder is sregistry) and with properties of type
       as container.
    '''
    # Get or create the base
    folder = self._get_or_create_folder(self._base)
    next_page = None
    containers = []

    # Parse the base for all containers, possibly over multiple pages
    while True:

        query = "mimeType='application/octet-stream'"  # ensures container
        query += " and properties has { key='type' and value='container' }"
        query += " and '%s' in parents" % folder['id']  # ensures in parent folder

        response = self._service.files().list(q=query,
                                              spaces='drive',
                                              fields='nextPageToken, files(id, name, properties)',
                                              pageToken=next_page).execute()

        containers += response.get('files', [])

        # If there is a next page, keep going!
        next_page = response.get('nextPageToken')
        if not next_page:
            break

    if len(containers) == 0:
        bot.info("No containers found, based on properties type:container")
        sys.exit(1)

    return containers

a "list all" search that doesn't require a query. Here we return to the user all objects that have custom properties value type set to container, which is set when the image is pushed.
def search_all(self):
    '''a "list all" search that doesn't require a query. Here we return to
       the user all objects that have custom properties value type set to
       container, which is set when the image is pushed.

       IMPORTANT: the upload function adds this metadata. For a container to
       be found by the client, it must have the properties value with type
       as container. It also should have a "uri" in properties to show the
       user, otherwise the user will have to query / download based on the id
    '''
    results = self._list_containers()

    matches = []
    bot.info("[drive://%s] Containers" % self._base)

    rows = []
    for i in results:

        # Fall back to the image name without the extension
        uri = i['name'].replace('.simg', '')

        # However the properties should include the uri
        if 'properties' in i:
            if 'uri' in i['properties']:
                uri = i['properties']['uri']
        rows.append([i['id'], uri])

        # Give the user back a uri
        i['uri'] = uri
        matches.append(i)

    bot.custom(prefix=" [drive://%s]" % self._base,
               message="\t\t[id]\t[uri]",
               color="PURPLE")
    bot.table(rows)
    return matches

search for a specific container. This function is the same as the search all, but instead of showing all results, filters them down based on user criteria (the query)
def container_query(self, query, quiet=False):
    '''search for a specific container. This function is the same as the
       search all, but instead of showing all results, filters them down
       based on user criteria (the query).
    '''
    results = self._list_containers()

    matches = []
    for result in results:
        is_match = False
        if query in result['id']:
            is_match = True
        elif query in result['name']:
            is_match = True
        else:
            for key, val in result['properties'].items():
                if query in val and is_match is False:
                    is_match = True
                    break

        if is_match is True:
            matches.append(result)

    if not quiet:
        bot.info("[drive://%s] Found %s containers" % (self._base, len(matches)))
        for image in matches:

            # If the image has properties, show them to the user
            if 'properties' in image:
                image.update(image['properties'])

            bot.info(image['uri'])
            for key in sorted(image, key=len):
                val = image[key]
                if isinstance(val, str):
                    bot.custom(prefix=key.ljust(10), message=val, color="CYAN")
            bot.newline()

    return matches

print the status for all or one of the backends.
def status(backend):
    '''print the status for all or one of the backends.
    '''
    print('[backend status]')
    settings = read_client_secrets()
    print('There are %s clients found in secrets.' % len(settings))

    if 'SREGISTRY_CLIENT' in settings:
        print('active: %s' % settings['SREGISTRY_CLIENT'])
        update_secrets(settings)
    else:
        print('There is no active client.')

add the variable to the config
def add(backend, variable, value, force=False):
    '''add the variable to the config
    '''
    print('[add]')
    settings = read_client_secrets()

    # If the variable doesn't begin with SREGISTRY_<CLIENT>_, add the prefix
    prefix = 'SREGISTRY_%s_' % backend.upper()
    if not variable.startswith(prefix):
        variable = '%s%s' % (prefix, variable)

    # All must be uppercase
    variable = variable.upper()
    bot.info("%s %s" % (variable, value))

    # Does the setting already exist?
    if backend in settings:
        if variable in settings[backend] and force is False:
            previous = settings[backend][variable]
            bot.error('%s is already set as %s. Use --force to override.'
                      % (variable, previous))
            sys.exit(1)

    if backend not in settings:
        settings[backend] = {}

    settings[backend][variable] = value
    update_secrets(settings)

remove a variable from the config, if found.
def remove(backend, variable):
    '''remove a variable from the config, if found.
    '''
    print('[remove]')
    settings = read_client_secrets()

    # All must be uppercase
    variable = variable.upper()

    # The variable may be stored with the SREGISTRY_<CLIENT>_ prefix
    prefixed = variable
    prefix = 'SREGISTRY_%s_' % backend.upper()
    if not variable.startswith(prefix):
        prefixed = '%s%s' % (prefix, variable)

    bot.info(variable)

    # Remove either form of the setting, if present
    if backend in settings:
        if variable in settings[backend]:
            del settings[backend][variable]
        if prefixed in settings[backend]:
            del settings[backend][prefixed]
        update_secrets(settings)

activate a backend by adding it to the .sregistry configuration file.
def activate(backend):
    '''activate a backend by adding it to the .sregistry configuration file.
    '''
    settings = read_client_secrets()
    if backend is not None:
        settings['SREGISTRY_CLIENT'] = backend
        update_secrets(settings)
        print('[activate] %s' % backend)

delete a backend and update the secrets file
def delete_backend(backend):
    '''delete a backend, and update the secrets file
    '''
    settings = read_client_secrets()
    if backend in settings:
        del settings[backend]

        # If the backend was the active client, remove that too
        if 'SREGISTRY_CLIENT' in settings:
            if settings['SREGISTRY_CLIENT'] == backend:
                del settings['SREGISTRY_CLIENT']

        update_secrets(settings)
        print('[delete] %s' % backend)
    else:
        if backend is not None:
            print('%s is not a known client.' % backend)
        else:
            print('Please specify a backend to delete.')

return a list of backends installed for the user, based on the config file keys present. Parameters ========== backend: a specific backend to list. If defined, just list its parameters.
def list_backends(backend=None):
    '''return a list of backends installed for the user, based on the
       config file keys present.

       Parameters
       ==========
       backend: a specific backend to list. If defined, just list its
                parameters.
    '''
    settings = read_client_secrets()

    # Backend names are the keys
    backends = list(settings.keys())
    backends = [b for b in backends if b != 'SREGISTRY_CLIENT']

    if backend in backends:
        bot.info(backend)
        print(json.dumps(settings[backend], indent=4, sort_keys=True))
    else:
        if backend is not None:
            print('%s is not a known client.' % backend)
        bot.info("Backends Installed")
        print('\n'.join(backends))

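For reference, a sketch of the secrets structure these config helpers read and write; the keys and values here are hypothetical:

settings = {
    'SREGISTRY_CLIENT': 'registry',                 # the active client
    'registry': {'base': 'https://myregistry.io'},  # one section per backend
    'aws': {'SREGISTRY_AWS_ID': 'AKIAEXAMPLE'},
}

# Backend names are every key except the active-client marker
backends = [b for b in settings if b != 'SREGISTRY_CLIENT']
print(backends)  # ['registry', 'aws']
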
pull an image from dropbox. The image is found based on the storage uri. Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths
def pull(self, images, file_name=None, save=True, **kwargs):
    '''pull an image from dropbox. The image is found based on the storage uri

       Parameters
       ==========
       images: refers to the uri given by the user to pull in the format
               <collection>/<namespace>. You should have an API that is able
               to retrieve a container based on parsing this uri.
       file_name: the user's requested name for the file. It can
                  optionally be None if the user wants a default.
       save: if True, you should save the container to the database
             using self.add()

       Returns
       =======
       finished: a single container path, or list of paths
    '''
    force = False
    if "force" in kwargs:
        force = kwargs['force']

    if not isinstance(images, list):
        images = [images]

    bot.debug('Execution of PULL for %s images' % len(images))

    # If used internally we want to return a list to the user.
    finished = []
    for image in images:

        names = parse_image_name(remove_uri(image))

        # The dropbox path is the storage path with a leading slash
        dropbox_path = '/%s' % names['storage']

        # If the user didn't provide a file, make one based on the names
        if file_name is None:
            file_name = self._get_storage_name(names)

        # If the file already exists and force is False
        if os.path.exists(file_name) and force is False:
            bot.error('Image exists! Remove first, or use --force to overwrite')
            sys.exit(1)

        # First ensure that it exists
        if self.exists(dropbox_path) is True:

            # _stream is a function to stream using the response to start
            metadata, response = self.dbx.files_download(dropbox_path)
            image_file = self._stream(response, stream_to=file_name)

            # Parse the metadata (and add the inspected image)
            metadata = self._get_metadata(image_file, metadata)

            # If we save to storage, the uri is the dropbox_path
            if save is True:
                container = self.add(image_path=image_file,
                                     image_uri=dropbox_path.strip('/'),
                                     metadata=metadata,
                                     url=response.url)

                # When the container is created, this is the path to the image
                image_file = container.image

            if os.path.exists(image_file):
                bot.debug('Retrieved image file %s' % image_file)
                bot.custom(prefix="Success!", message=image_file)
                finished.append(image_file)
        else:
            bot.error('%s does not exist. Try sregistry search to see images.'
                      % dropbox_path)

    if len(finished) == 1:
        finished = finished[0]
    return finished

push an image to your Dropbox. Parameters ========== path: should correspond to an absolute image path (or derive it) name: should be the complete uri that the user has requested to push. tag: should correspond with an image tag. This is provided to mirror Docker
def push(self, path, name, tag=None):
    '''push an image to your Dropbox

       Parameters
       ==========
       path: should correspond to an absolute image path (or derive it)
       name: should be the complete uri that the user has requested to push.
       tag: should correspond with an image tag. This is provided to mirror
            Docker

       If the image fits in a single chunk, the standard file_upload is
       used. If larger, the image is uploaded in chunks with a progress bar.
    '''
    path = os.path.abspath(path)
    bot.debug("PUSH %s" % path)

    if not os.path.exists(path):
        bot.error('%s does not exist.' % path)
        sys.exit(1)

    # Here is an example of getting metadata for a container
    names = parse_image_name(remove_uri(name), tag=tag)
    metadata = self.get_metadata(path, names=names)

    # Get the size of the file
    file_size = os.path.getsize(path)
    chunk_size = 4 * 1024 * 1024
    storage_path = "/%s" % names['storage']

    # This is MB
    # image_size = os.path.getsize(path) >> 20

    # Prepare the progress bar
    progress = 0
    bot.show_progress(progress, file_size, length=35)

    # If the image fits in a single chunk, use a standard upload
    with open(path, 'rb') as F:

        if file_size <= chunk_size:
            self.dbx.files_upload(F.read(), storage_path)

        # Otherwise upload in chunks
        else:

            start = self.dbx.files_upload_session_start(F.read(chunk_size))
            cursor = dropbox.files.UploadSessionCursor(session_id=start.session_id,
                                                       offset=F.tell())
            commit = dropbox.files.CommitInfo(path=storage_path)

            while F.tell() < file_size:
                progress += chunk_size

                # Finishing up the file, less than chunk_size to go
                if (file_size - F.tell()) <= chunk_size:
                    self.dbx.files_upload_session_finish(F.read(chunk_size),
                                                         cursor,
                                                         commit)

                # Still more than chunk_size to go, append another chunk
                else:
                    self.dbx.files_upload_session_append(F.read(chunk_size),
                                                         cursor.session_id,
                                                         cursor.offset)
                    cursor.offset = F.tell()

                # Update the progress bar
                bot.show_progress(iteration=progress,
                                  total=file_size,
                                  length=35,
                                  carriage_return=False)

    # Finish up
    bot.show_progress(iteration=file_size,
                      total=file_size,
                      length=35,
                      carriage_return=True)

    # Newline to finish the progress bar
    sys.stdout.write('\n')

update a base based on an image name, meaning detecting a particular registry and, if necessary, updating self.base. When the image name is parsed, the base will be given to remove the registry.
def _update_base(self, image):
    '''update a base based on an image name, meaning detecting a particular
       registry and, if necessary, updating self.base. When the image name
       is parsed, the base will be given to remove the registry.
    '''
    base = None

    # Google Container Registry
    if "gcr.io" in image:
        base = 'gcr.io'

    self._set_base(default_base=base)
    self._update_secrets()
    return base

set the API base or default to use Docker Hub. The user is able to set the base, api version, and protocol via a settings file or environment variables: SREGISTRY_DOCKERHUB_BASE: defaults to index.docker.io SREGISTRY_DOCKERHUB_VERSION: defaults to v2 SREGISTRY_DOCKERHUB_NOHTTPS: defaults to not set (so https)
def _set_base(self, default_base=None):
    '''set the API base or default to use Docker Hub. The user is able to
       set the base, api version, and protocol via a settings file or
       environment variables:

       SREGISTRY_DOCKERHUB_BASE: defaults to index.docker.io
       SREGISTRY_DOCKERHUB_VERSION: defaults to v2
       SREGISTRY_DOCKERHUB_NOHTTPS: defaults to not set (so https)
    '''
    base = self._get_setting('SREGISTRY_DOCKERHUB_BASE')
    version = self._get_setting('SREGISTRY_DOCKERHUB_VERSION')

    # We may re-set the base after reading the image
    if base is None:
        if default_base is None:
            base = "index.docker.io"
        else:
            base = default_base

    if version is None:
        version = "v2"

    # Determine the protocol
    nohttps = self._get_setting('SREGISTRY_DOCKERHUB_NOHTTPS')
    if nohttps is None:
        nohttps = "https://"
    else:
        nohttps = "http://"

    # <protocol>://<base>/<version>
    self._base = "%s%s" % (nohttps, base)
    self._version = version
    self.base = "%s%s/%s" % (nohttps, base.strip('/'), version)

update secrets will take a secrets credential file either located at .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS and update the current client secrets as well as the associated API base. For the case of using Docker Hub, if we find a .docker secrets file, we update from there.
def _update_secrets(self):
    '''update secrets will take a secrets credential file either located at
       .sregistry or the environment variable SREGISTRY_CLIENT_SECRETS and
       update the current client secrets as well as the associated API base.
       For the case of using Docker Hub, if we find a .docker secrets file,
       we update from there.
    '''
    # If the user has defined secrets, use them
    credentials = self._get_setting('SREGISTRY_DOCKERHUB_SECRETS')

    # First try for SINGULARITY exported, then try sregistry
    username = self._get_setting('SINGULARITY_DOCKER_USERNAME')
    password = self._get_setting('SINGULARITY_DOCKER_PASSWORD')
    username = self._get_setting('SREGISTRY_DOCKERHUB_USERNAME', username)
    password = self._get_setting('SREGISTRY_DOCKERHUB_PASSWORD', password)

    # Option 1: the user exports username and password
    auth = None
    if username is not None and password is not None:
        auth = basic_auth_header(username, password)
        self.headers.update(auth)

    # Option 2: look in the .docker config file
    if credentials is not None and auth is None:
        if os.path.exists(credentials):
            credentials = read_json(credentials)

            # Find a matching auth in the .docker config
            if "auths" in credentials:
                for auths, params in credentials['auths'].items():
                    if self._base in auths:
                        if 'auth' in params:
                            auth = "Basic %s" % params['auth']
                            self.headers['Authorization'] = auth

            # Also update headers
            if 'HttpHeaders' in credentials:
                for key, value in credentials['HttpHeaders'].items():
                    self.headers[key] = value

        else:
            bot.warning('Credentials file set to %s, but does not exist.'
                        % credentials)

share will use the client to get an image based on a query, and then share it with an email address (share_to) of choice.
def share(self, query, share_to):
    '''share will use the client to get an image based on a query, and then
       share it with an email address (share_to) of choice.
    '''
    images = self._container_query(query, quiet=True)

    if len(images) == 0:
        bot.error('Cannot find a remote image matching %s' % query)
        sys.exit(0)

    image = images[0]

    def callback(request_id, response, exception):
        if exception:
            # Handle error
            print(exception)
        else:
            share_id = response.get('id')
            bot.info('Share to %s complete: %s!' % (share_to, share_id))

    batch = self._service.new_batch_http_request(callback=callback)

    user_permission = {
        'type': 'user',
        'role': 'reader',
        'emailAddress': share_to
    }

    batch.add(self._service.permissions().create(
        fileId=image['id'],
        body=user_permission,
        fields='id',
    ))
    batch.execute()
    return image

pull an image from google drive based on a query (uri or id). Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths
def pull(self, images, file_name=None, save=True, **kwargs):
    '''pull an image from google drive, based on a query (uri or id)

       Parameters
       ==========
       images: refers to the uri given by the user to pull in the format
               <collection>/<namespace>. You should have an API that is able
               to retrieve a container based on parsing this uri.
       file_name: the user's requested name for the file. It can
                  optionally be None if the user wants a default.
       save: if True, you should save the container to the database
             using self.add()

       Returns
       =======
       finished: a single container path, or list of paths
    '''
    if not isinstance(images, list):
        images = [images]

    bot.debug('Execution of PULL for %s images' % len(images))

    # If used internally we want to return a list to the user.
    finished = []
    for image in images:

        q = parse_image_name(remove_uri(image))

        # Use container search to find the container based on uri
        bot.info('Searching for %s in drive://%s' % (q['uri'], self._base))
        matches = self._container_query(q['uri'], quiet=True)

        if len(matches) == 0:
            bot.info('No matching containers found.')
            sys.exit(0)

        # If the user didn't provide a file, make one based on the names
        if file_name is None:
            file_name = q['storage'].replace('/', '-')

        # We take the first match, the uri should be unique and known
        image = matches[0]
        request = self._service.files().get_media(fileId=image['id'])

        with open(file_name, 'wb') as fh:
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            bar = None

            # Download and update the user with a progress bar
            while done is False:
                status, done = downloader.next_chunk()

                # Create the bar on the first call
                if bar is None:
                    total = status.total_size / (1024 * 1024.0)
                    bar = ProgressBar(expected_size=total,
                                      filled_char='=',
                                      hide=self.quiet)
                bar.show(status.resumable_progress / (1024 * 1024.0))

        # The image was streamed to file_name
        image_file = file_name

        # If the user is saving to local storage, you need to assemble the
        # uri here in the expected format <collection>/<namespace>:<tag>@<version>
        if save is True:
            image_uri = q['uri']
            if "uri" in image:
                image_uri = image['uri']

            # Update metadata with selfLink
            image['selfLink'] = downloader._uri

            container = self.add(image_path=image_file,
                                 image_uri=image_uri,
                                 metadata=image,
                                 url=downloader._uri)

            # When the container is created, this is the path to the image
            image_file = container.image

        if os.path.exists(image_file):
            bot.debug('Retrieved image file %s' % image_file)
            bot.custom(prefix="Success!", message=image_file)
            finished.append(image_file)

    if len(finished) == 1:
        finished = finished[0]
    return finished

generate a base64 encoded header to ask for a token. This means base64 encoding a username and password, and adding the result to the Authorization header to identify the client.
def basic_auth_header(username, password):
    '''generate a base64 encoded header to ask for a token. This means
       base64 encoding a username and password, and adding the result to
       the Authorization header to identify the client.

       Parameters
       ==========
       username: the username
       password: the password
    '''
    s = "%s:%s" % (username, password)
    if sys.version_info[0] >= 3:
        s = bytes(s, 'utf-8')
        credentials = base64.b64encode(s).decode('utf-8')
    else:
        credentials = base64.b64encode(s)
    auth = {"Authorization": "Basic %s" % credentials}
    return auth

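A quick check, assuming basic_auth_header above is in scope:

import base64

print(basic_auth_header('user', 'pass'))
# {'Authorization': 'Basic dXNlcjpwYXNz'}

# The same credential with the standard library alone
assert base64.b64encode(b'user:pass').decode('utf-8') == 'dXNlcjpwYXNz'
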
use an endpoint specific payload and client secret to generate a signature for the request
def generate_signature(payload, secret):
    '''use an endpoint specific payload and client secret to generate a
       signature for the request'''
    payload = _encode(payload)
    secret = _encode(secret)
    return hmac.new(secret, digestmod=hashlib.sha256,
                    msg=payload).hexdigest()

generate_credential will base64 encode a string (e.g., a username or token) for use in a request header. Parameters ========== s: the string to encode
def generate_credential(s):
    '''generate_credential will base64 encode a string (e.g., a username
       or token) for use in a request header.

       Parameters
       ==========
       s: the string to encode
    '''
    if sys.version_info[0] >= 3:
        s = bytes(s, 'utf-8')
        credentials = base64.b64encode(s).decode('utf-8')
    else:
        credentials = base64.b64encode(s)
    return credentials

Authorize a client based on signing the payload with the client secret, a timestamp, and other metadata
def generate_header_signature(secret, payload, request_type):
    '''Authorize a client based on signing the payload with the client
       secret, a timestamp, and other metadata
    '''
    # Use the payload to generate a digest  push|collection|name|tag|user
    timestamp = generate_timestamp()
    credential = "%s/%s" % (request_type, timestamp)
    signature = generate_signature(payload, secret)
    return "SREGISTRY-HMAC-SHA256 Credential=%s,Signature=%s" % (credential,
                                                                 signature)

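A standalone sketch of the whole scheme; _encode and generate_timestamp are internal helpers not shown above, so minimal assumed versions are substituted here (the real formats may differ):

import hashlib
import hmac
from datetime import datetime

def _encode(item):
    # Assumed helper: coerce str to bytes
    if isinstance(item, str):
        item = item.encode('utf-8')
    return item

def generate_signature(payload, secret):
    return hmac.new(_encode(secret), digestmod=hashlib.sha256,
                    msg=_encode(payload)).hexdigest()

# Digest pattern from the comment above: push|collection|name|tag|user
payload = 'push|collection|container|latest|vsoch'
timestamp = datetime.utcnow().strftime('%Y%m%dT%H%MZ')  # assumed format
credential = 'push/%s' % timestamp
signature = generate_signature(payload, 'client-secret')
print('SREGISTRY-HMAC-SHA256 Credential=%s,Signature=%s' % (credential, signature))
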
delete request, use with caution
def delete(self, url, headers=None, return_json=True, default_headers=True):
    '''delete request, use with caution
    '''
    bot.debug('DELETE %s' % url)
    return self._call(url,
                      headers=headers,
                      func=requests.delete,
                      return_json=return_json,
                      default_headers=default_headers)

head request typically used for status code retrieval etc.
def head(self, url):
    '''head request, typically used for status code retrieval, etc.
    '''
    bot.debug('HEAD %s' % url)
    return self._call(url, func=requests.head)

determine if a resource is healthy based on an accepted response (200) or redirect (301)
def healthy(self, url):
    '''determine if a resource is healthy based on an accepted response
       (200), or a redirect (301) that resolves to one.

       Parameters
       ==========
       url: the URL to check status for, based on the status_code of a GET
    '''
    response = requests.get(url)
    status_code = response.status_code
    if status_code != 200:
        bot.error('%s, response status code %s.' % (url, status_code))
        return False
    return True

post will use requests to post to a particular url
def post(self, url, headers=None, data=None, return_json=True,
         default_headers=True):
    '''post will use requests to post to a particular url
    '''
    bot.debug("POST %s" % url)
    return self._call(url,
                      headers=headers,
                      func=requests.post,
                      data=data,
                      return_json=return_json,
                      default_headers=default_headers)

get will use requests to get a particular url
def get(self, url, headers=None, token=None, data=None, return_json=True,
        default_headers=True, quiet=False):
    '''get will use requests to get a particular url
    '''
    bot.debug("GET %s" % url)
    return self._call(url,
                      headers=headers,
                      func=requests.get,
                      data=data,
                      return_json=return_json,
                      default_headers=default_headers,
                      quiet=quiet)

paginate_get is a wrapper for get to paginate results
def paginate_get(self, url, headers=None, return_json=True, start_page=None):
    '''paginate_get is a wrapper for get to paginate results
    '''
    geturl = '%s&page=1' % url
    if start_page is not None:
        geturl = '%s&page=%s' % (url, start_page)

    results = []
    while geturl is not None:
        result = self._get(geturl, headers=headers, return_json=return_json)

        # If we have pagination:
        if isinstance(result, dict):
            if 'results' in result:
                results = results + result['results']
            geturl = result['next']

        # No pagination is a list
        else:
            return result

    return results

verify will return True or False to determine whether to verify the requests call or not. If False, we show the user a warning message, as this should not be done in production!
def verify(self):
    '''verify will return True or False to determine whether to verify the
       requests call or not. If False, we show the user a warning message,
       as this should not be done in production!
    '''
    from sregistry.defaults import DISABLE_SSL_CHECK

    if DISABLE_SSL_CHECK is True:
        bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')

    return not DISABLE_SSL_CHECK

call will issue the request, and retry once with a refreshed token given a 401 response, if the client has a _update_token function
def call(self, url, func, data=None, headers=None,
         return_json=True, stream=False, retry=True,
         default_headers=True, quiet=False):
    '''call will issue the request, and retry once with a refreshed token
       given a 401 response, if the client has a _update_token function.

       Parameters
       ==========
       func: the function (eg, post, get) to call
       url: the url to send the request to
       headers: if not None, update the client self.headers with dictionary
       data: additional data to add to the request
       return_json: return json if successful
       default_headers: use the client's self.headers (default True)
    '''
    if data is not None:
        if not isinstance(data, dict):
            data = json.dumps(data)

    heads = dict()
    if default_headers is True:
        heads = self.headers.copy()

    if headers is not None:
        if isinstance(headers, dict):
            heads.update(headers)

    response = func(url=url,
                    headers=heads,
                    data=data,
                    verify=self._verify(),
                    stream=stream)

    # Server error, exit
    if response.status_code in [500, 502]:
        bot.error("Beep boop! %s: %s" % (response.reason,
                                         response.status_code))
        sys.exit(1)

    # Not found; only print the error if not quiet
    if response.status_code == 404:
        if quiet is False:
            bot.error("Beep boop! %s: %s" % (response.reason,
                                             response.status_code))
        sys.exit(1)

    # Unauthorized: try again once with a refreshed token
    if response.status_code == 401:

        # If the client has a method to update the token, try it once
        if retry is True and hasattr(self, '_update_token'):
            self._update_token(response)
            return self._call(url, func,
                              data=data,
                              headers=headers,
                              return_json=return_json,
                              stream=stream,
                              retry=False)

        bot.error("Your credentials are expired! %s: %s" % (response.reason,
                                                            response.status_code))
        sys.exit(1)

    elif response.status_code == 200:
        if return_json:
            try:
                response = response.json()
            except ValueError:
                bot.error("The server returned a malformed response.")
                sys.exit(1)

    return response

pull an image from a singularity registry. Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths
def pull(self, images, file_name=None, save=True, **kwargs):
    '''pull an image from a singularity registry

       Parameters
       ==========
       images: refers to the uri given by the user to pull in the format
               <collection>/<namespace>. You should have an API that is able
               to retrieve a container based on parsing this uri.
       file_name: the user's requested name for the file. It can
                  optionally be None if the user wants a default.
       save: if True, you should save the container to the database
             using self.add()

       Returns
       =======
       finished: a single container path, or list of paths
    '''
    if not isinstance(images, list):
        images = [images]

    # Interaction with a registry requires secrets
    self.require_secrets()

    bot.debug('Execution of PULL for %s images' % len(images))

    finished = []
    for image in images:

        q = parse_image_name(remove_uri(image))

        # If a custom registry is not set, use the default base
        if q['registry'] is None:
            q['registry'] = self.base

        # Ensure https is added back to the registry uri
        q = self._add_https(q)

        # All custom registries need api appended
        if not q['registry'].endswith('api'):
            q['registry'] = '%s/api' % q['registry']

        # Verify image existence, and obtain the id
        url = "%s/container/%s/%s:%s" % (q['registry'],
                                         q['collection'],
                                         q['image'],
                                         q['tag'])
        bot.debug('Retrieving manifest at %s' % url)

        try:
            manifest = self._get(url)
        except SSLError:
            bot.exit('Issue with %s, try exporting SREGISTRY_REGISTRY_NOHTTPS.' % url)

        # Private container collection
        if isinstance(manifest, Response):

            # Requires a token
            if manifest.status_code == 403:
                SREGISTRY_EVENT = self.authorize(request_type="pull", names=q)
                headers = {'Authorization': SREGISTRY_EVENT}
                self._update_headers(headers)
                manifest = self._get(url)

                # Still denied
                if isinstance(manifest, Response):
                    if manifest.status_code == 403:
                        manifest = 403

        if isinstance(manifest, int):
            if manifest == 400:
                bot.error('Bad request (400). Is this a private container?')
            elif manifest == 404:
                bot.error('Container not found (404)')
            elif manifest == 403:
                bot.error('Unauthorized (403)')
            sys.exit(1)

        # Successful pull
        if "image" in manifest:

            # Add the self link to the manifest
            manifest['selfLink'] = url

            if file_name is None:
                file_name = q['storage'].replace('/', '-')

            # Show progress if not quiet
            image_file = self.download(url=manifest['image'],
                                       file_name=file_name,
                                       show_progress=not self.quiet)

            # If the user is saving to local storage
            if save is True:
                image_uri = "%s/%s:%s" % (manifest['collection'],
                                          manifest['name'],
                                          manifest['tag'])
                container = self.add(image_path=image_file,
                                     image_uri=image_uri,
                                     metadata=manifest,
                                     url=manifest['image'])
                image_file = container.image

            if os.path.exists(image_file):
                bot.debug('Retrieved image file %s' % image_file)
                bot.custom(prefix="Success!", message=image_file)
                finished.append(image_file)

    if len(finished) == 1:
        finished = finished[0]
    return finished

pull an image from s3 storage. Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths
def pull(self, images, file_name=None, save=True, **kwargs):
    '''pull an image from s3 storage

       Parameters
       ==========
       images: refers to the uri given by the user to pull in the format
               <collection>/<namespace>. You should have an API that is able
               to retrieve a container based on parsing this uri.
       file_name: the user's requested name for the file. It can
                  optionally be None if the user wants a default.
       save: if True, you should save the container to the database
             using self.add()

       Returns
       =======
       finished: a single container path, or list of paths
    '''
    if not isinstance(images, list):
        images = [images]

    bot.debug('Execution of PULL for %s images' % len(images))

    finished = []
    for image in images:

        image = remove_uri(image)
        names = parse_image_name(image)

        if file_name is None:
            file_name = names['storage'].replace('/', '-')

        # Assume the user provided the correct uri to start
        uri = names['storage_uri']

        # First try to get the storage uri directly.
        try:
            self.bucket.download_file(uri, file_name)

        # If we can't find the file, help the user
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":

                # Case 1: image not found, but no error with the API
                bot.error('Cannot find %s!' % image)

                # Try to help the user with suggestions
                results = self._search_all(query=image)
                if len(results) > 0:
                    bot.info('Did you mean:\n%s' % '\n'.join(results))
                sys.exit(1)

            else:
                # Case 2: error with request, exit.
                bot.exit('Error downloading image %s' % image)

        # If we get down here, we have a uri
        found = None
        for obj in self.bucket.objects.filter(Prefix=image):
            if image in obj.key:
                found = obj

        # If we find the object, get its metadata
        metadata = {}
        if found is not None:
            metadata = found.get()['Metadata']

        # A metadata bug will capitalize all fields; the workaround is to
        # lowercase them. https://github.com/boto/boto3/issues/1709
        metadata = dict((k.lower(), v) for k, v in metadata.items())
        metadata.update(names)

        # If the user is saving to local storage
        if save is True and os.path.exists(file_name):
            container = self.add(image_path=file_name,
                                 image_uri=names['tag_uri'],
                                 metadata=metadata)
            file_name = container.image

        # If the image was pulled to either location
        if os.path.exists(file_name):
            bot.custom(prefix="Success!", message=file_name)
            finished.append(file_name)

    if len(finished) == 1:
        finished = finished[0]
    return finished

delete an image from Singularity Registry
def remove(self, image, force=False):
    '''delete an image from Singularity Registry'''

    q = parse_image_name(remove_uri(image))

    # If the registry is not provided in the uri, use the default base
    if q['registry'] is None:
        q['registry'] = self.base

    # If the base doesn't start with http or https, add it
    q = self._add_https(q)

    url = '%s/container/%s/%s:%s' % (q['registry'],
                                     q["collection"],
                                     q["image"],
                                     q["tag"])

    SREGISTRY_EVENT = self.authorize(request_type="delete", names=q)
    headers = {'Authorization': SREGISTRY_EVENT}
    self._update_headers(fields=headers)

    continue_delete = True
    if force is False:
        response = input("Are you sure you want to delete %s?" % q['uri'])

        while len(response) < 1 or response[0].lower().strip() not in "ynyesno":
            response = input("Please answer yes or no: ")

        if response[0].lower().strip() in "no":
            continue_delete = False

    if continue_delete is True:
        response = self._delete(url)
        message = self._read_response(response)
        bot.info("Response %s, %s" % (response.status_code, message))
    else:
        bot.info("Delete cancelled.")

get version by way of sregistry.version; returns a lookup dictionary with several global variables without needing to import singularity
def get_lookup(): '''get version by way of sregistry.version, returns a lookup dictionary with several global variables without needing to import singularity ''' lookup = dict() version_file = os.path.join('sregistry', 'version.py') with open(version_file) as filey: exec(filey.read(), lookup) return lookup
get requirements, meaning reading in requirements and versions from the lookup obtained with get_lookup
def get_reqs(lookup=None, key='INSTALL_REQUIRES'):
    '''get requirements, meaning reading in requirements and versions from
       the lookup obtained with get_lookup'''

    if lookup is None:
        lookup = get_lookup()

    install_requires = []
    for module in lookup[key]:
        module_name = module[0]
        module_meta = module[1]

        if "exact_version" in module_meta:
            dependency = "%s==%s" % (module_name, module_meta['exact_version'])
        elif "min_version" in module_meta:
            if module_meta['min_version'] is None:
                dependency = module_name
            else:
                dependency = "%s>=%s" % (module_name, module_meta['min_version'])

        install_requires.append(dependency)
    return install_requires
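A sketch of the lookup structure get_reqs expects under INSTALL_REQUIRES; the module names and versions below are hypothetical examples, not the package's real requirements.

# Hypothetical INSTALL_REQUIRES structure, matching what get_reqs parses.
# Each entry is (module_name, metadata), where metadata carries either an
# exact_version or a min_version (None meaning "any version").
INSTALL_REQUIRES = (
    ('requests', {'min_version': '2.18.4'}),
    ('six', {'exact_version': '1.11.0'}),
    ('pygments', {'min_version': None}),
)

# get_reqs({'INSTALL_REQUIRES': INSTALL_REQUIRES}) would yield:
# ['requests>=2.18.4', 'six==1.11.0', 'pygments']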
get_singularity_version will determine the singularity version for a build. First an environment variable is looked at, followed by using the system version.
def get_singularity_version(singularity_version=None):
    '''get_singularity_version will determine the singularity version for a
       build. First an environment variable is looked at, followed by using
       the system version.

       Parameters
       ==========
       singularity_version: if not defined, look for in environment. If
       still not found, try finding via executing --version to Singularity.
       Only return None if not set in environment or installed.
    '''
    if singularity_version is None:
        singularity_version = os.environ.get("SINGULARITY_VERSION")

    if singularity_version is None:
        try:
            cmd = ['singularity', '--version']
            output = run_command(cmd)

            if isinstance(output['message'], bytes):
                output['message'] = output['message'].decode('utf-8')
            singularity_version = output['message'].strip('\n')
            bot.info("Singularity %s being used." % singularity_version)

        except Exception:
            singularity_version = None
            bot.warning("Singularity version not found, so it's likely not installed.")

    return singularity_version
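The lookup order can be sketched directly; the version strings below are made-up values for illustration.

import os

# 1. An explicit argument wins
assert get_singularity_version('3.8.0') == '3.8.0'

# 2. Otherwise the environment is checked first
os.environ['SINGULARITY_VERSION'] = '2.6.1'
assert get_singularity_version() == '2.6.1'

# 3. With neither set, `singularity --version` is run, and None is
#    returned only if Singularity is not installed
del os.environ['SINGULARITY_VERSION']
version = get_singularity_version()  # e.g. '2.6.1', or None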
check_install will attempt to run the singularity command and return True if installed. The command line utils will not run without this check.
def check_install(software=None, quiet=True):
    '''check_install will attempt to run the singularity command, and
       return True if installed. The command line utils will not run
       without this check.

       Parameters
       ==========
       software: the software to check if installed
       quiet: should we be quiet? (default True)
    '''
    if software is None:
        software = "singularity"

    cmd = [software, '--version']
    try:
        version = run_command(cmd)
    except FileNotFoundError:
        return False

    if version is not None:
        if quiet is False and version['return_code'] == 0:
            version = version['message']
            bot.info("Found %s version %s" % (software.upper(), version))
        return True

    return False
get_installdir returns the installation directory of the application
def get_installdir(): '''get_installdir returns the installation directory of the application ''' return os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return the robot.png thumbnail from the database folder. If the user has exported a different image, use that instead.
def get_thumbnail(): '''return the robot.png thumbnail from the database folder. if the user has exported a different image, use that instead. ''' from sregistry.defaults import SREGISTRY_THUMBNAIL if SREGISTRY_THUMBNAIL is not None: if os.path.exists(SREGISTRY_THUMBNAIL): return SREGISTRY_THUMBNAIL return "%s/database/robot.png" %get_installdir()
run_command uses subprocess to send a command to the terminal.
def run_command(cmd, sudo=False):
    '''run_command uses subprocess to send a command to the terminal.

       Parameters
       ==========
       cmd: the command to send, should be a list for subprocess
       sudo: if True, prefix the command with sudo (default False)
    '''
    if sudo is True:
        cmd = ['sudo'] + cmd

    try:
        output = Popen(cmd, stderr=STDOUT, stdout=PIPE)

    # If the first token (e.g., sudo) isn't found, retry without it
    except FileNotFoundError:
        cmd.pop(0)
        output = Popen(cmd, stderr=STDOUT, stdout=PIPE)

    t = output.communicate()[0], output.returncode
    output = {'message': t[0], 'return_code': t[1]}

    if isinstance(output['message'], bytes):
        output['message'] = output['message'].decode('utf-8')

    return output
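A quick usage sketch; the echo command is just an illustrative example.

# run_command returns a dict with the decoded output and return code
result = run_command(['echo', 'hello'])
print(result['message'])      # 'hello\n'
print(result['return_code'])  # 0

# With sudo=True the command is prefixed with sudo; if sudo itself is
# missing, the FileNotFoundError fallback retries the bare command
result = run_command(['whoami'], sudo=True)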
if you want to add an extra print (of a parameter, for example) for the user when the client initializes, write it here, eg: bot.info('[setting] value')
def _speak(self):
    '''if you want to add an extra print (of a parameter, for example)
       for the user when the client initializes, write it here, eg:
       bot.info('[setting] value')
    '''
    if hasattr(self, 'account'):
        bot.info('connected to %s' % self.account.name.display_name)
this is a wrapper around the main client.get_metadata to first parse a Dropbox FileMetadata into a dictionary, then pass it on to the primary get_metadata function.
def _get_metadata(self, image_file=None, dbx_metadata=None):
    '''this is a wrapper around the main client.get_metadata to first parse
       a Dropbox FileMetadata into a dictionary, then pass it on to the
       primary get_metadata function.

       Parameters
       ==========
       image_file: the full path to the image file that had metadata
                   extracted
       dbx_metadata: the Dropbox FileMetadata to parse.
    '''
    metadata = dict()

    if dbx_metadata is not None:
        for key in dbx_metadata.__dir__():
            value = getattr(dbx_metadata, key)

            if type(value) in [str, datetime.datetime, bool, int, float]:
                metadata[key.strip('_')] = value

    return self.get_metadata(image_file, names=metadata)
update secrets will look for a dropbox token in the environment at SREGISTRY_DROPBOX_TOKEN and if found, create a client. If not, an error message is returned and the client exits.
def _update_secrets(self):
    '''update secrets will look for a dropbox token in the environment at
       SREGISTRY_DROPBOX_TOKEN and if found, create a client. If not,
       an error message is returned and the client exits.
    '''
    # Retrieve the user token. Exit if not found
    token = self._required_get_and_update('SREGISTRY_DROPBOX_TOKEN')

    # Create the dropbox client
    self.dbx = Dropbox(token)

    # Verify that the account is valid
    try:
        self.account = self.dbx.users_get_current_account()
    except AuthError:
        bot.error('Account invalid. Exiting.')
        sys.exit(1)
print the output to the console for the user. If the user wants the content also printed to an output file, do that.
def print_output(response, output_file=None):
    '''print the output to the console for the user. If the user wants the
       content also printed to an output file, do that.

       Parameters
       ==========
       response: the response from the builder, with metadata added
       output_file: if defined, write output also to file
    '''
    # If successfully built, show container uri
    if response['status'] == 'SUCCESS':
        bucket = response['artifacts']['objects']['location']
        obj = response['artifacts']['objects']['paths'][0]
        bot.custom("MD5HASH", response['file_hash'], 'CYAN')
        bot.custom("SIZE", response['size'], 'CYAN')
        bot.custom(response['status'], bucket + obj, 'CYAN')
    else:
        bot.custom(response['status'], 'see logs for details', 'CYAN')

    # Show the logs no matter what
    bot.custom("LOGS", response['logUrl'], 'CYAN')

    # Did the user make the container public?
    if "public_url" in response:
        bot.custom('URL', response['public_url'], 'CYAN')

    # Does the user also need writing to an output file?
    if output_file is not None:
        with open(output_file, 'w') as filey:
            if response['status'] == 'SUCCESS':
                filey.writelines('MD5HASH %s\n' % response['file_hash'])
                filey.writelines('SIZE %s\n' % response['size'])
                filey.writelines('%s %s%s\n' % (response['status'], bucket, obj))
            filey.writelines('LOGS %s\n' % response['logUrl'])
            if "public_url" in response:
                filey.writelines('URL %s\n' % response['public_url'])
kill is a helper function to call the "kill" function of the client, meaning we bring down an instance.
def kill(args): '''kill is a helper function to call the "kill" function of the client, meaning we bring down an instance. ''' from sregistry.main import Client as cli if len(args.commands) > 0: for name in args.commands: cli.destroy(name) sys.exit(0)
list a specific template (if a name is provided) or all templates available.
def templates(args, template_name=None): '''list a specific template (if a name is provided) or all templates available. Parameters ========== args: the argparse object to look for a template name template_name: if not set, show all ''' from sregistry.main import get_client # We don't need storage/compute connections cli = get_client(init=False) if len(args.commands) > 0: template_name = args.commands.pop(0) cli.list_templates(template_name) sys.exit(0)
list a specific log for a builder, or the latest log if none provided
def list_logs(args, container_name=None): '''list a specific log for a builder, or the latest log if none provided Parameters ========== args: the argparse object to look for a container name container_name: a default container name set to be None (show latest log) ''' from sregistry.main import Client as cli if len(args.commands) > 0: container_name = args.commands.pop(0) cli.logs(container_name) sys.exit(0)
get a listing of collections that the user has access to.
def get_collections(self): '''get a listing of collections that the user has access to. ''' collections = [] for container in self.conn.get_account()[1]: collections.append(container['name']) return collections
get or create a collection, meaning that if the get returns None, create and return the response to the user. Parameters ========== name: the name of the collection to get (and create)
def _get_or_create_collection(self, name):
    '''get or create a collection, meaning that if the get returns None,
       create and return the response to the user.

       Parameters
       ==========
       name: the name of the collection to get (and create)
    '''
    try:
        collection = self._get_collection(name)
    except Exception:
        bot.info('Creating collection %s...' % name)
        collection = self.conn.put_container(name)
    return collection
update secrets will look for a user and token in the environment. If we find the values, cache and continue. Otherwise, exit with error.
def _update_secrets(self): '''update secrets will look for a user and token in the environment If we find the values, cache and continue. Otherwise, exit with error ''' # Get the swift authentication type first. That will determine what we # will need to collect for proper authentication self.config['SREGISTRY_SWIFT_AUTHTYPE'] = self._required_get_and_update( 'SREGISTRY_SWIFT_AUTHTYPE') # Check what auth version is requested and setup the connection if self.config['SREGISTRY_SWIFT_AUTHTYPE'] == 'preauth': # Pre-Authenticated Token/URL - Use OS_AUTH_TOKEN/OS_STORAGE_URL # Retrieve the user token, user, and base. Exit if not found for envar in ['SREGISTRY_SWIFT_OS_AUTH_TOKEN', 'SREGISTRY_SWIFT_OS_STORAGE_URL' ]: self.config[envar] = self._required_get_and_update(envar) self.conn = swiftclient.Connection( preauthurl=self.config['SREGISTRY_SWIFT_OS_STORAGE_URL'], preauthtoken=self.config['SREGISTRY_SWIFT_OS_AUTH_TOKEN'] ) elif self.config['SREGISTRY_SWIFT_AUTHTYPE'] == 'keystonev3': # Keystone v3 Authentication # Retrieve the user token, user, and base. Exit if not found for envar in ['SREGISTRY_SWIFT_USER', 'SREGISTRY_SWIFT_TOKEN', 'SREGISTRY_SWIFT_URL']: self.config[envar] = self._required_get_and_update(envar) auth_url = '%s/v3' % self.config['SREGISTRY_SWIFT_URL'] # Setting to default as a safety. No v3 environment to test # May require ENV vars for real use. - M. Moore _os_options = { 'user_domain_name': 'Default', 'project_domain_name': 'Default', 'project_name': 'Default' } # Save the connection to use for some command self.conn = swiftclient.Connection( user=self.config['SREGISTRY_SWIFT_USER'], key=self.config['SREGISTRY_SWIFT_TOKEN'], os_options=_os_options, authurl=auth_url, auth_version='3' ) elif self.config['SREGISTRY_SWIFT_AUTHTYPE'] == 'keystonev2': # Keystone v2 Authentication # Retrieve the user token, user, and base. Exit if not found for envar in ['SREGISTRY_SWIFT_USER', 'SREGISTRY_SWIFT_TOKEN', 'SREGISTRY_SWIFT_TENANT', 'SREGISTRY_SWIFT_REGION', 'SREGISTRY_SWIFT_URL']: self.config[envar] = self._required_get_and_update(envar) # More human friendly to interact with auth_url = '%s/v2.0/' % self.config['SREGISTRY_SWIFT_URL'] # Set required OpenStack options for tenant/region _os_options = { 'tenant_name': self.config['SREGISTRY_SWIFT_TENANT'], 'region_name': self.config['SREGISTRY_SWIFT_REGION'] } # Save the connection to use for some command self.conn = swiftclient.Connection( user=self.config['SREGISTRY_SWIFT_USER'], key=self.config['SREGISTRY_SWIFT_TOKEN'], os_options=_os_options, authurl=auth_url, auth_version='2' ) else: # Legacy Authentication # Retrieve the user token, user, and base. Exit if not found for envar in ['SREGISTRY_SWIFT_USER', 'SREGISTRY_SWIFT_TOKEN', 'SREGISTRY_SWIFT_URL']: self.config[envar] = self._required_get_and_update(envar) # More human friendly to interact with auth_url = '%s/auth/' % self.config['SREGISTRY_SWIFT_URL'] # Save the connection to use for some command self.conn = swiftclient.Connection( user=self.config['SREGISTRY_SWIFT_USER'], key=self.config['SREGISTRY_SWIFT_TOKEN'], authurl=auth_url, )
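A sketch of the environment the legacy path expects, set from Python before the client initializes; all values are placeholders, and a real deployment would export these in the shell instead.

import os

# Placeholder credentials for the legacy Swift auth path
os.environ['SREGISTRY_SWIFT_AUTHTYPE'] = 'legacy'
os.environ['SREGISTRY_SWIFT_USER'] = 'account:user'
os.environ['SREGISTRY_SWIFT_TOKEN'] = 'secret-key'
os.environ['SREGISTRY_SWIFT_URL'] = 'https://swift.example.com'

# keystonev2 additionally requires:
#   SREGISTRY_SWIFT_TENANT, SREGISTRY_SWIFT_REGION
# preauth instead requires:
#   SREGISTRY_SWIFT_OS_AUTH_TOKEN, SREGISTRY_SWIFT_OS_STORAGE_URL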
The user is required to have an application secrets file in his or her environment. The information isn't saved to the secrets file, but the client exits with error if the variable isn't found.
def _update_secrets(self):
    '''The user is required to have an application secrets file in his
       or her environment. The information isn't saved to the secrets
       file, but the client exits with error if the variable isn't found.
    '''
    env = 'GOOGLE_APPLICATION_CREDENTIALS'
    self._secrets = self._get_and_update_setting(env)

    if self._secrets is None:
        bot.error('You must export %s to use Google Storage client' % env)
        sys.exit(1)
init client will check if the user has defined a bucket that differs from the default, use the application credentials to get the bucket, and then instantiate the client.
def _init_client(self): '''init client will check if the user has defined a bucket that differs from the default, use the application credentials to get the bucket, and then instantiate the client. ''' # Get storage and compute services self._get_services() env = 'SREGISTRY_GOOGLE_STORAGE_BUCKET' self._bucket_name = self._get_and_update_setting(env) # If the user didn't set in environment, use default if self._bucket_name is None: self._bucket_name = 'sregistry-gcloud-build-%s' %os.environ['USER'] # The build bucket is for uploading .tar.gz files self._build_bucket_name = "%s_cloudbuild" % self._bucket_name # Main storage bucket for containers, and dependency bucket with targz self._bucket = self._get_bucket(self._bucket_name) self._build_bucket = self._get_bucket(self._build_bucket_name)
get a bucket based on a bucket name. If it doesn't exist, create it.
def _get_bucket(self, bucket_name):
    '''get a bucket based on a bucket name. If it doesn't exist, create it.

       Parameters
       ==========
       bucket_name: the name of the bucket to get (or create). It should
                    not contain google, and should be all lowercase with -
                    or underscores.
    '''
    # Case 1: The bucket already exists
    try:
        bucket = self._bucket_service.get_bucket(bucket_name)

    # Case 2: The bucket needs to be created
    except google.cloud.exceptions.NotFound:
        bucket = self._bucket_service.create_bucket(bucket_name)

    # Case 3: Another error (e.g., the bucket name is already taken)
    except Exception:
        bot.error('Cannot get or create %s' % bucket_name)
        sys.exit(1)

    return bucket
get the correct client depending on the driver of interest. The selected client can be chosen based on the environment variable SREGISTRY_CLIENT, and later changed based on the image uri parsed. If there is no preference, the default is to load the singularity hub client.
def get_client(image=None, quiet=False, **kwargs): ''' get the correct client depending on the driver of interest. The selected client can be chosen based on the environment variable SREGISTRY_CLIENT, and later changed based on the image uri parsed If there is no preference, the default is to load the singularity hub client. Parameters ========== image: if provided, we derive the correct client based on the uri of an image. If not provided, we default to environment, then hub. quiet: if True, suppress most output about the client (e.g. speak) ''' from sregistry.defaults import SREGISTRY_CLIENT # Give the user a warning: if not check_install(): bot.warning('Singularity is not installed, function might be limited.') # If an image is provided, use to determine client client_name = get_uri(image) if client_name is not None: SREGISTRY_CLIENT = client_name # If no obvious credential provided, we can use SREGISTRY_CLIENT if SREGISTRY_CLIENT == 'aws': from .aws import Client elif SREGISTRY_CLIENT == 'docker': from .docker import Client elif SREGISTRY_CLIENT == 'dropbox': from .dropbox import Client elif SREGISTRY_CLIENT == 'gitlab': from .gitlab import Client elif SREGISTRY_CLIENT == 'globus': from .globus import Client elif SREGISTRY_CLIENT == 'nvidia': from .nvidia import Client elif SREGISTRY_CLIENT == 'hub': from .hub import Client elif SREGISTRY_CLIENT == 'google-drive': from .google_drive import Client elif SREGISTRY_CLIENT == 'google-compute': from .google_storage import Client elif SREGISTRY_CLIENT == 'google-storage': from .google_storage import Client elif SREGISTRY_CLIENT == 'google-build': from .google_build import Client elif SREGISTRY_CLIENT == 'registry': from .registry import Client elif SREGISTRY_CLIENT == 's3': from .s3 import Client elif SREGISTRY_CLIENT == 'swift': from .swift import Client else: from .hub import Client Client.client_name = SREGISTRY_CLIENT Client.quiet = quiet # Create credentials cache, if it doesn't exist Client._credential_cache = get_credential_cache() # Add the database, if wanted if SREGISTRY_DATABASE is not None: # These are global functions used across modules from sregistry.database import ( init_db, add, cp, get, mv, rm, rmi, images, inspect, rename, get_container, get_collection, get_or_create_collection ) # Actions Client._init_db = init_db Client.add = add Client.cp = cp Client.get = get Client.inspect = inspect Client.mv = mv Client.rename = rename Client.rm = rm Client.rmi = rmi Client.images = images # Collections Client.get_or_create_collection = get_or_create_collection Client.get_container = get_container Client.get_collection = get_collection # If no database, import dummy functions that return the equivalent else: from sregistry.database import ( add, init_db ) Client.add = add Client._init_db = init_db # Initialize the database cli = Client() if hasattr(Client, '_init_db'): cli._init_db(SREGISTRY_DATABASE) return cli
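Two ways of selecting a client, sketched below; the docker uri is an illustrative example.

from sregistry.main import get_client

# 1. Derive the client from an image uri
client = get_client('docker://library/ubuntu:latest')
print(client.client_name)   # 'docker'

# 2. Or rely on SREGISTRY_CLIENT in the environment, falling back to hub
client = get_client()
print(client.client_name)   # e.g. 'hub'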
pull an image from an endpoint Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths
def pull(self, images, file_name=None, save=True, force=False, **kwargs): ''' pull an image from an endpoint Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths ''' if not isinstance(images,list): images = [images] bot.debug('Execution of PULL for %s images' % len(images)) finished = [] for image in images: q = parse_image_name(remove_uri(image), lowercase=False) # Verify image existence, and obtain id url = "%s/container/%s/%s:%s" %(self.base, q['collection'], q['image'], q['tag']) bot.debug('Retrieving manifest at %s' % url) manifest = self._get(url) manifest['selfLink'] = url # If the manifest reveals a version, update names if "version" in manifest: q = parse_image_name('%s@%s' %(q['uri'], manifest['version'])) if file_name is None: file_name = self._get_storage_name(q) file_name = os.path.abspath(file_name) # Determine if the user already has the image if os.path.exists(file_name) and force is False: bot.error('Image exists! Remove first, or use --force to overwrite') sys.exit(1) show_bar = not bool(self.quiet) image_file = self.download(url=manifest['image'], file_name=os.path.basename(file_name), show_progress=show_bar) # If the user is saving to local storage if save is True: image_uri = "%s:%s@%s" %(manifest['name'], manifest['tag'], manifest['version']) container = self.add(image_path=image_file, image_uri=image_uri, image_name=file_name, metadata=manifest, url=manifest['image']) image_file = container.image if os.path.exists(image_file): bot.debug('Retrieved image file %s' %image_file) bot.custom(prefix="Success!", message=image_file) finished.append(image_file) # Reset file name back to None in case of multiple downloads file_name = None # If the user is only asking for one image if len(finished) == 1: finished = finished[0] return finished
give the user an ipython shell, optionally with an endpoint of choice.
def ipython(args):
    '''give the user an ipython shell, optionally with an endpoint of choice.
    '''
    # The client will announce itself (backend/database) unless the
    # command is "get"
    from sregistry.main import get_client
    client = get_client(args.endpoint)
    client.announce(args.command)
    from IPython import embed
    embed()
update_token uses HTTP basic authentication to get a token for Docker registry API V2 operations. We get here if a 401 is returned for a request.
def update_token(self, response):
    '''update_token uses HTTP basic authentication to get a token for
       Docker registry API V2 operations. We get here if a 401 is
       returned for a request.

       Parameters
       ==========
       response: the http request response to parse for the challenge.

       https://docs.docker.com/registry/spec/auth/token/
    '''
    not_asking_auth = "Www-Authenticate" not in response.headers
    if response.status_code != 401 or not_asking_auth:
        bot.error("Authentication error, exiting.")
        sys.exit(1)

    challenge = response.headers["Www-Authenticate"]
    regexp = r'^Bearer\s+realm="(.+)",service="(.+)",scope="(.+)",?'
    match = re.match(regexp, challenge)

    if not match:
        bot.error("Unrecognized authentication challenge, exiting.")
        sys.exit(1)

    realm = match.group(1)
    service = match.group(2)
    scope = match.group(3).split(',')[0]

    token_url = realm + '?service=' + service + '&expires_in=900&scope=' + scope

    # Default headers must be False so that client's current headers not used
    response = self._get(token_url)

    try:
        token = response["token"]
        token = {"Authorization": "Bearer %s" % token}
        self.headers.update(token)

    except Exception:
        bot.error("Error getting token.")
        sys.exit(1)
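The challenge parsing can be illustrated standalone; the header value below is a made-up example in the documented Bearer format, not a real server response.

import re

# A made-up Www-Authenticate challenge in the documented format
challenge = ('Bearer realm="https://auth.docker.io/token",'
             'service="registry.docker.io",'
             'scope="repository:library/ubuntu:pull"')

regexp = r'^Bearer\s+realm="(.+)",service="(.+)",scope="(.+)",?'
match = re.match(regexp, challenge)

realm = match.group(1)     # https://auth.docker.io/token
service = match.group(2)   # registry.docker.io
scope = match.group(3).split(',')[0]

token_url = realm + '?service=' + service + '&expires_in=900&scope=' + scope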
get_manifests calls get_manifest for each of the schema versions, including v2 and v1. Version 1 includes image layers and metadata, version 2 must be parsed for a specific manifest, and the 2nd call includes the layers. If a digest is not provided, latest is used.
def get_manifests(self, repo_name, digest=None):
    '''get_manifests calls get_manifest for each of the schema versions,
       including v2 and v1. Version 1 includes image layers and metadata,
       version 2 must be parsed for a specific manifest, and the 2nd call
       includes the layers. If a digest is not provided, latest is used.

       Parameters
       ==========
       repo_name: reference to the <username>/<repository>:<tag> to obtain
       digest: a tag or shasum version
    '''
    if not hasattr(self, 'manifests'):
        self.manifests = {}

    # Obtain schema version 1 (metadata) and 2, and image config
    schemaVersions = ['v1', 'v2', 'config']
    for schemaVersion in schemaVersions:
        manifest = self._get_manifest(repo_name, digest, schemaVersion)
        if manifest is not None:

            # If we don't have a config yet, try to get from version 2 manifest
            if schemaVersion == "v2" and "config" in manifest:
                bot.debug('Attempting to get config as blob in version 2 manifest')
                url = self._get_layerLink(repo_name, manifest['config']['digest'])
                headers = {'Accept': manifest['config']['mediaType']}
                self.manifests['config'] = self._get(url, headers=headers)

            self.manifests[schemaVersion] = manifest

    return self.manifests
get a selfLink for the manifest, for use by the client get_manifest function, along with the parent's pull Parameters ========== repo_name: reference to the <username>/<repository>:<tag> to obtain digest: a tag or shasum version
def get_manifest_selfLink(self, repo_name, digest=None):
    ''' get a selfLink for the manifest, for use by the client
        get_manifest function, along with the parent's pull

        Parameters
        ==========
        repo_name: reference to the <username>/<repository>:<tag> to obtain
        digest: a tag or shasum version
    '''
    url = "%s/%s/manifests" % (self.base, repo_name)

    # Add a digest - a tag or hash (version)
    if digest is None:
        digest = 'latest'
    return "%s/%s" % (url, digest)
get_manifest should return an image manifest for a particular repo and tag. The image details are extracted when the client is generated.
def get_manifest(self, repo_name, digest=None, version="v1"):
    ''' get_manifest should return an image manifest for a particular repo
        and tag. The image details are extracted when the client is
        generated.

        Parameters
        ==========
        repo_name: reference to the <username>/<repository>:<tag> to obtain
        digest: a tag or shasum version
        version: one of v1, v2, and config (for image config)
    '''
    accepts = {'config': "application/vnd.docker.container.image.v1+json",
               'v1': "application/vnd.docker.distribution.manifest.v1+json",
               'v2': "application/vnd.docker.distribution.manifest.v2+json"}

    url = self._get_manifest_selfLink(repo_name, digest)

    bot.verbose("Obtaining manifest: %s %s" % (url, version))
    headers = {'Accept': accepts[version]}

    try:
        manifest = self._get(url, headers=headers, quiet=True)
        manifest['selfLink'] = url
    except Exception:
        manifest = None

    return manifest
download layers is a wrapper to do the following for a client loaded with a manifest for an image: 1. use the manifests to retrieve list of digests (get_digests) 2. atomically download the list to destination (get_layers)
def download_layers(self, repo_name, digest=None, destination=None):
    ''' download layers is a wrapper to do the following for a client
        loaded with a manifest for an image:

        1. use the manifests to retrieve list of digests (get_digests)
        2. atomically download the list to destination (get_layers)

        This function uses the MultiProcess client to download layers
        at the same time.
    '''
    from sregistry.main.workers import (Workers, download_task)

    # 1. Get manifests if not retrieved
    if not hasattr(self, 'manifests'):
        self._get_manifests(repo_name, digest)

    # Obtain list of digests, and destination for download
    digests = self._get_digests()
    destination = self._get_download_cache(destination)

    # Create multiprocess download client
    workers = Workers()

    # Download each layer atomically
    tasks = []
    layers = []

    for digest in digests:
        targz = "%s/%s.tar.gz" % (destination, digest)

        # Only download if not in cache already
        if not os.path.exists(targz):
            url = "%s/%s/blobs/%s" % (self.base, repo_name, digest)
            tasks.append((url, self.headers, targz))
        layers.append(targz)

    # Download layers with multiprocess workers
    if len(tasks) > 0:
        workers.run(func=download_task, tasks=tasks)

    # Create the metadata tar
    metadata = self._create_metadata_tar(destination)
    if metadata is not None:
        layers.append(metadata)

    return layers
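A usage sketch, assuming `client` is an initialized docker client with manifests available; the repo name is illustrative.

# Hypothetical usage; the repo name is an example only
layers = client.download_layers('library/ubuntu', digest='latest')

# Each entry is a cached .tar.gz layer path, with the metadata tar
# (runscript, environment, labels) appended last when available
for targz in layers:
    print(targz)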
determine the user preference for atomic download of layers. If the user has set a singularity cache directory, honor it. Otherwise, use the Singularity default.
def get_download_cache(self, destination, subfolder='docker'): '''determine the user preference for atomic download of layers. If the user has set a singularity cache directory, honor it. Otherwise, use the Singularity default. ''' # First priority after user specification is Singularity Cache if destination is None: destination = self._get_setting('SINGULARITY_CACHEDIR', SINGULARITY_CACHE) # If not set, the user has disabled (use tmp) destination = get_tmpdir(destination) if not destination.endswith(subfolder): destination = "%s/%s" %(destination, subfolder) # Create subfolders, if don't exist mkdir_p(destination) return destination
return a list of layers from a manifest. The function is intended to work with both version 1 and 2 of the schema. All layers (including redundant) are returned. By default, we try version 2 first, then fall back to version 1.
def get_digests(self):
    '''return a list of layers from a manifest. The function is intended
       to work with both version 1 and 2 of the schema. All layers
       (including redundant) are returned. By default, we try version 2
       first, then fall back to version 1.

       For version 1 manifests: extraction is reversed

       Parameters
       ==========
       manifest: the manifest to read_layers from
    '''
    if not hasattr(self, 'manifests'):
        bot.error('Please retrieve manifests for an image first.')
        sys.exit(1)

    digests = []
    reverseLayers = False
    layer_key = None
    digest_key = None

    schemaVersions = list(self.manifests.keys())
    schemaVersions.reverse()

    # Select the first manifest that includes layers
    for schemaVersion in schemaVersions:
        manifest = self.manifests[schemaVersion]

        # The config blob has no schemaVersion, so use get
        if manifest.get('schemaVersion') == 1:
            reverseLayers = True

        # Docker manifest-v2-2.md#image-manifest
        if 'layers' in manifest:
            layer_key = 'layers'
            digest_key = 'digest'
            bot.debug('Image manifest version 2.2 found.')
            break

        # Docker manifest-v2-1.md#example-manifest  # noqa
        elif 'fsLayers' in manifest:
            layer_key = 'fsLayers'
            digest_key = 'blobSum'
            bot.debug('Image manifest version 2.1 found.')
            break

    if layer_key is None:
        msg = "Improperly formed manifest, "
        msg += "layers, manifests, or fsLayers must be present"
        bot.error(msg)
        sys.exit(1)

    for layer in manifest[layer_key]:
        if digest_key in layer:
            bot.debug("Adding digest %s" % layer[digest_key])
            digests.append(layer[digest_key])

    # Reverse layer order for manifest version 1.0
    if reverseLayers is True:
        message = 'v%s manifest, reversing layers' % schemaVersion
        bot.debug(message)
        digests.reverse()

    return digests
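The two manifest layouts can be sketched side by side; both dictionaries below are minimal made-up examples, with digests truncated for readability.

# Minimal made-up manifests showing the two layouts get_digests handles
v2_manifest = {
    'schemaVersion': 2,
    'layers': [{'digest': 'sha256:aaa...'},
               {'digest': 'sha256:bbb...'}]
}   # read via 'digest', in order: aaa, bbb

v1_manifest = {
    'schemaVersion': 1,
    'fsLayers': [{'blobSum': 'sha256:bbb...'},
                 {'blobSum': 'sha256:aaa...'}]
}   # read via 'blobSum', then reversed: aaa, bbb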
download an image layer (.tar.gz) to a specified download folder.
def get_layer(self, image_id, repo_name, download_folder=None): '''download an image layer (.tar.gz) to a specified download folder. Parameters ========== download_folder: download to this folder. If not set, uses temp. repo_name: the image name (library/ubuntu) to retrieve ''' url = self._get_layerLink(repo_name, image_id) bot.verbose("Downloading layers from %s" % url) download_folder = get_tmpdir(download_folder) download_folder = "%s/%s.tar.gz" % (download_folder, image_id) # Update user what we are doing bot.debug("Downloading layer %s" % image_id) # Step 1: Download the layer atomically file_name = "%s.%s" % (download_folder, next(tempfile._get_candidate_names())) tar_download = self.download(url, file_name) try: shutil.move(tar_download, download_folder) except Exception: msg = "Cannot untar layer %s," % tar_download msg += " was there a problem with download?" bot.error(msg) sys.exit(1) return download_folder
get_size will return the image size (must use v2.0 manifest) Parameters ========== add_padding: if true, return reported size * 5 round_up: if true, round up to nearest integer return_mb: if true, bytes are converted to MB
def get_size(self, add_padding=True, round_up=True, return_mb=True):
    '''get_size will return the image size (must use v2.0 manifest)

       Parameters
       ==========
       add_padding: if true, return reported size * 5
       round_up: if true, round up to nearest integer
       return_mb: if true, bytes are converted to MB
    '''
    if not hasattr(self, 'manifests'):
        bot.error('Please retrieve manifests for an image first.')
        sys.exit(1)

    size = 768  # default size
    for schemaVersion, manifest in self.manifests.items():
        if "layers" in manifest:
            size = 0
            for layer in manifest["layers"]:
                if "size" in layer:
                    size += layer['size']

            if add_padding is True:
                size = size * 5

            if return_mb is True:
                size = size / (1024 * 1024)  # 1MB = 1024*1024 bytes

            if round_up is True:
                size = math.ceil(size)
            size = int(size)

    return size
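A worked example of the size arithmetic, using made-up layer sizes.

import math

# Two made-up layers totalling 50 MB, expressed in bytes
size = 30 * 1024 * 1024 + 20 * 1024 * 1024   # 52428800 bytes

size = size * 5                # add_padding: 262144000 bytes
size = size / (1024 * 1024)    # return_mb:   250.0 MB
size = int(math.ceil(size))    # round_up:    250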
get_config returns a particular key (default is Entrypoint) from a VERSION 1 manifest obtained with get_manifest.
def get_config(self, key="Entrypoint", delim=None):
    '''get_config returns a particular key (default is Entrypoint) from
       a VERSION 1 manifest obtained with get_manifest.

       Parameters
       ==========
       key: the key to return from the manifest config
       delim: Given a list, the delim to use to join the entries.
              Default is newline
    '''
    if not hasattr(self, 'manifests'):
        bot.error('Please retrieve manifests for an image first.')
        sys.exit(1)

    cmd = None

    # Check the config blob first, then fall back to the version 1 manifest
    for version in ['config', 'v1']:
        if cmd is None and version in self.manifests:
            manifest = self.manifests[version]

            # First try, version 2.0 manifest config has upper level config
            if "config" in manifest:
                if key in manifest['config']:
                    cmd = manifest['config'][key]

            # Second try, config nested in v1Compatibility history entries
            if cmd is None and "history" in manifest:
                for entry in manifest['history']:
                    if 'v1Compatibility' in entry:
                        entry = json.loads(entry['v1Compatibility'])
                        if "config" in entry:
                            if key in entry["config"]:
                                cmd = entry["config"][key]

    # Standard is to include commands like ['/bin/sh']
    if isinstance(cmd, list):
        if delim is not None:
            cmd = delim.join(cmd)

    bot.verbose("Found Docker config (%s) %s" % (key, cmd))
    return cmd
return the environment.tar generated with the Singularity software. We first try the Linux Filesystem expected location in /usr/libexec. If not found, we detect the system architecture
def get_environment_tar(self):
    '''return the environment.tar generated with the Singularity software.
       We first try the Linux Filesystem expected location in /usr/libexec
       If not found, we detect the system architecture

       dirname $(singularity selftest 2>&1 | grep 'lib' | awk '{print $4}' | sed -e 's@\(.*/singularity\).*@\1@')
    '''
    from sregistry.utils import (which, run_command)

    # First attempt - look at File System Hierarchy Standard (FHS)
    res = which('singularity')['message']
    libexec = res.replace('/bin/singularity', '')
    envtar = '%s/libexec/singularity/bootstrap-scripts/environment.tar' % libexec

    if os.path.exists(envtar):
        return envtar

    # Second attempt, debian distribution will identify folder
    try:
        res = which('dpkg-architecture')['message']
        if res is not None:
            cmd = ['dpkg-architecture', '-qDEB_HOST_MULTIARCH']
            triplet = run_command(cmd)['message'].strip('\n')
            envtar = '/usr/lib/%s/singularity/bootstrap-scripts/environment.tar' % triplet
            if os.path.exists(envtar):
                return envtar
    except Exception:
        pass

    # Final, return environment.tar provided in package
    return "%s/environment.tar" % os.path.abspath(os.path.dirname(__file__))
create a metadata tar (runscript and environment) to add to the downloaded image. This function uses all functions in this section to obtain key --> value pairs from the manifest config and write to a .tar.gz
def create_metadata_tar(self, destination=None, metadata_folder=".singularity.d"): '''create a metadata tar (runscript and environment) to add to the downloaded image. This function uses all functions in this section to obtain key--> values from the manifest config, and write to a .tar.gz Parameters ========== metadata_folder: the metadata folder in the singularity image. default is .singularity.d ''' tar_file = None # We will add these files to it files = [] # Extract and add environment environ = self._extract_env() if environ not in [None, ""]: bot.verbose3('Adding Docker environment to metadata tar') template = get_template('tarinfo') template['name'] = './%s/env/10-docker.sh' % (metadata_folder) template['content'] = environ files.append(template) # Extract and add labels labels = self._extract_labels() if labels is not None: labels = print_json(labels) bot.verbose3('Adding Docker labels to metadata tar') template = get_template('tarinfo') template['name'] = "./%s/labels.json" % metadata_folder template['content'] = labels files.append(template) # Runscript runscript = self._extract_runscript() if runscript is not None: bot.verbose3('Adding Docker runscript to metadata tar') template = get_template('tarinfo') template['name'] = "./%s/runscript" % metadata_folder template['content'] = runscript files.append(template) if len(files) > 0: dest = self._get_download_cache(destination, subfolder='metadata') tar_file = create_tar(files, dest) else: bot.warning("No metadata will be included.") return tar_file
extract the environment from the manifest, or return None. Used by functions env_extract_image and env_extract_tar
def extract_env(self): '''extract the environment from the manifest, or return None. Used by functions env_extract_image, and env_extract_tar ''' environ = self._get_config('Env') if environ is not None: if not isinstance(environ, list): environ = [environ] lines = [] for line in environ: line = re.findall("(?P<var_name>.+?)=(?P<var_value>.+)", line) line = ['export %s="%s"' % (x[0], x[1]) for x in line] lines = lines + line environ = "\n".join(lines) bot.verbose3("Found Docker container environment!") return environ
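The line transformation can be shown on a made-up Docker Env entry.

import re

# A made-up Env entry, as found in an image config
line = 'PATH=/usr/local/sbin:/usr/local/bin:/usr/bin'

pairs = re.findall("(?P<var_name>.+?)=(?P<var_value>.+)", line)
exports = ['export %s="%s"' % (x[0], x[1]) for x in pairs]
# ['export PATH="/usr/local/sbin:/usr/local/bin:/usr/bin"']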
extract the runscript (EntryPoint) as first priority, unless the user has specified to use the CMD. If Entrypoint is not defined, we default to None: 1. If SREGISTRY_DOCKER_CMD is set, use Cmd 2. If not set, or Cmd is None/blank, try Entrypoint 3. If Entrypoint is not set, use default /bin/bash
def extract_runscript(self):
    '''extract the runscript (EntryPoint) as first priority, unless the
       user has specified to use the CMD. If Entrypoint is not defined,
       we default to None:

       1. If SREGISTRY_DOCKER_CMD is set, use Cmd
       2. If not set, or Cmd is None/blank, try Entrypoint
       3. If Entrypoint is not set, use default /bin/bash
    '''
    use_cmd = self._get_setting('SREGISTRY_DOCKER_CMD')

    # Does the user want to use the CMD instead of ENTRYPOINT?
    commands = ["Entrypoint", "Cmd"]
    if use_cmd is not None:
        commands.reverse()

    # Parse through commands until we hit one
    for command in commands:
        cmd = self._get_config(command)
        if cmd is not None:
            break

    # Only continue if command still isn't None
    if cmd is not None:
        bot.verbose3("Adding Docker %s as Singularity runscript..."
                     % command.upper())

        # If the command is a list, join. (eg ['/usr/bin/python','hello.py'])
        bot.debug(cmd)
        if not isinstance(cmd, list):
            cmd = [cmd]

        cmd = " ".join(['"%s"' % x for x in cmd])
        cmd = 'exec %s "$@"' % cmd
        cmd = "#!/bin/sh\n\n%s\n" % cmd
        return cmd

    bot.debug("CMD and ENTRYPOINT not found, skipping runscript.")
    return cmd
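For a made-up Entrypoint, the generated runscript looks like the following.

# A made-up Entrypoint from an image config
cmd = ['/usr/bin/python', 'hello.py']

cmd = " ".join(['"%s"' % x for x in cmd])  # "/usr/bin/python" "hello.py"
cmd = 'exec %s "$@"' % cmd
runscript = "#!/bin/sh\n\n%s\n" % cmd
# #!/bin/sh
#
# exec "/usr/bin/python" "hello.py" "$@"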
update the base, including the URL for GitLab and the API endpoint.
def _update_base(self): '''update the base, including the URL for GitLab and the API endpoint. ''' self.base = self._get_and_update_setting('SREGISTRY_GITLAB_BASE', "https://gitlab.com/") self.api_base = "%s/api/v4" % self.base.strip('/') self.artifacts = self._get_and_update_setting('SREGISTRY_GITLAB_FOLDER', 'build') self.job = self._get_and_update_setting('SREGISTRY_GITLAB_JOB', 'build') bot.debug(' Api: %s' % self.api_base) bot.debug('Artifacts: %s' % self.artifacts) bot.debug(' Job: %s' % self.job)