Columns: INSTRUCTION (string, lengths 1 to 8.43k), RESPONSE (string, lengths 75 to 104k)
Recursive helper for walk.
def walk_dirs(mgr, dirs): """ Recursive helper for walk. """ for directory in dirs: children = mgr.get( directory, content=True, type='directory', )['content'] dirs, files = map(sorted, _separate_dirs_files(children)) yield directory, dirs, files if dirs: for entry in walk_dirs(mgr, dirs): yield entry
Iterate over all files visible to mgr.
def walk_files(mgr): """ Iterate over all files visible to ``mgr``. """ for dir_, subdirs, files in walk(mgr): for file_ in files: yield file_
Iterate over the contents of all files visible to mgr.
def walk_files_with_content(mgr): """ Iterate over the contents of all files visible to ``mgr``. """ for _, _, files in walk(mgr): for f in files: yield mgr.get(f, content=True)
Re-encrypt data for all users.
def reencrypt_all_users(engine, old_crypto_factory, new_crypto_factory, logger): """ Re-encrypt data for all users. This function is idempotent, meaning that it should be possible to apply the same re-encryption process multiple times without having any effect on the database. Idempotency is achieved by first attempting to decrypt with the old crypto and falling back to the new crypto on failure. An important consequence of this strategy is that **decrypting** a database is not supported with this function, because ``NoEncryption.decrypt`` always succeeds. To decrypt an already-encrypted database, use ``unencrypt_all_users`` instead. It is, however, possible to perform an initial encryption of a database by passing a function returning a ``NoEncryption`` as ``old_crypto_factory``. Parameters ---------- engine : SQLAlchemy.engine Engine encapsulating database connections. old_crypto_factory : function[str -> Any] A function from user_id to an object providing the interface required by PostgresContentsManager.crypto. Results of this will be used for decryption of existing database content. new_crypto_factory : function[str -> Any] A function from user_id to an object providing the interface required by PostgresContentsManager.crypto. Results of this will be used for re-encryption of database content. This **must not** return instances of ``NoEncryption``. Use ``unencrypt_all_users`` if you want to unencrypt a database. logger : logging.Logger, optional A logger to user during re-encryption. See Also -------- reencrypt_user unencrypt_all_users """ logger.info("Beginning re-encryption for all users.") for user_id in all_user_ids(engine): reencrypt_single_user( engine, user_id, old_crypto=old_crypto_factory(user_id), new_crypto=new_crypto_factory(user_id), logger=logger, ) logger.info("Finished re-encryption for all users.")
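A minimal usage sketch of the initial-encryption case described above, assuming ``NoEncryption`` is importable from pgcontents.crypto and that a per-user crypto object exposing encrypt()/decrypt() is available (``make_user_crypto`` is a hypothetical helper, not part of the source):

    import logging

    from sqlalchemy import create_engine

    from pgcontents.crypto import NoEncryption  # assumed import path

    def old_crypto_factory(user_id):
        # NoEncryption.decrypt is a pass-through, so this performs an
        # initial encryption of a previously unencrypted database.
        return NoEncryption()

    def new_crypto_factory(user_id):
        # Hypothetical helper returning an object with encrypt()/decrypt().
        return make_user_crypto(user_id)

    engine = create_engine('postgresql://localhost/pgcontents')
    reencrypt_all_users(
        engine,
        old_crypto_factory,
        new_crypto_factory,
        logging.getLogger(__name__),
    )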
Re-encrypt all files and checkpoints for a single user.
def reencrypt_single_user(engine, user_id, old_crypto, new_crypto, logger): """ Re-encrypt all files and checkpoints for a single user. """ # Use FallbackCrypto so that we're re-entrant if we halt partway through. crypto = FallbackCrypto([new_crypto, old_crypto]) reencrypt_user_content( engine=engine, user_id=user_id, old_decrypt_func=crypto.decrypt, new_encrypt_func=crypto.encrypt, logger=logger, )
Unencrypt data for all users.
def unencrypt_all_users(engine, old_crypto_factory, logger): """ Unencrypt data for all users. Parameters ---------- engine : SQLAlchemy.engine Engine encapsulating database connections. old_crypto_factory : function[str -> Any] A function from user_id to an object providing the interface required by PostgresContentsManager.crypto. Results of this will be used for decryption of existing database content. logger : logging.Logger, optional A logger to user during re-encryption. """ logger.info("Beginning re-encryption for all users.") for user_id in all_user_ids(engine): unencrypt_single_user( engine=engine, user_id=user_id, old_crypto=old_crypto_factory(user_id), logger=logger, ) logger.info("Finished re-encryption for all users.")
Unencrypt all files and checkpoints for a single user.
def unencrypt_single_user(engine, user_id, old_crypto, logger): """ Unencrypt all files and checkpoints for a single user. """ reencrypt_user_content( engine=engine, user_id=user_id, old_decrypt_func=old_crypto.decrypt, new_encrypt_func=lambda s: s, logger=logger, )
Temporarily write an alembic.ini file for use with alembic migration scripts.
def temp_alembic_ini(alembic_dir_location, sqlalchemy_url): """ Temporarily write an alembic.ini file for use with alembic migration scripts. """ with TemporaryDirectory() as tempdir: alembic_ini_filename = join(tempdir, 'temp_alembic.ini') with open(alembic_ini_filename, 'w') as f: f.write( ALEMBIC_INI_TEMPLATE.format( alembic_dir_location=alembic_dir_location, sqlalchemy_url=sqlalchemy_url, ) ) yield alembic_ini_filename
Upgrade the given database to revision.
def upgrade(db_url, revision): """ Upgrade the given database to revision. """ with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini: subprocess.check_call( ['alembic', '-c', alembic_ini, 'upgrade', revision] )
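For example, migrating a database to the newest revision might look like the call below (the connection URL is illustrative):

    # Apply all pending alembic migrations, up to the latest ('head') revision.
    upgrade('postgresql://localhost/pgcontents', 'head')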
Returns list of authors as a comma-separated string (with 'and' before last author).
def get_author_string(self, links=False): saved_args = locals() saved_args = saved_args['links'] """Returns list of authors as a comma-separated string (with 'and' before last author).""" def format_author(author): if links and author.person.slug: return '<a href="/authors/%s/">%s</a>' % (author.person.slug, author.person.full_name) return author.person.full_name if links == True or links == False: authors = map(format_author, self.authors.all()) else: authors = map(format_author, saved_args) if not authors: return "" elif len(authors) == 1: # If this is the only author, just return author name return authors[0] return ", ".join(authors[0:-1]) + " and " + authors[-1]
Returns list of authors as a comma-separated string sorted by author type (with 'and' before last author).
def get_author_type_string(self): """Returns list of authors as a comma-separated string sorted by author type (with 'and' before last author).""" authorTypeString = '' aStringA = '' aStringB = '' aStringC = '' aStringD = '' authors = dict((k, list(v)) for k, v in groupby(self.authors.all(), lambda a: a.type)) for author in authors: if author == 'author': aStringA += 'Written by ' + self.get_author_string(authors['author']) if author == 'photographer': aStringB += 'Photos by ' + self.get_author_string(authors['photographer']) if author == 'illustrator': aStringC += 'Illustrations by ' + self.get_author_string(authors['illustrator']) if author == 'videographer': aStringD += 'Videos by ' + self.get_author_string(authors['videographer']) if aStringA != '': authorTypeString += aStringA if aStringB != '': authorTypeString += ', ' + aStringB if aStringC != '': authorTypeString += ', ' + aStringC if aStringD != '': authorTypeString += ', ' + aStringD return authorTypeString
Sanitizes the data for the given block. If the block has a matching embed serializer, use the to_internal_value method.
def sanitize_block(self, block): """Santizes the data for the given block. If block has a matching embed serializer, use the `to_internal_value` method.""" embed_type = block.get('type', None) data = block.get('data', {}) serializer = self.serializers.get(embed_type, None) if serializer is None: return block block['data'] = serializer.to_internal_value(data) return block
Queue an instance to be fetched from the database.
def queue_instance(self, embed_type, data): """Queue an instance to be fetched from the database.""" serializer = self.serializers.get(embed_type, None) if serializer is None: return instance_id = serializer.get_id(data) if embed_type not in self.ids: self.ids[embed_type] = [] self.ids[embed_type].append(instance_id)
Fetch all queued instances of type embed_type, save results to self.instances.
def load_instances(self, embed_type, ids): """Fetch all queued instances of type `embed_type`, save results to `self.instances`""" serializer = self.serializers.get(embed_type, None) if serializer is None: return self.instances[embed_type] = serializer.fetch(ids)
Insert a fetched instance into embed block.
def insert_instance(self, block): """Insert a fetched instance into embed block.""" embed_type = block.get('type', None) data = block.get('data', {}) serializer = self.serializers.get(embed_type, None) if serializer is None: return block try: instance_id = serializer.get_id(data) instance = self.instances[embed_type][instance_id] data[embed_type] = serializer.serialize(instance) except: data[embed_type] = None block['data'] = data return block
Load data in bulk for each embed block.
def load_data(self): """Load data in bulk for each embed block.""" for embed_type in self.ids.keys(): self.load_instances(embed_type, self.ids[embed_type])
Perform validation of the widget data
def validate(self, data): """Perform validation of the widget data""" from dispatch.theme import ThemeManager errors = {} if data.get('widget') is not None: try: widget = ThemeManager.Widgets.get(data['widget']) except WidgetNotFound as e: errors['widget'] = str(e) else: for field in widget.fields: field_data = data['data'].get(field.name) if field_data is not None: try: field.validate(field_data) except InvalidField as e: errors[field.name] = str(e) elif field.required: errors[field.name] = '%s is required' % field.label if errors: raise ValidationError(errors) return data
Render HTML entry point for manager app.
def admin(request): """Render HTML entry point for manager app.""" context = { 'api_url': settings.API_URL, 'app_js_bundle': 'manager-%s.js' % dispatch.__version__, 'app_css_bundle': 'manager-%s.css' % dispatch.__version__ } return render_to_response('manager/index.html', context)
Return JSON representation for this template
def to_json(self): """Return JSON representation for this template""" result = {} for field in self.fields: result[field.name] = field.to_json(self.data.get(field.name)) return result
Hides authenticated_fields if request context is missing or user is not authenticated
def hide_authenticated_fields(self): """Hides authenticated_fields if request context is missing or user is not authenticated""" authenticated_fields = getattr(self.Meta, 'authenticated_fields', []) if not self.is_authenticated(): for field in authenticated_fields: self.fields.pop(field)
Excludes fields that are included in the query parameters.
def exclude_fields(self): """Excludes fields that are included in the queryparameters""" request = self.context.get('request') if request: exclude = request.query_params.get('exclude', None) if exclude is None: return excluded_fields = exclude.split(',') for field in excluded_fields: self.fields.pop(field)
Get the latest article with the given primary key.
def get(self, *args, **kwargs): """Get the latest article with the given primary key.""" if 'pk' in kwargs: kwargs['parent'] = kwargs['pk'] kwargs['head'] = True del kwargs['pk'] """If the url requested includes the querystring parameters 'version' and 'preview_id', get the article with the specified version and preview_id. Otherwise, get the published version of the article. """ if 'request' in kwargs: request = kwargs['request'] version = request.GET.get('version', None) preview_id = request.GET.get('preview_id', None) if (version is not None) and (preview_id is not None): kwargs['revision_id'] = version kwargs['preview_id'] = preview_id del kwargs['is_published'] del kwargs['request'] return super(PublishableManager, self).get(*args, **kwargs)
Optionally restricts the returned articles by filtering against a topic query parameter in the URL.
def get_queryset(self): """Optionally restricts the returned articles by filtering against a `topic` query parameter in the URL.""" # Get base queryset from DispatchPublishableMixin queryset = self.get_publishable_queryset() # Optimize queries by prefetching related data queryset = queryset \ .select_related('featured_image', 'featured_video', 'topic', 'section', 'subsection') \ .prefetch_related( 'tags', 'featured_image__image__authors', 'authors' ) queryset = queryset.order_by('-updated_at') q = self.request.query_params.get('q', None) section = self.request.query_params.get('section', None) tags = self.request.query_params.getlist('tags', None) author = self.request.query_params.get('author', None) if q is not None: queryset = queryset.filter(headline__icontains=q) if section is not None: queryset = queryset.filter(section_id=section) if tags is not None: for tag in tags: queryset = queryset.filter(tags__id=tag) if author is not None: queryset = queryset.filter(authors__person_id=author) return queryset
Only display unpublished content to authenticated users, filter by query parameter if present.
def get_queryset(self): """Only display unpublished content to authenticated users, filter by query parameter if present.""" # Get base queryset from DispatchPublishableMixin queryset = self.get_publishable_queryset() queryset = queryset.order_by('-updated_at') # Optionally filter by a query parameter q = self.request.query_params.get('q') if q: queryset = queryset.filter(title__icontains=q) return queryset
Overrides the default get_attribute method to convert None values to False.
def get_attribute(self, instance): """Overrides the default get_attribute method to convert None values to False.""" attr = super(NullBooleanField, self).get_attribute(instance) return True if attr else False
Checks that the given widget contains the required fields
def validate_widget(widget): """Checks that the given widget contains the required fields""" if not has_valid_id(widget): raise InvalidWidget("%s must contain a valid 'id' attribute" % widget.__name__) if not has_valid_name(widget): raise InvalidWidget("%s must contain a valid 'name' attribute" % widget.__name__) if not has_valid_template(widget): raise InvalidWidget("%s must contain a valid 'template' attribute" % widget.__name__) if not hasattr(widget, 'zones') or not widget.zones: raise InvalidWidget("%s must be compatible with at least one zone" % widget.__name__)
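A widget that passes these checks would look roughly like the sketch below; the attribute values and the zone reference are made up for illustration, and the base class name is an assumption rather than something confirmed by the source:

    class SidebarPoll(Widget):  # Widget base class assumed from dispatch.theme
        id = 'sidebar-poll'
        name = 'Sidebar Poll'
        template = 'widgets/sidebar-poll.html'
        zones = (Sidebar,)  # must be compatible with at least one zone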
Checks that the given zone contains the required fields
def validate_zone(zone): """Checks that the given zone contains the required fields""" if not has_valid_id(zone): raise InvalidZone("%s must contain a valid 'id' attribute" % zone.__name__) if not has_valid_name(zone): raise InvalidZone("%s must contain a valid 'name' attribute" % zone.__name__)
Return True if id is a valid UUID, False otherwise.
def is_valid_uuid(id): """Return True if id is a valid UUID, False otherwise.""" if not isinstance(id, basestring): return False try: val = UUID(id, version=4) except ValueError: return False return True
Returns the user's permissions.
def get_permissions(self): """Returns the user's permissions.""" permissions = '' if self.groups.filter(name='Admin').exists() or self.is_superuser: permissions = 'admin' return permissions
Modify the user's permissions.
def modify_permissions(self, permissions): """Modify the user's permissions.""" group = Group.objects.get(name='Admin') if permissions == 'admin': self.groups.add(group) else: self.groups.remove(group)
Raise a ValidationError if data does not match the author format.
def AuthorValidator(data): """Raise a ValidationError if data does not match the author format.""" if not isinstance(data, list): # Convert single instance to a list data = [data] for author in data: if 'person' not in author: raise ValidationError('An author must contain a person.') if 'type' in author and not isinstance(author['type'], basestring): # If type is defined, it should be a string raise ValidationError('The author type must be a string.')
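For illustration, the validator accepts either a single author dict or a list of them; the person values below are arbitrary ids:

    AuthorValidator({'person': 7})                                     # single instance is wrapped in a list
    AuthorValidator([{'person': 7}, {'person': 9, 'type': 'author'}])  # passes
    AuthorValidator([{'type': 'author'}])        # raises ValidationError: an author must contain a person
    AuthorValidator([{'person': 7, 'type': 1}])  # raises ValidationError: the author type must be a string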
Save widget data for this zone.
def save(self, validated_data): """Save widget data for this zone.""" (zone, created) = ZoneModel.objects.get_or_create(zone_id=self.id) zone.widget_id = validated_data['widget'] zone.data = validated_data['data'] # Call widget before-save hook on nested widgets for key in list(zone.data.keys()): if isinstance(zone.data[key], dict) and ('id' in zone.data[key].keys()) and ('data' in zone.data[key].keys()): zone.data[key]['data'] = self.before_save(zone.data[key]['id'], zone.data[key]['data']) # Call widget before-save hook zone.data = self.before_save(zone.widget_id, zone.data) return zone.save()
Returns data from each field.
def get_data(self): """Returns data from each field.""" result = {} for field in self.fields: result[field.name] = self.data.get(field.name) return result
Prepare widget data for template.
def prepare_data(self): """Prepare widget data for template.""" result = {} for field in self.fields: data = self.data.get(field.name) result[field.name] = field.prepare_data(data) return result
Renders the widget as HTML.
def render(self, data=None, add_context=None): """Renders the widget as HTML.""" template = loader.get_template(self.template) if not data: data = self.context(self.prepare_data()) if add_context is not None: for key, value in add_context.iteritems(): if key in self.accepted_keywords: data[key] = value return template.render(data)
Returns article/page content as HTML.
def content_to_html(content, article_id): """Returns artilce/page content as HTML""" def render_node(html, node, index): """Renders node as HTML""" if node['type'] == 'paragraph': return html + '<p>%s</p>' % node['data'] else: if node['type'] == 'ad': id = 'div-gpt-ad-1443288719995-' + str(10 + index) + '-' + str(article_id) dfp_type = 'Intra_Article_' + str(index + 1) size = 'banner' if node['data'] == 'mobile': size = 'box' newString = '<div class="o-article-embed__advertisement"><div class="o-advertisement o-advertisment--banner o-advertisement--center"><div class="adslot" id="' + id + '" data-size="' + size + '" data-dfp="' + dfp_type + '"></div></div></div>' return html + '<div class="o-article-embed o-article-embed--advertisement">%s</div>\n' % newString try: if node['type'] == 'poll': node['type'] = 'widget' node['data']['data'] = node['data'] return html + embeds.render(node['type'], node['data']) except EmbedException: return html html = '' index = 0 for node in content: html = render_node(html, node, index) if (node['type'] == 'ad'): index += 1 # return mark_safe(reduce(render_node, content, '')) return mark_safe(html)
Returns article/page content as JSON.
def content_to_json(content): """Returns article/page content as JSON""" def render_node(node): """Renders node as JSON""" if node['type'] == 'paragraph': return node else: return { 'type': node['type'], 'data': embeds.to_json(node['type'], node['data']) } return map(render_node, content)
Retrieves the settings for this integration as a dictionary.
def get_settings(cls, show_hidden=False): """ Retrieves the settings for this integration as a dictionary. Removes all hidden fields if show_hidden=False """ settings = Integration.objects.get_settings(cls.ID) if not show_hidden: for field in cls.HIDDEN_FIELDS: settings.pop(field, None) return settings
Receive OAuth callback request from Facebook.
def callback(cls, user, query): """Receive OAuth callback request from Facebook.""" # Get settings for this integration settings = cls.get_settings(show_hidden=True) fb = Facebook() payload = { 'client_id': settings['client_id'], 'client_secret': settings['client_secret'], 'code': query['code'], 'redirect_uri': cls.REDIRECT_URI } try: # Authenticate with Facebook fb.get_access_token(payload) # Fetch pages belonging to authenticated user pages = fb.list_pages('me') except FacebookAPIError, e: raise IntegrationCallbackError(e.message) return { 'pages': pages }
Return settings for given integration as a dictionary.
def get_settings(self, integration_id): """Return settings for given integration as a dictionary.""" try: integration = self.get(integration_id=integration_id) return json.loads(integration.settings) except (self.model.DoesNotExist, ValueError): return {}
Updates settings for given integration.
def update_settings(self, integration_id, settings): """Updates settings for given integration.""" (integration, created) = self.get_or_create(integration_id=integration_id) try: current_settings = json.loads(integration.settings) except ValueError: current_settings = {} current_settings.update(settings) integration.settings = json.dumps(current_settings) integration.save()
Handles requests to the user signup page.
def signup(request, uuid=None): """Handles requests to the user signup page.""" invite = get_object_or_404(Invite.objects.all(), id=uuid) if invite.expiration_date < timezone.now(): invite.delete() raise Http404('This page does not exist.') if request.method == 'POST': form = SignUpForm(request.POST) if form.is_valid(): user = form.save(commit=False) user.email = invite.email user.person = invite.person user.save() if invite.permissions == 'admin': group = Group.objects.get(name='Admin') user.groups.add(group) invite.delete() return redirect('dispatch-admin') else: return render( request, 'registration/signup.html', { 'form': form, 'email': invite.email } ) else: form = SignUpForm() return render( request, 'registration/signup.html', { 'form': form, 'email': invite.email } )
Returns the HTML produced from enclosing each item in contents in a tag of type tagname
def maptag(tagname, contents): """Returns the HTML produced from enclosing each item in `contents` in a tag of type `tagname`""" return u''.join(tag(tagname, item) for item in contents)
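Assuming the tag helper wraps its argument as '<tagname>item</tagname>' (that behavior is not shown in the source), a call would behave like this sketch:

    maptag('li', ['News', 'Sports', 'Opinion'])
    # u'<li>News</li><li>Sports</li><li>Opinion</li>'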
Renders the contents of the zone with given zone_id.
def zone(zone_id, **kwargs): """Renders the contents of the zone with given zone_id.""" try: zone = ThemeManager.Zones.get(zone_id) except ZoneNotFound: return '' try: return zone.widget.render(add_context=kwargs) except (WidgetNotFound, AttributeError): pass return ''
Handles the saving/updating of a Publishable instance.
def save(self, revision=True, *args, **kwargs): """ Handles the saving/updating of a Publishable instance. Arguments: revision - if True, a new version of this Publishable will be created. """ if revision: # If this is a revision, set it to be the head of the list and increment the revision id self.head = True self.revision_id += 1 previous_revision = self.get_previous_revision() if not self.is_parent(): # If this is a revision, delete the old head of the list. type(self).objects \ .filter(parent=self.parent, head=True) \ .update(head=None) # Clear the instance id to force Django to save a new instance. # Both fields (pk, id) required for this to work -- something to do with model inheritance self.pk = None self.id = None # New version is unpublished by default self.is_published = None # Set created_at to current time, but only for first version if not self.created_at: self.created_at = timezone.now() self.updated_at = timezone.now() if revision: self.updated_at = timezone.now() super(Publishable, self).save(*args, **kwargs) # Update the parent foreign key if not self.parent: self.parent = self super(Publishable, self).save(update_fields=['parent']) if revision: # Set latest version for all articles type(self).objects \ .filter(parent=self.parent) \ .update(latest_version=self.revision_id) self.latest_version = self.revision_id return self
Handles saving the featured image.
def save_featured_image(self, data): """ Handles saving the featured image. If data is None, the featured image will be removed. `data` should be dictionary with the following format: { 'image_id': int, 'caption': str, 'credit': str } """ attachment = self.featured_image if data is None: if attachment: attachment.delete() self.featured_image = None return if data['image_id'] is None: if attachment: attachment.delete() self.featured_image = None return if not attachment: attachment = ImageAttachment() attachment.image_id = data.get('image_id', attachment.image_id) attachment.caption = data.get('caption', None) attachment.credit = data.get('credit', None) instance_type = str(type(self)).lower() setattr(attachment, instance_type, self) attachment.save() self.featured_image = attachment
Save the subsection to the parent article
def save_subsection(self, subsection_id): """ Save the subsection to the parent article """ Article.objects.filter(parent_id=self.parent.id).update(subsection_id=subsection_id)
Returns the file extension.
def get_extension(self): """Returns the file extension.""" ext = os.path.splitext(self.img.name)[1] if ext: # Remove period from extension return ext[1:] return ext
Returns the medium size image URL.
def get_medium_url(self): """Returns the medium size image URL.""" if self.is_gif(): return self.get_absolute_url() return '%s%s-%s.jpg' % (settings.MEDIA_URL, self.get_name(), 'medium')
Custom save method to process thumbnails and save image dimensions.
def save(self, **kwargs): """Custom save method to process thumbnails and save image dimensions.""" is_new = self.pk is None if is_new: # Make filenames lowercase self.img.name = self.img.name.lower() # Call super method super(Image, self).save(**kwargs) if is_new and self.img: data = self.img.read() if not data: return image = Img.open(StringIO.StringIO(data)) self.width, self.height = image.size super(Image, self).save() name = self.get_name() ext = self.get_extension() for size in self.SIZES.keys(): self.save_thumbnail(image, self.SIZES[size], name, size, ext)
Processes and saves a resized thumbnail version of the image.
def save_thumbnail(self, image, size, name, label, file_type): """Processes and saves a resized thumbnail version of the image.""" width, height = size (imw, imh) = image.size # If image is larger than thumbnail size, resize image if (imw > width) or (imh > height): image.thumbnail(size, Img.ANTIALIAS) # Attach new thumbnail label to image filename name = "%s-%s.jpg" % (name, label) # Image.save format takes JPEG not jpg if file_type in self.JPG_FORMATS: file_type = 'JPEG' # Write new thumbnail to StringIO object image_io = StringIO.StringIO() image.save(image_io, format=file_type, quality=75) # Convert StringIO object to Django File object thumb_file = InMemoryUploadedFile(image_io, None, name, 'image/jpeg', image_io.len, None) # Save the new file to the default storage system default_storage.save(name, thumb_file)
Initialize the app for use with this :class:`~flask_mysqldb.MySQL` class. This is called automatically if app is passed to :meth:`~MySQL.__init__`.
def init_app(self, app): """Initialize the `app` for use with this :class:`~flask_mysqldb.MySQL` class. This is called automatically if `app` is passed to :meth:`~MySQL.__init__`. :param flask.Flask app: the application to configure for use with this :class:`~flask_mysqldb.MySQL` class. """ app.config.setdefault('MYSQL_HOST', 'localhost') app.config.setdefault('MYSQL_USER', None) app.config.setdefault('MYSQL_PASSWORD', None) app.config.setdefault('MYSQL_DB', None) app.config.setdefault('MYSQL_PORT', 3306) app.config.setdefault('MYSQL_UNIX_SOCKET', None) app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10) app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None) app.config.setdefault('MYSQL_USE_UNICODE', True) app.config.setdefault('MYSQL_CHARSET', 'utf8') app.config.setdefault('MYSQL_SQL_MODE', None) app.config.setdefault('MYSQL_CURSORCLASS', None) if hasattr(app, 'teardown_appcontext'): app.teardown_appcontext(self.teardown)
Attempts to connect to the MySQL server.
def connection(self): """Attempts to connect to the MySQL server. :return: Bound MySQL connection object if successful or ``None`` if unsuccessful. """ ctx = _app_ctx_stack.top if ctx is not None: if not hasattr(ctx, 'mysql_db'): ctx.mysql_db = self.connect return ctx.mysql_db
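Typical Flask-MySQLdb wiring for the two methods above looks like the following sketch (host, credentials, database, and route are placeholders):

    from flask import Flask
    from flask_mysqldb import MySQL

    app = Flask(__name__)
    app.config['MYSQL_HOST'] = 'localhost'
    app.config['MYSQL_USER'] = 'appuser'
    app.config['MYSQL_PASSWORD'] = 'secret'
    app.config['MYSQL_DB'] = 'appdb'
    mysql = MySQL(app)  # calls init_app(app), which fills in the remaining defaults

    @app.route('/ping')
    def ping():
        cur = mysql.connection.cursor()  # lazily connects and caches on the app context
        cur.execute('SELECT 1')
        return str(cur.fetchone())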
Wraps a fileobj in a bandwidth limited stream wrapper
def get_bandwith_limited_stream(self, fileobj, transfer_coordinator, enabled=True): """Wraps a fileobj in a bandwidth limited stream wrapper :type fileobj: file-like obj :param fileobj: The file-like obj to wrap :type transfer_coordinator: s3transfer.futures.TransferCoordinator param transfer_coordinator: The coordinator for the general transfer that the wrapped stream is a part of :type enabled: boolean :param enabled: Whether bandwidth limiting should be enabled to start """ stream = BandwidthLimitedStream( fileobj, self._leaky_bucket, transfer_coordinator, self._time_utils) if not enabled: stream.disable_bandwidth_limiting() return stream
Read a specified amount
def read(self, amount): """Read a specified amount Reads will only be throttled if bandwidth limiting is enabled. """ if not self._bandwidth_limiting_enabled: return self._fileobj.read(amount) # We do not want to be calling consume on every read as the read # amounts can be small causing the lock of the leaky bucket to # introduce noticeable overhead. So instead we keep track of # how many bytes we have seen and only call consume once we pass a # certain threshold. self._bytes_seen += amount if self._bytes_seen < self._bytes_threshold: return self._fileobj.read(amount) self._consume_through_leaky_bucket() return self._fileobj.read(amount)
Consume a requested amount.
def consume(self, amt, request_token): """Consume an a requested amount :type amt: int :param amt: The amount of bytes to request to consume :type request_token: RequestToken :param request_token: The token associated to the consumption request that is used to identify the request. So if a RequestExceededException is raised the token should be used in subsequent retry consume() request. :raises RequestExceededException: If the consumption amount would exceed the maximum allocated bandwidth :rtype: int :returns: The amount consumed """ with self._lock: time_now = self._time_utils.time() if self._consumption_scheduler.is_scheduled(request_token): return self._release_requested_amt_for_scheduled_request( amt, request_token, time_now) elif self._projected_to_exceed_max_rate(amt, time_now): self._raise_request_exceeded_exception( amt, request_token, time_now) else: return self._release_requested_amt(amt, time_now)
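The retry contract described above can be sketched as a loop that reuses the same RequestToken whenever a RequestExceededException is raised; the class names and the retry_time attribute are assumed to come from s3transfer.bandwidth:

    import time

    from s3transfer.bandwidth import RequestExceededException, RequestToken  # assumed imports

    def consume_with_retries(leaky_bucket, amt):
        token = RequestToken()
        while True:
            try:
                # Reusing the token lets the bucket account for this exact request
                # if it was previously scheduled for a later consumption time.
                return leaky_bucket.consume(amt, token)
            except RequestExceededException as e:
                time.sleep(e.retry_time)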
Schedules a wait time to be able to consume an amount
def schedule_consumption(self, amt, token, time_to_consume): """Schedules a wait time to be able to consume an amount :type amt: int :param amt: The amount of bytes scheduled to be consumed :type token: RequestToken :param token: The token associated to the consumption request that is used to identify the request. :type time_to_consume: float :param time_to_consume: The desired time it should take for that specific request amount to be consumed in regardless of previously scheduled consumption requests :rtype: float :returns: The amount of time to wait for the specific request before actually consuming the specified amount. """ self._total_wait += time_to_consume self._tokens_to_scheduled_consumption[token] = { 'wait_duration': self._total_wait, 'time_to_consume': time_to_consume, } return self._total_wait
Processes a scheduled consumption request that has completed
def process_scheduled_consumption(self, token): """Processes a scheduled consumption request that has completed :type token: RequestToken :param token: The token associated to the consumption request that is used to identify the request. """ scheduled_retry = self._tokens_to_scheduled_consumption.pop(token) self._total_wait = max( self._total_wait - scheduled_retry['time_to_consume'], 0)
Get the projected rate using a provided amount and time
def get_projected_rate(self, amt, time_at_consumption): """Get the projected rate using a provided amount and time :type amt: int :param amt: The proposed amount to consume :type time_at_consumption: float :param time_at_consumption: The proposed time to consume at :rtype: float :returns: The consumption rate if that amt and time were consumed """ if self._last_time is None: return 0.0 return self._calculate_exponential_moving_average_rate( amt, time_at_consumption)
Record the consumption rate based off amount and time point
def record_consumption_rate(self, amt, time_at_consumption): """Record the consumption rate based off amount and time point :type amt: int :param amt: The amount that got consumed :type time_at_consumption: float :param time_at_consumption: The time at which the amount was consumed """ if self._last_time is None: self._last_time = time_at_consumption self._current_rate = 0.0 return self._current_rate = self._calculate_exponential_moving_average_rate( amt, time_at_consumption) self._last_time = time_at_consumption
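A sketch of the exponential moving average that the helper above is expected to compute; the smoothing factor alpha and the function shape are assumptions about the implementation, not taken from the source:

    def exponential_moving_average_rate(amt, time_at_consumption,
                                        last_time, current_rate, alpha=0.8):
        # Instantaneous rate for this sample.
        time_delta = time_at_consumption - last_time
        if time_delta <= 0:
            return current_rate
        new_rate = amt / time_delta
        # Blend the new sample with the previously tracked rate.
        return alpha * new_rate + (1 - alpha) * current_rate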
Downloads the object's contents to a file.
def download_file(self, bucket, key, filename, extra_args=None, expected_size=None): """Downloads the object's contents to a file :type bucket: str :param bucket: The name of the bucket to download from :type key: str :param key: The name of the key to download from :type filename: str :param filename: The name of a file to download to. :type extra_args: dict :param extra_args: Extra arguments that may be passed to the client operation :type expected_size: int :param expected_size: The expected size in bytes of the download. If provided, the downloader will not call HeadObject to determine the object's size and use the provided value instead. The size is needed to determine whether to do a multipart download. :rtype: s3transfer.futures.TransferFuture :returns: Transfer future representing the download """ self._start_if_needed() if extra_args is None: extra_args = {} self._validate_all_known_args(extra_args) transfer_id = self._transfer_monitor.notify_new_transfer() download_file_request = DownloadFileRequest( transfer_id=transfer_id, bucket=bucket, key=key, filename=filename, extra_args=extra_args, expected_size=expected_size, ) logger.debug( 'Submitting download file request: %s.', download_file_request) self._download_request_queue.put(download_file_request) call_args = CallArgs( bucket=bucket, key=key, filename=filename, extra_args=extra_args, expected_size=expected_size) future = self._get_transfer_future(transfer_id, call_args) return future
Poll for the result of a transfer
def poll_for_result(self, transfer_id): """Poll for the result of a transfer :param transfer_id: Unique identifier for the transfer :return: If the transfer succeeded, it will return the result. If the transfer failed, it will raise the exception associated to the failure. """ self._transfer_states[transfer_id].wait_till_done() exception = self._transfer_states[transfer_id].exception if exception: raise exception return None
Calculate the range parameter for multipart downloads/copies.
def calculate_range_parameter(part_size, part_index, num_parts, total_size=None): """Calculate the range parameter for multipart downloads/copies :type part_size: int :param part_size: The size of the part :type part_index: int :param part_index: The index for which this parts starts. This index starts at zero :type num_parts: int :param num_parts: The total number of parts in the transfer :returns: The value to use for Range parameter on downloads or the CopySourceRange parameter for copies """ # Used to calculate the Range parameter start_range = part_index * part_size if part_index == num_parts - 1: end_range = '' if total_size is not None: end_range = str(total_size - 1) else: end_range = start_range + part_size - 1 range_param = 'bytes=%s-%s' % (start_range, end_range) return range_param
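Worked examples with a 5 MB part size and three parts (values chosen for illustration):

    part_size = 5 * 1024 * 1024  # 5 MB

    calculate_range_parameter(part_size, 0, 3)
    # 'bytes=0-5242879'
    calculate_range_parameter(part_size, 1, 3)
    # 'bytes=5242880-10485759'
    calculate_range_parameter(part_size, 2, 3)
    # 'bytes=10485760-'  (open-ended because the total size is unknown)
    calculate_range_parameter(part_size, 2, 3, total_size=12 * 1024 * 1024)
    # 'bytes=10485760-12582911'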
Retrieves callbacks from a subscriber
def get_callbacks(transfer_future, callback_type): """Retrieves callbacks from a subscriber :type transfer_future: s3transfer.futures.TransferFuture :param transfer_future: The transfer future the subscriber is associated to. :type callback_type: str :param callback_type: The type of callback to retrieve from the subscriber. Valid types include: * 'queued' * 'progress' * 'done' :returns: A list of callbacks for the type specified. All callbacks are preinjected with the transfer future. """ callbacks = [] for subscriber in transfer_future.meta.call_args.subscribers: callback_name = 'on_' + callback_type if hasattr(subscriber, callback_name): callbacks.append( functools.partial( getattr(subscriber, callback_name), future=transfer_future ) ) return callbacks
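For example, a subscriber exposing an on_progress method would be picked up when callback_type is 'progress'; the sketch below assumes the standard BaseSubscriber hooks from s3transfer.subscribers:

    from s3transfer.subscribers import BaseSubscriber

    class ProgressPrinter(BaseSubscriber):
        def on_progress(self, future, bytes_transferred, **kwargs):
            # The transfer future is pre-injected by get_callbacks().
            print('%s: %d bytes transferred' % (future.meta.call_args.key,
                                                bytes_transferred))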
Gets a dictionary filtered by whitelisted keys
def get_filtered_dict(original_dict, whitelisted_keys): """Gets a dictionary filtered by whitelisted keys :param original_dict: The original dictionary of arguments to source keys and values. :param whitelisted_key: A list of keys to include in the filtered dictionary. :returns: A dictionary containing key/values from the original dictionary whose key was included in the whitelist """ filtered_dict = {} for key, value in original_dict.items(): if key in whitelisted_keys: filtered_dict[key] = value return filtered_dict
Decrement the count by one
def decrement(self): """Decrement the count by one""" with self._lock: if self._count == 0: raise RuntimeError( 'Counter is at zero. It cannot dip below zero') self._count -= 1 if self._is_finalized and self._count == 0: self._callback()
Finalize the counter
def finalize(self): """Finalize the counter Once finalized, the counter never be incremented and the callback can be invoked once the count reaches zero """ with self._lock: self._is_finalized = True if self._count == 0: self._callback()
Checks to see if a file is a special UNIX file.
def is_special_file(cls, filename): """Checks to see if a file is a special UNIX file. It checks if the file is a character special device, block special device, FIFO, or socket. :param filename: Name of the file :returns: True if the file is a special file. False, if is not. """ # If it does not exist, it must be a new file so it cannot be # a special file. if not os.path.exists(filename): return False mode = os.stat(filename).st_mode # Character special device. if stat.S_ISCHR(mode): return True # Block special device if stat.S_ISBLK(mode): return True # Named pipe / FIFO if stat.S_ISFIFO(mode): return True # Socket. if stat.S_ISSOCK(mode): return True return False
Convenience factory function to create from a filename.
def from_filename(cls, filename, start_byte, chunk_size, callbacks=None, enable_callbacks=True): """Convenience factory function to create from a filename. :type start_byte: int :param start_byte: The first byte from which to start reading. :type chunk_size: int :param chunk_size: The max chunk size to read. Trying to read pass the end of the chunk size will behave like you've reached the end of the file. :type full_file_size: int :param full_file_size: The entire content length associated with ``fileobj``. :type callbacks: function(amount_read) :param callbacks: Called whenever data is read from this object. :type enable_callbacks: bool :param enable_callbacks: Indicate whether to invoke callback during read() calls. :rtype: ``ReadFileChunk`` :return: A new instance of ``ReadFileChunk`` """ f = open(filename, 'rb') f.seek(start_byte) file_size = os.fstat(f.fileno()).st_size return cls(f, chunk_size, file_size, callbacks, enable_callbacks)
Acquire the semaphore
def acquire(self, tag, blocking=True): """Acquire the semaphore :param tag: A tag identifying what is acquiring the semaphore. Note that this is not really needed to directly use this class but is needed for API compatibility with the SlidingWindowSemaphore implementation. :param block: If True, block until it can be acquired. If False, do not block and raise an exception if cannot be aquired. :returns: A token (can be None) to use when releasing the semaphore """ logger.debug("Acquiring %s", tag) if not self._semaphore.acquire(blocking): raise NoResourcesAvailable("Cannot acquire tag '%s'" % tag)
Release the semaphore
def release(self, tag, acquire_token): """Release the semaphore :param tag: A tag identifying what is releasing the semaphore :param acquire_token: The token returned from when the semaphore was acquired. Note that this is not really needed to directly use this class but is needed for API compatibility with the SlidingWindowSemaphore implementation. """ logger.debug("Releasing acquire %s/%s" % (tag, acquire_token)) self._semaphore.release()
Get a chunksize close to current that fits within all S3 limits.
def adjust_chunksize(self, current_chunksize, file_size=None): """Get a chunksize close to current that fits within all S3 limits. :type current_chunksize: int :param current_chunksize: The currently configured chunksize. :type file_size: int or None :param file_size: The size of the file to upload. This might be None if the object being transferred has an unknown size. :returns: A valid chunksize that fits within configured limits. """ chunksize = current_chunksize if file_size is not None: chunksize = self._adjust_for_max_parts(chunksize, file_size) return self._adjust_for_chunksize_limits(chunksize)
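A usage sketch, assuming this method lives on s3transfer's ChunksizeAdjuster utility and that S3's 10,000-part limit is the relevant constraint for the chosen numbers:

    from s3transfer.utils import ChunksizeAdjuster  # assumed location of this method

    adjuster = ChunksizeAdjuster()
    # An 8 MiB chunksize on a 1 TiB object would need more than 10,000 parts,
    # so the returned chunksize is scaled up to stay within the limit.
    chunksize = adjuster.adjust_chunksize(8 * 1024 * 1024, file_size=1024 ** 4)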
Queue IO write for submission to the IO executor.
def queue_file_io_task(self, fileobj, data, offset): """Queue IO write for submission to the IO executor. This method accepts an IO executor and information about the downloaded data, and handles submitting this to the IO executor. This method may defer submission to the IO executor if necessary. """ self._transfer_coordinator.submit( self._io_executor, self.get_io_write_task(fileobj, data, offset) )
Get an IO write task for the requested set of data
def get_io_write_task(self, fileobj, data, offset): """Get an IO write task for the requested set of data This task can be ran immediately or be submitted to the IO executor for it to run. :type fileobj: file-like object :param fileobj: The file-like object to write to :type data: bytes :param data: The data to write out :type offset: integer :param offset: The offset to write the data to in the file-like object :returns: An IO task to be used to write data to a file-like object """ return IOWriteTask( self._transfer_coordinator, main_kwargs={ 'fileobj': fileobj, 'data': data, 'offset': offset, } )
Retrieves a class for managing output for a download
def _get_download_output_manager_cls(self, transfer_future, osutil): """Retrieves a class for managing output for a download :type transfer_future: s3transfer.futures.TransferFuture :param transfer_future: The transfer future for the request :type osutil: s3transfer.utils.OSUtils :param osutil: The os utility associated to the transfer :rtype: class of DownloadOutputManager :returns: The appropriate class to use for managing a specific type of input for downloads. """ download_manager_resolver_chain = [ DownloadSpecialFilenameOutputManager, DownloadFilenameOutputManager, DownloadSeekableOutputManager, DownloadNonSeekableOutputManager, ] fileobj = transfer_future.meta.call_args.fileobj for download_manager_cls in download_manager_resolver_chain: if download_manager_cls.is_compatible(fileobj, osutil): return download_manager_cls raise RuntimeError( 'Output %s of type: %s is not supported.' % ( fileobj, type(fileobj)))
:param client: The client associated with the transfer manager
def _submit(self, client, config, osutil, request_executor, io_executor, transfer_future, bandwidth_limiter=None): """ :param client: The client associated with the transfer manager :type config: s3transfer.manager.TransferConfig :param config: The transfer config associated with the transfer manager :type osutil: s3transfer.utils.OSUtil :param osutil: The os utility associated to the transfer manager :type request_executor: s3transfer.futures.BoundedExecutor :param request_executor: The request executor associated with the transfer manager :type io_executor: s3transfer.futures.BoundedExecutor :param io_executor: The io executor associated with the transfer manager :type transfer_future: s3transfer.futures.TransferFuture :param transfer_future: The transfer future associated with the transfer request that tasks are being submitted for :type bandwidth_limiter: s3transfer.bandwidth.BandwidthLimiter :param bandwidth_limiter: The bandwidth limiter to use when downloading streams """ if transfer_future.meta.size is None: # If a size was not provided figure out the size for the # user. response = client.head_object( Bucket=transfer_future.meta.call_args.bucket, Key=transfer_future.meta.call_args.key, **transfer_future.meta.call_args.extra_args ) transfer_future.meta.provide_transfer_size( response['ContentLength']) download_output_manager = self._get_download_output_manager_cls( transfer_future, osutil)(osutil, self._transfer_coordinator, io_executor) # If it is greater than threshold do a ranged download, otherwise # do a regular GetObject download. if transfer_future.meta.size < config.multipart_threshold: self._submit_download_request( client, config, osutil, request_executor, io_executor, download_output_manager, transfer_future, bandwidth_limiter) else: self._submit_ranged_download_request( client, config, osutil, request_executor, io_executor, download_output_manager, transfer_future, bandwidth_limiter)
Downloads an object and places content into io queue
def _main(self, client, bucket, key, fileobj, extra_args, callbacks, max_attempts, download_output_manager, io_chunksize, start_index=0, bandwidth_limiter=None): """Downloads an object and places content into io queue :param client: The client to use when calling GetObject :param bucket: The bucket to download from :param key: The key to download from :param fileobj: The file handle to write content to :param exta_args: Any extra arguements to include in GetObject request :param callbacks: List of progress callbacks to invoke on download :param max_attempts: The number of retries to do when downloading :param download_output_manager: The download output manager associated with the current download. :param io_chunksize: The size of each io chunk to read from the download stream and queue in the io queue. :param start_index: The location in the file to start writing the content of the key to. :param bandwidth_limiter: The bandwidth limiter to use when throttling the downloading of data in streams. """ last_exception = None for i in range(max_attempts): try: response = client.get_object( Bucket=bucket, Key=key, **extra_args) streaming_body = StreamReaderProgress( response['Body'], callbacks) if bandwidth_limiter: streaming_body = \ bandwidth_limiter.get_bandwith_limited_stream( streaming_body, self._transfer_coordinator) current_index = start_index chunks = DownloadChunkIterator(streaming_body, io_chunksize) for chunk in chunks: # If the transfer is done because of a cancellation # or error somewhere else, stop trying to submit more # data to be written and break out of the download. if not self._transfer_coordinator.done(): self._handle_io( download_output_manager, fileobj, chunk, current_index ) current_index += len(chunk) else: return return except S3_RETRYABLE_DOWNLOAD_ERRORS as e: logger.debug("Retrying exception caught (%s), " "retrying request, (attempt %s / %s)", e, i, max_attempts, exc_info=True) last_exception = e # Also invoke the progress callbacks to indicate that we # are trying to download the stream again and all progress # for this GetObject has been lost. invoke_progress_callbacks( callbacks, start_index - current_index) continue raise RetriesExceededError(last_exception)
Pulls off an io queue to write contents to a file
def _main(self, fileobj, data, offset): """Pulls off an io queue to write contents to a file :param fileobj: The file handle to write content to :param data: The data to write :param offset: The offset to write the data to. """ fileobj.seek(offset) fileobj.write(data)
Request any available writes given new incoming data.
def request_writes(self, offset, data): """Request any available writes given new incoming data. You call this method by providing new data along with the offset associated with the data. If that new data unlocks any contiguous writes that can now be submitted, this method will return all applicable writes. This is done with 1 method call so you don't have to make two method calls (put(), get()) which acquires a lock each method call. """ if offset < self._next_offset: # This is a request for a write that we've already # seen. This can happen in the event of a retry # where if we retry at at offset N/2, we'll requeue # offsets 0-N/2 again. return [] writes = [] if offset in self._pending_offsets: # We've already queued this offset so this request is # a duplicate. In this case we should ignore # this request and prefer what's already queued. return [] heapq.heappush(self._writes, (offset, data)) self._pending_offsets.add(offset) while self._writes and self._writes[0][0] == self._next_offset: next_write = heapq.heappop(self._writes) writes.append({'offset': next_write[0], 'data': next_write[1]}) self._pending_offsets.remove(next_write[0]) self._next_offset += len(next_write[1]) return writes
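Behavior sketch, assuming the surrounding class is s3transfer's DeferQueue and that offsets arrive out of order:

    from s3transfer.download import DeferQueue  # assumed import path

    q = DeferQueue()
    q.request_writes(offset=0, data=b'aaaa')
    # [{'offset': 0, 'data': b'aaaa'}]    contiguous from offset 0, released immediately
    q.request_writes(offset=8, data=b'cccc')
    # []                                  gap at offset 4, so this write is deferred
    q.request_writes(offset=4, data=b'bbbb')
    # [{'offset': 4, 'data': b'bbbb'}, {'offset': 8, 'data': b'cccc'}]  gap filled, both released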
Backwards compat function to determine if a fileobj is seekable
def seekable(fileobj): """Backwards compat function to determine if a fileobj is seekable :param fileobj: The file-like object to determine if seekable :returns: True, if seekable. False, otherwise. """ # If the fileobj has a seekable attr, try calling the seekable() # method on it. if hasattr(fileobj, 'seekable'): return fileobj.seekable() # If there is no seekable attr, check if the object can be seeked # or telled. If it can, try to seek to the current position. elif hasattr(fileobj, 'seek') and hasattr(fileobj, 'tell'): try: fileobj.seek(0, 1) return True except (OSError, IOError): # If an io related error was thrown then it is not seekable. return False # Else, the fileobj is not seekable return False
Uploads a file to S3
def upload(self, fileobj, bucket, key, extra_args=None, subscribers=None): """Uploads a file to S3 :type fileobj: str or seekable file-like object :param fileobj: The name of a file to upload or a seekable file-like object to upload. It is recommended to use a filename because file-like objects may result in higher memory usage. :type bucket: str :param bucket: The name of the bucket to upload to :type key: str :param key: The name of the key to upload to :type extra_args: dict :param extra_args: Extra arguments that may be passed to the client operation :type subscribers: list(s3transfer.subscribers.BaseSubscriber) :param subscribers: The list of subscribers to be invoked in the order provided based on the event emit during the process of the transfer request. :rtype: s3transfer.futures.TransferFuture :returns: Transfer future representing the upload """ if extra_args is None: extra_args = {} if subscribers is None: subscribers = [] self._validate_all_known_args(extra_args, self.ALLOWED_UPLOAD_ARGS) call_args = CallArgs( fileobj=fileobj, bucket=bucket, key=key, extra_args=extra_args, subscribers=subscribers ) extra_main_kwargs = {} if self._bandwidth_limiter: extra_main_kwargs['bandwidth_limiter'] = self._bandwidth_limiter return self._submit_transfer( call_args, UploadSubmissionTask, extra_main_kwargs)
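A usage sketch with a boto3 client; the bucket and key names are placeholders:

    import boto3
    from s3transfer.manager import TransferManager

    client = boto3.client('s3')
    with TransferManager(client) as manager:
        # upload() returns immediately with a TransferFuture; result() blocks
        # until the transfer finishes (or raises the transfer's exception).
        future = manager.upload('/tmp/report.csv', 'my-bucket', 'reports/report.csv')
        future.result()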
Downloads a file from S3
def download(self, bucket, key, fileobj, extra_args=None, subscribers=None): """Downloads a file from S3 :type bucket: str :param bucket: The name of the bucket to download from :type key: str :param key: The name of the key to download from :type fileobj: str or seekable file-like object :param fileobj: The name of a file to download or a seekable file-like object to download. It is recommended to use a filename because file-like objects may result in higher memory usage. :type extra_args: dict :param extra_args: Extra arguments that may be passed to the client operation :type subscribers: list(s3transfer.subscribers.BaseSubscriber) :param subscribers: The list of subscribers to be invoked in the order provided based on the event emit during the process of the transfer request. :rtype: s3transfer.futures.TransferFuture :returns: Transfer future representing the download """ if extra_args is None: extra_args = {} if subscribers is None: subscribers = [] self._validate_all_known_args(extra_args, self.ALLOWED_DOWNLOAD_ARGS) call_args = CallArgs( bucket=bucket, key=key, fileobj=fileobj, extra_args=extra_args, subscribers=subscribers ) extra_main_kwargs = {'io_executor': self._io_executor} if self._bandwidth_limiter: extra_main_kwargs['bandwidth_limiter'] = self._bandwidth_limiter return self._submit_transfer( call_args, DownloadSubmissionTask, extra_main_kwargs)
Copies a file in S3
def copy(self, copy_source, bucket, key, extra_args=None, subscribers=None, source_client=None): """Copies a file in S3 :type copy_source: dict :param copy_source: The name of the source bucket, key name of the source object, and optional version ID of the source object. The dictionary format is: ``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note that the ``VersionId`` key is optional and may be omitted. :type bucket: str :param bucket: The name of the bucket to copy to :type key: str :param key: The name of the key to copy to :type extra_args: dict :param extra_args: Extra arguments that may be passed to the client operation :type subscribers: a list of subscribers :param subscribers: The list of subscribers to be invoked in the order provided based on the event emit during the process of the transfer request. :type source_client: botocore or boto3 Client :param source_client: The client to be used for operation that may happen at the source object. For example, this client is used for the head_object that determines the size of the copy. If no client is provided, the transfer manager's client is used as the client for the source object. :rtype: s3transfer.futures.TransferFuture :returns: Transfer future representing the copy """ if extra_args is None: extra_args = {} if subscribers is None: subscribers = [] if source_client is None: source_client = self._client self._validate_all_known_args(extra_args, self.ALLOWED_COPY_ARGS) call_args = CallArgs( copy_source=copy_source, bucket=bucket, key=key, extra_args=extra_args, subscribers=subscribers, source_client=source_client ) return self._submit_transfer(call_args, CopySubmissionTask)
Delete an S3 object.
def delete(self, bucket, key, extra_args=None, subscribers=None): """Delete an S3 object. :type bucket: str :param bucket: The name of the bucket. :type key: str :param key: The name of the S3 object to delete. :type extra_args: dict :param extra_args: Extra arguments that may be passed to the DeleteObject call. :type subscribers: list :param subscribers: A list of subscribers to be invoked during the process of the transfer request. Note that the ``on_progress`` callback is not invoked during object deletion. :rtype: s3transfer.futures.TransferFuture :return: Transfer future representing the deletion. """ if extra_args is None: extra_args = {} if subscribers is None: subscribers = [] self._validate_all_known_args(extra_args, self.ALLOWED_DELETE_ARGS) call_args = CallArgs( bucket=bucket, key=key, extra_args=extra_args, subscribers=subscribers ) return self._submit_transfer(call_args, DeleteSubmissionTask)
Shutdown the TransferManager
def shutdown(self, cancel=False, cancel_msg=''): """Shutdown the TransferManager It will wait till all transfers complete before it completely shuts down. :type cancel: boolean :param cancel: If True, calls TransferFuture.cancel() for all in-progress in transfers. This is useful if you want the shutdown to happen quicker. :type cancel_msg: str :param cancel_msg: The message to specify if canceling all in-progress transfers. """ self._shutdown(cancel, cancel, cancel_msg)
Cancels all inprogress transfers
def cancel(self, msg='', exc_type=CancelledError): """Cancels all inprogress transfers This cancels the inprogress transfers by calling cancel() on all tracked transfer coordinators. :param msg: The message to pass on to each transfer coordinator that gets cancelled. :param exc_type: The type of exception to set for the cancellation """ for transfer_coordinator in self.tracked_transfer_coordinators: transfer_coordinator.cancel(msg, exc_type)
Wait until there are no more inprogress transfers
def wait(self): """Wait until there are no more inprogress transfers This will not stop when failures are encountered and not propogate any of these errors from failed transfers, but it can be interrupted with a KeyboardInterrupt. """ try: transfer_coordinator = None for transfer_coordinator in self.tracked_transfer_coordinators: transfer_coordinator.result() except KeyboardInterrupt: logger.debug('Received KeyboardInterrupt in wait()') # If Keyboard interrupt is raised while waiting for # the result, then exit out of the wait and raise the # exception if transfer_coordinator: logger.debug( 'On KeyboardInterrupt was waiting for %s', transfer_coordinator) raise except Exception: # A general exception could have been thrown because # of result(). We just want to ignore this and continue # because we at least know that the transfer coordinator # has completed. pass
Reads a specific amount of data from a stream and returns it. If there is any data in initial_data, that will be popped out first.
def _read(self, fileobj, amount, truncate=True): """ Reads a specific amount of data from a stream and returns it. If there is any data in initial_data, that will be popped out first. :type fileobj: A file-like object that implements read :param fileobj: The stream to read from. :type amount: int :param amount: The number of bytes to read from the stream. :type truncate: bool :param truncate: Whether or not to truncate initial_data after reading from it. :return: Generator which generates part bodies from the initial data. """ # If the the initial data is empty, we simply read from the fileobj if len(self._initial_data) == 0: return fileobj.read(amount) # If the requested number of bytes is less than the amount of # initial data, pull entirely from initial data. if amount <= len(self._initial_data): data = self._initial_data[:amount] # Truncate initial data so we don't hang onto the data longer # than we need. if truncate: self._initial_data = self._initial_data[amount:] return data # At this point there is some initial data left, but not enough to # satisfy the number of bytes requested. Pull out the remaining # initial data and read the rest from the fileobj. amount_to_read = amount - len(self._initial_data) data = self._initial_data + fileobj.read(amount_to_read) # Zero out initial data so we don't hang onto the data any more. if truncate: self._initial_data = b'' return data
Wraps data with the interrupt reader and the file chunk reader.
def _wrap_data(self, data, callbacks, close_callbacks):
    """
    Wraps data with the interrupt reader and the file chunk reader.

    :type data: bytes
    :param data: The data to wrap.

    :type callbacks: list
    :param callbacks: The callbacks associated with the transfer future.

    :type close_callbacks: list
    :param close_callbacks: The callbacks to be called when closing the
        wrapper for the data.

    :return: Fully wrapped data.
    """
    fileobj = self._wrap_fileobj(six.BytesIO(data))
    return self._osutil.open_file_chunk_reader_from_fileobj(
        fileobj=fileobj, chunk_size=len(data), full_file_size=len(data),
        callbacks=callbacks, close_callbacks=close_callbacks)
Retrieves a class for managing input for an upload based on file type
def _get_upload_input_manager_cls(self, transfer_future):
    """Retrieves a class for managing input for an upload based on file type

    :type transfer_future: s3transfer.futures.TransferFuture
    :param transfer_future: The transfer future for the request

    :rtype: class of UploadInputManager
    :returns: The appropriate class to use for managing a specific type of
        input for uploads.
    """
    upload_manager_resolver_chain = [
        UploadFilenameInputManager,
        UploadSeekableInputManager,
        UploadNonSeekableInputManager,
    ]

    fileobj = transfer_future.meta.call_args.fileobj
    for upload_manager_cls in upload_manager_resolver_chain:
        if upload_manager_cls.is_compatible(fileobj):
            return upload_manager_cls
    raise RuntimeError(
        'Input %s of type: %s is not supported.' % (
            fileobj, type(fileobj)))
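The resolver-chain pattern above (the first class whose is_compatible() accepts the input wins) in a self-contained sketch; the classes here are simplified stand-ins, not the real upload input managers.

import io

class FilenameInput:
    @classmethod
    def is_compatible(cls, fileobj):
        return isinstance(fileobj, str)

class SeekableInput:
    @classmethod
    def is_compatible(cls, fileobj):
        return hasattr(fileobj, 'read') and fileobj.seekable()

class NonSeekableInput:
    @classmethod
    def is_compatible(cls, fileobj):
        return hasattr(fileobj, 'read')

def resolve(fileobj, chain=(FilenameInput, SeekableInput, NonSeekableInput)):
    # Return the first manager class that accepts this kind of input.
    for cls in chain:
        if cls.is_compatible(fileobj):
            return cls
    raise RuntimeError(
        'Input %s of type: %s is not supported.' % (fileobj, type(fileobj)))

print(resolve('/tmp/data.bin').__name__)     # FilenameInput
print(resolve(io.BytesIO(b'abc')).__name__)  # SeekableInput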
Submits the upload request for a transfer future, using a multipart upload when required and a regular PutObject otherwise.
def _submit(self, client, config, osutil, request_executor,
            transfer_future, bandwidth_limiter=None):
    """
    :param client: The client associated with the transfer manager

    :type config: s3transfer.manager.TransferConfig
    :param config: The transfer config associated with the transfer
        manager

    :type osutil: s3transfer.utils.OSUtil
    :param osutil: The os utility associated with the transfer manager

    :type request_executor: s3transfer.futures.BoundedExecutor
    :param request_executor: The request executor associated with the
        transfer manager

    :type transfer_future: s3transfer.futures.TransferFuture
    :param transfer_future: The transfer future associated with the
        transfer request that tasks are being submitted for

    :param bandwidth_limiter: An optional bandwidth limiter handed to the
        upload input manager to throttle the upload streams
    """
    upload_input_manager = self._get_upload_input_manager_cls(
        transfer_future)(
            osutil, self._transfer_coordinator, bandwidth_limiter)

    # Determine the size if it was not provided.
    if transfer_future.meta.size is None:
        upload_input_manager.provide_transfer_size(transfer_future)

    # Do a multipart upload if needed, otherwise do a regular put object.
    if not upload_input_manager.requires_multipart_upload(
            transfer_future, config):
        self._submit_upload_request(
            client, config, osutil, request_executor, transfer_future,
            upload_input_manager)
    else:
        self._submit_multipart_request(
            client, config, osutil, request_executor, transfer_future,
            upload_input_manager)
Uploads a file to S3 with a single PutObject call.
def _main(self, client, fileobj, bucket, key, extra_args):
    """
    :param client: The client to use when calling PutObject
    :param fileobj: The file to upload.
    :param bucket: The name of the bucket to upload to
    :param key: The name of the key to upload to
    :param extra_args: A dictionary of any extra arguments that may be
        used in the upload.
    """
    with fileobj as body:
        client.put_object(Bucket=bucket, Key=key, Body=body, **extra_args)
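For reference, the underlying call this task wraps is a plain PutObject. A minimal boto3 sketch, with placeholder bucket, key, and file names:

import boto3

client = boto3.client('s3')
# Upload the whole object in one request, mirroring what the task does
# with the already-opened fileobj it receives.
with open('local-file.txt', 'rb') as body:
    client.put_object(Bucket='my-bucket', Key='remote-key', Body=body)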
Uploads a single part of a multipart upload and returns its ETag and part number.
def _main(self, client, fileobj, bucket, key, upload_id, part_number,
          extra_args):
    """
    :param client: The client to use when calling UploadPart
    :param fileobj: The file to upload.
    :param bucket: The name of the bucket to upload to
    :param key: The name of the key to upload to
    :param upload_id: The id of the upload
    :param part_number: The number representing the part of the multipart
        upload
    :param extra_args: A dictionary of any extra arguments that may be
        used in the upload.

    :rtype: dict
    :returns: A dictionary representing a part::

        {'ETag': etag_value, 'PartNumber': part_number}

        This value can be appended to a list to be used to complete
        the multipart upload.
    """
    with fileobj as body:
        response = client.upload_part(
            Bucket=bucket, Key=key,
            UploadId=upload_id, PartNumber=part_number,
            Body=body, **extra_args)
    etag = response['ETag']
    return {'ETag': etag, 'PartNumber': part_number}
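A hedged sketch of how the returned part dictionaries are typically aggregated to complete a multipart upload with plain boto3 calls; bucket, key, and the in-memory chunks are placeholders (every part except the last must be at least 5 MiB).

import boto3

client = boto3.client('s3')
upload_id = client.create_multipart_upload(
    Bucket='my-bucket', Key='big-object')['UploadId']

parts = []
chunks = [b'a' * (5 * 1024 * 1024), b'final short chunk']
for part_number, chunk in enumerate(chunks, start=1):
    response = client.upload_part(
        Bucket='my-bucket', Key='big-object', UploadId=upload_id,
        PartNumber=part_number, Body=chunk)
    # Each entry matches the dictionary returned by the task above.
    parts.append({'ETag': response['ETag'], 'PartNumber': part_number})

client.complete_multipart_upload(
    Bucket='my-bucket', Key='big-object', UploadId=upload_id,
    MultipartUpload={'Parts': parts})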
Sets the exception on the future.
def set_exception(self, exception):
    """Sets the exception on the future."""
    if not self.done():
        raise TransferNotDoneError(
            'set_exception can only be called once the transfer is '
            'complete.')
    self._coordinator.set_exception(exception, override=True)
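A small sketch of the guard above, assuming a TransferFuture and TransferCoordinator can be constructed directly as shown (they are normally created by the manager):

from s3transfer.exceptions import TransferNotDoneError
from s3transfer.futures import TransferCoordinator, TransferFuture

coordinator = TransferCoordinator(transfer_id=1)
future = TransferFuture(coordinator=coordinator)
try:
    # The transfer has not finished, so overriding its outcome is refused.
    future.set_exception(ValueError('boom'))
except TransferNotDoneError as e:
    print(e)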
Set a result for the TransferFuture
def set_result(self, result):
    """Set a result for the TransferFuture

    Implies that the TransferFuture succeeded. This will always set a
    result because it is invoked on the final task, there is only ever
    one final task, and it is run at the very end of a transfer process.
    So if a result is being set for this final task, the transfer
    succeeded even if something came along and cancelled the transfer
    on the final task.
    """
    with self._lock:
        self._exception = None
        self._result = result
        self._status = 'success'
Set an exception for the TransferFuture
def set_exception(self, exception, override=False):
    """Set an exception for the TransferFuture

    Implies the TransferFuture failed.

    :param exception: The exception that caused the transfer to fail.
    :param override: If True, override any existing state.
    """
    with self._lock:
        if not self.done() or override:
            self._exception = exception
            self._status = 'failed'
Waits until TransferFuture is done and returns the result
def result(self):
    """Waits until TransferFuture is done and returns the result

    If the TransferFuture succeeded, it will return the result. If the
    TransferFuture failed, it will raise the exception associated with
    the failure.
    """
    # Doing a wait() with no timeout cannot be interrupted in Python 2 but
    # can be interrupted in Python 3, so we just wait with the largest
    # possible integer value, which is on the scale of billions of
    # years...
    self._done_event.wait(MAXINT)

    # Once done waiting, raise an exception if present or return the
    # final result.
    if self._exception:
        raise self._exception
    return self._result
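A usage sketch for blocking on a transfer's outcome, assuming boto3 is installed and placeholder bucket, key, and file names:

import boto3
from s3transfer.exceptions import CancelledError
from s3transfer.manager import TransferManager

client = boto3.client('s3')
with TransferManager(client) as manager:
    future = manager.download('my-bucket', 'remote-key', 'local-copy.bin')
    try:
        # Blocks until the download succeeds, fails, or is cancelled.
        future.result()
    except CancelledError as e:
        print('Transfer cancelled: %s' % e)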
Cancels the TransferFuture
def cancel(self, msg='', exc_type=CancelledError):
    """Cancels the TransferFuture

    :param msg: The message to attach to the cancellation
    :param exc_type: The type of exception to set for the cancellation
    """
    with self._lock:
        if not self.done():
            should_announce_done = False
            logger.debug('%s cancel(%s) called', self, msg)
            self._exception = exc_type(msg)
            if self._status == 'not-started':
                should_announce_done = True
            self._status = 'cancelled'
            if should_announce_done:
                self.announce_done()
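A minimal sketch of the state transition above, assuming a TransferCoordinator can be constructed directly (normally the manager does this):

from s3transfer.futures import TransferCoordinator

coordinator = TransferCoordinator(transfer_id=2)
coordinator.cancel('user requested abort')
# The exception is set and the status moves to its terminal value.
print(coordinator.status)  # 'cancelled'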
Submits a task to a provided executor
def submit(self, executor, task, tag=None):
    """Submits a task to a provided executor

    :type executor: s3transfer.futures.BoundedExecutor
    :param executor: The executor to submit the callable to

    :type task: s3transfer.tasks.Task
    :param task: The task to submit to the executor

    :type tag: s3transfer.futures.TaskTag
    :param tag: A tag to associate to the submitted task

    :rtype: concurrent.futures.Future
    :returns: A future representing the submitted task
    """
    logger.debug(
        "Submitting task %s to executor %s for transfer request: %s." % (
            task, executor, self.transfer_id)
    )
    future = executor.submit(task, tag=tag)
    # Add this created future to the list of associated futures just
    # in case it is needed during cleanups.
    self.add_associated_future(future)
    future.add_done_callback(
        FunctionContainer(self.remove_associated_future, future))
    return future
Add a done callback to be invoked when transfer is done
def add_done_callback(self, function, *args, **kwargs):
    """Add a done callback to be invoked when transfer is done"""
    with self._done_callbacks_lock:
        self._done_callbacks.append(
            FunctionContainer(function, *args, **kwargs)
        )
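A sketch of wiring a done callback on a coordinator, assuming announce_done() runs the queued callbacks once the transfer reaches a terminal state (coordinators are normally constructed and driven by the manager's task machinery):

from s3transfer.futures import TransferCoordinator

coordinator = TransferCoordinator(transfer_id=3)
# Queue a callback; it fires once the coordinator announces completion.
coordinator.add_done_callback(print, 'transfer finished')
coordinator.set_result('ok')
coordinator.announce_done()  # Prints 'transfer finished'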