INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Write the default config to the user's config file.
def write_default_config(self, overwrite=False):
    """Write the default config to the user's config file.

    :param bool overwrite: Write over an existing config if it exists.
    """
    target = self.user_config_file()
    # Keep an existing user config unless explicitly told to clobber it.
    if not overwrite and os.path.exists(target):
        return
    with io.open(target, mode='wb') as f:
        self.default_config.write(f)
Write the current config to a file ( defaults to user config ).
def write(self, outfile=None, section=None):
    """Write the current config to a file (defaults to user config).

    :param str outfile: The path to the file to write to.
    :param None/str section: The config section to write, or :data:`None`
        to write the entire config.
    """
    target = outfile or self.user_config_file()
    with io.open(target, 'wb') as f:
        self.data.write(outfile=f, section=section)
Read a config file * f *.
def read_config_file(self, f):
    """Read a config file *f*.

    :param str f: The path to a file to read.
    """
    configspec = self.default_file if self.validate else None
    try:
        config = ConfigObj(infile=f, configspec=configspec,
                           interpolation=False, encoding='utf8')
    except ConfigObjError as e:
        # Keep whatever could be parsed rather than failing outright.
        logger.warning(
            'Unable to parse line {} of config file {}'.format(
                e.line_number, f))
        config = e.config

    valid = True
    if self.validate:
        valid = config.validate(Validator(), preserve_errors=True, copy=True)
    # Track every file that actually contributed configuration data.
    if bool(config):
        self.config_filenames.append(config.filename)
    return config, valid
Read a list of config files.
def read_config_files(self, files):
    """Read a list of config files.

    :param iterable files: An iterable (e.g. list) of files to read.
    :return: ``True`` if all files validated, otherwise a mapping of
        file name to its validation errors.
    """
    errors = {}
    for path in files:
        config, valid = self.read_config_file(path)
        self.update(config)
        if valid is not True:
            errors[path] = valid
    return errors or True
Convert bytes * b * to a string.
def bytes_to_string(b):
    """Convert bytes *b* to a string.

    Hexlify bytes that can't be decoded as UTF-8.
    """
    if not isinstance(b, binary_type):
        # Anything that is not bytes passes through untouched.
        return b
    try:
        return b.decode('utf8')
    except UnicodeDecodeError:
        return '0x' + binascii.hexlify(b).decode('ascii')
Truncate string values.
def truncate_string(value, max_width=None):
    """Truncate string values longer than *max_width*; pass others through."""
    needs_truncation = (isinstance(value, text_type) and
                        max_width is not None and
                        len(value) > max_width)
    return value[:max_width] if needs_truncation else value
Filter the dict * d * to remove keys not in * keys *.
def filter_dict_by_key(d, keys):
    """Filter the dict *d* to remove keys not in *keys*."""
    allowed = set(keys)
    return {key: value for key, value in d.items() if key in allowed}
Return the unique items from iterable * seq * ( in order ).
def unique_items(seq):
    """Return the unique items from iterable *seq* (in order)."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
Replace multiple values in a string
def replace(s, replace):
    """Replace multiple values in a string.

    *replace* is an iterable of ``(old, new)`` pairs, applied in order.
    """
    for old, new in replace:
        s = s.replace(old, new)
    return s
Wrap the formatting inside a function for TabularOutputFormatter.
def adapter(data, headers, **kwargs):
    """Wrap the formatting inside a function for TabularOutputFormatter."""
    # Escape embedded newlines/tabs so each record stays on one line.
    escapes = (('\n', r'\n'), ('\t', r'\t'))
    for row in chain((headers,), data):
        yield "\t".join(replace(field, escapes) for field in row)
Run the * cmd * and exit with the proper exit code.
def call_and_exit(self, cmd, shell=True):
    """Run the *cmd* and exit with the proper exit code."""
    status = subprocess.call(cmd, shell=shell)
    sys.exit(status)
Run multiple commands in a row, exiting if one fails.
def call_in_sequence(self, cmds, shell=True):
    """Run multiple commands in a row, exiting if one fails.

    :param iterable cmds: The commands to run.
    :param bool shell: Run the commands through the shell (default True).
    """
    for cmd in cmds:
        retcode = subprocess.call(cmd, shell=shell)
        # Bug fix: any non-zero status is a failure, not just 1.
        # Propagate the actual status so callers/CI see the real code.
        if retcode != 0:
            sys.exit(retcode)
Apply command - line options.
def apply_options(self, cmd, options=()):
    """Apply command-line options."""
    for option in (self.default_cmd_options + options):
        # An option is active when the matching attribute is truthy.
        active = getattr(self, option, False)
        cmd = self.apply_option(cmd, option, active=active)
    return cmd
Apply a command - line option.
def apply_option(self, cmd, option, active=True):
    """Apply a command-line option.

    Placeholders of the form ``{option:value}`` in *cmd* are replaced
    by ``value`` when *active* is true, or removed entirely otherwise.

    :param str cmd: The command template.
    :param str option: The option name to look for.
    :param bool active: Whether the option is enabled.
    :return: The command with the placeholder resolved.
    :rtype: str
    """
    # Bug fix: the replacement must be a raw string — '\g<option>' in a
    # plain string is an invalid escape sequence (SyntaxWarning/error on
    # modern Python).
    return re.sub(r'{{{}\:(?P<option>[^}}]*)}}'.format(option),
                  r'\g<option>' if active else '', cmd)
Set the default options.
def initialize_options(self):
    """Set the default options.

    Defaults are assigned before delegating to the parent command so the
    parent can see (and potentially override) them.
    """
    # Git branch to diff against when linting.
    self.branch = 'master'
    # When True, pep8radius rewrites files in place.
    self.fix = False
    super(lint, self).initialize_options()
Run the linter.
def run(self):
    """Run the linter."""
    template = 'pep8radius {branch} {{fix: --in-place}}{{verbose: -vv}}'
    cmd = template.format(branch=self.branch)
    cmd = self.apply_options(cmd, ('fix', ))
    self.call_and_exit(cmd)
Generate and view the documentation.
def run(self):
    """Generate and view the documentation."""
    steps = (self.clean_docs_cmd, self.html_docs_cmd, self.view_docs_cmd)
    self.call_in_sequence(steps)
Truncate very long strings. Only needed for tabular representation because trying to tabulate very long data is problematic in terms of performance and does not make any sense visually.
def truncate_string(data, headers, max_field_width=None, **_):
    """Truncate very long strings.

    Only needed for tabular representation, because trying to tabulate
    very long data is problematic in terms of performance, and does not
    make any sense visually.

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param int max_field_width: Width to truncate field for display
    :return: The processed data and headers.
    :rtype: tuple
    """
    truncated_data = ([utils.truncate_string(v, max_field_width)
                       for v in row] for row in data)
    truncated_headers = [utils.truncate_string(h, max_field_width)
                         for h in headers]
    return truncated_data, truncated_headers
Convert all * data * and * headers * to strings.
def convert_to_string(data, headers, **_):
    """Convert all *data* and *headers* to strings.

    Binary data that cannot be decoded is converted to a hexadecimal
    representation via :func:`binascii.hexlify`.

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :return: The processed data and headers.
    :rtype: tuple
    """
    stringified_rows = ([utils.to_string(v) for v in row] for row in data)
    stringified_headers = [utils.to_string(h) for h in headers]
    return stringified_rows, stringified_headers
Override missing values in the * data * with * missing_value *.
def override_missing_value(data, headers, missing_value='', **_):
    """Override missing values in the *data* with *missing_value*.

    A missing value is any value that is :data:`None`.

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param missing_value: The default value to use for missing data.
    :return: The processed data and headers.
    :rtype: tuple
    """
    def fill_row(row):
        return [missing_value if value is None else value for value in row]

    return (fill_row(row) for row in data), headers
Override tab values in the * data * with * new_value *.
def override_tab_value(data, headers, new_value=' ', **_):
    """Override tab values in the *data* with *new_value*.

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param new_value: The new value to use for tab.
    :return: The processed data and headers.
    :rtype: tuple
    """
    def detab(field):
        # Only text values can contain tabs; leave everything else alone.
        if isinstance(field, text_type):
            return field.replace('\t', new_value)
        return field

    return ([detab(value) for value in row] for row in data), headers
Convert all * data * and * headers * bytes to strings.
def bytes_to_string(data, headers, **_):
    """Convert all *data* and *headers* bytes to strings.

    Binary data that cannot be decoded is converted to a hexadecimal
    representation via :func:`binascii.hexlify`.

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :return: The processed data and headers.
    :rtype: tuple
    """
    decoded_rows = ([utils.bytes_to_string(v) for v in row] for row in data)
    decoded_headers = [utils.bytes_to_string(h) for h in headers]
    return decoded_rows, decoded_headers
Align numbers in * data * on their decimal points.
def align_decimals(data, headers, column_types=(), **_):
    """Align numbers in *data* on their decimal points.

    Whitespace padding is added before a number so that all numbers in a
    column are aligned.

    Outputting data before aligning the decimals::

        1
        2.1
        10.59

    Outputting data after aligning the decimals::

         1
         2.1
        10.59

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param iterable column_types: The columns' type objects (e.g. int or float).
    :return: The processed data and headers.
    :rtype: tuple
    """
    # Widest integer part seen so far, per column.
    pointpos = len(headers) * [0]
    # Materialize *data* because it is iterated twice (measure, then pad).
    data = list(data)
    # First pass: find the widest integer part of each float column.
    for row in data:
        for i, v in enumerate(row):
            if column_types[i] is float and type(v) in float_types:
                v = text_type(v)
                pointpos[i] = max(utils.intlen(v), pointpos[i])

    def results(data):
        # Second pass: left-pad float values so decimal points line up.
        for row in data:
            result = []
            for i, v in enumerate(row):
                if column_types[i] is float and type(v) in float_types:
                    v = text_type(v)
                    result.append((pointpos[i] - utils.intlen(v)) * " " + v)
                else:
                    # Non-float values pass through unchanged.
                    result.append(v)
            yield result
    return results(data), headers
Quote leading/ trailing whitespace in * data *.
def quote_whitespaces(data, headers, quotestyle="'", **_):
    """Quote leading/trailing whitespace in *data*.

    When outputting data with leading or trailing whitespace, it can be
    useful to put quotation marks around the value so the whitespace is
    more apparent. If one value in a column needs to be quoted, then all
    values in that column are quoted to keep things consistent.

    .. NOTE::
       :data:`string.whitespace` is used to determine which characters are
       whitespace.

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param str quotestyle: The quotation mark to use (defaults to ``'``).
    :return: The processed data and headers.
    :rtype: tuple
    """
    # startswith/endswith accept a tuple of prefixes/suffixes.
    whitespace = tuple(string.whitespace)
    # Per-column flag: does any value in this column need quoting?
    quote = len(headers) * [False]
    # Materialize *data*: it is scanned once to set the flags, then again
    # inside results() to emit the quoted rows.
    data = list(data)
    for row in data:
        for i, v in enumerate(row):
            v = text_type(v)
            if v.startswith(whitespace) or v.endswith(whitespace):
                quote[i] = True

    def results(data):
        for row in data:
            result = []
            for i, v in enumerate(row):
                # Quote the whole column if any of its values needed it.
                quotation = quotestyle if quote[i] else ''
                result.append('{quotestyle}{value}{quotestyle}'.format(
                    quotestyle=quotation, value=v))
            yield result
    return results(data), headers
Style the *data* and *headers* (e.g. bold, italic, and colors).
def style_output(data, headers, style=None, header_token='Token.Output.Header', odd_row_token='Token.Output.OddRow', even_row_token='Token.Output.EvenRow', **_):
    """Style the *data* and *headers* (e.g. bold, italic, and colors)

    .. NOTE::
        This requires the `Pygments <http://pygments.org/>`_ library to
        be installed. You can install it with CLI Helpers as an extra::

            $ pip install cli_helpers[styles]

    Example usage::

        from cli_helpers.tabular_output.preprocessors import style_output
        from pygments.style import Style
        from pygments.token import Token

        class YourStyle(Style):
            default_style = ""
            styles = {
                Token.Output.Header: 'bold #ansired',
                Token.Output.OddRow: 'bg:#eee #111',
                Token.Output.EvenRow: '#0f0'
            }

        headers = ('First Name', 'Last Name')
        data = [['Fred', 'Roberts'], ['George', 'Smith']]

        data, headers = style_output(data, headers, style=YourStyle)

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param str/pygments.style.Style style: A Pygments style. You can `create
        your own styles <http://pygments.org/docs/styles/#creating-own-styles>`_.
    :param str header_token: The token type to be used for the headers.
    :param str odd_row_token: The token type to be used for odd rows.
    :param str even_row_token: The token type to be used for even rows.
    :return: The styled data and headers.
    :rtype: tuple
    """
    # If no style was requested (or Pygments is absent) the input passes
    # through unstyled.
    if style and HAS_PYGMENTS:
        formatter = Terminal256Formatter(style=style)

        def style_field(token, field):
            """Get the styled text for a *field* using *token* type."""
            s = StringIO()
            formatter.format(((token, field),), s)
            return s.getvalue()

        headers = [style_field(header_token, header) for header in headers]
        # enumerate(..., 1) makes the first data row an "odd" row.
        data = ([style_field(odd_row_token if i % 2 else even_row_token, f)
                 for f in r] for i, r in enumerate(data, 1))
    return iter(data), headers
Format numbers according to a format specification.
def format_numbers(data, headers, column_types=(), integer_format=None, float_format=None, **_):
    """Format numbers according to a format specification.

    This uses Python's format specification to format numbers of the
    following types: :class:`int`, :class:`py2:long` (Python 2),
    :class:`float`, and :class:`~decimal.Decimal`. See the
    :ref:`python:formatspec` for more information about the format strings.

    .. NOTE::
       A column is only formatted if all of its values are the same type
       (except for :data:`None`).

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param iterable column_types: The columns' type objects (e.g. int or float).
    :param str integer_format: The format string to use for integer columns.
    :param str float_format: The format string to use for float columns.
    :return: The processed data and headers.
    :rtype: tuple
    """
    # Nothing to do without a format spec or column type information.
    if (integer_format is None and float_format is None) or not column_types:
        return iter(data), headers

    def _format_number(field, column_type):
        # Only format a value whose runtime type matches the declared
        # column type; None and mismatched values pass through untouched.
        if integer_format and column_type is int and type(field) in int_types:
            return format(field, integer_format)
        elif float_format and column_type is float and type(field) in float_types:
            return format(field, float_format)
        return field

    data = ([_format_number(v, column_types[i]) for i, v in enumerate(row)] for row in data)
    return data, headers
Get a row separator for row * num *.
def _get_separator(num, sep_title, sep_character, sep_length): """Get a row separator for row *num*.""" left_divider_length = right_divider_length = sep_length if isinstance(sep_length, tuple): left_divider_length, right_divider_length = sep_length left_divider = sep_character * left_divider_length right_divider = sep_character * right_divider_length title = sep_title.format(n=num + 1) return "{left_divider}[ {title} ]{right_divider}\n".format( left_divider=left_divider, right_divider=right_divider, title=title)
Format a row.
def _format_row(headers, row): """Format a row.""" formatted_row = [' | '.join(field) for field in zip(headers, row)] return '\n'.join(formatted_row)
Format *data* and *headers* as a vertical table.
def vertical_table(data, headers, sep_title='{n}. row', sep_character='*',
                   sep_length=27):
    """Format *data* and *headers* as a vertical table.

    The values in *data* and *headers* must be strings.

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param str sep_title: The title given to each row separator. Defaults to
        ``'{n}. row'``. Any instance of ``'{n}'`` is replaced by the record
        number.
    :param str sep_character: The character used to separate rows. Defaults
        to ``'*'``.
    :param int/tuple sep_length: The number of separator characters that
        should appear on each side of the *sep_title*. Use a tuple to specify
        the left and right values separately.
    :return: The formatted data.
    :rtype: str
    """
    # Pad every header to the longest one so the '|' column lines up.
    header_len = max([len(x) for x in headers])
    padded_headers = [x.ljust(header_len) for x in headers]
    formatted_rows = [_format_row(padded_headers, row) for row in data]
    # Fix: removed dead `output = []` local that was never used.
    for i, result in enumerate(formatted_rows):
        yield _get_separator(i, sep_title, sep_character, sep_length) + result
Wrap vertical table in a function for TabularOutputFormatter.
def adapter(data, headers, **kwargs):
    """Wrap vertical table in a function for TabularOutputFormatter."""
    accepted = ('sep_title', 'sep_character', 'sep_length')
    return vertical_table(data, headers, **filter_dict_by_key(kwargs, accepted))
Wrap the formatting inside a function for TabularOutputFormatter.
def adapter(data, headers, table_format='csv', **kwargs):
    """Wrap the formatting inside a function for TabularOutputFormatter."""
    keys = ('dialect', 'delimiter', 'doublequote', 'escapechar',
            'quotechar', 'quoting', 'skipinitialspace', 'strict')
    if table_format == 'csv':
        delimiter = ','
    elif table_format == 'csv-tab':
        delimiter = '\t'
    else:
        raise ValueError('Invalid table_format specified.')

    ckwargs = {'delimiter': delimiter, 'lineterminator': ''}
    ckwargs.update(filter_dict_by_key(kwargs, keys))

    line_buf = linewriter()
    writer = csv.writer(line_buf, **ckwargs)
    writer.writerow(headers)
    yield line_buf.line
    for row in data:
        line_buf.reset()
        writer.writerow(row)
        yield line_buf.line
Wrap terminaltables inside a function for TabularOutputFormatter.
def adapter(data, headers, table_format=None, **kwargs):
    """Wrap terminaltables inside a function for TabularOutputFormatter."""
    keys = ('title', )
    table_cls = table_format_handler[table_format]
    table = table_cls([headers] + list(data),
                      **filter_dict_by_key(kwargs, keys))
    # Pre-compute column widths so gen_table does not have to.
    dimensions = terminaltables.width_and_alignment.max_dimensions(
        table.table_data, table.padding_left, table.padding_right)[:3]
    for rendered_row in table.gen_table(*dimensions):
        yield u''.join(rendered_row)
Copy template and substitute template strings
def render_template(template_file, dst_file, **kwargs):
    """Copy template and substitute template strings.

    File `template_file` is copied to `dst_file`. Then, each template
    variable is replaced by a value. Template variables are of the form
    {{val}}

    Example:
    Contents of template_file:
    VAR1={{val1}}
    VAR2={{val2}}
    VAR3={{val3}}

    render_template(template_file, output_file, val1="hello", val2="world")

    Contents of output_file:
    VAR1=hello
    VAR2=world
    VAR3={{val3}}

    :param template_file: Path to the template file.
    :param dst_file: Path to the destination file.
    :param kwargs: Keys correspond to template variables.
    :return:
    """
    with open(template_file) as f:
        dst_text = f.read()
    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; items() works on both.
    for key, value in kwargs.items():
        dst_text = dst_text.replace("{{" + key + "}}", value)
    with open(dst_file, "wt") as f:
        f.write(dst_text)
convert the fields of the object into a dictionary
def to_dict(self): """ convert the fields of the object into a dictionnary """ # all the attibutes defined by PKCS#11 all_attributes = PyKCS11.CKA.keys() # only use the integer values and not the strings like 'CKM_RSA_PKCS' all_attributes = [attr for attr in all_attributes if isinstance(attr, int)] # all the attributes of the object attributes = self.session.getAttributeValue(self, all_attributes) dico = dict() for key, attr in zip(all_attributes, attributes): if attr is None: continue if key == CKA_CLASS: dico[PyKCS11.CKA[key]] = PyKCS11.CKO[attr] elif key == CKA_CERTIFICATE_TYPE: dico[PyKCS11.CKA[key]] = PyKCS11.CKC[attr] elif key == CKA_KEY_TYPE: dico[PyKCS11.CKA[key]] = PyKCS11.CKK[attr] else: dico[PyKCS11.CKA[key]] = attr return dico
parse the self. flags field and create a list of CKF_ * strings corresponding to bits set in flags
def flags2text(self):
    """Parse the `self.flags` field and create a list of `CKF_*` strings
    corresponding to bits set in flags.

    :return: a list of strings
    :rtype: list
    """
    return [name for bit, name in self.flags_dict.items()
            if self.flags & bit]
convert the fields of the object into a dictionary
def to_dict(self):
    """Convert the fields of the object into a dictionary.

    The "flags" and "state" fields are translated to their text form;
    every other field is copied verbatim from the attribute of the same
    name.

    :return: a field-name to value dictionary
    :rtype: dict
    """
    dico = dict()
    for field in self.fields.keys():
        if field == "flags":
            dico[field] = self.flags2text()
        elif field == "state":
            dico[field] = self.state2text()
        else:
            # Bug fix: use getattr() instead of eval("self." + field) —
            # eval is slower and unsafe if field names are untrusted.
            dico[field] = getattr(self, field)
    return dico
load a PKCS#11 library
def load(self, pkcs11dll_filename=None, *init_string):
    """Load a PKCS#11 library.

    :type pkcs11dll_filename: string
    :param pkcs11dll_filename: the library name. If this parameter is not
      set then the environment variable `PYKCS11LIB` is used instead
    :raises: :class:`PyKCS11Error` (-1): when the load fails
    """
    if pkcs11dll_filename is None:
        # Fall back to the PYKCS11LIB environment variable.
        pkcs11dll_filename = os.getenv("PYKCS11LIB")
        if pkcs11dll_filename is None:
            raise PyKCS11Error(-1, "No PKCS11 library specified (set PYKCS11LIB env variable)")
    rv = self.lib.Load(pkcs11dll_filename)
    if rv == 0:
        raise PyKCS11Error(-1, pkcs11dll_filename)
C_InitToken
def initToken(self, slot, pin, label):
    """C_InitToken

    :param slot: slot number returned by :func:`getSlotList`
    :type slot: integer
    :param pin: Security Officer's initial PIN
    :param label: new label of the token
    """
    pin_bytes = ckbytelist(pin)
    rv = self.lib.C_InitToken(slot, pin_bytes, label)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
C_GetInfo
def getInfo(self):
    """C_GetInfo

    :return: a :class:`CK_INFO` object
    """
    low_info = PyKCS11.LowLevel.CK_INFO()
    rv = self.lib.C_GetInfo(low_info)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    # Copy the low-level structure into the high-level wrapper.
    info = CK_INFO()
    info.cryptokiVersion = (low_info.cryptokiVersion.major,
                            low_info.cryptokiVersion.minor)
    info.manufacturerID = low_info.GetManufacturerID()
    info.flags = low_info.flags
    info.libraryDescription = low_info.GetLibraryDescription()
    info.libraryVersion = (low_info.libraryVersion.major,
                           low_info.libraryVersion.minor)
    return info
C_GetSlotList
def getSlotList(self, tokenPresent=False):
    """C_GetSlotList

    :param tokenPresent: `False` (default) to list all slots,
      `True` to list only slots with present tokens
    :type tokenPresent: bool
    :return: a list of available slots
    :rtype: list
    """
    slotList = PyKCS11.LowLevel.ckintlist()
    present_flag = CK_TRUE if tokenPresent else CK_FALSE
    rv = self.lib.C_GetSlotList(present_flag, slotList)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    # Copy the SWIG container into a plain Python list.
    return [slotList[i] for i in range(len(slotList))]
C_GetSlotInfo
def getSlotInfo(self, slot):
    """C_GetSlotInfo

    :param slot: slot number returned by :func:`getSlotList`
    :type slot: integer
    :return: a :class:`CK_SLOT_INFO` object
    """
    low_info = PyKCS11.LowLevel.CK_SLOT_INFO()
    rv = self.lib.C_GetSlotInfo(slot, low_info)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    info = CK_SLOT_INFO()
    info.slotDescription = low_info.GetSlotDescription()
    info.manufacturerID = low_info.GetManufacturerID()
    info.flags = low_info.flags
    info.hardwareVersion = low_info.GetHardwareVersion()
    info.firmwareVersion = low_info.GetFirmwareVersion()
    return info
C_GetTokenInfo
def getTokenInfo(self, slot):
    """C_GetTokenInfo

    :param slot: slot number returned by :func:`getSlotList`
    :type slot: integer
    :return: a :class:`CK_TOKEN_INFO` object
    """
    tokeninfo = PyKCS11.LowLevel.CK_TOKEN_INFO()
    rv = self.lib.C_GetTokenInfo(slot, tokeninfo)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)

    t = CK_TOKEN_INFO()
    t.label = tokeninfo.GetLabel()
    t.manufacturerID = tokeninfo.GetManufacturerID()
    t.model = tokeninfo.GetModel()
    t.serialNumber = tokeninfo.GetSerialNumber()
    t.flags = tokeninfo.flags
    # Counters and memory sizes may legitimately be unavailable; map
    # CK_UNAVAILABLE_INFORMATION to -1 (same mapping as before, deduplicated).
    for field in ('ulMaxSessionCount', 'ulSessionCount',
                  'ulMaxRwSessionCount', 'ulRwSessionCount',
                  'ulTotalPublicMemory', 'ulFreePublicMemory',
                  'ulTotalPrivateMemory', 'ulFreePrivateMemory'):
        value = getattr(tokeninfo, field)
        setattr(t, field,
                -1 if value == CK_UNAVAILABLE_INFORMATION else value)
    # PIN length limits are always available — no -1 mapping.
    t.ulMaxPinLen = tokeninfo.ulMaxPinLen
    t.ulMinPinLen = tokeninfo.ulMinPinLen
    t.hardwareVersion = (tokeninfo.hardwareVersion.major,
                         tokeninfo.hardwareVersion.minor)
    t.firmwareVersion = (tokeninfo.firmwareVersion.major,
                         tokeninfo.firmwareVersion.minor)
    # Replace NUL padding with spaces in the token clock value.
    t.utcTime = tokeninfo.GetUtcTime().replace('\000', ' ')
    return t
C_OpenSession
def openSession(self, slot, flags=0):
    """C_OpenSession

    :param slot: slot number returned by :func:`getSlotList`
    :type slot: integer
    :param flags: 0 (default), `CKF_RW_SESSION` for RW session
    :type flags: integer
    :return: a :class:`Session` object
    """
    handle = PyKCS11.LowLevel.CK_SESSION_HANDLE()
    # CKF_SERIAL_SESSION is mandatory per the PKCS#11 spec.
    flags |= CKF_SERIAL_SESSION
    rv = self.lib.C_OpenSession(slot, flags, handle)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return Session(self, handle)
C_CloseAllSessions
def closeAllSessions(self, slot):
    """C_CloseAllSessions

    :param slot: slot number
    :type slot: integer
    """
    rv = self.lib.C_CloseAllSessions(slot)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
C_GetMechanismList
def getMechanismList(self, slot):
    """C_GetMechanismList

    :param slot: slot number returned by :func:`getSlotList`
    :type slot: integer
    :return: the list of available mechanisms for a slot
    :rtype: list
    """
    mechanismList = PyKCS11.LowLevel.ckintlist()
    rv = self.lib.C_GetMechanismList(slot, mechanismList)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    m = []
    for x in range(len(mechanismList)):
        mechanism = mechanismList[x]
        if mechanism >= CKM_VENDOR_DEFINED:
            # Bug fix: mechanisms use the CKM_ prefix; CKR_ is the prefix
            # for return values. Register the synthetic name in both
            # directions so later lookups work.
            k = 'CKM_VENDOR_DEFINED_%X' % (mechanism - CKM_VENDOR_DEFINED)
            CKM[k] = mechanism
            CKM[mechanism] = k
        m.append(CKM[mechanism])
    return m
C_GetMechanismInfo
def getMechanismInfo(self, slot, type):
    """C_GetMechanismInfo

    :param slot: slot number returned by :func:`getSlotList`
    :type slot: integer
    :param type: a `CKM_*` type
    :type type: integer
    :return: information about a mechanism
    :rtype: a :class:`CK_MECHANISM_INFO` object
    """
    low_info = PyKCS11.LowLevel.CK_MECHANISM_INFO()
    rv = self.lib.C_GetMechanismInfo(slot, CKM[type], low_info)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    info = CK_MECHANISM_INFO()
    info.ulMinKeySize = low_info.ulMinKeySize
    info.ulMaxKeySize = low_info.ulMaxKeySize
    info.flags = low_info.flags
    return info
C_WaitForSlotEvent
def waitForSlotEvent(self, flags=0):
    """C_WaitForSlotEvent

    :param flags: 0 (default) or `CKF_DONT_BLOCK`
    :type flags: integer
    :return: slot
    :rtype: integer
    """
    reserved = 0
    (rv, slot) = self.lib.C_WaitForSlotEvent(flags, reserved)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return slot
C_DigestUpdate
def update(self, data):
    """C_DigestUpdate

    :param data: data to add to the digest
    :type data: bytes or string
    :return: this object, to allow call chaining
    """
    buf = ckbytelist(data)
    rv = self._lib.C_DigestUpdate(self._session, buf)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return self
C_DigestKey
def digestKey(self, handle):
    """C_DigestKey

    :param handle: key handle
    :type handle: CK_OBJECT_HANDLE
    :return: this object, to allow call chaining
    """
    rv = self._lib.C_DigestKey(self._session, handle)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return self
C_DigestFinal
def final(self):
    """C_DigestFinal

    :return: the digest
    :rtype: ckbytelist
    """
    digest = ckbytelist()
    # PKCS#11 size-query convention: the first call only reports the
    # required buffer size, the second call fills in the digest.
    for _ in range(2):
        rv = self._lib.C_DigestFinal(self._session, digest)
        if rv != CKR_OK:
            raise PyKCS11Error(rv)
    return digest
C_CloseSession
def closeSession(self):
    """C_CloseSession"""
    rv = self.lib.C_CloseSession(self.session)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
C_GetSessionInfo
def getSessionInfo(self):
    """C_GetSessionInfo

    :return: a :class:`CK_SESSION_INFO` object
    """
    low_info = PyKCS11.LowLevel.CK_SESSION_INFO()
    rv = self.lib.C_GetSessionInfo(self.session, low_info)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    info = CK_SESSION_INFO()
    info.slotID = low_info.slotID
    info.state = low_info.state
    info.flags = low_info.flags
    info.ulDeviceError = low_info.ulDeviceError
    return info
C_Login
def login(self, pin, user_type=CKU_USER):
    """C_Login

    :param pin: the user's PIN or None for
      CKF_PROTECTED_AUTHENTICATION_PATH
    :type pin: string
    :param user_type: the user type. The default value is CKU_USER. You
      may also use CKU_SO
    :type user_type: integer
    """
    pin_bytes = ckbytelist(pin)
    rv = self.lib.C_Login(self.session, user_type, pin_bytes)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
C_Logout
def logout(self):
    """C_Logout"""
    rv = self.lib.C_Logout(self.session)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    # Fix: removed `del self` — it only unbinds the local name inside this
    # method and has no effect on the object or the session.
C_InitPIN
def initPin(self, pin):
    """C_InitPIN

    :param pin: new PIN
    """
    pin_bytes = ckbytelist(pin)
    rv = self.lib.C_InitPIN(self.session, pin_bytes)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
C_SetPIN
def setPin(self, old_pin, new_pin):
    """C_SetPIN

    :param old_pin: old PIN
    :param new_pin: new PIN
    """
    old_bytes = ckbytelist(old_pin)
    new_bytes = ckbytelist(new_pin)
    rv = self.lib.C_SetPIN(self.session, old_bytes, new_bytes)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
C_CreateObject
def createObject(self, template):
    """C_CreateObject

    :param template: object template
    :return: the handle of the created object
    """
    attrs = self._template2ckattrlist(template)
    handle = PyKCS11.LowLevel.CK_OBJECT_HANDLE()
    rv = self.lib.C_CreateObject(self.session, attrs, handle)
    if rv != PyKCS11.CKR_OK:
        raise PyKCS11.PyKCS11Error(rv)
    return handle
C_DestroyObject
def destroyObject(self, obj):
    """C_DestroyObject

    :param obj: object ID
    """
    rv = self.lib.C_DestroyObject(self.session, obj)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
C_DigestInit/ C_DigestUpdate/ C_DigestKey/ C_DigestFinal
def digestSession(self, mecha=MechanismSHA1):
    """C_DigestInit/C_DigestUpdate/C_DigestKey/C_DigestFinal

    :param mecha: the digesting mechanism to be used
      (use `MechanismSHA1` for `CKM_SHA_1`)
    :type mecha: :class:`Mechanism`
    :return: A :class:`DigestSession` object
    :rtype: DigestSession
    """
    return DigestSession(self.lib, self.session, mecha)
C_DigestInit/ C_Digest
def digest(self, data, mecha=MechanismSHA1):
    """C_DigestInit/C_Digest

    :param data: the data to be digested
    :type data: (binary) string or list/tuple of bytes
    :param mecha: the digesting mechanism to be used
      (use `MechanismSHA1` for `CKM_SHA_1`)
    :type mecha: :class:`Mechanism`
    :return: the computed digest
    :rtype: list of bytes

    :note: the returned value is an instance of :class:`ckbytelist`.
      You can easily convert it to a binary string with:
      ``bytes(ckbytelistDigest)``
      or, for Python 2:
      ``''.join(chr(i) for i in ckbytelistDigest)``
    """
    native_mecha = mecha.to_native()
    input_bytes = ckbytelist(data)
    digest = ckbytelist()
    rv = self.lib.C_DigestInit(self.session, native_mecha)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    # PKCS#11 size-query convention: first call reports the required
    # size, second call produces the actual digest.
    for _ in range(2):
        rv = self.lib.C_Digest(self.session, input_bytes, digest)
        if rv != CKR_OK:
            raise PyKCS11Error(rv)
    return digest
C_SignInit/ C_Sign
def sign(self, key, data, mecha=MechanismRSAPKCS1):
    """C_SignInit/C_Sign

    :param key: a key handle, obtained calling :func:`findObjects`.
    :type key: integer
    :param data: the data to be signed
    :type data: (binary) string or list/tuple of bytes
    :param mecha: the signing mechanism to be used
      (use `MechanismRSAPKCS1` for `CKM_RSA_PKCS`)
    :type mecha: :class:`Mechanism`
    :return: the computed signature
    :rtype: list of bytes

    :note: the returned value is an instance of :class:`ckbytelist`.
      You can easily convert it to a binary string with:
      ``bytes(ckbytelistSignature)``
      or, for Python 2:
      ``''.join(chr(i) for i in ckbytelistSignature)``
    """
    native_mecha = mecha.to_native()
    input_bytes = ckbytelist(data)
    signature = ckbytelist()
    rv = self.lib.C_SignInit(self.session, native_mecha, key)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    # PKCS#11 size-query convention: first call reports the required
    # size, second call produces the actual signature.
    for _ in range(2):
        rv = self.lib.C_Sign(self.session, input_bytes, signature)
        if rv != CKR_OK:
            raise PyKCS11Error(rv)
    return signature
C_VerifyInit/ C_Verify
def verify(self, key, data, signature, mecha=MechanismRSAPKCS1):
    """C_VerifyInit/C_Verify

    :param key: a key handle, obtained calling :func:`findObjects`.
    :type key: integer
    :param data: the data that was signed
    :type data: (binary) string or list/tuple of bytes
    :param signature: the signature to be verified
    :type signature: (binary) string or list/tuple of bytes
    :param mecha: the signing mechanism to be used
      (use `MechanismRSAPKCS1` for `CKM_RSA_PKCS`)
    :type mecha: :class:`Mechanism`
    :return: True if signature is valid, False otherwise
    :rtype: bool
    """
    native_mecha = mecha.to_native()
    data_bytes = ckbytelist(data)
    rv = self.lib.C_VerifyInit(self.session, native_mecha, key)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    # NOTE(review): *signature* is passed through without a ckbytelist()
    # wrap, unlike *data* — looks intentional for ckbytelist inputs from
    # sign(), but confirm plain strings/tuples are accepted here too.
    rv = self.lib.C_Verify(self.session, data_bytes, signature)
    if rv == CKR_OK:
        return True
    elif rv == CKR_SIGNATURE_INVALID:
        return False
    raise PyKCS11Error(rv)
C_EncryptInit/ C_Encrypt
def encrypt(self, key, data, mecha=MechanismRSAPKCS1):
    """ C_EncryptInit/C_Encrypt

    :param key: a key handle, obtained calling :func:`findObjects`.
    :type key: integer
    :param data: the data to be encrypted
    :type data: (binary) string or list/tuple of bytes
    :param mecha: the encryption mechanism to be used
        (use `MechanismRSAPKCS1` for `CKM_RSA_PKCS`)
    :type mecha: :class:`Mechanism`
    :return: the encrypted data
    :rtype: list of bytes

    :note: the returned value is an instance of :class:`ckbytelist`.
        You can easily convert it to a binary string with
        ``bytes(ckbytelistEncrypted)`` or, for Python 2,
        ``''.join(chr(i) for i in ckbytelistEncrypted)``
    """
    mech = mecha.to_native()
    payload = ckbytelist(data)
    result = ckbytelist()
    rv = self.lib.C_EncryptInit(self.session, mech, key)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    # PKCS#11 two-phase call: the first C_Encrypt queries the output
    # size, the second fetches the actual ciphertext bytes.
    for _ in range(2):
        rv = self.lib.C_Encrypt(self.session, payload, result)
        if rv != CKR_OK:
            raise PyKCS11Error(rv)
    return result
C_DecryptInit/ C_Decrypt
def decrypt(self, key, data, mecha=MechanismRSAPKCS1):
    """ C_DecryptInit/C_Decrypt

    :param key: a key handle, obtained calling :func:`findObjects`.
    :type key: integer
    :param data: the data to be decrypted
    :type data: (binary) string or list/tuple of bytes
    :param mecha: the decrypt mechanism to be used
    :type mecha: :class:`Mechanism` instance or :class:`MechanismRSAPKCS1`
        for CKM_RSA_PKCS
    :return: the decrypted data
    :rtype: list of bytes

    :note: the returned value is an instance of :class:`ckbytelist`.
        You can easily convert it to a binary string with
        ``bytes(ckbytelistData)`` or, for Python 2,
        ``''.join(chr(i) for i in ckbytelistData)``
    """
    mech = mecha.to_native()
    payload = ckbytelist(data)
    result = ckbytelist()
    rv = self.lib.C_DecryptInit(self.session, mech, key)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    # PKCS#11 two-phase call: the first C_Decrypt queries the output
    # size, the second fetches the actual plaintext bytes.
    for _ in range(2):
        rv = self.lib.C_Decrypt(self.session, payload, result)
        if rv != CKR_OK:
            raise PyKCS11Error(rv)
    return result
C_WrapKey
def wrapKey(self, wrappingKey, key, mecha=MechanismRSAPKCS1):
    """ C_WrapKey

    :param wrappingKey: a wrapping key handle
    :type wrappingKey: integer
    :param key: a handle of the key to be wrapped
    :type key: integer
    :param mecha: the encrypt mechanism to be used
        (use `MechanismRSAPKCS1` for `CKM_RSA_PKCS`)
    :type mecha: :class:`Mechanism`
    :return: the wrapped key bytes
    :rtype: list of bytes

    :note: the returned value is an instance of :class:`ckbytelist`.
        You can easily convert it to a binary string with
        ``bytes(ckbytelistData)`` or, for Python 2,
        ``''.join(chr(i) for i in ckbytelistData)``
    """
    mech = mecha.to_native()
    wrapped = ckbytelist()
    # PKCS#11 two-phase call: the first C_WrapKey queries the output
    # size, the second fetches the actual wrapped key bytes.
    for _ in range(2):
        rv = self.lib.C_WrapKey(self.session, mech, wrappingKey, key,
                                wrapped)
        if rv != CKR_OK:
            raise PyKCS11Error(rv)
    return wrapped
C_UnwrapKey
def unwrapKey(self, unwrappingKey, wrappedKey, template, mecha=MechanismRSAPKCS1):
    """ C_UnwrapKey

    :param unwrappingKey: the unwrapping key handle
    :type unwrappingKey: integer
    :param wrappedKey: the bytes of the wrapped key
    :type wrappedKey: (binary) string or list/tuple of bytes
    :param template: template for the unwrapped key
    :param mecha: the decrypt mechanism to be used
        (use `MechanismRSAPKCS1` for `CKM_RSA_PKCS`)
    :type mecha: :class:`Mechanism`
    :return: the unwrapped key object
    :rtype: integer
    """
    mech = mecha.to_native()
    wrapped = ckbytelist(wrappedKey)
    new_handle = PyKCS11.LowLevel.CK_OBJECT_HANDLE()
    attrs = self._template2ckattrlist(template)
    rv = self.lib.C_UnwrapKey(self.session, mech, unwrappingKey,
                              wrapped, attrs, new_handle)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return new_handle
is the type a numerical value?
def isNum(self, type):
    """ is the type a numerical value?

    :param type: PKCS#11 type like `CKA_CERTIFICATE_TYPE`
    :rtype: bool
    """
    # attributes whose CK value is interpreted as an integer
    numeric_types = (
        CKA_CERTIFICATE_TYPE,
        CKA_CLASS,
        CKA_KEY_GEN_MECHANISM,
        CKA_KEY_TYPE,
        CKA_MODULUS_BITS,
        CKA_VALUE_BITS,
        CKA_VALUE_LEN,
    )
    return type in numeric_types
is the type a boolean value?
def isBool(self, type):
    """ is the type a boolean value?

    :param type: PKCS#11 type like `CKA_ALWAYS_SENSITIVE`
    :rtype: bool
    """
    # attributes whose CK value is interpreted as CK_BBOOL
    boolean_types = (
        CKA_ALWAYS_SENSITIVE,
        CKA_DECRYPT,
        CKA_DERIVE,
        CKA_ENCRYPT,
        CKA_EXTRACTABLE,
        CKA_HAS_RESET,
        CKA_LOCAL,
        CKA_MODIFIABLE,
        CKA_NEVER_EXTRACTABLE,
        CKA_PRIVATE,
        CKA_RESET_ON_INIT,
        CKA_SECONDARY_AUTH,
        CKA_SENSITIVE,
        CKA_SIGN,
        CKA_SIGN_RECOVER,
        CKA_TOKEN,
        CKA_TRUSTED,
        CKA_UNWRAP,
        CKA_VERIFY,
        CKA_VERIFY_RECOVER,
        CKA_WRAP,
        CKA_WRAP_WITH_TRUSTED,
    )
    return type in boolean_types
is the type a byte array value?
def isBin(self, type):
    """ is the type a byte array value?

    :param type: PKCS#11 type like `CKA_MODULUS`
    :rtype: bool
    """
    # binary is the fallback category: anything that is neither
    # boolean, string nor numeric
    return not (self.isBool(type)
                or self.isString(type)
                or self.isNum(type))
generate a secret key
def generateKey(self, template, mecha=MechanismAESGENERATEKEY):
    """ generate a secret key

    :param template: template for the secret key
    :param mecha: mechanism to use
    :return: handle of the generated key
    :rtype: PyKCS11.LowLevel.CK_OBJECT_HANDLE
    """
    attrs = self._template2ckattrlist(template)
    key_handle = PyKCS11.LowLevel.CK_OBJECT_HANDLE()
    mech = mecha.to_native()
    rv = self.lib.C_GenerateKey(self.session, mech, attrs, key_handle)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return key_handle
generate a key pair
def generateKeyPair(self, templatePub, templatePriv, mecha=MechanismRSAGENERATEKEYPAIR):
    """ generate a key pair

    :param templatePub: template for the public key
    :param templatePriv: template for the private key
    :param mecha: mechanism to use
    :return: a tuple of handles (pub, priv)
    :rtype: tuple
    """
    pub_attrs = self._template2ckattrlist(templatePub)
    priv_attrs = self._template2ckattrlist(templatePriv)
    pub_handle = PyKCS11.LowLevel.CK_OBJECT_HANDLE()
    priv_handle = PyKCS11.LowLevel.CK_OBJECT_HANDLE()
    mech = mecha.to_native()
    rv = self.lib.C_GenerateKeyPair(self.session, mech,
                                    pub_attrs, priv_attrs,
                                    pub_handle, priv_handle)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return pub_handle, priv_handle
find the objects matching the template pattern
def findObjects(self, template=()):
    """ find the objects matching the template pattern

    :param template: list of attributes tuples (attribute,value).
      The default value is () and all the objects are returned
    :type template: list
    :return: a list of object ids
    :rtype: list
    """
    t = self._template2ckattrlist(template)
    # we search for 10 objects by default. speed/memory tradeoff
    result = PyKCS11.LowLevel.ckobjlist(10)
    rv = self.lib.C_FindObjectsInit(self.session, t)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    res = []
    while True:
        # each C_FindObjects call fills 'result' with the next batch
        # of (at most 10) matching object handles
        rv = self.lib.C_FindObjects(self.session, result)
        if rv != CKR_OK:
            raise PyKCS11Error(rv)
        for x in result:
            # make a copy of the handle: the original value get
            # corrupted (!!)
            a = CK_OBJECT_HANDLE(self)
            a.assign(x.value())
            res.append(a)
        # an empty batch means the enumeration is exhausted
        if len(result) == 0:
            break
    rv = self.lib.C_FindObjectsFinal(self.session)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return res
C_GetAttributeValue
def getAttributeValue(self, obj_id, attr, allAsBinary=False):
    """ C_GetAttributeValue

    :param obj_id: object ID returned by :func:`findObjects`
    :type obj_id: integer
    :param attr: list of attributes
    :type attr: list
    :param allAsBinary: return all values as binary data; default is False.
    :type allAsBinary: Boolean
    :return: a list of values corresponding to the list of attributes
    :rtype: list

    :see: :func:`getAttributeValue_fragmented`

    :note: if allAsBinary is True the function do not convert results to
      Python types (i.e.: CKA_TOKEN to Bool, CKA_CLASS to int, ...).

      Binary data is returned as :class:`ckbytelist` type, usable
      as a list containing only bytes.
      You can easily convert it to a binary string with:
      ``bytes(ckbytelistVariable)``
      or, for Python 2:
      ``''.join(chr(i) for i in ckbytelistVariable)``
    """
    valTemplate = PyKCS11.LowLevel.ckattrlist(len(attr))
    for x in range(len(attr)):
        valTemplate[x].SetType(attr[x])
    # first call to get the attribute size and reserve the memory
    rv = self.lib.C_GetAttributeValue(self.session, obj_id, valTemplate)
    # some tokens refuse to answer a bulk query that contains an
    # invalid/sensitive attribute: fall back to one-by-one retrieval
    if rv in (CKR_ATTRIBUTE_TYPE_INVALID, CKR_ATTRIBUTE_SENSITIVE,
              CKR_ARGUMENTS_BAD):
        return self.getAttributeValue_fragmented(obj_id, attr, allAsBinary)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    # second call to get the attribute value
    rv = self.lib.C_GetAttributeValue(self.session, obj_id, valTemplate)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    res = []
    for x in range(len(attr)):
        # convert each raw attribute to the matching Python type,
        # unless the caller asked for raw binary
        if allAsBinary:
            res.append(valTemplate[x].GetBin())
        elif valTemplate[x].IsNum():
            res.append(valTemplate[x].GetNum())
        elif valTemplate[x].IsBool():
            res.append(valTemplate[x].GetBool())
        elif valTemplate[x].IsString():
            res.append(valTemplate[x].GetString())
        elif valTemplate[x].IsBin():
            res.append(valTemplate[x].GetBin())
        else:
            raise PyKCS11Error(-2)
    return res
C_SeedRandom
def seedRandom(self, seed):
    """ C_SeedRandom

    :param seed: seed material
    :type seed: iterable
    """
    rv = self.lib.C_SeedRandom(self.session, ckbytelist(seed))
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
C_GenerateRandom
def generateRandom(self, size=16):
    """ C_GenerateRandom

    :param size: number of random bytes to get
    :type size: integer

    :note: the returned value is an instance of :class:`ckbytelist`.
        You can easily convert it to a binary string with
        ``bytes(random)`` or, for Python 2,
        ``''.join(chr(i) for i in random)``
    """
    # pre-size the output buffer; the token overwrites it in place
    buf = ckbytelist([0] * size)
    rv = self.lib.C_GenerateRandom(self.session, buf)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return buf
Makes a QR image using qrcode as qrc. See the documentation for the qrcode package (https://pypi.python.org/pypi/qrcode) for more info.
def qrcode(
    cls,
    data,
    mode="base64",
    version=None,
    error_correction="L",
    box_size=10,
    border=0,
    fit=True,
    fill_color="black",
    back_color="white",
    **kwargs
):
    """Build a QR code image for *data* using the qrcode package.

    See the qrcode documentation (https://pypi.python.org/pypi/qrcode)
    for more info.

    :param data: String data.
    :param mode: Output mode, [base64|raw].
    :param version: The size of the QR Code (1-40).
    :param error_correction: The error correction used for the QR Code.
    :param box_size: The number of pixels for each "box" of the QR code.
    :param border: The number of box for border.
    :param fit: If `True`, find the best fit for the data.
    :param fill_color: Frontend color.
    :param back_color: Background color.
    :param icon_img: Small icon image name or url.
    :param factor: Resize for icon image (default: 4, one-fourth of QRCode)
    :param icon_box: Icon image position [left, top] (default: image center)
    """
    def normalize(color):
        # keep known color names and explicit '#...' values as-is;
        # otherwise assume a bare hex string and prefix '#'
        if color.lower() in cls.color or color.startswith("#"):
            return color
        return "#" + color

    maker = qrc.QRCode(
        version=version,
        error_correction=cls.correction_levels[error_correction],
        box_size=box_size,
        border=border,
    )
    maker.add_data(data)
    maker.make(fit=fit)

    # render the QR code into an in-memory PNG
    buffer = BytesIO()
    img = maker.make_image(back_color=normalize(back_color),
                           fill_color=normalize(fill_color))
    img = img.convert("RGBA")
    img = cls._insert_img(img, **kwargs)
    img.save(buffer, "PNG")
    buffer.seek(0)

    if mode == "base64":
        return u"data:image/png;base64," + base64.b64encode(
            buffer.getvalue()).decode("ascii")
    elif mode == "raw":
        return buffer
Inserts a small icon to QR Code image
def _insert_img(qr_img, icon_img=None, factor=4, icon_box=None, static_dir=None):
    """Insert a small icon into a QR code image.

    :param qr_img: the image holding the rendered QR code
    :param icon_img: icon file name/path, or an http/https/ftp URL
    :param factor: cap the icon at 1/factor of the QR code size
    :param icon_box: explicit (left, top) paste position; default is center
    :param static_dir: optional directory to resolve *icon_img* against
    :return: the QR image, with the icon pasted in when it could be loaded
    """
    img_w, img_h = qr_img.size
    # maximum icon dimensions: 1/factor of the QR code
    size_w = int(img_w) / int(factor)
    size_h = int(img_h) / int(factor)
    try:
        # load icon from current dir
        icon_fp = os.path.join(icon_img)
        if static_dir:
            # load icon from app's static dir
            icon_fp = os.path.join(static_dir, icon_img)
        if icon_img.split("://")[0] in ["http", "https", "ftp"]:
            # download icon
            icon_fp = BytesIO(urlopen(icon_img).read())
        icon = Image.open(icon_fp)
    except Exception:
        # Best effort: any failure to locate/open the icon (including
        # icon_img=None) leaves the QR code unchanged. The previous bare
        # `except:` also swallowed SystemExit/KeyboardInterrupt.
        return qr_img
    icon_w, icon_h = icon.size
    # never let the icon cover more than 1/factor of the QR code
    icon_w = size_w if icon_w > size_w else icon_w
    icon_h = size_h if icon_h > size_h else icon_h
    icon = icon.resize((int(icon_w), int(icon_h)), Image.ANTIALIAS)
    icon = icon.convert("RGBA")
    # default position: centered on the QR code
    left = int((img_w - icon_w) / 2)
    top = int((img_h - icon_h) / 2)
    icon_box = (int(icon_box[0]), int(icon_box[1])) if icon_box else (left, top)
    qr_img.paste(im=icon, box=icon_box, mask=icon)
    return qr_img
Export gene panels to. bed like format. Specify any number of panels on the command line
def panel(context, panel, build, bed, version):
    """Export gene panels to .bed like format.

    Specify any number of panels on the command line
    """
    LOG.info("Running scout export panel")
    adapter = context.obj['adapter']

    if not panel:
        LOG.warning("Please provide at least one gene panel")
        context.abort()

    LOG.info("Exporting panels: {}".format(', '.join(panel)))
    if bed:
        if version:
            # export_panels expects a list of versions
            version = [version]
        lines = export_panels(
            adapter=adapter,
            panels=panel,
            versions=version,
            build=build,
        )
    else:
        lines = export_gene_panels(
            adapter=adapter,
            panels=panel,
            version=version,
        )
    for line in lines:
        click.echo(line)
Given a weekday and a date, increments the date until its weekday matches the given weekday; that date is then returned.
def _first_weekday(weekday, d): """ Given a weekday and a date, will increment the date until it's weekday matches that of the given weekday, then that date is returned. """ while weekday != d.weekday(): d += timedelta(days=1) return d
If a repeating chunk event exists in a particular month but didn't start that month, it may be necessary to fill out the first week. Five cases: 1. the event starts repeating on the 1st day of the month; 2. it starts repeating past the 1st day of the month; 3. it starts repeating before the 1st day of the month and continues through it; 4. it starts repeating before the 1st day of the month and finishes repeating before it; 5. it starts repeating before the 1st day of the month and finishes on it.
def _chunk_fill_out_first_week(year, month, count, event, diff):
    """
    If a repeating chunk event exists in a particular month, but didn't start
    that month, it may be neccessary to fill out the first week.
    Five cases:
    1. event starts repeating on the 1st day of month
    2. event starts repeating past the 1st day of month
    3. event starts repeating before the 1st day of month, and continues
    through it.
    4. event starts repeating before the 1st day of month, and finishes
    repeating before it.
    5. event starts repeating before the 1st day of month, and finishes on it.

    :param count: defaultdict mapping day-of-month -> occurrence tuples,
        updated in place and returned
    :param diff: length (in days) of the event chunk
    """
    first_of_the_month = date(year, month, 1)
    # first day this month that lands on the event's *end* weekday...
    d = _first_weekday(event.l_end_date.weekday(), first_of_the_month)
    # ...and on its *start* weekday
    d2 = _first_weekday(event.l_start_date.weekday(), first_of_the_month)
    diff_weekdays = d.day - d2.day
    day = first_of_the_month.day
    start = event.l_start_date.weekday()
    first = first_of_the_month.weekday()
    if start == first or diff_weekdays == diff:
        # case 1 (or the first week is already fully covered): no fill needed
        return count
    elif start > first:
        # chunk wraps around into this month's first days
        end = event.l_end_date.weekday()
        diff = end - first + 1
    elif start < first:
        # fill up to the first occurrence of the end weekday
        diff = d.day
    for i in xrange(diff):
        # stop once we pass the repetition end date
        if event.end_repeat is not None and \
           date(year, month, day) >= event.end_repeat:
            break
        count[day].append((event.title, event.pk))
        day += 1
    return count
Add num to the day and count that day until we reach end_repeat or until we re outside of the current month counting the days as we go along.
def repeat(self, day=None):
    """
    Add 'num' to the day and count that day until we reach
    end_repeat, or until we're outside of the current month,
    counting the days as we go along.

    :param day: day of month to start from; defaults to ``self.day``
    """
    if day is None:
        day = self.day
    try:
        d = date(self.year, self.month, day)
    except ValueError:  # out of range day
        # NOTE(review): this early exit returns self.count while the
        # normal path returns None implicitly; callers appear to read
        # self.count directly (see repeat_biweekly), so the returned
        # value looks unused -- confirm before relying on it.
        return self.count
    # count the starting day itself only when the caller asked for it
    if self.count_first and d <= self.end_repeat:
        self.count_it(d.day)
    d += timedelta(days=self.num)
    if self.end_on is not None:
        # additionally stop once we pass the 'end_on' day of the month
        while d.month == self.month and \
                d <= self.end_repeat and \
                d.day <= self.end_on:
            self.count_it(d.day)
            d += timedelta(days=self.num)
    else:
        while d.month == self.month and d <= self.end_repeat:
            self.count_it(d.day)
            d += timedelta(days=self.num)
Like self. repeat () but used to repeat every weekday.
def repeat_weekdays(self):
    """
    Like self.repeat(), but counts only weekday (Mon-Fri) occurrences,
    stepping one day at a time.
    """
    try:
        current = date(self.year, self.month, self.day)
    except ValueError:  # out of range day
        return self.count
    # weekday() < 5 excludes Saturday (5) and Sunday (6)
    if self.count_first and current <= self.end_repeat \
            and current.weekday() < 5:
        self.count_it(current.day)
    current += timedelta(days=1)
    while current.month == self.month and current <= self.end_repeat:
        if current.weekday() < 5:
            self.count_it(current.day)
        current += timedelta(days=1)
Starts from the 'start' day and counts backwards until the 'end' day. 'start' should be >= 'end'; if they are equal, does nothing. If a day falls outside of end_repeat, it won't be counted.
def repeat_reverse(self, start, end):
    """
    Starts from 'start' day and counts backwards until 'end' day.
    'start' should be >= 'end'. If it's equal to, does nothing.
    If a day falls outside of end_repeat, it won't be counted.

    :param start: day of month to begin counting from (inclusive)
    :param end: day of month to stop at (inclusive)
    """
    day = start
    diff = start - end
    try:
        if date(self.year, self.month, day) <= self.end_repeat:
            self.count_it(day)
    # a value error likely means the event runs past the end of the month,
    # like an event that ends on the 31st, but the month doesn't have that
    # many days. Ignore it b/c the dates won't be added to calendar anyway
    except ValueError:
        pass
    for i in xrange(diff):
        day -= 1
        try:
            if date(self.year, self.month, day) <= self.end_repeat:
                self.count_it(day)
        except ValueError:
            # same rationale as above: skip nonexistent days
            pass
This function is unique because it creates an empty defaultdict, adds in the event occurrences by creating an instance of Repeater, then returns the defaultdict, likely to be merged into the 'main' defaultdict (the one holding all event occurrences for this month).
def repeat_biweekly(self):
    """
    This function is unique b/c it creates an empty defaultdict, adds
    in the event occurrences by creating an instance of Repeater, then
    returns the defaultdict, likely to be merged into the 'main'
    defaultdict (the one holding all event occurrences for this month).

    :return: defaultdict mapping day-of-month -> occurrences
    """
    mycount = defaultdict(list)
    d = self.event.l_start_date
    # advance two weeks at a time until we land inside (year, month).
    # NOTE(review): if (year, month) lies *before* l_start_date this
    # loop never terminates -- presumably callers only request months
    # on or after the event start; confirm.
    while d.year != self.year or d.month != self.month:
        d += timedelta(days=14)
    r = self.__class__(
        mycount, self.year, self.month, d.day, self.event.end_repeat,
        self.event, num=self.num, count_first=True
    )
    r.repeat()
    if self.event.is_chunk() and r.count:
        # repeat the chunk's extra days starting from the earliest hit
        r.day = min(r.count)
        r.repeat_chunk(self.event.start_end_diff)
    return r.count
Events that repeat every year should be shown every year on the same date they started e. g. an event that starts on March 23rd would appear on March 23rd every year it is scheduled to repeat. If the event is a chunk event hand it over to _repeat_chunk ().
def repeat_it(self):
    """
    Count events that repeat yearly: they appear on the same date they
    started, e.g. an event that starts on March 23rd shows on March
    23rd of every year it is scheduled to repeat. Chunk events are
    handed over to _repeat_chunk().
    """
    start = self.event.l_start_date
    # The start day is already counted for the start year, so only
    # count it when we are in the start month of a *different* year.
    if start.month == self.month and start.year != self.year:
        self.count_it(start.day)
    # If we're in the same mo & yr as l_start_date,
    # should already be filled in
    if self.event.is_chunk() and \
            not self.event.starts_same_year_month_as(self.year, self.month):
        self._repeat_chunk()
    return self.count
Events that repeat every month should be shown every month on the same date they started e. g. an event that starts on the 23rd would appear on the 23rd every month it is scheduled to repeat.
def repeat_it(self):
    """
    Count events that repeat monthly: they show up on the same
    day-of-month as their start date, e.g. an event that starts on the
    23rd appears on the 23rd of every month it is scheduled to repeat.
    """
    start_day = self.event.l_start_date.day
    # count unless we are in the exact start month *and* start year
    # (that month is assumed to be filled in already)
    if not self.event.starts_same_month_as(self.month) or \
            self.event.starts_same_month_not_year_as(self.month, self.year):
        self.count_it(start_day)
    if self.event.is_chunk():
        self._repeat_chunk()
    return self.count
Created to take some of the load off of _handle_weekly_repeat_out
def _biweekly_helper(self):
    """Created to take some of the load off of _handle_weekly_repeat_out"""
    self.num = 14
    mycount = self.repeat_biweekly()
    if not mycount:
        return
    # if the chunk's first counted day is past the first week, the
    # first week may still need to be filled out
    if self.event.is_chunk() and min(mycount) not in xrange(1, 8):
        mycount = _chunk_fill_out_first_week(
            self.year,
            self.month,
            mycount,
            self.event,
            diff=self.event.start_end_diff,
        )
    # merge the generated occurrences into the main counter
    for day, entries in mycount.items():
        self.count[day].extend(entries)
Handles repeating an event weekly ( or biweekly ) if the current year and month are outside of its start year and month. It takes care of cases 3 and 4 in _handle_weekly_repeat_in () comments.
def _handle_weekly_repeat_out(self):
    """
    Handles repeating an event weekly (or biweekly) if the current
    year and month are outside of its start year and month. It takes
    care of cases 3 and 4 in _handle_weekly_repeat_in() comments.
    """
    # first day of this month that falls on the event's start weekday
    start_d = _first_weekday(
        self.event.l_start_date.weekday(),
        date(self.year, self.month, 1)
    )
    self.day = start_d.day
    self.count_first = True
    if self.event.repeats('BIWEEKLY'):
        self._biweekly_helper()
    elif self.event.repeats('WEEKLY'):
        # Note count_first=True b/c although the start date isn't this
        # month, the event does begin repeating this month and start_date
        # has not yet been counted.
        # Also note we start from start_d.day and not
        # event.l_start_date.day
        self.repeat()
        if self.event.is_chunk():
            diff = self.event.start_end_diff
            self.count = _chunk_fill_out_first_week(
                self.year, self.month, self.count, self.event, diff
            )
            for i in xrange(diff):
                # count the chunk days, then repeat them
                self.day = start_d.day + i + 1
                self.repeat()
Handles repeating both weekly and biweekly events if the current year and month are inside it s l_start_date and l_end_date. Four possibilites: 1. The event starts this month and ends repeating this month. 2. The event starts this month and doesn t finish repeating this month. 3. The event didn t start this month but ends repeating this month. 4. The event didn t start this month and doesn t end repeating this month.
def _handle_weekly_repeat_in(self):
    """
    Handles repeating both weekly and biweekly events, if the current
    year and month are inside it's l_start_date and l_end_date.
    Four possibilites:
    1. The event starts this month and ends repeating this month.
    2. The event starts this month and doesn't finish repeating this
    month.
    3. The event didn't start this month but ends repeating this month.
    4. The event didn't start this month and doesn't end repeating this
    month.
    """
    self.day = self.event.l_start_date.day
    self.count_first = False
    # step size per repeat flavour (days)
    repeats = {'WEEKLY': 7, 'BIWEEKLY': 14}
    if self.event.starts_same_year_month_as(self.year, self.month):
        # This takes care of 1 and 2 above.
        # Note that 'count' isn't incremented before adding a week (in
        # Repeater.repeat()), b/c it's assumed that l_start_date
        # was already counted.
        # NOTE(review): after this loop self.num retains the step of
        # whichever dict entry was iterated last, which depends on dict
        # ordering -- confirm nothing downstream reads self.num.
        for repeat, num in repeats.items():
            self.num = num
            if self.event.repeats(repeat):
                self.repeat()
                if self.event.is_chunk():
                    self.repeat_chunk(diff=self.event.start_end_diff)
This handles either a non - repeating event chunk or the first month of a repeating event chunk.
def _handle_single_chunk(self, event):
    """
    This handles either a non-repeating event chunk, or the first
    month of a repeating event chunk.

    :param event: the chunk event whose days should be counted
    """
    if not event.starts_same_month_as(self.month) and not \
            event.repeats('NEVER'):
        # no repeating chunk events if we're not in it's start month
        return
    # add the events into an empty defaultdict. This is better than passing
    # in self.count, which we don't want to make another copy of because it
    # could be very large.
    mycount = defaultdict(list)
    r = Repeater(
        mycount, self.year, self.month, day=event.l_start_date.day,
        end_repeat=event.end_repeat, event=event, count_first=True,
        end_on=event.l_end_date.day, num=1
    )
    if event.starts_same_month_as(self.month):
        if not event.ends_same_month_as(self.month):
            # The chunk event starts this month,
            # but does NOT end this month
            r.end_on = None
    else:
        # event chunks can be maximum of 7 days, so if an event chunk
        # didn't start this month, we know it will end this month.
        r.day = 1
    r.repeat()
    # now we add in the events we generated to self.count
    for k, v in r.count.items():
        self.count[k].extend(v)
Load a manually curated gene panel into scout Args: panel_path ( str ): path to gene panel file adapter ( scout. adapter. MongoAdapter ) date ( str ): date of gene panel on format 2017 - 12 - 24 display_name ( str ) version ( float ) panel_type ( str ) panel_id ( str ) institute ( str )
def load_panel(panel_path, adapter, date=None, display_name=None, version=None,
               panel_type=None, panel_id=None, institute=None):
    """Load a manually curated gene panel into scout

    Args:
        panel_path(str): path to gene panel file
        adapter(scout.adapter.MongoAdapter)
        date(str): date of gene panel on format 2017-12-24
        display_name(str)
        version(float)
        panel_type(str)
        panel_id(str)
        institute(str)

    Raises:
        SyntaxError: if the panel metadata is incomplete, the institute is
            unknown, or the same panel version is already loaded
    """
    panel_lines = get_file_handle(panel_path)
    # This will parse panel metadata if included in panel file
    panel_info = get_panel_info(
        panel_lines=panel_lines,
        panel_id=panel_id,
        institute=institute,
        version=version,
        date=date,
        display_name=display_name
    )

    version = None
    if panel_info.get('version'):
        version = float(panel_info['version'])

    panel_id = panel_info['panel_id']
    display_name = panel_info['display_name'] or panel_id
    institute = panel_info['institute']
    date = panel_info['date']

    if not institute:
        raise SyntaxError("A Panel has to belong to a institute")

    # Check if institute exists in database
    if not adapter.institute(institute):
        raise SyntaxError("Institute {0} does not exist in database".format(institute))

    if not panel_id:
        raise SyntaxError("A Panel has to have a panel id")

    if version:
        existing_panel = adapter.gene_panel(panel_id, version)
    else:
        # Assuming version 1.0
        existing_panel = adapter.gene_panel(panel_id)
        version = 1.0
        LOG.info("Set version to %s", version)

    if existing_panel:
        LOG.info("found existing panel")
        if version == existing_panel['version']:
            LOG.warning("Panel with same version exists in database")
            LOG.info("Reload with updated version")
            # give the error a message instead of raising it empty
            raise SyntaxError("Panel {0} version {1} already loaded".format(
                panel_id, version))
        display_name = display_name or existing_panel['display_name']
        institute = institute or existing_panel['institute']

    parsed_panel = parse_gene_panel(
        path=panel_path,
        institute=institute,
        panel_type=panel_type,
        date=date,
        version=version,
        panel_id=panel_id,
        display_name=display_name,
    )
    # no try/except wrapper: a bare re-raise added nothing but noise
    adapter.load_panel(parsed_panel=parsed_panel)
Load PanelApp panels into scout database If no panel_id load all PanelApp panels Args: adapter ( scout. adapter. MongoAdapter ) panel_id ( str ): The panel app panel id
def load_panel_app(adapter, panel_id=None, institute='cust000'):
    """Load PanelApp panels into scout database

    If no panel_id load all PanelApp panels

    Args:
        adapter(scout.adapter.MongoAdapter)
        panel_id(str): The panel app panel id
        institute(str): institute the panels are loaded for
    """
    base_url = 'https://panelapp.genomicsengland.co.uk/WebServices/{0}/'

    hgnc_map = adapter.genes_by_alias()

    if panel_id:
        panel_ids = [panel_id]
    else:
        # no panel requested: fetch the full list from the web service
        LOG.info("Fetching all panel app panels")
        data = get_request(base_url.format('list_panels'))
        json_lines = json.loads(data)
        panel_ids = [panel_info['Panel_Id'] for panel_info in json_lines['result']]

    for panel_id in panel_ids:
        panel_data = get_request(base_url.format('get_panel') + panel_id)

        parsed_panel = parse_panel_app_panel(
            panel_info=json.loads(panel_data)['result'],
            hgnc_map=hgnc_map,
            institute=institute
        )
        parsed_panel['panel_id'] = panel_id

        if len(parsed_panel['genes']) == 0:
            LOG.warning("Panel {} is missing genes. Skipping.".format(parsed_panel['display_name']))
            continue

        # no try/except wrapper: a bare re-raise added nothing but noise
        adapter.load_panel(parsed_panel=parsed_panel)
Export causative variants for a collaborator
def export_variants(adapter, collaborator, document_id=None, case_id=None):
    """Export causative variants for a collaborator

    Args:
        adapter(MongoAdapter)
        collaborator(str)
        document_id(str): Search for a specific variant
        case_id(str): Search causative variants for a case

    Yields:
        variant_obj(scout.Models.Variant): Variants marked as causative
            ordered by position.
    """
    if document_id:
        # a specific variant was requested: no collection, no sorting
        yield adapter.variant(document_id)
        return

    variant_ids = adapter.get_causatives(
        institute_id=collaborator,
        case_id=case_id
    )

    # collect (chrom_int, position, variant) triples so they can be sorted
    sortable = []
    ##TODO add check so that same variant is not included more than once
    for variant_id in variant_ids:
        variant_obj = adapter.variant(variant_id)
        chrom = variant_obj['chromosome']
        # chromosomes are mapped to integers to get a sane sort order
        chrom_int = CHROMOSOME_INTEGERS.get(chrom)
        if not chrom_int:
            LOG.info("Unknown chromosome %s", chrom)
            continue
        sortable.append((chrom_int, variant_obj['position'], variant_obj))

    sortable.sort(key=lambda item: (item[0], item[1]))
    for _, _, variant_obj in sortable:
        yield variant_obj
Create the lines for an excel file with verified variants for an institute
def export_verified_variants(aggregate_variants, unique_callers):
    """Create the lines for an excel file with verified variants for an institute

    Args:
        aggregate_variants(list): a list of variants with aggregates case data
        unique_callers(set): a unique list of available callers

    Returns:
        document_lines(list): list of lines to include in the document
    """
    document_lines = []
    for variant in aggregate_variants:
        # one document line is produced per sample of the variant
        # (the unused `samples = []` accumulator was removed)
        for sample in variant['samples']:
            line = []
            # line elements correspond to constants.variants_export.VERIFIED_VARIANTS_HEADER
            line.append(variant['institute'])
            line.append(variant['_id'])  # variant database ID
            line.append(variant['category'])
            line.append(variant['variant_type'])
            line.append(variant['display_name'][:30])  # variant display name
            # Build local link to variant:
            case_name = variant['case_obj']['display_name']  # case display name
            local_link = '/'.join([
                '',
                variant['institute'],
                case_name,
                variant['_id']
            ])
            line.append(local_link)
            line.append(variant.get('validation'))
            line.append(case_name)
            case_individual = next(ind for ind in variant['case_obj']['individuals']
                                   if ind['individual_id'] == sample['sample_id'])
            if case_individual['phenotype'] == 2:
                # label sample as affected
                line.append(' '.join([sample.get('display_name'), '(A)']))
            else:
                line.append(sample.get('display_name'))
            # position
            line.append(''.join(['chr', variant['chromosome'], ':',
                                 str(variant['position'])]))
            # change
            line.append('>'.join([variant.get('reference')[:10],
                                  variant.get('alternative')[:10]]))
            genes = []
            prot_effect = []
            funct_anno = []
            for gene in variant.get('genes'):
                # this will be a unique long field in the document
                genes.append(gene.get('hgnc_symbol', ''))
                funct_anno.append(gene.get('functional_annotation'))
                for transcript in gene.get('transcripts'):
                    if transcript.get('is_canonical') and transcript.get('protein_sequence_name'):
                        prot_effect.append(urllib.parse.unquote(
                            transcript.get('protein_sequence_name')))
            line.append(','.join(prot_effect))
            line.append(','.join(funct_anno))
            line.append(','.join(genes))
            line.append(variant.get('rank_score'))
            line.append(variant.get('cadd_score'))
            line.append(sample.get('genotype_call'))
            line.append(sample['allele_depths'][0])
            line.append(sample['allele_depths'][1])
            line.append(sample['genotype_quality'])
            # Set callers values. One cell per caller, leave blank if not applicable
            for caller in unique_callers:
                if variant.get(caller):
                    line.append(variant.get(caller))
                else:
                    line.append('-')
            document_lines.append(line)
    return document_lines
Export mitochondrial variants for a case to create a MT excel report
def export_mt_variants(variants, sample_id):
    """Export mitochondrial variants for a case to create a MT excel report

    Args:
        variants(list): all MT variants for a case, sorted by position
        sample_id(str) : the id of a sample within the case

    Returns:
        document_lines(list): list of lines to include in the document
    """
    document_lines = []
    for variant in variants:
        pos = variant.get('position')
        change = variant.get('reference') + '>' + variant.get('alternative')
        # collect gene symbols and canonical protein changes for this variant
        hgnc_symbols = []
        protein_changes = []
        for gene in variant.get('genes'):
            hgnc_symbols.append(gene.get('hgnc_symbol', ''))
            for transcript in gene.get('transcripts'):
                if transcript.get('is_canonical') and transcript.get('protein_sequence_name'):
                    protein_changes.append(
                        urllib.parse.unquote(transcript.get('protein_sequence_name')))
        # allele depths for the requested sample (blank when the sample is absent)
        ref_depth = ''
        alt_depth = ''
        for sample in variant['samples']:
            if sample.get('sample_id') == sample_id:
                ref_depth = sample['allele_depths'][0]
                alt_depth = sample['allele_depths'][1]
        document_lines.append([
            pos,
            change,
            str(pos) + change,
            ','.join(protein_changes),
            ','.join(hgnc_symbols),
            ref_depth,
            alt_depth,
        ])
    return document_lines
Update a user in the database
def user(context, user_id, update_role, add_institute, remove_admin, remove_institute):
    """Update a user in the database

    Adds/removes roles and institute access for an existing user, then
    persists the modified user object through the adapter.
    """
    adapter = context.obj['adapter']
    user_obj = adapter.user(user_id)
    if not user_obj:
        LOG.warning("User %s could not be found", user_id)
        context.abort()

    # roles are handled as a set so adding/removing stays idempotent
    existing_roles = set(user_obj.get('roles', []))
    if update_role:
        if update_role not in existing_roles:
            existing_roles.add(update_role)
            LOG.info("Adding role %s to user", update_role)
        else:
            LOG.warning("User already have role %s", update_role)
    if remove_admin:
        try:
            existing_roles.remove('admin')
            LOG.info("Removing admin rights from user %s", user_id)
        except KeyError:
            LOG.info("User %s does not have admin rights", user_id)
    user_obj['roles'] = list(existing_roles)

    existing_institutes = set(user_obj.get('institutes', []))
    for institute_id in add_institute:
        # only grant access to institutes that actually exist in the database
        institute_obj = adapter.institute(institute_id)
        if not institute_obj:
            LOG.warning("Institute %s could not be found", institute_id)
        else:
            existing_institutes.add(institute_id)
            LOG.info("Adding institute %s to user", institute_id)
    for institute_id in remove_institute:
        try:
            existing_institutes.remove(institute_id)
            LOG.info("Removing institute %s from user", institute_id)
        except KeyError:
            LOG.info("User does not have access to institute %s", institute_id)
    user_obj['institutes'] = list(existing_institutes)

    adapter.update_user(user_obj)
Display a list of SNV variants.
def variants(institute_id, case_name):
    """Display a list of SNV variants.

    Handles the filter form (including the predefined 'Clinical Filter'),
    optional gene panel file upload, HGNC symbol validation and CSV export.

    Args:
        institute_id(str): id of the institute
        case_name(str): display name of the case

    Returns:
        dict: template context, or a csv Response when export is requested
    """
    page = int(request.form.get('page', 1))
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    variant_type = request.args.get('variant_type', 'clinical')

    # Update filter settings if Clinical Filter was requested
    default_panels = []
    for panel in case_obj['panels']:
        if panel.get('is_default'):
            default_panels.append(panel['panel_name'])
    if bool(request.form.get('clinical_filter')):
        clinical_filter = MultiDict({
            'variant_type': 'clinical',
            'region_annotations': ['exonic', 'splicing'],
            'functional_annotations': SEVERE_SO_TERMS,
            'clinsig': [4, 5],
            'clinsig_confident_always_returned': True,
            'gnomad_frequency': str(institute_obj['frequency_cutoff']),
            'gene_panels': default_panels
        })

    if request.method == "POST":
        if bool(request.form.get('clinical_filter')):
            form = FiltersForm(clinical_filter)
            form.csrf_token = request.args.get('csrf_token')
        else:
            form = FiltersForm(request.form)
    else:
        form = FiltersForm(request.args)

    # populate available panel choices
    available_panels = case_obj.get('panels', []) + [
        {'panel_name': 'hpo', 'display_name': 'HPO'}]
    panel_choices = [(panel['panel_name'], panel['display_name'])
                     for panel in available_panels]
    form.gene_panels.choices = panel_choices

    # upload gene panel if symbol file exists
    if request.files:
        symbol_file = request.files[form.symbol_file.name]
        if symbol_file and symbol_file.filename != '':
            log.debug("Upload file request files: {0}".format(request.files.to_dict()))
            try:
                stream = io.StringIO(symbol_file.stream.read().decode('utf-8'), newline=None)
            except UnicodeDecodeError:
                flash("Only text files are supported!", 'warning')
                return redirect(request.referrer)
            hgnc_symbols_set = set(form.hgnc_symbols.data)
            log.debug("Symbols prior to upload: {0}".format(hgnc_symbols_set))
            new_hgnc_symbols = controllers.upload_panel(store, institute_id, case_name, stream)
            hgnc_symbols_set.update(new_hgnc_symbols)
            form.hgnc_symbols.data = hgnc_symbols_set
            # reset gene panels
            form.gene_panels.data = ''

    # update status of case if visited for the first time
    if case_obj['status'] == 'inactive' and not current_user.is_admin:
        flash('You just activated this case!', 'info')
        user_obj = store.user(current_user.email)
        case_link = url_for('cases.case', institute_id=institute_obj['_id'],
                            case_name=case_obj['display_name'])
        store.update_status(institute_obj, case_obj, user_obj, 'active', case_link)

    # check if supplied gene symbols exist
    hgnc_symbols = []
    non_clinical_symbols = []
    not_found_symbols = []
    not_found_ids = []
    if form.hgnc_symbols.data and len(form.hgnc_symbols.data) > 0:
        is_clinical = form.data.get('variant_type', 'clinical') == 'clinical'
        clinical_symbols = store.clinical_symbols(case_obj) if is_clinical else None
        for hgnc_symbol in form.hgnc_symbols.data:
            if hgnc_symbol.isdigit():
                # numeric input is interpreted as an HGNC id
                hgnc_gene = store.hgnc_gene(int(hgnc_symbol))
                if hgnc_gene is None:
                    not_found_ids.append(hgnc_symbol)
                else:
                    hgnc_symbols.append(hgnc_gene['hgnc_symbol'])
            elif store.hgnc_genes(hgnc_symbol).count() == 0:
                not_found_symbols.append(hgnc_symbol)
            elif is_clinical and (hgnc_symbol not in clinical_symbols):
                non_clinical_symbols.append(hgnc_symbol)
            else:
                hgnc_symbols.append(hgnc_symbol)
    if not_found_ids:
        flash("HGNC id not found: {}".format(", ".join(not_found_ids)), 'warning')
    if not_found_symbols:
        flash("HGNC symbol not found: {}".format(", ".join(not_found_symbols)), 'warning')
    if non_clinical_symbols:
        flash("Gene not included in clinical list: {}".format(", ".join(non_clinical_symbols)), 'warning')
    form.hgnc_symbols.data = hgnc_symbols

    # handle HPO gene list separately
    if form.data['gene_panels'] == ['hpo']:
        hpo_symbols = list(set(term_obj['hgnc_symbol']
                               for term_obj in case_obj['dynamic_gene_list']))
        form.hgnc_symbols.data = hpo_symbols

    variants_query = store.variants(case_obj['_id'], query=form.data)

    if request.form.get('export'):
        document_header = controllers.variants_export_header(case_obj)
        if form.data['chrom'] == 'MT':
            # Return all MT variants
            export_lines = controllers.variant_export_lines(store, case_obj, variants_query)
        else:
            # Return max 500 variants
            export_lines = controllers.variant_export_lines(store, case_obj,
                                                            variants_query.limit(500))

        def generate(header, lines):
            yield header + '\n'
            for line in lines:
                yield line + '\n'

        headers = Headers()
        headers.add('Content-Disposition', 'attachment',
                    filename=str(case_obj['display_name']) + '-filtered_variants.csv')
        # return a csv with the exported variants
        return Response(generate(",".join(document_header), export_lines),
                        mimetype='text/csv', headers=headers)

    data = controllers.variants(store, institute_obj, case_obj, variants_query, page)
    return dict(institute=institute_obj, case=case_obj, form=form,
                severe_so_terms=SEVERE_SO_TERMS, page=page, **data)
Display a specific SNV variant.
def variant(institute_id, case_name, variant_id):
    """Display a specific SNV variant."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    log.debug("Variants view requesting data for variant {}".format(variant_id))
    variant_data = controllers.variant(store, institute_obj, case_obj, variant_id=variant_id)
    # bail out early when the controller could not build the variant data
    if variant_data is None:
        log.warning("An error occurred: variants view requesting data for variant {}".format(variant_id))
        flash('An error occurred while retrieving variant object', 'danger')
        return redirect(request.referrer)
    # attach observation counts only when a loqusdb instance is configured
    if current_app.config.get('LOQUSDB_SETTINGS'):
        variant_data['observations'] = controllers.observations(
            store, loqusdb, case_obj, variant_data['variant'])
    variant_data['cancer'] = request.args.get('cancer') == 'yes'
    return dict(institute=institute_obj, case=case_obj, **variant_data)
Display a list of STR variants.
def str_variants(institute_id, case_name):
    """Display a list of STR variants."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    page = int(request.args.get('page', 1))
    variant_type = request.args.get('variant_type', 'clinical')
    form = StrFiltersForm(request.args)
    # force the requested variant type into the filter query
    filter_query = form.data
    filter_query['variant_type'] = variant_type
    str_query = store.variants(case_obj['_id'], category='str', query=filter_query)
    template_data = controllers.str_variants(store, institute_obj, case_obj,
                                             str_query, page)
    return dict(institute=institute_obj, case=case_obj, variant_type=variant_type,
                form=form, page=page, **template_data)
Display a list of structural variants.
def sv_variants(institute_id, case_name):
    """Display a list of structural variants.

    Handles the SV filter form (including the predefined 'Clinical Filter')
    and CSV export of the filtered variants.

    Args:
        institute_id(str): id of the institute
        case_name(str): display name of the case

    Returns:
        dict: template context, or a csv Response when export is requested
    """
    page = int(request.form.get('page', 1))
    variant_type = request.args.get('variant_type', 'clinical')
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)

    # Update filter settings if Clinical Filter was requested
    default_panels = []
    for panel in case_obj['panels']:
        if panel.get('is_default'):
            default_panels.append(panel['panel_name'])
    if bool(request.form.get('clinical_filter')):
        clinical_filter = MultiDict({
            'variant_type': 'clinical',
            'region_annotations': ['exonic', 'splicing'],
            'functional_annotations': SEVERE_SO_TERMS,
            'thousand_genomes_frequency': str(institute_obj['frequency_cutoff']),
            'clingen_ngi': 10,
            'swegen': 10,
            'size': 100,
            'gene_panels': default_panels
        })

    if request.method == "POST":
        if bool(request.form.get('clinical_filter')):
            form = SvFiltersForm(clinical_filter)
            form.csrf_token = request.args.get('csrf_token')
        else:
            form = SvFiltersForm(request.form)
    else:
        form = SvFiltersForm(request.args)

    # populate available panel choices
    available_panels = case_obj.get('panels', []) + [
        {'panel_name': 'hpo', 'display_name': 'HPO'}]
    panel_choices = [(panel['panel_name'], panel['display_name'])
                     for panel in available_panels]
    form.gene_panels.choices = panel_choices

    # handle HPO gene list separately
    if form.data['gene_panels'] == ['hpo']:
        hpo_symbols = list(set(term_obj['hgnc_symbol']
                               for term_obj in case_obj['dynamic_gene_list']))
        form.hgnc_symbols.data = hpo_symbols

    # update status of case if visited for the first time
    if case_obj['status'] == 'inactive' and not current_user.is_admin:
        flash('You just activated this case!', 'info')
        user_obj = store.user(current_user.email)
        case_link = url_for('cases.case', institute_id=institute_obj['_id'],
                            case_name=case_obj['display_name'])
        store.update_status(institute_obj, case_obj, user_obj, 'active', case_link)

    variants_query = store.variants(case_obj['_id'], category='sv', query=form.data)

    # if variants should be exported
    if request.form.get('export'):
        document_header = controllers.variants_export_header(case_obj)
        # Return max 500 variants
        export_lines = controllers.variant_export_lines(store, case_obj,
                                                        variants_query.limit(500))

        def generate(header, lines):
            yield header + '\n'
            for line in lines:
                yield line + '\n'

        headers = Headers()
        headers.add('Content-Disposition', 'attachment',
                    filename=str(case_obj['display_name']) + '-filtered_sv-variants.csv')
        # return a csv with the exported variants
        return Response(generate(",".join(document_header), export_lines),
                        mimetype='text/csv', headers=headers)

    data = controllers.sv_variants(store, institute_obj, case_obj, variants_query, page)
    return dict(institute=institute_obj, case=case_obj, variant_type=variant_type,
                form=form, severe_so_terms=SEVERE_SO_TERMS, page=page, **data)