INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Get information about a case from archive.
def archive_info(database: Database, archive_case: dict) -> dict:
    """Get information about a case from archive.

    Extracts the manually curated parts of an archived case into a plain
    dict so it can be merged into a live scout case.

    Args:
        database: handle to the archive database; needs ``user`` and
            ``variant`` collections with a pymongo-style ``find_one``.
        archive_case: raw case document from the archive.

    Returns:
        dict with keys ``collaborators``, ``synopsis``, ``assignees``,
        ``suspects``, ``causatives``, ``phenotype_terms`` and
        ``phenotype_groups``.
    """
    data = {
        'collaborators': archive_case['collaborators'],
        'synopsis': archive_case.get('synopsis'),
        'assignees': [],
        'suspects': [],
        'causatives': [],
        'phenotype_terms': [],
        'phenotype_groups': [],
    }
    if archive_case.get('assignee'):
        archive_user = database.user.find_one({'_id': archive_case['assignee']})
        # BUG FIX: the original appended to the non-existent key 'assignee'
        # (KeyError at runtime); the list initialized above is 'assignees'.
        # Also guard against the user document being missing.
        if archive_user:
            data['assignees'].append(archive_user['email'])
    # Resolve suspected/causative variant ids into plain variant descriptions
    for key in ['suspects', 'causatives']:
        for variant_id in archive_case.get(key, []):
            archive_variant = database.variant.find_one({'_id': variant_id})
            data[key].append({
                'chromosome': archive_variant['chromosome'],
                'position': archive_variant['position'],
                'reference': archive_variant['reference'],
                'alternative': archive_variant['alternative'],
                'variant_type': archive_variant['variant_type'],
            })
    # Phenotype terms and groups are copied over with only id + feature
    for key in ['phenotype_terms', 'phenotype_groups']:
        for archive_term in archive_case.get(key, []):
            data[key].append({
                'phenotype_id': archive_term['phenotype_id'],
                'feature': archive_term['feature'],
            })
    return data
Migrate case information from archive.
def migrate_case(adapter: MongoAdapter, scout_case: dict, archive_data: dict):
    """Migrate case information from archive.

    Merges collaborators, assignees, suspected/causative variants, the
    synopsis and phenotype terms/groups from ``archive_data`` (as produced
    by ``archive_info``) into ``scout_case`` and persists the result.

    Args:
        adapter: scout MongoAdapter connected to the target database.
        scout_case: existing case document in the scout database (mutated).
        archive_data: curated information extracted from the archive.
    """
    # update collaborators
    collaborators = list(set(scout_case['collaborators'] + archive_data['collaborators']))
    if collaborators != scout_case['collaborators']:
        LOG.info(f"set collaborators: {', '.join(collaborators)}")
        scout_case['collaborators'] = collaborators
    # update assignees, but only if nobody is assigned already.
    # BUG FIX: archive_info exposes a list under 'assignees'; the original
    # read a non-existent singular 'assignee' key (KeyError).
    if len(scout_case.get('assignees', [])) == 0:
        assignees = []
        for assignee_email in archive_data.get('assignees', []):
            scout_user = adapter.user(assignee_email)
            if scout_user:
                assignees.append(assignee_email)
            else:
                LOG.warning(f"{assignee_email}: unable to find assigned user")
        if assignees:
            scout_case['assignees'] = assignees
    # add/update suspected/causative variants
    for key in ['suspects', 'causatives']:
        scout_case[key] = scout_case.get(key, [])
        for archive_variant in archive_data[key]:
            variant_id = get_variantid(archive_variant, scout_case['_id'])
            scout_variant = adapter.variant(variant_id)
            if scout_variant:
                if scout_variant['_id'] in scout_case[key]:
                    LOG.info(f"{scout_variant['_id']}: variant already in {key}")
                else:
                    LOG.info(f"{scout_variant['_id']}: add to {key}")
                    # BUG FIX: the original appended to scout_variant[key]
                    # (a KeyError on the variant document) instead of the
                    # case's list.
                    scout_case[key].append(scout_variant['_id'])
            else:
                # BUG FIX: scout_variant is None in this branch, so the
                # original f-string (scout_variant['_id']) raised TypeError;
                # log the id we looked up instead.
                LOG.warning(f"{variant_id}: unable to find variant ({key})")
                scout_case[key].append(variant_id)
    if not scout_case.get('synopsis'):
        # update synopsis only when the scout case has none
        scout_case['synopsis'] = archive_data['synopsis']
    scout_case['is_migrated'] = True
    adapter.case_collection.find_one_and_replace(
        {'_id': scout_case['_id']},
        scout_case,
    )
    # add/update phenotype groups/terms via the adapter so events are created
    scout_institute = adapter.institute(scout_case['owner'])
    scout_user = adapter.user('mans.magnusson@scilifelab.se')
    for key in ['phenotype_terms', 'phenotype_groups']:
        for archive_term in archive_data[key]:
            adapter.add_phenotype(
                institute=scout_institute,
                case=scout_case,
                user=scout_user,
                link=f"/{scout_case['owner']}/{scout_case['display_name']}",
                hpo_term=archive_term['phenotype_id'],
                is_group=key == 'phenotype_groups',
            )
Update all information that was manually annotated from an old instance.
def migrate(uri: str, archive_uri: str, case_id: str, dry: bool, force: bool):
    """Update all information that was manually annotated from a old instance."""
    scout_client = MongoClient(uri)
    scout_db_name = uri.rsplit('/', 1)[-1]
    scout_adapter = MongoAdapter(database=scout_client[scout_db_name])
    scout_case = scout_adapter.case(case_id)
    # Skip already migrated cases unless the caller forces a re-run
    if scout_case.get('is_migrated') and not force:
        print("case already migrated")
        return
    archive_client = MongoClient(archive_uri)
    archive_db = archive_client[archive_uri.rsplit('/', 1)[-1]]
    # The archive case is matched on owner + display name, not on _id
    archive_case = archive_db.case.find_one({
        'owner': scout_case['owner'],
        'display_name': scout_case['display_name'],
    })
    archive_data = archive_info(archive_db, archive_case)
    if dry:
        # Dry run: show what would be migrated without touching the database
        print(ruamel.yaml.safe_dump(archive_data))
    else:
        # actual migration is intentionally disabled for now
        # migrate_case(scout_adapter, scout_case, archive_data)
        pass
Upload research variants to cases
def research(context, case_id, institute, force):
    """Upload research variants to cases

    If a case is specified, all variants found for that case will be uploaded.

    If no cases are specified then all cases that have 'research_requested'
    will have their research variants uploaded
    """
    LOG.info("Running scout load research")
    adapter = context.obj['adapter']
    if case_id:
        if not institute:
            # There was an old way to create case ids so we need a special
            # case to handle this. Assume institute-case combo.
            splitted_case = case_id.split('-')
            # Check if first part is institute, then we know it is the old format
            if len(splitted_case) > 1:
                institute_obj = adapter.institute(splitted_case[0])
                if institute_obj:
                    institute = institute_obj['_id']
                    case_id = splitted_case[1]
        case_obj = adapter.case(institute_id=institute, case_id=case_id)
        if case_obj is None:
            LOG.warning("No matching case found")
            context.abort()
        else:
            case_objs = [case_obj]
    else:
        # Fetch all cases that have requested research
        case_objs = adapter.cases(research_requested=True)
    default_threshold = 8
    files = False
    for case_obj in case_objs:
        # ROBUSTNESS: .get() so cases without the flag do not raise KeyError
        if force or case_obj.get('research_requested'):
            # Test to upload research snvs
            if case_obj['vcf_files'].get('vcf_snv_research'):
                files = True
                adapter.delete_variants(case_id=case_obj['_id'],
                                        variant_type='research', category='snv')
                LOG.info("Load research SNV for: %s", case_obj['_id'])
                adapter.load_variants(
                    case_obj=case_obj,
                    variant_type='research',
                    category='snv',
                    rank_threshold=default_threshold,
                )
            # Test to upload research svs
            if case_obj['vcf_files'].get('vcf_sv_research'):
                files = True
                adapter.delete_variants(case_id=case_obj['_id'],
                                        variant_type='research', category='sv')
                LOG.info("Load research SV for: %s", case_obj['_id'])
                adapter.load_variants(
                    case_obj=case_obj,
                    variant_type='research',
                    category='sv',
                    rank_threshold=default_threshold,
                )
            # Test to upload research cancer variants
            if case_obj['vcf_files'].get('vcf_cancer_research'):
                files = True
                adapter.delete_variants(case_id=case_obj['_id'],
                                        variant_type='research', category='cancer')
                LOG.info("Load research cancer for: %s", case_obj['_id'])
                adapter.load_variants(
                    case_obj=case_obj,
                    variant_type='research',
                    category='cancer',
                    rank_threshold=default_threshold,
                )
            if not files:
                LOG.warning("No research files found for case %s", case_id)
                context.abort()
            # Mark the case as done with its research upload
            case_obj['is_research'] = True
            case_obj['research_requested'] = False
            adapter.update_case(case_obj)
        else:
            # BUG FIX: Logger.warn is deprecated; use warning()
            LOG.warning("research not requested, use '--force'")
Load genes and transcripts into the database. If no resources are provided the correct ones will be fetched. Args: adapter (scout.adapter.MongoAdapter); genes (dict): If genes are already parsed; ensembl_lines (iterable(str)): Lines formatted with ensembl gene information; hgnc_lines (iterable(str)): Lines with gene information from genenames.org; exac_lines (iterable(str)): Lines with pLi-score information from ExAC; mim2gene_lines (iterable(str)): Lines with map from omim id to gene symbol; genemap_lines (iterable(str)): Lines with information of omim entries; hpo_lines (iterable(str)): Lines with information about map from hpo terms to genes; transcripts_lines (iterable): iterable with ensembl transcript lines; build (str): What build to use. Defaults to '37'.
def load_hgnc(adapter, genes=None, ensembl_lines=None, hgnc_lines=None,
              exac_lines=None, mim2gene_lines=None, genemap_lines=None,
              hpo_lines=None, transcripts_lines=None, build='37',
              omim_api_key=''):
    """Load Genes and transcripts into the database

    If no resources are provided the correct ones will be fetched.

    Args:
        adapter(scout.adapter.MongoAdapter)
        genes(dict): If genes are already parsed
        ensembl_lines(iterable(str)): Lines formated with ensembl gene information
        hgnc_lines(iterable(str)): Lines with gene information from genenames.org
        exac_lines(iterable(str)): Lines with information pLi-scores from ExAC
        mim2gene_lines(iterable(str)): Lines with map from omim id to gene symbol
        genemap_lines(iterable(str)): Lines with information of omim entries
        hpo_lines(iterable(str)): Lines information about map from hpo terms to genes
        transcripts_lines(iterable): iterable with ensembl transcript lines
        build(str): What build to use. Defaults to '37'
        omim_api_key(str): API key used when OMIM resources must be fetched
    """
    gene_objs = load_hgnc_genes(
        adapter=adapter,
        genes=genes,
        ensembl_lines=ensembl_lines,
        hgnc_lines=hgnc_lines,
        exac_lines=exac_lines,
        mim2gene_lines=mim2gene_lines,
        genemap_lines=genemap_lines,
        hpo_lines=hpo_lines,
        build=build,
        omim_api_key=omim_api_key,
    )
    # Index the loaded genes on ensembl id so transcripts can be linked to them
    ensembl_genes = {gene_obj['ensembl_id']: gene_obj for gene_obj in gene_objs}
    # The return value was never used; call for its side effect of loading
    load_transcripts(
        adapter=adapter,
        transcripts_lines=transcripts_lines,
        build=build,
        ensembl_genes=ensembl_genes)
Load genes into the database link_genes will collect information from all the different sources and merge it into a dictionary with hgnc_id as key and gene information as values.
def load_hgnc_genes(adapter, genes=None, ensembl_lines=None, hgnc_lines=None,
                    exac_lines=None, mim2gene_lines=None, genemap_lines=None,
                    hpo_lines=None, build='37', omim_api_key=''):
    """Load genes into the database

    link_genes will collect information from all the different sources and
    merge it into a dictionary with hgnc_id as key and gene information as
    values.

    Args:
        adapter(scout.adapter.MongoAdapter)
        genes(dict): If genes are already parsed
        ensembl_lines(iterable(str)): Lines formated with ensembl gene information
        hgnc_lines(iterable(str)): Lines with gene information from genenames.org
        exac_lines(iterable(str)): Lines with information pLi-scores from ExAC
        mim2gene_lines(iterable(str)): Lines with map from omim id to gene symbol
        genemap_lines(iterable(str)): Lines with information of omim entries
        hpo_lines(iterable(str)): Lines information about map from hpo terms to genes
        build(str): What build to use. Defaults to '37'
        omim_api_key(str): Key used to fetch the OMIM files when not provided

    Returns:
        gene_objects(list): A list with all gene_objects that was loaded into database
    """
    gene_objects = list()
    if not genes:
        # Fetch the resources if not provided
        if ensembl_lines is None:
            ensembl_lines = fetch_ensembl_genes(build=build)
        hgnc_lines = hgnc_lines or fetch_hgnc()
        exac_lines = exac_lines or fetch_exac_constraint()
        # Both OMIM files are needed; fetching them requires an API key
        if not (mim2gene_lines and genemap_lines):
            if not omim_api_key:
                raise SyntaxError("Need to provide omim api key")
            mim_files = fetch_mim_files(omim_api_key, mim2genes=True, genemap2=True)
            mim2gene_lines = mim_files['mim2genes']
            genemap_lines = mim_files['genemap2']
        if not hpo_lines:
            hpo_files = fetch_hpo_files(hpogenes=True)
            hpo_lines = hpo_files['hpogenes']
        # Link the resources: merge all sources into one dict keyed on hgnc_id
        genes = link_genes(
            ensembl_lines=ensembl_lines,
            hgnc_lines=hgnc_lines,
            exac_lines=exac_lines,
            mim2gene_lines=mim2gene_lines,
            genemap_lines=genemap_lines,
            hpo_lines=hpo_lines
        )
    non_existing = 0
    nr_genes = len(genes)
    with progressbar(genes.values(), label="Building genes", length=nr_genes) as bar:
        for gene_data in bar:
            # Genes without coordinates cannot be used; count and skip them
            if not gene_data.get('chromosome'):
                LOG.debug("skipping gene: %s. No coordinates found",
                          gene_data.get('hgnc_symbol', '?'))
                non_existing += 1
                continue
            gene_obj = build_hgnc_gene(gene_data, build=build)
            gene_objects.append(gene_obj)
    LOG.info("Loading genes build %s", build)
    # Bulk insert is much faster than loading one gene at a time
    adapter.load_hgnc_bulk(gene_objects)
    LOG.info("Loading done. %s genes loaded", len(gene_objects))
    LOG.info("Nr of genes without coordinates in build %s: %s", build, non_existing)
    return gene_objects
Show all hpo terms in the database
def hpo(context, term, description):
    """Show all hpo terms in the database

    Args:
        context: click context carrying the adapter in ``context.obj``
        term(str): HPO id (or bare number) to search for
        description(str): free-text query matched against term descriptions
    """
    LOG.info("Running scout view hpo")
    adapter = context.obj['adapter']
    if term:
        term = term.upper()
        # Allow searching on the bare number, e.g. '45' -> 'HP:0000045'
        if not term.startswith('HP:'):
            while len(term) < 7:
                term = '0' + term
            term = 'HP:' + term
        LOG.info("Searching for term %s", term)
        hpo_terms = adapter.hpo_terms(hpo_term=term)
    elif description:
        sorted_terms = sorted(adapter.hpo_terms(query=description),
                              key=itemgetter('hpo_number'))
        for term in sorted_terms:
            # BUG FIX: pop with a default; terms without 'genes' raised KeyError
            term.pop('genes', None)
            print("name: {} | {} | {}".format(term['_id'], term['description'],
                                              term['hpo_number']))
        context.abort()
    else:
        hpo_terms = adapter.hpo_terms()
    if hpo_terms.count() == 0:
        LOG.warning("No matching terms found")
        return
    click.echo("hpo_id\tdescription\tnr_genes")
    for hpo_obj in hpo_terms:
        click.echo("{0}\t{1}\t{2}".format(
            hpo_obj['hpo_id'],
            hpo_obj['description'],
            len(hpo_obj.get('genes', []))
        ))
Build a gene object. Has to build the transcripts for the genes too. Args: gene (dict): Parsed information from the VCF; hgncid_to_gene (dict): A map from hgnc_id -> hgnc_gene objects
def build_gene(gene, hgncid_to_gene=None):
    """Build a gene object

    Has to build the transcripts for the genes too.

    Args:
        gene(dict): Parsed information from the VCF
        hgncid_to_gene(dict): A map from hgnc_id -> hgnc_gene objects

    Returns:
        gene_obj(dict): with keys hgnc_id (required), hgnc_symbol,
            ensembl_id, description, inheritance, transcripts,
            functional_annotation (SO_TERM_KEYS), region_annotation
            (FEATURE_TYPES), sift_prediction / polyphen_prediction
            (CONSEQUENCE), hgvs_identifier, canonical_transcript, exon
    """
    hgncid_to_gene = hgncid_to_gene or {}
    gene_obj = dict()
    # This id is collected from the VCF, typically annotated by VEP or snpEFF
    hgnc_id = int(gene['hgnc_id'])
    gene_obj['hgnc_id'] = hgnc_id
    # Get the gene information from database
    hgnc_gene = hgncid_to_gene.get(hgnc_id)
    if hgnc_gene:
        # Set the hgnc symbol etc to the one internally in Scout
        gene_obj['hgnc_symbol'] = hgnc_gene['hgnc_symbol']
        gene_obj['ensembl_id'] = hgnc_gene['ensembl_id']
        gene_obj['description'] = hgnc_gene['description']
        if hgnc_gene.get('inheritance_models'):
            gene_obj['inheritance'] = hgnc_gene['inheritance_models']
    gene_obj['transcripts'] = [build_transcript(transcript)
                               for transcript in gene['transcripts']]
    # The four "most severe" annotations share the same validate-and-warn
    # logic; route them through one helper instead of four copies.
    _set_validated(gene_obj, 'functional_annotation',
                   gene.get('most_severe_consequence'), SO_TERM_KEYS,
                   "functional annotation")
    _set_validated(gene_obj, 'region_annotation',
                   gene.get('region_annotation'), FEATURE_TYPES,
                   "region annotation")
    _set_validated(gene_obj, 'sift_prediction',
                   gene.get('most_severe_sift'), CONSEQUENCE,
                   "sift prediction")
    _set_validated(gene_obj, 'polyphen_prediction',
                   gene.get('most_severe_polyphen'), CONSEQUENCE,
                   "polyphen prediction")
    gene_obj['hgvs_identifier'] = gene['hgvs_identifier']
    gene_obj['canonical_transcript'] = gene['canonical_transcript']
    gene_obj['exon'] = gene['exon']
    return gene_obj


def _set_validated(gene_obj, key, value, valid_terms, label):
    """Set gene_obj[key] to value when value is in valid_terms; warn otherwise."""
    if value:
        if value not in valid_terms:
            LOG.warning("Invalid %s %s", label, value)
        else:
            gene_obj[key] = value
Flask app factory function.
def create_app(config_file=None, config=None):
    """Flask app factory function.

    Applies the packaged config first, then the ``config`` dict, then the
    optional external ``config_file``.
    """
    app = Flask(__name__)
    app.config.from_pyfile('config.py')
    app.jinja_env.add_extension('jinja2.ext.do')
    if config:
        app.config.update(config)
    if config_file:
        app.config.from_pyfile(config_file)
    # If there is a MatchMaker Exchange server, collect the connected
    # external nodes
    app.mme_nodes = mme_nodes(app.config.get('MME_URL'), app.config.get('MME_TOKEN'))
    app.config["JSON_SORT_KEYS"] = False
    log_level = logger.getEffectiveLevel()
    coloredlogs.install(level='DEBUG' if app.debug else log_level)
    configure_extensions(app)
    register_blueprints(app)
    register_filters(app)
    if not (app.debug or app.testing) and app.config.get('MAIL_USERNAME'):
        # setup email logging of errors
        configure_email_logging(app)

    @app.before_request
    def check_user():
        # Guard-clause chain: bail out on everything that needs no auth
        if app.config.get('LOGIN_DISABLED') or not request.endpoint:
            return
        # static assets and generated reports are always reachable
        if 'static' in request.endpoint or 'report' in request.endpoint:
            return
        # endpoints can opt out of auth by setting view_func.is_public
        if getattr(app.view_functions[request.endpoint], 'is_public', False):
            return
        if current_user.is_authenticated:
            return
        # combine visited URL (convert byte string query string to unicode!)
        next_url = "{}?{}".format(request.path, request.query_string.decode())
        return redirect(url_for('login.login', next=next_url))

    return app
Configure Flask extensions.
def configure_extensions(app):
    """Configure Flask extensions."""
    # All plain init_app-style extensions handled in one loop
    flask_extensions = (
        extensions.toolbar,
        extensions.bootstrap,
        extensions.mongo,
        extensions.store,
        extensions.login_manager,
        extensions.oauth,
        extensions.mail,
    )
    for extension in flask_extensions:
        extension.init_app(app)
    Markdown(app)
    # coverage reporting is optional, keyed on a configured SQL database
    if app.config.get('SQLALCHEMY_DATABASE_URI'):
        configure_coverage(app)
    if app.config.get('LOQUSDB_SETTINGS'):
        # setup LoqusDB
        extensions.loqusdb.init_app(app)
Register Flask blueprints.
def register_blueprints(app):
    """Register Flask blueprints."""
    # registration order mirrors the original call order
    blueprints = (
        public.public_bp,
        genes.genes_bp,
        cases.cases_bp,
        login.login_bp,
        variants.variants_bp,
        panels.panels_bp,
        dashboard.dashboard_bp,
        api.api_bp,
        alignviewers.alignviewers_bp,
        phenotypes.hpo_bp,
        institutes.overview,
    )
    for blueprint in blueprints:
        app.register_blueprint(blueprint)
Setup logging of errors/exceptions to email.
def configure_email_logging(app):
    """Setup logging of error/exceptions to email."""
    import logging

    from scout.log import TlsSMTPHandler

    smtp_handler = TlsSMTPHandler(
        mailhost=app.config['MAIL_SERVER'],
        fromaddr=app.config['MAIL_USERNAME'],
        toaddrs=app.config['ADMINS'],
        subject="O_ops... {} failed!".format(app.name),
        credentials=(app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
    )
    # Only genuine errors should trigger an email
    smtp_handler.setLevel(logging.ERROR)
    log_format = (
        '%(asctime)s - %(name)s - %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]'
    )
    smtp_handler.setFormatter(logging.Formatter(log_format))
    app.logger.addHandler(smtp_handler)
Setup coverage related extensions.
def configure_coverage(app): """Setup coverage related extensions.""" # setup chanjo report app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True if app.debug else False if chanjo_api: chanjo_api.init_app(app) configure_template_filters(app) # register chanjo report blueprint app.register_blueprint(report_bp, url_prefix='/reports') babel = Babel(app) @babel.localeselector def get_locale(): """Determine locale to use for translations.""" accept_languages = current_app.config.get('ACCEPT_LANGUAGES', ['en']) # first check request args session_language = request.args.get('lang') if session_language in accept_languages: current_app.logger.info("using session language: %s", session_language) return session_language # language can be forced in config user_language = current_app.config.get('REPORT_LANGUAGE') if user_language: return user_language # try to guess the language from the user accept header that # the browser transmits. We support de/fr/en in this example. # The best match wins. return request.accept_languages.best_match(accept_languages)
Show all alias symbols and how they map to ids
def aliases(context, build, symbol):
    """Show all alias symbols and how they map to ids

    Args:
        context: click context carrying the adapter in ``context.obj``
        build(str): genome build to search
        symbol(str): restrict output to genes matching this alias symbol
    """
    LOG.info("Running scout view aliases")
    adapter = context.obj['adapter']
    if symbol:
        alias_genes = {}
        res = adapter.gene_by_alias(symbol, build=build)
        for gene_obj in res:
            hgnc_id = gene_obj['hgnc_id']
            # Collect the true symbol given by hgnc
            hgnc_symbol = gene_obj['hgnc_symbol']
            # Loop over all aliases
            for alias in gene_obj['aliases']:
                true_id = None
                # If the alias is the same as hgnc symbol we know the true id
                if alias == hgnc_symbol:
                    true_id = hgnc_id
                # If the alias is already in the list we add the id
                if alias in alias_genes:
                    alias_genes[alias]['ids'].add(hgnc_id)
                    if true_id:
                        alias_genes[alias]['true'] = hgnc_id
                else:
                    # BUG FIX: 'true' must only be set when this alias is the
                    # official hgnc symbol; the original stored hgnc_id
                    # unconditionally, marking every alias as a true symbol.
                    alias_genes[alias] = {
                        'true': true_id,
                        'ids': set([hgnc_id])
                    }
    else:
        alias_genes = adapter.genes_by_alias(build=build)
    if len(alias_genes) == 0:
        LOG.info("No gene found for build %s", build)
        return
    click.echo("#hgnc_symbol\ttrue_id\thgnc_ids")
    for alias_symbol in alias_genes:
        info = alias_genes[alias_symbol]
        click.echo("{0}\t{1}\t{2}\t".format(
            alias_symbol,
            (info['true'] or 'None'),
            ', '.join(str(gene_id) for gene_id in info['ids'])
        ))
Build a panel_gene object. Args: gene_info (dict). Returns: gene_obj (dict). panel_gene = dict(hgnc_id = int, # required; symbol = str, ...)
def build_gene(gene_info, adapter):
    """Build a panel_gene object

    Args:
        gene_info(dict): panel file information for one gene
        adapter(scout.adapter.MongoAdapter)

    Returns:
        gene_obj(dict):
            panel_gene = dict(
                hgnc_id = int, # required
                symbol = str,
                disease_associated_transcripts = list, # refseq transcript strings
                reduced_penetrance = bool,
                mosaicism = bool,
                database_entry_version = str,
                ar = bool, ad = bool, mt = bool,
                xr = bool, xd = bool, x = bool, y = bool,
            )

    Raises:
        KeyError: if the gene lacks a hgnc id
        IntegrityError: if the hgnc id is not in the gene database
    """
    symbol = gene_info.get('hgnc_symbol')
    # A gene has to have a hgnc id; .get() avoids the original's
    # raise-inside-try-to-re-raise KeyError dance
    hgnc_id = gene_info.get('hgnc_id')
    if not hgnc_id:
        raise KeyError("Gene {0} is missing hgnc id. Panel genes has to have hgnc_id".format(symbol))
    gene_obj = dict(hgnc_id=hgnc_id)

    hgnc_gene = adapter.hgnc_gene(hgnc_id)
    if hgnc_gene is None:
        raise IntegrityError("hgnc_id {0} is not in the gene database!".format(hgnc_id))

    # Always prefer the symbol stored in the gene database
    gene_obj['symbol'] = hgnc_gene['hgnc_symbol']
    if symbol != gene_obj['symbol']:
        LOG.warning("Symbol in database does not correspond to symbol in panel file for gene %s", hgnc_id)
        # lazy %-args instead of eager string interpolation
        LOG.warning("Using symbol %s for gene %s, instead of %s",
                    hgnc_gene['hgnc_symbol'], hgnc_id, symbol)

    if gene_info.get('transcripts'):
        gene_obj['disease_associated_transcripts'] = gene_info['transcripts']
    # Optional flags copied only when set in the panel file
    if gene_info.get('reduced_penetrance'):
        gene_obj['reduced_penetrance'] = True
    if gene_info.get('mosaicism'):
        gene_obj['mosaicism'] = True
    if gene_info.get('database_entry_version'):
        gene_obj['database_entry_version'] = gene_info['database_entry_version']

    # Translate inheritance models to the boolean fields used in the database
    model_fields = {'AR': 'ar', 'AD': 'ad', 'MT': 'mt',
                    'XR': 'xr', 'XD': 'xd', 'X': 'x', 'Y': 'y'}
    for model in gene_info.get('inheritance_models', []):
        field = model_fields.get(model)
        if field:
            gene_obj[field] = True

    return gene_obj
Build a gene_panel object
def build_panel(panel_info, adapter):
    """Build a gene_panel object

    Args:
        panel_info(dict): A dictionary with panel information
        adapter (scout.adapter.MongoAdapter)

    Returns:
        panel_obj(dict):
            gene_panel = dict(
                panel_id = str, # required
                institute = str, # institute_id, required
                version = float, # required
                date = datetime, # required
                display_name = str, # default is panel_name
                genes = list, # list of panel genes
            )

    Raises:
        KeyError: when mandatory information (id, institute, date) is missing
        IntegrityError: when the institute or some gene is not in the database
    """
    panel_name = panel_info.get('panel_id', panel_info.get('panel_name'))
    if not panel_name:
        raise KeyError("Panel has to have a id")
    panel_obj = dict(panel_name=panel_name)
    LOG.info("Building panel with name: {0}".format(panel_name))

    try:
        institute_id = panel_info['institute']
    except KeyError as err:
        # chain the original error for easier debugging
        raise KeyError("Panel has to have a institute") from err
    # Check if institute exists in database
    if adapter.institute(institute_id) is None:
        raise IntegrityError("Institute %s could not be found" % institute_id)
    panel_obj['institute'] = institute_id

    panel_obj['version'] = float(panel_info['version'])

    try:
        panel_obj['date'] = panel_info['date']
    except KeyError as err:
        raise KeyError("Panel has to have a date") from err

    panel_obj['display_name'] = panel_info.get('display_name', panel_obj['panel_name'])

    gene_objs = []
    failed = False
    for gene_info in panel_info.get('genes', []):
        try:
            gene_objs.append(build_gene(gene_info, adapter))
        except IntegrityError as err:
            # Collect every failing gene before aborting so the log shows all
            LOG.warning(err)
            failed = True
    if failed:
        raise IntegrityError("Some genes did not exist in database. Please see log messages.")
    panel_obj['genes'] = gene_objs

    return panel_obj
Export variants which have been verified for an institute and write them to an excel file.
def verified(context, collaborator, test, outpath=None):
    """Export variants which have been verified for an institute
    and write them to an excel file.

    Args:
        collaborator(str): institute id
        test(bool): True if the function is called for testing purposes
        outpath(str): path to output file

    Returns:
        written_files(int): number of written or simulated files
    """
    written_files = 0
    collaborator = collaborator or 'cust000'
    LOG.info('Exporting verified variants for cust {}'.format(collaborator))
    adapter = context.obj['adapter']
    verified_vars = adapter.verified(institute_id=collaborator)
    LOG.info('FOUND {} verified variants for institute {}'.format(len(verified_vars), collaborator))
    if not verified_vars:
        LOG.warning('There are no verified variants for institute {} in database!'.format(collaborator))
        return None

    document_lines = export_verified_variants(verified_vars)
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    document_name = '.'.join(['verified_variants', collaborator, today]) + '.xlsx'

    # If this was a test and lines are created return success
    if test and document_lines:
        written_files += 1
        LOG.info('Success. Verified variants file contains {} lines'.format(len(document_lines)))
        return written_files

    # create workbook and new sheet; default the output folder to the cwd
    outpath = outpath or str(os.getcwd())
    workbook = Workbook(os.path.join(outpath, document_name))
    sheet = workbook.add_worksheet()

    # Header goes on row 0, variant lines follow from row 1
    for column, header_field in enumerate(VERIFIED_VARIANTS_HEADER):
        sheet.write(0, column, header_field)
    for row_index, line in enumerate(document_lines, 1):
        # each line becomes a row, each field a cell
        for column, cell_value in enumerate(line):
            sheet.write(row_index, column, cell_value)
    workbook.close()

    if os.path.exists(os.path.join(outpath, document_name)):
        LOG.info('Success. Verified variants file of {} lines was written to disk'.format(len(document_lines)))
        written_files += 1
    return written_files
Export causatives for a collaborator in .vcf format
def variants(context, collaborator, document_id, case_id, json):
    """Export causatives for a collaborator in .vcf format

    When case_id is given the entries include FORMAT and genotype columns.
    """
    LOG.info("Running scout export variants")
    adapter = context.obj['adapter']
    collaborator = collaborator or 'cust000'
    variants = export_variants(
        adapter,
        collaborator,
        document_id=document_id,
        case_id=case_id
    )
    if json:
        click.echo(dumps([var for var in variants]))
        return
    # BUG FIX: copy the header; the original aliased the module-level
    # VCF_HEADER list and then mutated it, corrupting every subsequent call.
    vcf_header = list(VCF_HEADER)
    # If case_id is given, print more complete vcf entries, with INFO
    # and genotypes
    if case_id:
        vcf_header[-1] += "\tFORMAT"
        case_obj = adapter.case(case_id=case_id)
        for individual in case_obj['individuals']:
            vcf_header[-1] += "\t" + individual['individual_id']
    # print header
    for line in vcf_header:
        click.echo(line)
    for variant_obj in variants:
        click.echo(get_vcf_entry(variant_obj, case_id=case_id))
Get vcf entry from variant object
def get_vcf_entry(variant_obj, case_id=None):
    """Build a VCF-formatted line for a variant.

    Args:
        variant_obj(dict): variant document with coordinates, alleles,
            quality, filters and (optionally) samples
        case_id(str): when given, GT format and genotype columns are appended

    Returns:
        variant_string(str): tab-separated VCF entry (no trailing newline)
    """
    # SNVs carry the sub category under TYPE, structural variants under SVTYPE
    type_key = 'TYPE' if variant_obj['category'] == 'snv' else 'SVTYPE'
    info_field = "END={0};{1}={2}".format(
        variant_obj['end'], type_key, variant_obj['sub_category'].upper())
    columns = [
        variant_obj['chromosome'],
        variant_obj['position'],
        variant_obj['dbsnp_id'],
        variant_obj['reference'],
        variant_obj['alternative'],
        variant_obj['quality'],
        ';'.join(variant_obj['filters']),
        info_field,
    ]
    variant_string = "\t".join(str(column) for column in columns)
    if case_id:
        genotypes = [sample['genotype_call'] for sample in variant_obj['samples']]
        variant_string = "\t".join([variant_string, "GT"] + genotypes)
    return variant_string
Start the web server.
def serve(context, config, host, port, debug, livereload):
    """Start the web server."""
    settings = context.obj
    pymongo_config = dict(
        MONGO_HOST=settings['host'],
        MONGO_PORT=settings['port'],
        MONGO_DBNAME=settings['mongodb'],
        MONGO_USERNAME=settings['username'],
        MONGO_PASSWORD=settings['password'],
    )
    # Fail fast if the database cannot be reached
    connection_ok = check_connection(
        host=pymongo_config['MONGO_HOST'],
        port=pymongo_config['MONGO_PORT'],
        username=pymongo_config['MONGO_USERNAME'],
        password=pymongo_config['MONGO_PASSWORD'],
        authdb=settings['authdb'],
    )
    log.info("Test if mongod is running")
    if not connection_ok:
        log.warning("Connection could not be established")
        log.info("Is mongod running?")
        context.abort()
    config = os.path.abspath(config) if config else None
    app = create_app(config=pymongo_config, config_file=config)
    if livereload:
        # livereload wraps the WSGI app and reloads on file changes
        Server(app.wsgi_app).serve(host=host, port=port, debug=debug)
    else:
        app.run(host=host, port=port, debug=debug)
Generate an md5-key from a list of arguments.
def generate_md5_key(list_of_arguments):
    """Generate an md5-key from a list of arguments.

    Args:
        list_of_arguments: A list of strings

    Returns:
        str: hex md5 digest of the space-joined strings.

    Raises:
        SyntaxError: if any argument is not a string (odd choice for input
            validation, but kept for backward compatibility with callers).
    """
    for arg in list_of_arguments:
        # `str` replaces the py2-compat `string_types`; the file uses
        # f-strings elsewhere so only python 3 is supported.
        if not isinstance(arg, str):
            raise SyntaxError("Error in generate_md5_key: "
                              "Argument: {0} is a {1}".format(arg, type(arg)))
    # renamed from `hash`, which shadowed the builtin
    digest = hashlib.md5()
    digest.update(' '.join(list_of_arguments).encode('utf-8'))
    return digest.hexdigest()
Setup via Flask.
def init_app(self, app):
    """Setup via Flask.

    Reads the mongo connection settings from the Flask app config and hands
    the database handle to ``setup``.
    """
    # host/port default to a local mongod; MONGO_DBNAME is mandatory
    host = app.config.get('MONGO_HOST', 'localhost')
    port = app.config.get('MONGO_PORT', 27017)
    dbname = app.config['MONGO_DBNAME']
    log.info("connecting to database: %s:%s/%s", host, port, dbname)
    # NOTE(review): the log line uses MONGO_DBNAME but the actual handle comes
    # from MONGO_DATABASE — presumably set by the Flask/PyMongo integration;
    # confirm the two always refer to the same database.
    self.setup(app.config['MONGO_DATABASE'])
Setup connection to database.
def setup(self, database): """Setup connection to database.""" self.db = database self.hgnc_collection = database.hgnc_gene self.user_collection = database.user self.whitelist_collection = database.whitelist self.institute_collection = database.institute self.event_collection = database.event self.case_collection = database.case self.panel_collection = database.gene_panel self.hpo_term_collection = database.hpo_term self.disease_term_collection = database.disease_term self.variant_collection = database.variant self.acmg_collection = database.acmg self.clinvar_collection = database.clinvar self.clinvar_submission_collection = database.clinvar_submission self.exon_collection = database.exon self.transcript_collection = database.transcript
Create indexes for the database
def index(context, update):
    """Create indexes for the database"""
    LOG.info("Running scout index")
    adapter = context.obj['adapter']
    # --update only refreshes existing indexes; otherwise build them all
    action = adapter.update_indexes if update else adapter.load_indexes
    action()
Setup a scout database.
def database(context, institute_name, user_name, user_mail, api_key):
    """Setup a scout database."""
    LOG.info("Running scout setup database")
    settings = context.obj
    # Fetch the omim information; an API key is mandatory
    api_key = api_key or settings.get('omim_api_key')
    if not api_key:
        LOG.warning("Please provide a omim api key with --api-key")
        context.abort()
    LOG.info("Setting up database %s", settings['mongodb'])
    # CLI options take precedence, context settings are the fallback
    setup_scout(
        adapter=settings['adapter'],
        institute_id=institute_name or settings['institute_name'],
        user_name=user_name or settings['user_name'],
        user_mail=user_mail or settings['user_mail'],
        api_key=api_key,
    )
Setup a scout demo instance. This instance will be populated with a case, a gene panel and some variants.
def demo(context):
    """Setup a scout demo instance. This instance will be populated with a case,
    a gene panel and some variants.
    """
    LOG.info("Running scout setup demo")
    LOG.info("Setting up database %s", context.obj['mongodb'])
    setup_scout(
        adapter=context.obj['adapter'],
        institute_id=context.obj['institute_name'],
        user_name=context.obj['user_name'],
        user_mail=context.obj['user_mail'],
        demo=True,
    )
Setup scout instances.
def setup(context, institute, user_mail, user_name):
    """
    Setup scout instances.

    Stores institute/user settings on the click context, connects to mongod
    and attaches a ready MongoAdapter for the subcommands to use.
    """
    context.obj['institute_name'] = institute
    context.obj['user_name'] = user_name
    context.obj['user_mail'] = user_mail

    if context.invoked_subcommand == 'demo':
        # The demo subcommand always runs against its own throw-away database.
        LOG.debug("Change database name to scout-demo")
        context.obj['mongodb'] = 'scout-demo'

    LOG.info("Setting database name to %s", context.obj['mongodb'])
    LOG.debug("Setting host to %s", context.obj['host'])
    LOG.debug("Setting port to %s", context.obj['port'])

    try:
        client = get_connection(
            host=context.obj['host'],
            port=context.obj['port'],
            username=context.obj['username'],
            password=context.obj['password'],
            mongodb=context.obj['mongodb'],
        )
    except ConnectionFailure:
        context.abort()

    LOG.info("connecting to database %s", context.obj['mongodb'])
    database = client[context.obj['mongodb']]

    # BUGFIX: this message was previously logged twice in a row
    # (once before the try block and once inside it).
    LOG.info("Test if mongod is running")
    try:
        # A cheap query to verify the server actually answers.
        database.test.find_one()
    except ServerSelectionTimeoutError as err:
        LOG.warning("Connection could not be established")
        LOG.warning("Please check if mongod is running")
        context.abort()

    LOG.info("Setting up a mongo adapter")
    context.obj['adapter'] = MongoAdapter(database)
Show all institutes in the database
def institutes(context, institute_id, json):
    """Show all institutes in the database"""
    LOG.info("Running scout view institutes")
    adapter = context.obj['adapter']

    if institute_id:
        institute_obj = adapter.institute(institute_id)
        if not institute_obj:
            # BUGFIX: message previously read "does not exost".
            LOG.info("Institute %s does not exist", institute_id)
            return
        institute_objs = [institute_obj]
    else:
        institute_objs = list(adapter.institutes())

    if not institute_objs:
        click.echo("No institutes found")
        context.abort()

    if not json:
        # Tab-separated header built from the keys of the first institute.
        header = "".join("{0}\t".format(key) for key in institute_objs[0].keys())
        click.echo(header)

    for institute_obj in institute_objs:
        if json:
            click.echo(institute_obj)
            continue
        row = "".join("{0}\t".format(value) for value in institute_obj.values())
        click.echo(row)
Parse the genetic models entry of a vcf
def parse_genetic_models(models_info, case_id):
    """Parse the genetic models entry of a vcf

    Args:
        models_info(str): The raw vcf information
        case_id(str)

    Returns:
        genetic_models(list)
    """
    genetic_models = []
    if not models_info:
        return genetic_models
    # Entries look like "<case_id>:<model>|<model>,..." — keep the models
    # for the matching case (the last matching entry wins).
    for family_info in models_info.split(','):
        fields = family_info.split(':')
        if fields[0] != case_id:
            continue
        genetic_models = fields[1].split('|')
    return genetic_models
Show all gene panels in the database
def panels(context, institute):
    """Show all gene panels in the database"""
    LOG.info("Running scout view panels")
    adapter = context.obj['adapter']
    panel_objs = adapter.gene_panels(institute_id=institute)
    if panel_objs.count() == 0:
        LOG.info("No panels found")
        context.abort()
    # Tab-separated summary, one row per panel.
    click.echo("#panel_name\tversion\tnr_genes\tdate")
    for panel_obj in panel_objs:
        row = "\t".join([
            panel_obj['panel_name'],
            str(panel_obj['version']),
            str(len(panel_obj['genes'])),
            str(panel_obj['date'].strftime('%Y-%m-%d')),
        ])
        click.echo(row)
Add an institute to the database
def add_institute(self, institute_obj):
    """Add an institute to the database

    Args:
        institute_obj(Institute)

    Raises:
        IntegrityError: if an institute with the same internal id exists
    """
    internal_id = institute_obj['internal_id']
    # BUGFIX: previously read institute_obj['internal_id'] here, so log and
    # error messages always showed the internal id instead of the display name.
    display_name = institute_obj['display_name']

    # Check if institute already exists
    if self.institute(institute_id=internal_id):
        raise IntegrityError("Institute {0} already exists in database"
                             .format(display_name))

    LOG.info("Adding institute with internal_id: {0} and "
             "display_name: {1}".format(internal_id, display_name))
    insert_info = self.institute_collection.insert_one(institute_obj)
    ##TODO check if insert info was ok
    LOG.info("Institute saved")
Update the information for an institute
def update_institute(self, internal_id, sanger_recipient=None, coverage_cutoff=None,
                     frequency_cutoff=None, display_name=None, remove_sanger=None,
                     phenotype_groups=None, group_abbreviations=None, add_groups=None):
    """Update the information for an institute

    Args:
        internal_id(str): The internal institute id
        sanger_recipient(str): Email adress to add for sanger order
        coverage_cutoff(int): Update coverage cutoff
        frequency_cutoff(float): New frequency cutoff
        display_name(str): New display name
        remove_sanger(str): Email adress for sanger user to be removed
        phenotype_groups(iterable(str)): New phenotype groups
        group_abbreviations(iterable(str))
        add_groups(bool): If groups should be added. If False replace groups

    Returns:
        updated_institute(dict)

    Raises:
        IntegrityError: if the institute, a sanger user or an HPO term is unknown
    """
    add_groups = add_groups or False
    institute_obj = self.institute(internal_id)
    if not institute_obj:
        raise IntegrityError("Institute {} does not exist in database".format(internal_id))

    # Collect all '$set' fields in one dict so branches never clobber each
    # other (BUGFIX: previously each branch reassigned updates['$set'],
    # silently dropping earlier updates).
    updates = {'$set': {}}
    updated_institute = institute_obj

    if sanger_recipient:
        user_obj = self.user(sanger_recipient)
        if not user_obj:
            raise IntegrityError("user {} does not exist in database".format(sanger_recipient))
        LOG.info("Updating sanger recipients for institute: {0} with {1}".format(
            internal_id, sanger_recipient))
        # BUGFIX: previously pushed `remove_sanger` instead of the new recipient.
        updates['$push'] = {'sanger_recipients': sanger_recipient}

    if remove_sanger:
        LOG.info("Removing sanger recipient {0} from institute: {1}".format(
            remove_sanger, internal_id))
        updates['$pull'] = {'sanger_recipients': remove_sanger}

    if coverage_cutoff:
        LOG.info("Updating coverage cutoff for institute: {0} to {1}".format(
            internal_id, coverage_cutoff))
        updates['$set']['coverage_cutoff'] = coverage_cutoff

    if frequency_cutoff:
        LOG.info("Updating frequency cutoff for institute: {0} to {1}".format(
            internal_id, frequency_cutoff))
        updates['$set']['frequency_cutoff'] = frequency_cutoff

    if display_name:
        LOG.info("Updating display name for institute: {0} to {1}".format(
            internal_id, display_name))
        updates['$set']['display_name'] = display_name

    if phenotype_groups:
        if group_abbreviations:
            group_abbreviations = list(group_abbreviations)
        existing_groups = {}
        if add_groups:
            # Extend the current groups instead of replacing them.
            existing_groups = institute_obj.get('phenotype_groups', PHENOTYPE_GROUPS)
        for i, hpo_term in enumerate(phenotype_groups):
            hpo_obj = self.hpo_term(hpo_term)
            if not hpo_obj:
                raise IntegrityError("Term {} does not exist".format(hpo_term))
            hpo_id = hpo_obj['hpo_id']
            description = hpo_obj['description']
            abbreviation = None
            if group_abbreviations:
                abbreviation = group_abbreviations[i]
            existing_groups[hpo_term] = {'name': description, 'abbr': abbreviation}
        updates['$set']['phenotype_groups'] = existing_groups

    # Only touch the database if any update was actually requested.
    if updates['$set'] or len(updates) > 1:
        updates['$set']['updated_at'] = datetime.now()
        updated_institute = self.institute_collection.find_one_and_update(
            {'_id': internal_id},
            updates,
            return_document=pymongo.ReturnDocument.AFTER)
        LOG.info("Institute updated")

    return updated_institute
Fetch a single institute from the backend
def institute(self, institute_id):
    """Fetch a single institute from the backend

    Args:
        institute_id(str)

    Returns:
        Institute object or None if no institute matches
    """
    LOG.debug("Fetch institute {}".format(institute_id))
    result = self.institute_collection.find_one({'_id': institute_id})
    if result is None:
        LOG.debug("Could not find institute {0}".format(institute_id))
    return result
Fetch all institutes. Args: institute_ids ( list ( str )) Returns: res ( pymongo. Cursor )
def institutes(self, institute_ids=None):
    """Fetch all institutes.

    Args:
        institute_ids(list(str)): optional list of ids to restrict the result

    Returns:
        res(pymongo.Cursor)
    """
    # No filter means every institute is returned.
    query = {'_id': {'$in': institute_ids}} if institute_ids else {}
    LOG.debug("Fetching all institutes")
    return self.institute_collection.find(query)
Check if a string is a valid date
def match_date(date):
    """Check if a string is a valid date

    Args:
        date(str)

    Returns:
        bool
    """
    # YYYY[- /.]MM[- /.]DD, years restricted to 19xx/20xx.
    date_pattern = re.compile(
        r"^(19|20)\d\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])")
    return bool(date_pattern.match(date))
Return a datetime object if there is a valid date
def get_date(date, date_format=None):
    """Return a datetime object if there is a valid date

    Raise exception if date is not valid
    Return todays date if no date where added

    Args:
        date(str)
        date_format(str)

    Returns:
        date_obj(datetime.datetime)
    """
    if not date:
        return datetime.datetime.now()
    if date_format:
        return datetime.datetime.strptime(date, date_format)
    if not match_date(date):
        raise ValueError("Date %s is invalid" % date)
    # No explicit format: figure out the separator and build the date
    # from its year/month/day parts.
    for separator in ('-', ' ', '.'):
        parts = date.split(separator)
        if len(parts) == 3:
            break
    else:
        parts = date.split('/')
    return datetime.datetime(*(int(number) for number in parts))
Export a list of genes based on hpo terms
def hpo_genes(context, hpo_term):
    """Export a list of genes based on hpo terms"""
    LOG.info("Running scout export hpo_genes")
    adapter = context.obj['adapter']
    if not hpo_term:
        LOG.warning("Please use at least one hpo term")
        context.abort()
    # Header followed by one "<gene>\t<count>" row per result.
    click.echo("#Gene_id\tCount")
    for term in adapter.generate_hpo_gene_list(*hpo_term):
        click.echo("{0}\t{1}".format(term[0], term[1]))
Parse transcript information and get the gene information from there. Use hgnc_id as identifier for genes and ensembl transcript id to identify transcripts Args: transcripts ( iterable ( dict ))
def parse_genes(transcripts):
    """Parse transcript information and get the gene information from there.

    Use hgnc_id as identifier for genes and ensembl transcript id to
    identify transcripts

    Args:
        transcripts(iterable(dict))

    Returns:
        genes (list(dict)): A list with dictionaries that represents genes
    """
    # Dictionary to group the transcripts by hgnc_id
    genes_to_transcripts = {}
    # List with all genes and there transcripts
    genes = []
    # NOTE(review): these three are assigned in the grouping loop below but
    # never read afterwards — the gene dicts use the leaked loop variable
    # `transcript` instead (see note at the bottom). Confirm intent.
    hgvs_identifier = None
    canonical_transcript = None
    exon = None
    # Group all transcripts by gene
    for transcript in transcripts:
        # Check what hgnc_id a transcript belongs to
        hgnc_id = transcript['hgnc_id']
        hgnc_symbol = transcript['hgnc_symbol']
        if (transcript['is_canonical'] and transcript.get('coding_sequence_name')):
            hgvs_identifier = transcript.get('coding_sequence_name')
            canonical_transcript = transcript['transcript_id']
            exon = transcript['exon']
        # If there is a identifier we group the transcripts under gene;
        # fall back to the symbol when the hgnc id is missing.
        if hgnc_id:
            if hgnc_id in genes_to_transcripts:
                genes_to_transcripts[hgnc_id].append(transcript)
            else:
                genes_to_transcripts[hgnc_id] = [transcript]
        else:
            if hgnc_symbol:
                if hgnc_symbol in genes_to_transcripts:
                    genes_to_transcripts[hgnc_symbol].append(transcript)
                else:
                    genes_to_transcripts[hgnc_symbol] = [transcript]

    # We need to find out the most severe consequence in all transcripts
    # and save in what transcript we found it
    # Loop over all genes
    for gene_id in genes_to_transcripts:
        # Get the transcripts for a gene
        gene_transcripts = genes_to_transcripts[gene_id]
        # This will be a consequece from SO_TERMS
        most_severe_consequence = None
        # Set the most severe score to infinity; lower rank is worse.
        most_severe_rank = float('inf')
        # The most_severe_transcript is a dict
        most_severe_transcript = None
        most_severe_region = None
        most_severe_sift = None
        most_severe_polyphen = None
        # Loop over all transcripts for a gene to check which is most severe
        for transcript in gene_transcripts:
            hgnc_id = transcript['hgnc_id']
            hgnc_symbol = transcript['hgnc_symbol']
            # Loop over the consequences for a transcript
            for consequence in transcript['functional_annotations']:
                # Get the rank based on SO_TERM
                # Lower rank is worse
                new_rank = SO_TERMS[consequence]['rank']
                if new_rank < most_severe_rank:
                    # If a worse consequence is found, update the parameters
                    most_severe_rank = new_rank
                    most_severe_consequence = consequence
                    most_severe_transcript = transcript
                    most_severe_sift = transcript['sift_prediction']
                    most_severe_polyphen = transcript['polyphen_prediction']
                    most_severe_region = SO_TERMS[consequence]['region']
        # NOTE(review): 'hgvs_identifier', 'canonical_transcript' and 'exon'
        # below come from the *last* transcript iterated for this gene (loop
        # variable leak), not from the canonical transcript tracked above —
        # looks unintentional; confirm against callers before changing.
        gene = {
            'transcripts': gene_transcripts,
            'most_severe_transcript': most_severe_transcript,
            'most_severe_consequence': most_severe_consequence,
            'most_severe_sift': most_severe_sift,
            'most_severe_polyphen': most_severe_polyphen,
            'hgnc_id': hgnc_id,
            'hgnc_symbol': hgnc_symbol,
            'region_annotation': most_severe_region,
            'hgvs_identifier': transcript['coding_sequence_name'],
            'canonical_transcript': transcript['transcript_id'],
            'exon': transcript['exon'],
        }
        genes.append(gene)
    return genes
Parse the rank score
def parse_rank_score(rank_score_entry, case_id):
    """Parse the rank score

    Args:
        rank_score_entry(str): The raw rank score entry
        case_id(str)

    Returns:
        rank_score(float)
    """
    if not rank_score_entry:
        return None
    result = None
    # Entries look like "<case_id>:<score>,..." — keep the score for our case.
    for entry in rank_score_entry.split(','):
        fields = entry.split(':')
        if fields[0] == case_id:
            result = float(fields[1])
    return result
Add a user to the database.
def user(context, institute_id, user_name, user_mail, admin):
    """Add a user to the database."""
    adapter = context.obj['adapter']

    # Every requested institute must exist before the user is created.
    institutes = []
    for institute in institute_id:
        institute_obj = adapter.institute(institute_id=institute)
        if not institute_obj:
            # BUGFIX: placeholder was '%', which logged the '%' literally
            # instead of interpolating the institute id.
            LOG.warning("Institute %s does not exist", institute)
            context.abort()
        institutes.append(institute)

    roles = []
    if admin:
        LOG.info("User is admin")
        roles.append('admin')

    user_info = dict(email=user_mail.lower(), name=user_name,
                     roles=roles, institutes=institutes)
    user_obj = build_user(user_info)
    try:
        adapter.add_user(user_obj)
    except Exception as err:
        LOG.warning(err)
        context.abort()
Parse transcript information from VCF variants
def parse_transcripts(raw_transcripts, allele=None):
    """Parse transcript information from VCF variants

    Args:
        raw_transcripts(iterable(dict)): An iterable with raw transcript
                                         information (one dict per VEP CSQ entry)
        allele: unused here; kept for interface compatibility

    Yields:
        transcript(dict) A dictionary with transcript information
    """
    for entry in raw_transcripts:
        transcript = {}
        # There can be several functional annotations for one variant
        functional_annotations = entry.get('CONSEQUENCE', '').split('&')
        transcript['functional_annotations'] = functional_annotations
        # Get the transcript id (ensembl gene id)
        transcript_id = entry.get('FEATURE', '').split(':')[0]
        transcript['transcript_id'] = transcript_id

        # Add the hgnc gene identifiers
        # The HGNC ID is prefered and will be used if it exists
        hgnc_id = entry.get('HGNC_ID')
        if hgnc_id:
            # Strip an optional "HGNC:" style prefix before converting.
            hgnc_id = hgnc_id.split(':')[-1]
            transcript['hgnc_id'] = int(hgnc_id)
        else:
            transcript['hgnc_id'] = None

        hgnc_symbol = entry.get('SYMBOL')
        if hgnc_symbol:
            transcript['hgnc_symbol'] = hgnc_symbol
        else:
            transcript['hgnc_symbol'] = None

        ########### Fill it with the available information ###########

        ### Protein specific annotations ###

        ## Protein ID ##
        transcript['protein_id'] = entry.get('ENSP')

        ## Polyphen prediction ##
        polyphen_prediction = entry.get('POLYPHEN')
        # Default is 'unknown'
        prediction_term = 'unknown'
        if polyphen_prediction:
            # VEP formats predictions as "term(score)" — keep only the term.
            prediction_term = polyphen_prediction.split('(')[0]
        transcript['polyphen_prediction'] = prediction_term

        ## Sift prediction ##
        # Check with other key if it does not exist
        sift_prediction = entry.get('SIFT')
        # Default is 'unknown'
        prediction_term = 'unknown'
        if not sift_prediction:
            sift_prediction = entry.get('SIFT_PRED')
        if sift_prediction:
            prediction_term = sift_prediction.split('(')[0]
        transcript['sift_prediction'] = prediction_term

        transcript['swiss_prot'] = entry.get('SWISSPROT') or 'unknown'

        # Protein domain annotations come as "source:id" pairs joined by '&'.
        if entry.get('DOMAINS', None):
            pfam_domains = entry['DOMAINS'].split('&')
            for annotation in pfam_domains:
                annotation = annotation.split(':')
                domain_name = annotation[0]
                domain_id = annotation[1]
                if domain_name == 'Pfam_domain':
                    transcript['pfam_domain'] = domain_id
                elif domain_name == 'PROSITE_profiles':
                    transcript['prosite_profile'] = domain_id
                elif domain_name == 'SMART_domains':
                    transcript['smart_domain'] = domain_id

        # HGVS names come as "<transcript>:<name>"; keep only the name part.
        coding_sequence_entry = entry.get('HGVSC', '').split(':')
        protein_sequence_entry = entry.get('HGVSP', '').split(':')
        coding_sequence_name = None
        if len(coding_sequence_entry) > 1:
            coding_sequence_name = coding_sequence_entry[-1]
        transcript['coding_sequence_name'] = coding_sequence_name
        protein_sequence_name = None
        if len(protein_sequence_entry) > 1:
            protein_sequence_name = protein_sequence_entry[-1]
        transcript['protein_sequence_name'] = protein_sequence_name

        transcript['biotype'] = entry.get('BIOTYPE')
        transcript['exon'] = entry.get('EXON')
        transcript['intron'] = entry.get('INTRON')

        # VEP encodes strand as '1'/'-1'; normalise to '+'/'-'.
        if entry.get('STRAND'):
            if entry['STRAND'] == '1':
                transcript['strand'] = '+'
            elif entry['STRAND'] == '-1':
                transcript['strand'] = '-'
        else:
            transcript['strand'] = None

        # Map each functional annotation to its region via SO_TERMS.
        functional = []
        regional = []
        for annotation in functional_annotations:
            functional.append(annotation)
            regional.append(SO_TERMS[annotation]['region'])
        transcript['functional_annotations'] = functional
        transcript['region_annotations'] = regional

        # Check if the transcript is marked cannonical by vep
        transcript['is_canonical'] = (entry.get('CANONICAL') == 'YES')

        # Check if the CADD score is available on transcript level
        cadd_phred = entry.get('CADD_PHRED')
        if cadd_phred:
            transcript['cadd'] = float(cadd_phred)

        # Check frequencies
        # There are different keys for different versions of VEP
        # We only support version 90+
        thousandg_freqs = []
        gnomad_freqs = []
        try:
            # The keys for VEP v90+:
            # 'AF' or '1000GAF' - 1000G all populations combined
            # 'xxx_AF' - 1000G (or NHLBI-ESP) individual populations
            # 'gnomAD_AF' - gnomAD exomes, all populations combined
            # 'gnomAD_xxx_AF' - gnomAD exomes, individual populations
            # 'MAX_AF' - Max of all populations (1000G, gnomAD exomes, ESP)
            # https://www.ensembl.org/info/docs/tools/vep/vep_formats.html
            # Loop over all keys to find frequency entries
            for key in entry:
                # All frequencies endswith AF
                if not key.endswith('AF'):
                    continue
                value = entry[key]
                if not value:
                    continue
                # This is the 1000G max af information
                if (key == 'AF' or key == '1000GAF'):
                    transcript['thousand_g_maf'] = float(value)
                    continue
                if key == 'GNOMAD_AF':
                    transcript['gnomad_maf'] = float(value)
                    continue
                if key == 'EXAC_MAX_AF':
                    transcript['exac_max'] = float(value)
                    transcript['exac_maf'] = float(value)
                    continue
                # Remaining keys are per-population frequencies.
                if 'GNOMAD' in key:
                    gnomad_freqs.append(float(value))
                else:
                    thousandg_freqs.append(float(value))
            if thousandg_freqs:
                transcript['thousandg_max'] = max(thousandg_freqs)
            if gnomad_freqs:
                transcript['gnomad_max'] = max(gnomad_freqs)
        except Exception as err:
            # NOTE(review): deliberately best-effort — any malformed frequency
            # field skips the remaining frequency parsing for this entry.
            LOG.debug("Something went wrong when parsing frequencies")
            LOG.debug("Only splitted and normalised VEP v90+ is supported")

        # Clinical significance terms are '&'-joined.
        clinsig = entry.get('CLIN_SIG')
        if clinsig:
            transcript['clinsig'] = clinsig.split('&')

        # Split known variant ids into dbSNP ('rs...') and COSMIC ('COSM...').
        transcript['dbsnp'] = []
        transcript['cosmic'] = []
        variant_ids = entry.get('EXISTING_VARIATION')
        if variant_ids:
            for variant_id in variant_ids.split('&'):
                if variant_id.startswith('rs'):
                    transcript['dbsnp'].append(variant_id)
                elif variant_id.startswith('COSM'):
                    transcript['cosmic'].append(int(variant_id[4:]))

        yield transcript
Check if a connection could be made to the mongo process specified
def check_connection(host='localhost', port=27017, username=None, password=None,
                     authdb=None, max_delay=1):
    """Check if a connection could be made to the mongo process specified

    Args:
        host(str)
        port(int)
        username(str)
        password(str)
        authdb (str): database to to for authentication
        max_delay(int): Number of milliseconds to wait for connection

    Returns:
        bool: If connection could be established
    """
    # uri looks like:
    # mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
    if username and password:
        uri = ("mongodb://{}:{}@{}:{}/{}"
               .format(quote_plus(username), quote_plus(password), host, port, authdb))
        # Never log the actual password.
        log_uri = ("mongodb://{}:****@{}:{}/{}"
                   .format(quote_plus(username), host, port, authdb))
    else:
        log_uri = uri = "mongodb://%s:%s" % (host, port)

    LOG.info("Test connection with uri: %s", log_uri)
    client = MongoClient(uri, serverSelectionTimeoutMS=max_delay)
    try:
        # Any server round-trip proves the connection works.
        client.server_info()
    except (ServerSelectionTimeoutError, OperationFailure) as err:
        LOG.warning(err)
        return False
    return True
Initialize from flask
def init_app(self, app):
    """Initialize from flask

    Connects to mongod using the app config and stores the database handle
    and client on the app config.

    Raises:
        ConnectionFailure: if no connection could be established
    """
    uri = app.config.get("MONGO_URI", None)
    db_name = app.config.get("MONGO_DBNAME", 'scout')
    # BUGFIX: the previous except-handler called `context.abort()`, but
    # `context` is undefined in this scope (copy-paste from the CLI), so a
    # connection failure raised NameError instead of the real error. Let
    # ConnectionFailure propagate to the caller instead.
    client = get_connection(
        host=app.config.get("MONGO_HOST", 'localhost'),
        port=app.config.get("MONGO_PORT", 27017),
        username=app.config.get("MONGO_USERNAME", None),
        password=app.config.get("MONGO_PASSWORD", None),
        uri=uri,
        mongodb=db_name,
    )
    app.config["MONGO_DATABASE"] = client[db_name]
    app.config['MONGO_CLIENT'] = client
Display a list of all user institutes.
def institutes():
    """Display a list of all user institutes."""
    institutes = []
    for ins_obj in user_institutes(store, current_user):
        # Resolve recipient emails to display names, skipping unknown users.
        recipients = []
        for user_mail in ins_obj.get('sanger_recipients', []):
            user_obj = store.user(user_mail)
            if user_obj:
                recipients.append(user_obj['name'])
        institutes.append({
            'display_name': ins_obj['display_name'],
            'internal_id': ins_obj['_id'],
            'coverage_cutoff': ins_obj.get('coverage_cutoff', 'None'),
            'sanger_recipients': recipients,
            'frequency_cutoff': ins_obj.get('frequency_cutoff', 'None'),
            'phenotype_groups': ins_obj.get('phenotype_groups', PHENOTYPE_GROUPS),
        })
    return render_template('overview/institutes.html', institutes=institutes)
Load a delivery report into a case in the database
def load_delivery_report(adapter: MongoAdapter, report_path: str, case_id: str,
                         update: bool = False):
    """
    Load a delivery report into a case in the database

    If the report already exists the function will exit.
    If the user want to load a report that is already in the database
    'update' has to be 'True'

    Args:
        adapter (MongoAdapter): Connection to the database
        report_path (string): Path to delivery report
        case_id (string): Optional case identifier
        update (bool): If an existing report should be replaced

    Returns:
        updated_case(dict)

    Raises:
        DataNotFoundError: if the case does not exist
        IntegrityError: if a report exists and update is False
    """
    case_obj = adapter.case(case_id=case_id)
    if case_obj is None:
        raise DataNotFoundError("no case found")

    # Refuse to silently overwrite an existing report.
    if case_obj.get('delivery_report') and not update:
        raise IntegrityError('Existing delivery report found, use update = True to '
                             'overwrite')

    _put_report_in_case_root(case_obj, report_path)
    logger.info('Saving report for case {} in database'.format(case_obj['_id']))
    return adapter.replace_case(case_obj)
Build a transcript object These represents the transcripts that are parsed from the VCF not the transcript definitions that are collected from ensembl. Args: transcript ( dict ): Parsed transcript information Returns: transcript_obj ( dict )
def build_transcript(transcript, build='37'):
    """Build a transcript object

    These represents the transcripts that are parsed from the VCF, not
    the transcript definitions that are collected from ensembl.

    Args:
        transcript(dict): Parsed transcript information
        build(str): genome build (kept for interface compatibility)

    Returns:
        transcript_obj(dict)
    """
    # Transcripts has to have an id and belong to a gene.
    transcript_obj = {
        'transcript_id': transcript['transcript_id'],
        'hgnc_id': transcript['hgnc_id'],
    }
    # Optional fields are only stored when they carry a truthy value.
    optional_keys = (
        'protein_id', 'sift_prediction', 'polyphen_prediction', 'swiss_prot',
        'pfam_domain', 'prosite_profile', 'smart_domain', 'biotype',
        'functional_annotations', 'region_annotations', 'exon', 'intron',
        'strand', 'coding_sequence_name', 'protein_sequence_name',
    )
    for key in optional_keys:
        value = transcript.get(key)
        if value:
            transcript_obj[key] = value
    transcript_obj['is_canonical'] = transcript.get('is_canonical', False)
    return transcript_obj
Update an existing user. Args: user_obj ( dict ) Returns: updated_user ( dict )
def update_user(self, user_obj):
    """Update an existing user.

    Args:
        user_obj(dict)

    Returns:
        updated_user(dict): the document as stored after the replacement
    """
    LOG.info("Updating user %s", user_obj['_id'])
    return self.user_collection.find_one_and_replace(
        {'_id': user_obj['_id']},
        user_obj,
        return_document=pymongo.ReturnDocument.AFTER,
    )
Add a user object to the database
def add_user(self, user_obj):
    """Add a user object to the database

    Args:
        user_obj(scout.models.User): A dictionary with user information

    Returns:
        user_info(dict): a copy of what was inserted

    Raises:
        IntegrityError: if a user with the same email already exists
    """
    LOG.info("Adding user %s to the database", user_obj['email'])
    # The email doubles as the primary key unless one is already set.
    if '_id' not in user_obj:
        user_obj['_id'] = user_obj['email']
    try:
        self.user_collection.insert_one(user_obj)
        LOG.debug("User inserted")
    except DuplicateKeyError as err:
        # Chain the mongo error so the original cause is preserved.
        raise IntegrityError(
            "User {} already exists in database".format(user_obj['email'])) from err
    return user_obj
Return all users from the database Args: institute ( str ): A institute_id Returns: res ( pymongo. Cursor ): A cursor with users
def users(self, institute=None):
    """Return all users from the database

    Args:
        institute(str): A institute_id

    Returns:
        res(pymongo.Cursor): A cursor with users
    """
    if institute:
        LOG.info("Fetching all users from institute %s", institute)
        query = {'institutes': {'$in': [institute]}}
    else:
        LOG.info("Fetching all users")
        query = {}
    return self.user_collection.find(query)
Fetch a user from the database. Args: email ( str ) Returns: user_obj ( dict )
def user(self, email):
    """Fetch a user from the database.

    Args:
        email(str)

    Returns:
        user_obj(dict): the user document, or None if no user matches
    """
    LOG.info("Fetching user %s", email)
    return self.user_collection.find_one({'_id': email})
Delete a user from the database Args: email ( str ) Returns: user_obj ( dict )
def delete_user(self, email):
    """Delete a user from the database

    Args:
        email(str): the email (used as ``_id``) of the user to delete

    Returns:
        pymongo.results.DeleteResult: note that despite the variable name,
            ``delete_one`` returns a delete result, not the user document
    """
    LOG.info("Deleting user %s", email)
    user_obj = self.user_collection.delete_one({'_id': email})
    return user_obj
Build a compound Args: compound ( dict ) Returns: compound_obj ( dict ) dict ( # This must be the document_id for this variant variant = str # required = True # This is the variant id display_name = str # required combined_score = float # required rank_score = float not_loaded = bool genes = [ { hgnc_id: int hgnc_symbol: str region_annotation: str functional_annotation: str }... ] )
def build_compound(compound):
    """Build a compound

    Args:
        compound(dict)

    Returns:
        compound_obj(dict):
            dict(
                variant = str,        # document_id of the paired variant, required
                display_name = str,   # required
                combined_score = float, # required
            )
    """
    return {
        'variant': compound['variant'],
        'display_name': compound['display_name'],
        # Stored scores may arrive as strings; normalise to float.
        'combined_score': float(compound['score']),
    }
Stream * large * static files with special requirements.
def remote_static():
    """Stream *large* static files with special requirements."""
    # NOTE(review): `file` comes straight from the query string and is handed
    # to send_file_partial without sanitization — confirm this cannot be used
    # for path traversal / arbitrary file reads.
    file_path = request.args.get('file')
    range_header = request.headers.get('Range', None)
    # BAM files are only useful via ranged requests; refuse full downloads.
    if not range_header and file_path.endswith('.bam'):
        return abort(500)
    new_resp = send_file_partial(file_path)
    return new_resp
Visualize BAM alignments.
def pileup():
    """Visualize BAM alignments."""
    vcf_file = request.args.get('vcf')
    bam_files = request.args.getlist('bam')
    bai_files = request.args.getlist('bai')
    samples = request.args.getlist('sample')
    # One alignment descriptor per (bam, bai, sample) triple.
    alignments = [{'bam': bam, 'bai': bai, 'sample': sample}
                  for bam, bai, sample in zip(bam_files, bai_files, samples)]

    position = {
        'contig': request.args['contig'],
        'start': request.args['start'],
        'stop': request.args['stop'],
    }

    genome = current_app.config.get('PILEUP_GENOME')
    if genome:
        if not os.path.isfile(genome):
            flash("The pilup genome path ({}) provided does not exist".format(genome))
            genome = None
        LOG.debug("Use pileup genome %s", genome)

    exons = current_app.config.get('PILEUP_EXONS')
    if exons:
        if not os.path.isfile(exons):
            flash("The pilup exons path ({}) provided does not exist".format(exons))
            # BUGFIX: this branch previously reset `genome` (copy-paste error),
            # leaving a broken exons path in use and discarding a valid genome.
            exons = None
        LOG.debug("Use pileup exons %s", exons)

    LOG.debug("View alignment for positions Chrom:{0}, Start:{1}, End: {2}".format(
        position['contig'], position['start'], position['stop']))
    LOG.debug("Use alignment files {}".format(alignments))
    return render_template('alignviewers/pileup.html', alignments=alignments,
                           position=position, vcf_file=vcf_file, genome=genome,
                           exons=exons)
Visualize BAM alignments using igv. js ( https:// github. com/ igvteam/ igv. js )
def igv():
    """Visualize BAM alignments using igv.js (https://github.com/igvteam/igv.js)"""
    chrom = request.args.get('contig')
    # The hosted reference files use 'M' for the mitochondrial contig.
    if chrom == 'MT':
        chrom = 'M'
    start = request.args.get('start')
    stop = request.args.get('stop')
    locus = "chr{0}:{1}-{2}".format(chrom,start,stop)
    LOG.debug('Displaying locus %s', locus)

    chromosome_build = request.args.get('build')
    LOG.debug('Chromosome build is %s', chromosome_build)

    samples = request.args.getlist('sample')
    bam_files = request.args.getlist('bam')
    bai_files = request.args.getlist('bai')
    LOG.debug('loading the following tracks: %s', bam_files)

    display_obj={}
    # Add chromosome build info to the track object
    fastaURL = ''
    indexURL = ''
    cytobandURL = ''
    gene_track_format = ''
    gene_track_URL = ''
    gene_track_indexURL = ''

    # Pick hg38 reference/gene tracks for GRCh38 (the MT contig is always
    # displayed against hg38 here); everything else falls back to hg19.
    if chromosome_build == "GRCh38" or chrom == 'M':
        fastaURL = 'https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg38/hg38.fa'
        indexURL = 'https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg38/hg38.fa.fai'
        cytobandURL = 'https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg38/cytoBandIdeo.txt'
        gene_track_format = 'gtf'
        gene_track_URL = 'https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg38/genes/Homo_sapiens.GRCh38.80.sorted.gtf.gz'
        gene_track_indexURL = 'https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg38/genes/Homo_sapiens.GRCh38.80.sorted.gtf.gz.tbi'
    else:
        fastaURL = 'https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg19/hg19.fasta'
        indexURL = 'https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg19/hg19.fasta.fai'
        cytobandURL = 'https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg19/cytoBand.txt'
        gene_track_format = 'bed'
        gene_track_URL = 'https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg19/genes/refGene.hg19.bed.gz'
        gene_track_indexURL = 'https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg19/genes/refGene.hg19.bed.gz.tbi'

    display_obj['reference_track'] = {
        'fastaURL' : fastaURL,
        'indexURL' : indexURL,
        'cytobandURL' : cytobandURL
    }

    display_obj['genes_track'] = {
        'name' : 'Genes',
        'type' : 'annotation',
        'format': gene_track_format,
        'sourceType': 'file',
        'url' : gene_track_URL,
        'indexURL' : gene_track_indexURL,
        'displayMode' : 'EXPANDED'
    }

    sample_tracks = []
    counter = 0
    for sample in samples:
        # some samples might not have an associated bam file, take care if this
        if bam_files[counter]:
            sample_tracks.append({
                'name' : sample,
                'url' : bam_files[counter],
                'indexURL' : bai_files[counter],
                'height' : 700,
                'maxHeight' : 2000})
        counter += 1
    display_obj['sample_tracks'] = sample_tracks

    if request.args.get('center_guide'):
        display_obj['display_center_guide'] = True
    else:
        display_obj['display_center_guide'] = False

    return render_template('alignviewers/igv_viewer.html', locus=locus, **display_obj )
Build a disease phenotype object Args: disease_info ( dict ): Dictionary with phenotype information alias_genes ( dict ): { <alias_symbol >: { true: hgnc_id or None ids: [ <hgnc_id >... ] }} Returns: disease_obj ( dict ): Formated for mongodb disease_term = dict ( _id = str # Same as disease_id disease_id = str # required like OMIM: 600233 disase_nr = int # The disease nr required description = str # required source = str # required inheritance = list # list of strings genes = list # List with integers that are hgnc_ids hpo_terms = list # List with str that are hpo_terms )
def build_disease_term(disease_info, alias_genes=None):
    """Build a disease phenotype object

    Args:
        disease_info(dict): Dictionary with phenotype information
        alias_genes(dict): {<alias_symbol>: {'true': hgnc_id or None,
                                             'ids': [<hgnc_id>, ...]}}

    Returns:
        disease_obj(dict): Formatted for mongodb
            disease_term = dict(
                _id = str, # Same as disease_id
                disease_id = str, # required, like OMIM:600233
                disease_nr = int, # The disease nr, required
                description = str, # required
                source = str, # required
                inheritance = list, # list of strings
                genes = list, # List with integers that are hgnc_ids
                hpo_terms = list, # List with str that are hpo_terms
            )

    Raises:
        KeyError: if the disease number or description is missing or invalid
    """
    # Fix: avoid a shared mutable default argument; treat missing as empty
    alias_genes = alias_genes if alias_genes is not None else {}

    try:
        disease_nr = int(disease_info['mim_number'])
    except KeyError:
        raise KeyError("Diseases has to have a disease number")
    except ValueError:
        # NOTE: a non-numeric disease number is deliberately reported as a
        # KeyError to keep the original error contract for callers
        raise KeyError("Diseases nr has to be integer")

    disease_id = "{0}:{1}".format('OMIM', disease_nr)
    LOG.debug("Building disease term %s", disease_id)

    try:
        description = disease_info['description']
    except KeyError:
        raise KeyError("Diseases has to have a description")

    disease_obj = DiseaseTerm(
        disease_id=disease_id,
        disease_nr=disease_nr,
        description=description,
        source='OMIM',
    )

    # Check if there was any inheritance information
    inheritance_models = disease_info.get('inheritance')
    if inheritance_models:
        disease_obj['inheritance'] = list(inheritance_models)

    # Translate gene symbols to hgnc ids, resolving aliases when possible
    hgnc_ids = set()
    for hgnc_symbol in disease_info.get('hgnc_symbols', []):
        ## TODO need to consider genome build here?
        if hgnc_symbol in alias_genes:
            # If the symbol identifies a unique gene we add that
            if alias_genes[hgnc_symbol]['true']:
                hgnc_ids.add(alias_genes[hgnc_symbol]['true'])
            else:
                # Ambiguous alias: keep every candidate hgnc id
                for hgnc_id in alias_genes[hgnc_symbol]['ids']:
                    hgnc_ids.add(hgnc_id)
        else:
            LOG.debug("Gene symbol %s could not be found in database", hgnc_symbol)

    disease_obj['genes'] = list(hgnc_ids)

    if 'hpo_terms' in disease_info:
        disease_obj['hpo_terms'] = list(disease_info['hpo_terms'])

    return disease_obj
Load all the exons Transcript information is from ensembl. Check that the transcript that the exon belongs to exists in the database
def load_exons(adapter, exon_lines, build='37', ensembl_genes=None): """Load all the exons Transcript information is from ensembl. Check that the transcript that the exon belongs to exists in the database Args: adapter(MongoAdapter) exon_lines(iterable): iterable with ensembl exon lines build(str) ensembl_transcripts(dict): Existing ensembl transcripts """ # Fetch all genes with ensemblid as keys ensembl_genes = ensembl_genes or adapter.ensembl_genes(build) hgnc_id_transcripts = adapter.id_transcripts_by_gene(build=build) if isinstance(exon_lines, DataFrame): exons = parse_ensembl_exon_request(exon_lines) nr_exons = exon_lines.shape[0] else: exons = parse_ensembl_exons(exon_lines) nr_exons = 1000000 start_insertion = datetime.now() loaded_exons = 0 LOG.info("Loading exons...") with progressbar(exons, label="Loading exons", length=nr_exons) as bar: for exon in bar: ensg_id = exon['gene'] enst_id = exon['transcript'] gene_obj = ensembl_genes.get(ensg_id) if not gene_obj: continue hgnc_id = gene_obj['hgnc_id'] if not enst_id in hgnc_id_transcripts[hgnc_id]: continue exon['hgnc_id'] = hgnc_id exon_obj = build_exon(exon, build) adapter.load_exon(exon_obj) loaded_exons += 1 LOG.info('Number of exons in build {0}: {1}'.format(build, nr_exons)) LOG.info('Number loaded: {0}'.format(loaded_exons)) LOG.info('Time to load exons: {0}'.format(datetime.now() - start_insertion))
Return a parsed variant
def parse_variant(variant, case, variant_type='clinical',
                  rank_results_header=None, vep_header=None,
                  individual_positions=None, category=None):
    """Return a parsed variant

    Get all the necessary information to build a variant object

    Args:
        variant(cyvcf2.Variant)
        case(dict)
        variant_type(str): 'clinical' or 'research'
        rank_results_header(list)
        vep_header(list)
        individual_positions(dict): Explain what position each individual has
                                    in vcf
        category(str): 'snv', 'sv', 'str' or 'cancer'

    Returns:
        parsed_variant(dict): Parsed variant
    """
    # These are to display how the rank score is built
    rank_results_header = rank_results_header or []
    # Vep information
    vep_header = vep_header or []

    parsed_variant = {}

    # Create the ID for the variant
    case_id = case['_id']
    # genmod annotations (RankScore, Compounds, GeneticModels) are keyed on the
    # family id; internal case ids contain a '-' and use display_name instead
    if '-' in case_id:
        logger.debug('internal case id detected')
        genmod_key = case['display_name']
    else:
        genmod_key = case['_id']

    # Strip any 'chr' prefix from the chromosome name
    chrom_match = CHR_PATTERN.match(variant.CHROM)
    chrom = chrom_match.group(2)

    # Builds a dictionary with the different ids that are used
    # NOTE(review): if variant.ALT is empty and category is not 'str', `alt`
    # stays unbound and parse_ids below raises UnboundLocalError — confirm
    # that upstream always supplies an ALT for non-STR variants
    if variant.ALT:
        alt=variant.ALT[0]
    elif not variant.ALT and category == "str":
        alt='.'

    parsed_variant['ids'] = parse_ids(
        chrom=chrom,
        pos=variant.POS,
        ref=variant.REF,
        alt=alt,
        case_id=case_id,
        variant_type=variant_type,
    )
    parsed_variant['case_id'] = case_id
    # type can be 'clinical' or 'research'
    parsed_variant['variant_type'] = variant_type
    # category is sv or snv
    # cyvcf2 knows if it is a sv, indel or snv variant
    if not category:
        category = variant.var_type
        if category == 'indel':
            category = 'snv'
        if category == 'snp':
            category = 'snv'
    parsed_variant['category'] = category

    ################# General information #################
    parsed_variant['reference'] = variant.REF
    ### We allways assume splitted and normalized vcfs!!!
    if len(variant.ALT) > 1:
        raise VcfError("Variants are only allowed to have one alternative")
    parsed_variant['alternative'] = alt
    # cyvcf2 will set QUAL to None if '.' in vcf
    parsed_variant['quality'] = variant.QUAL
    if variant.FILTER:
        parsed_variant['filters'] = variant.FILTER.split(';')
    else:
        # cyvcf2 reports a PASS filter as None
        parsed_variant['filters'] = ['PASS']
    # Add the dbsnp ids
    parsed_variant['dbsnp_id'] = variant.ID
    # This is the id of other position in translocations
    # (only for specific svs)
    parsed_variant['mate_id'] = None

    ################# Position specific #################
    parsed_variant['chromosome'] = chrom
    coordinates = parse_coordinates(variant, category)
    parsed_variant['position'] = coordinates['position']
    parsed_variant['sub_category'] = coordinates['sub_category']
    parsed_variant['mate_id'] = coordinates['mate_id']
    parsed_variant['end'] = coordinates['end']
    parsed_variant['length'] = coordinates['length']
    parsed_variant['end_chrom'] = coordinates['end_chrom']
    parsed_variant['cytoband_start'] = coordinates['cytoband_start']
    parsed_variant['cytoband_end'] = coordinates['cytoband_end']

    ################# Add rank score #################
    # The rank score is central for displaying variants in scout.
    rank_score = parse_rank_score(variant.INFO.get('RankScore', ''), genmod_key)
    parsed_variant['rank_score'] = rank_score or 0

    ################# Add gt calls #################
    if individual_positions and case['individuals']:
        parsed_variant['samples'] = parse_genotypes(variant, case['individuals'],
                                                    individual_positions)
    else:
        parsed_variant['samples'] = []

    ################# Add compound information #################
    compounds = parse_compounds(compound_info=variant.INFO.get('Compounds'),
                                case_id=genmod_key,
                                variant_type=variant_type)
    if compounds:
        parsed_variant['compounds'] = compounds

    ################# Add inheritance patterns #################
    genetic_models = parse_genetic_models(variant.INFO.get('GeneticModels'), genmod_key)
    if genetic_models:
        parsed_variant['genetic_models'] = genetic_models

    ################# Add autozygosity calls if present #################
    azlength = variant.INFO.get('AZLENGTH')
    if azlength:
        parsed_variant['azlength'] = int(azlength)
    azqual = variant.INFO.get('AZQUAL')
    if azqual:
        parsed_variant['azqual'] = float(azqual)

    ################ Add STR info if present ################
    # repeat id generally corresponds to gene symbol
    repeat_id = variant.INFO.get('REPID')
    if repeat_id:
        parsed_variant['str_repid'] = str(repeat_id)
    # repeat unit - used e g in PanelApp naming of STRs
    repeat_unit = variant.INFO.get('RU')
    if repeat_unit:
        parsed_variant['str_ru'] = str(repeat_unit)
    # repeat ref - reference copy number
    repeat_ref = variant.INFO.get('REF')
    if repeat_ref:
        parsed_variant['str_ref'] = int(repeat_ref)
    # repeat len - number of repeats found in case
    repeat_len = variant.INFO.get('RL')
    if repeat_len:
        parsed_variant['str_len'] = int(repeat_len)
    # str status - this indicates the severity of the expansion level
    str_status = variant.INFO.get('STR_STATUS')
    if str_status:
        parsed_variant['str_status'] = str(str_status)

    ################# Add gene and transcript information #################
    raw_transcripts = []
    if vep_header:
        vep_info = variant.INFO.get('CSQ')
        if vep_info:
            # One CSQ entry per transcript; map the pipe-separated values
            # onto the VEP header fields
            raw_transcripts = (dict(zip(vep_header, transcript_info.split('|')))
                               for transcript_info in vep_info.split(','))

    parsed_transcripts = []
    dbsnp_ids = set()
    cosmic_ids = set()
    for parsed_transcript in parse_transcripts(raw_transcripts,
                                               parsed_variant['alternative']):
        parsed_transcripts.append(parsed_transcript)
        for dbsnp in parsed_transcript.get('dbsnp', []):
            dbsnp_ids.add(dbsnp)
        for cosmic in parsed_transcript.get('cosmic', []):
            cosmic_ids.add(cosmic)

    # The COSMIC tag in INFO is added via VEP and/or bcftools annotate
    cosmic_tag = variant.INFO.get('COSMIC')
    if cosmic_tag:
        # NOTE(review): the [4:] slice presumably drops a 'COSM'-style prefix
        # from the tag — confirm against the annotation source
        cosmic_ids.add(cosmic_tag[4:])

    # Fall back to transcript-level dbsnp ids when the VCF ID column was empty
    if (dbsnp_ids and not parsed_variant['dbsnp_id']):
        parsed_variant['dbsnp_id'] = ';'.join(dbsnp_ids)

    if cosmic_ids:
        parsed_variant['cosmic_ids'] = list(cosmic_ids)

    gene_info = parse_genes(parsed_transcripts)
    parsed_variant['genes'] = gene_info

    hgnc_ids = set([])
    for gene in parsed_variant['genes']:
        hgnc_ids.add(gene['hgnc_id'])
    parsed_variant['hgnc_ids'] = list(hgnc_ids)

    ################# Add clinsig prediction #################
    # Accession key differs between ClinVar annotation versions
    if variant.INFO.get('CLNACC'):
        acc = variant.INFO.get('CLNACC')
    else:
        acc = variant.INFO.get('CLNVID')
    clnsig_predictions = parse_clnsig(
        acc=acc,
        sig=variant.INFO.get('CLNSIG'),
        revstat=variant.INFO.get('CLNREVSTAT'),
        transcripts=parsed_transcripts
    )
    if clnsig_predictions:
        parsed_variant['clnsig'] = clnsig_predictions

    ################# Add the frequencies #################
    frequencies = parse_frequencies(variant, parsed_transcripts)
    parsed_variant['frequencies'] = frequencies

    # parse out old local observation count
    local_obs_old = variant.INFO.get('Obs')
    if local_obs_old:
        parsed_variant['local_obs_old'] = int(local_obs_old)
    local_obs_hom_old = variant.INFO.get('Hom')
    if local_obs_hom_old:
        parsed_variant['local_obs_hom_old'] = int(local_obs_hom_old)

    ###################### Add severity predictions ######################
    cadd = parse_cadd(variant, parsed_transcripts)
    if cadd:
        parsed_variant['cadd_score'] = cadd
    spidex = variant.INFO.get('SPIDEX')
    if spidex:
        parsed_variant['spidex'] = float(spidex)

    ###################### Add conservation ######################
    parsed_variant['conservation'] = parse_conservations(variant)
    parsed_variant['callers'] = parse_callers(variant, category=category)

    # RankResult holds the per-category contributions to the rank score
    rank_result = variant.INFO.get('RankResult')
    if rank_result:
        results = [int(i) for i in rank_result.split('|')]
        parsed_variant['rank_result'] = dict(zip(rank_results_header, results))

    ###################### Add SV specific annotations ######################
    sv_frequencies = parse_sv_frequencies(variant)
    for key in sv_frequencies:
        parsed_variant['frequencies'][key] = sv_frequencies[key]

    ###################### Add Cancer specific annotations ######################
    # MSK_MVL indicates if variants are in the MSK managed variant list
    # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5437632/
    mvl_tag = variant.INFO.get('MSK_MVL')
    if mvl_tag:
        parsed_variant['mvl_tag'] = True

    return parsed_variant
Update all compounds for a case
def compounds(context, case_id):
    """Update all compounds for a case."""
    adapter = context.obj['adapter']
    LOG.info("Running scout update compounds")

    # Verify that the requested case exists before doing any work
    case_record = adapter.case(case_id)
    if not case_record:
        LOG.warning("Case %s could not be found", case_id)
        context.abort()

    try:
        adapter.update_case_compounds(case_record)
    except Exception as err:
        # Report the failure and exit with a non-zero status
        LOG.warning(err)
        context.abort()
Update a gene object with links
def add_gene_links(gene_obj, build=37):
    """Update a gene object with links

    Mutates gene_obj in place, adding one '<resource>_link' entry per
    external resource, and also returns it for convenience.

    Args:
        gene_obj(dict)
        build(int): genome build, 37 or 38 (anything unparsable falls back to 37)

    Returns:
        gene_obj(dict): gene_obj updated with many links
    """
    try:
        build = int(build)
    except ValueError:
        build = 37
    # Add links that use the hgnc_id
    hgnc_id = gene_obj['hgnc_id']
    gene_obj['hgnc_link'] = genenames(hgnc_id)
    # (The previous version also set omim_link from hgnc_id here; that value
    # was always overwritten by the omim_id-based link below, so it is removed.)

    # Add links that use ensembl_id
    if not 'ensembl_id' in gene_obj:
        ensembl_id = gene_obj.get('common',{}).get('ensembl_id')
    else:
        ensembl_id = gene_obj['ensembl_id']

    ensembl_37_link = ensembl(ensembl_id, build=37)
    ensembl_38_link = ensembl(ensembl_id, build=38)
    gene_obj['ensembl_37_link'] = ensembl_37_link
    gene_obj['ensembl_38_link'] = ensembl_38_link
    # 'ensembl_link' points at the link matching the requested build
    gene_obj['ensembl_link'] = ensembl_37_link
    if build == 38:
        gene_obj['ensembl_link'] = ensembl_38_link
    gene_obj['hpa_link'] = hpa(ensembl_id)
    gene_obj['string_link'] = string(ensembl_id)
    gene_obj['reactome_link'] = reactome(ensembl_id)
    gene_obj['clingen_link'] = clingen(hgnc_id)
    gene_obj['expression_atlas_link'] = expression_atlas(ensembl_id)
    gene_obj['exac_link'] = exac(ensembl_id)
    # Add links that use entrez_id
    gene_obj['entrez_link'] = entrez(gene_obj.get('entrez_id'))
    # Add links that use omim id
    gene_obj['omim_link'] = omim(gene_obj.get('omim_id'))
    # Add links that use hgnc_symbol
    gene_obj['ppaint_link'] = ppaint(gene_obj['hgnc_symbol'])
    # Add links that use vega_id
    gene_obj['vega_link'] = vega(gene_obj.get('vega_id'))
    # Add links that use ucsc link
    gene_obj['ucsc_link'] = ucsc(gene_obj.get('ucsc_id'))
    return gene_obj
Query the hgnc aliases
def hgnc(ctx, hgnc_symbol, hgnc_id, build):
    """Query the hgnc aliases"""
    adapter = ctx.obj['adapter']

    # At least one way of identifying the gene has to be provided
    if not (hgnc_symbol or hgnc_id):
        log.warning("Please provide a hgnc symbol or hgnc id")
        ctx.abort()

    # Resolve a numeric id to its current symbol before the alias search
    if hgnc_id:
        gene_obj = adapter.hgnc_gene(hgnc_id, build=build)
        if not gene_obj:
            log.warning("Gene with id %s could not be found", hgnc_id)
            ctx.abort()
        hgnc_symbol = gene_obj['hgnc_symbol']

    result = adapter.hgnc_genes(hgnc_symbol, build=build)
    if result.count() == 0:
        log.info("No results found")
        return

    # Print one tab-separated row per matching gene
    click.echo("#hgnc_id\thgnc_symbol\taliases\ttranscripts")
    for gene in result:
        click.echo("{0}\t{1}\t{2}\t{3}".format(
            gene['hgnc_id'],
            gene['hgnc_symbol'],
            ', '.join(gene['aliases']),
            ', '.join(tx['ensembl_transcript_id'] for tx in gene['transcripts']),
        ))
Parse an hgnc formated line
def parse_hgnc_line(line, header):
    """Parse one line of the HGNC gene dump.

    Args:
        line(str): a tab-separated line with hgnc gene info
        header(list): the column names of the dump

    Returns:
        hgnc_gene(dict): the relevant info, or an empty dict for
            withdrawn genes
    """
    hgnc_gene = {}
    raw_info = dict(zip(header, line.rstrip().split('\t')))

    # Withdrawn genes are skipped entirely
    if 'Withdrawn' in raw_info['status']:
        return hgnc_gene

    hgnc_symbol = raw_info['symbol']
    hgnc_gene['hgnc_symbol'] = hgnc_symbol
    hgnc_gene['hgnc_id'] = int(raw_info['hgnc_id'].split(':')[-1])
    hgnc_gene['description'] = raw_info['name']

    # Collect aliases: the current symbol (plus its upper-case form),
    # all previous symbols and all alias symbols
    aliases = {hgnc_symbol, hgnc_symbol.upper()}
    for alias_field in ('prev_symbol', 'alias_symbol'):
        raw_aliases = raw_info[alias_field]
        if raw_aliases:
            aliases.update(raw_aliases.strip('"').split('|'))
    hgnc_gene['previous_symbols'] = list(aliases)

    # We need the ensembl_gene_id to link the genes with ensembl
    hgnc_gene['ensembl_gene_id'] = raw_info.get('ensembl_gene_id')

    omim_id = raw_info.get('omim_id')
    hgnc_gene['omim_id'] = int(omim_id.strip('"').split('|')[0]) if omim_id else None

    entrez_id = raw_info.get('entrez_id')
    hgnc_gene['entrez_id'] = int(entrez_id) if entrez_id else None

    # These are the primary transcripts according to HGNC
    ref_seq = raw_info.get('refseq_accession')
    hgnc_gene['ref_seq'] = ref_seq.strip('"').split('|') if ref_seq else []

    uniprot_ids = raw_info.get('uniprot_ids')
    hgnc_gene['uniprot_ids'] = uniprot_ids.strip('""').split('|') if uniprot_ids else []

    hgnc_gene['ucsc_id'] = raw_info.get('ucsc_id') or None
    hgnc_gene['vega_id'] = raw_info.get('vega_id') or None

    return hgnc_gene
Parse lines with hgnc formated genes
def parse_hgnc_genes(lines):
    """Parse lines with hgnc formated genes

    This is designed to take a dump with genes from HGNC.
    This is downloaded from:
    ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/hgnc_complete_set.txt

    Args:
        lines(iterable(str)): An iterable with HGNC formated genes

    Yields:
        hgnc_gene(dict): A dictionary with the relevant information
    """
    header = []
    logger.info("Parsing hgnc genes...")
    for index, line in enumerate(lines):
        # The first line holds the column names
        if index == 0:
            header = line.split('\t')
            continue
        # Skip (near-)empty lines
        if len(line) <= 1:
            continue
        hgnc_gene = parse_hgnc_line(line=line, header=header)
        # Withdrawn genes come back as an empty dict and are not yielded
        if hgnc_gene:
            yield hgnc_gene
Create an open clinvar submission for a user and an institute Args: user_id ( str ): a user ID institute_id ( str ): an institute ID
def create_submission(self, user_id, institute_id):
    """Create an open clinvar submission for a user and an institute

    Args:
        user_id(str): a user ID
        institute_id(str): an institute ID

    Returns:
        the inserted submission's database id
    """
    LOG.info("Creating a new clinvar submission for user '%s' and institute %s", user_id, institute_id)
    # A brand new submission always starts out in the 'open' state
    submission_obj = {
        'status': 'open',
        'created_at': datetime.now(),
        'user_id': user_id,
        'institute_id': institute_id,
    }
    result = self.clinvar_submission_collection.insert_one(submission_obj)
    return result.inserted_id
Deletes a Clinvar submission object along with all associated clinvar objects ( variants and casedata )
def delete_submission(self, submission_id):
    """Deletes a Clinvar submission object, along with all associated clinvar objects (variants and casedata)

    Args:
        submission_id(str): the ID of the submission to be deleted

    Returns:
        deleted_objects(int): the number of associated objects removed (variants and/or casedata)
        deleted_submissions(int): 1 if it's deleted, 0 if something went wrong
    """
    LOG.info("Deleting clinvar submission %s", submission_id)
    submission_obj = self.clinvar_submission_collection.find_one({'_id': ObjectId(submission_id)})

    # Collect every variant and casedata object referenced by this submission;
    # either list may be missing
    submission_objects = ((submission_obj.get('variant_data') or [])
                          + (submission_obj.get('case_data') or []))

    # Delete all variants and casedata objects associated with this submission
    deleted_objects = self.clinvar_collection.delete_many(
        {'_id': {"$in": submission_objects}}).deleted_count

    # Delete the submission itself
    deleted_submissions = self.clinvar_submission_collection.delete_one(
        {'_id': ObjectId(submission_id)}).deleted_count

    return deleted_objects, deleted_submissions
Retrieve the database id of an open clinvar submission for a user and institute if none is available then create a new submission and return it
def get_open_clinvar_submission(self, user_id, institute_id):
    """Retrieve the database id of an open clinvar submission for a user and institute,
    if none is available then create a new submission and return it

    Args:
        user_id(str): a user ID
        institute_id(str): an institute ID

    Returns:
        submission(obj): an open clinvar submission object
    """
    LOG.info("Retrieving an open clinvar submission for user '%s' and institute %s", user_id, institute_id)
    lookup = dict(user_id=user_id, institute_id=institute_id, status='open')
    submission = self.clinvar_submission_collection.find_one(lookup)
    if submission is not None:
        return submission

    # No open submission for this user and institute yet: start a fresh one
    new_submission_id = self.create_submission(user_id, institute_id)
    return self.clinvar_submission_collection.find_one({'_id': new_submission_id})
saves an official clinvar submission ID in a clinvar submission object
def update_clinvar_id(self, clinvar_id, submission_id):
    """saves an official clinvar submission ID in a clinvar submission object

    Args:
        clinvar_id(str): a string with a format: SUB[0-9]. It is obtained from
            the clinvar portal when starting a new submission
        submission_id(str): id of the submission to be updated

    Returns:
        updated_submission(obj): a clinvar submission object, updated
    """
    # Record the portal-assigned id and bump the modification timestamp
    new_values = {'clinvar_subm_id': clinvar_id, 'updated_at': datetime.now()}
    return self.clinvar_submission_collection.find_one_and_update(
        {'_id': ObjectId(submission_id)},
        {'$set': new_values},
        upsert=True,
        return_document=pymongo.ReturnDocument.AFTER,
    )
Returns the official Clinvar submission ID for a submission object
def get_clinvar_id(self, submission_id):
    """Returns the official Clinvar submission ID for a submission object

    Args:
        submission_id(str): id of the submission

    Returns:
        clinvar_subm_id(str): a string with a format: SUB[0-9], as obtained
            from the clinvar portal when starting a new submission
    """
    submission_obj = self.clinvar_submission_collection.find_one(
        {'_id': ObjectId(submission_id)})
    # The key is absent (-> None) until the user has registered an official ID
    return submission_obj.get('clinvar_subm_id')
Adds submission_objects to the clinvar collection and updates the corresponding submission object with their ids
def add_to_submission(self, submission_id, submission_objects):
    """Adds submission_objects to clinvar collection and update the
    coresponding submission object with their id

    Args:
        submission_id(str): id of the submission to be updated
        submission_objects(tuple): a tuple of 2 elements coresponding to a
            list of variants and a list of case data objects to add to
            submission

    Returns:
        updated_submission(obj): an open clinvar submission object, updated
    """
    LOG.info("Adding new variants and case data to clinvar submission '%s'", submission_id)
    # Insert variant submission_objects into clinvar collection
    # Loop over the objects
    for var_obj in submission_objects[0]:
        try:
            result = self.clinvar_collection.insert_one(var_obj)
            # NOTE(review): unlike the other methods in this class, the
            # submission is matched on the raw submission_id without an
            # ObjectId() wrap — confirm callers pass the expected type here
            self.clinvar_submission_collection.update_one({'_id':submission_id}, {'$push': { 'variant_data' : str(result.inserted_id) }}, upsert=True)
        except pymongo.errors.DuplicateKeyError:
            # Duplicate variants are skipped, not fatal
            LOG.error("Attepted to insert a clinvar variant which is already in DB!")

    # Insert casedata submission_objects into clinvar collection
    if submission_objects[1]:
        # Loop over the objects
        for case_obj in submission_objects[1]:
            try:
                result = self.clinvar_collection.insert_one(case_obj)
                self.clinvar_submission_collection.update_one({'_id':submission_id}, {'$push': { 'case_data': str(result.inserted_id)}}, upsert=True)
            except pymongo.errors.DuplicateKeyError:
                # Duplicate casedata objects are skipped as well
                LOG.error("One or more casedata object is already present in clinvar collection!")

    # Bump the modification timestamp and return the refreshed submission
    updated_submission = self.clinvar_submission_collection.find_one_and_update(
        {'_id':submission_id},
        { '$set' : {'updated_at': datetime.now()} },
        return_document=pymongo.ReturnDocument.AFTER
    )
    return updated_submission
Set a clinvar submission ID to closed
def update_clinvar_submission_status(self, user_id, submission_id, status):
    """Set the status of a clinvar submission ('open' or 'closed')

    Since a user may only have one open submission at a time, opening a
    submission first closes every other submission belonging to that user.
    Closing a submission does not affect the user's other submissions.

    Args:
        user_id(str): the owner of the submission
        submission_id(str): the ID of the clinvar submission to update
        status(str): the new status, 'open' or 'closed'

    Returns:
        updated_submission(obj): the submission object with the new status
    """
    LOG.info('closing clinvar submission "%s"', submission_id)
    if status == 'open':
        # Close all other submissions for this user and then open the desired one
        self.clinvar_submission_collection.update_many(
            {'user_id' : user_id},
            {'$set' : {'status' : 'closed', 'updated_at' : datetime.now()} }
        )
    updated_submission = self.clinvar_submission_collection.find_one_and_update(
        {'_id' : ObjectId(submission_id)},
        {'$set' : {'status' : status, 'updated_at' : datetime.now()} },
        return_document=pymongo.ReturnDocument.AFTER
    )
    return updated_submission
Collect all open and closed clinvar submission created by a user for an institute
def clinvar_submissions(self, user_id, institute_id):
    """Collect all open and closed clinvar submission created by a user for an institute

    Args:
        user_id(str): a user ID
        institute_id(str): an institute ID

    Returns:
        submissions(list): a list of clinvar submission objects
    """
    LOG.info("Retrieving all clinvar submissions for user '%s', institute '%s'", user_id, institute_id)
    query = dict(user_id=user_id, institute_id=institute_id)

    submissions = []
    for result in self.clinvar_submission_collection.find(query):
        # Copy the core fields; missing ones come through as None
        submission = {
            '_id': result.get('_id'),
            'status': result.get('status'),
            'user_id': result.get('user_id'),
            'institute_id': result.get('institute_id'),
            'created_at': result.get('created_at'),
            'updated_at': result.get('updated_at'),
        }
        if 'clinvar_subm_id' in result:
            submission['clinvar_subm_id'] = result['clinvar_subm_id']
        # Resolve the referenced variant and casedata objects, when present
        if result.get('variant_data'):
            submission['variant_data'] = self.clinvar_collection.find(
                {'_id': { "$in": result['variant_data'] } })
        if result.get('case_data'):
            submission['case_data'] = self.clinvar_collection.find(
                {'_id' : { "$in": result['case_data'] } })
        submissions.append(submission)
    return submissions
Collects a list of objects from the clinvar collection (variants or case data) as specified by the key_id in the clinvar submission
def clinvar_objs(self, submission_id, key_id):
    """Collects a list of objects from the clinvar collection (variants or
    case data) as specified by the key_id in the clinvar submission

    Args:
        submission_id(str): the _id key of a clinvar submission
        key_id(str): either 'variant_data' or 'case_data'. It's a key in a
            clinvar_submission object whose value is a list of ids of clinvar
            objects (either variants or casedata objects)

    Returns:
        clinvar_objects(list): a list of clinvar objects (either variants or
            casedata), or None when the submission has none of that kind
    """
    # Get the submission object holding the id list
    submission = self.clinvar_submission_collection.find_one(
        {'_id': ObjectId(submission_id)})

    clinvar_obj_ids = submission.get(key_id)
    if not clinvar_obj_ids:
        return None
    return list(self.clinvar_collection.find(
        {'_id': {"$in": list(clinvar_obj_ids)}}))
Remove a variant object from clinvar database and update the relative submission object
def delete_clinvar_object(self, object_id, object_type, submission_id):
    """Remove a variant object from clinvar database and update the relative
    submission object

    Args:
        object_id(str): the id of an object to remove from clinvar_collection
            database collection (a variant or a case)
        object_type(str): either 'variant_data' or 'case_data'. It's a key in
            the clinvar_submission object.
        submission_id(str): the _id key of a clinvar submission

    Returns:
        updated_submission(obj): an updated clinvar submission
    """
    LOG.info("Deleting clinvar object %s (%s)", object_id, object_type)

    # If it's a variant object to be removed:
    #   remove reference to it in the submission object 'variant_data' list field
    #   remove the variant object from clinvar collection
    #   remove casedata object from clinvar collection
    #   remove reference to it in the submission object 'case_data' list field
    # if it's a casedata object to be removed:
    #   remove reference to it in the submission object 'case_data' list field
    #   remove casedata object from clinvar collection
    result = ''
    if object_type == 'variant_data':
        # pull out a variant from submission object
        self.clinvar_submission_collection.find_one_and_update(
            {'_id': ObjectId(submission_id)},
            {'$pull': {'variant_data': object_id} })

        variant_object = self.clinvar_collection.find_one( {'_id': object_id} )
        # it's the original ID of the variant in scout, it's linking clinvar
        # variants and casedata objects together
        linking_id = variant_object.get("linking_id")

        # remove any object with that linking_id from clinvar_collection.
        # This removes variant and casedata
        result = self.clinvar_collection.delete_many( {'linking_id': linking_id } )
    else:
        # remove case_data but keep variant in submission
        # delete the object itself from clinvar_collection
        result = self.clinvar_collection.delete_one( {'_id': object_id } )

    # in any case remove reference to it in the submission object
    # 'case_data' list field
    self.clinvar_submission_collection.find_one_and_update(
        {'_id': ObjectId(submission_id)},
        {'$pull': {'case_data': object_id} })

    # NOTE(review): this last lookup matches on the raw submission_id without
    # an ObjectId() wrap, unlike the two calls above — confirm the intended
    # id type with the callers
    updated_submission = self.clinvar_submission_collection.find_one_and_update(
        {'_id':submission_id},
        { '$set' : {'updated_at': datetime.now()} },
        return_document=pymongo.ReturnDocument.AFTER
    )
    return updated_submission
Get all variants included in clinvar submissions for a case
def case_to_clinVars(self, case_id):
    """Get all variants included in clinvar submissions for a case

    Args:
        case_id(str): a case _id

    Returns:
        submitted_vars(dict): keys are variant ids and values are variant
            submission objects
    """
    # Only variant-type clinvar objects (csv_type 'variant') are of interest
    query = dict(case_id=case_id, csv_type='variant')
    return {clinvar_obj.get('local_id'): clinvar_obj
            for clinvar_obj in self.clinvar_collection.find(query)}
Parse an hpo phenotype line. Args: hpo_line (str): a single tab-separated hpo phenotype line. Returns: hpo_info (dict)
def parse_hpo_phenotype(hpo_line):
    """Parse a single hpo phenotype line.

    Args:
        hpo_line(str): one tab-separated line from the hpo phenotypes file

    Returns:
        hpo_info(dict): with keys 'hpo_id', 'description' and 'hgnc_symbol'
    """
    fields = hpo_line.rstrip().split('\t')
    # Column 2 is unused by this parser; the gene symbol sits in column 3
    return {
        'hpo_id': fields[0],
        'description': fields[1],
        'hgnc_symbol': fields[3],
    }
Parse hpo gene information Args: hpo_line ( str ): A iterable with hpo phenotype lines Yields: hpo_info ( dict )
def parse_hpo_gene(hpo_line):
    """Parse hpo gene information

    Args:
        hpo_line(str): one tab-separated line from the hpo genes file

    Returns:
        hpo_info(dict): with keys 'hgnc_symbol', 'description' and 'hpo_id',
            or an empty dict when the line has too few columns
    """
    fields = hpo_line.rstrip().split('\t')
    # Fix: guard on the number of columns, not the raw character count.
    # The previous check (len(hpo_line) > 3) let short malformed lines
    # through and crashed on indexing below.
    if len(fields) < 4:
        return {}
    return {
        'hgnc_symbol': fields[1],
        'description': fields[2],
        'hpo_id': fields[3],
    }
Parse hpo disease line Args: hpo_line ( str )
def parse_hpo_disease(hpo_line):
    """Parse hpo disease line

    Args:
        hpo_line(str): one tab-separated line from the hpo diseases file

    Returns:
        hpo_info(dict): with keys 'source', 'disease_nr', 'hgnc_symbol'
            and 'hpo_term' (the last two may be None)
    """
    fields = hpo_line.rstrip().split('\t')
    # First column looks like '<source>:<number>', e.g. 'OMIM:600233'
    disease = fields[0].split(':')
    return {
        'source': disease[0],
        'disease_nr': int(disease[1]),
        # Trailing columns are optional
        'hgnc_symbol': fields[2] if len(fields) >= 3 else None,
        'hpo_term': fields[3] if len(fields) >= 4 else None,
    }
Parse hpo phenotypes Group the genes that a phenotype is associated to in genes Args: hpo_lines ( iterable ( str )): A file handle to the hpo phenotypes file Returns: hpo_terms ( dict ): A dictionary with hpo_ids as keys and terms as values { <hpo_id >: { hpo_id: str description: str hgnc_symbols: list ( str ) # [ <hgnc_symbol >... ] } }
def parse_hpo_phenotypes(hpo_lines):
    """Parse hpo phenotypes

    Group the genes that a phenotype is associated to in 'hgnc_symbols'

    Args:
        hpo_lines(iterable(str)): A file handle to the hpo phenotypes file

    Returns:
        hpo_terms(dict): A dictionary with hpo_ids as keys and terms as values

        {
            <hpo_id>: {
                'hpo_id':str,
                'description': str,
                'hgnc_symbols': list(str), # [<hgnc_symbol>, ...]
            }
        }
    """
    hpo_terms = {}
    LOG.info("Parsing hpo phenotypes...")
    for index, line in enumerate(hpo_lines):
        # Skip the header line and any empty lines
        if index == 0 or not line:
            continue
        hpo_info = parse_hpo_phenotype(line)
        hpo_term = hpo_info['hpo_id']
        hgnc_symbol = hpo_info['hgnc_symbol']
        term_entry = hpo_terms.get(hpo_term)
        if term_entry:
            # Term already seen: just extend its gene list
            term_entry['hgnc_symbols'].append(hgnc_symbol)
        else:
            hpo_terms[hpo_term] = {
                'hpo_id': hpo_term,
                'description': hpo_info['description'],
                'hgnc_symbols': [hgnc_symbol],
            }
    LOG.info("Parsing done.")
    return hpo_terms
Parse hpo disease phenotypes Args: hpo_lines ( iterable ( str )) Returns: diseases ( dict ): A dictionary with mim numbers as keys
def parse_hpo_diseases(hpo_lines):
    """Parse HPO disease phenotype lines.

    Args:
        hpo_lines(iterable(str))

    Returns:
        diseases(dict): '<source>:<disease_nr>' -> {'disease_nr': int,
            'source': str, 'hgnc_symbols': set, 'hpo_terms': set}
    """
    diseases = {}
    LOG.info("Parsing hpo diseases...")
    for index, line in enumerate(hpo_lines):
        # First line is a header; very short lines carry no data
        if index == 0 or len(line) <= 3:
            continue
        disease_info = parse_hpo_disease(line)
        if not disease_info:
            continue
        disease_id = "{0}:{1}".format(disease_info['source'],
                                      disease_info['disease_nr'])
        # Create the disease entry on first sight, then accumulate
        disease = diseases.setdefault(disease_id, {
            'disease_nr': disease_info['disease_nr'],
            'source': disease_info['source'],
            'hgnc_symbols': set(),
            'hpo_terms': set(),
        })
        if disease_info['hgnc_symbol']:
            disease['hgnc_symbols'].add(disease_info['hgnc_symbol'])
        if disease_info['hpo_term']:
            disease['hpo_terms'].add(disease_info['hpo_term'])
    LOG.info("Parsing done.")
    return diseases
Parse the map from hpo term to hgnc symbol Args: lines ( iterable ( str )): Yields: hpo_to_gene ( dict ): A dictionary with information on how a term map to a hgnc symbol
def parse_hpo_to_genes(hpo_lines):
    """Yield the mapping from HPO term to HGNC symbol.

    Args:
        hpo_lines(iterable(str))

    Yields:
        hpo_to_gene(dict): With keys 'hpo_id' and 'hgnc_symbol'
    """
    for raw_line in hpo_lines:
        # Skip comment lines and empty lines
        if raw_line.startswith('#') or not raw_line:
            continue
        fields = raw_line.rstrip().split('\t')
        yield {
            'hpo_id': fields[0],
            'hgnc_symbol': fields[3],
        }
Parse HPO gene information Args: hpo_lines ( iterable ( str )) Returns: diseases ( dict ): A dictionary with hgnc symbols as keys
def parse_hpo_genes(hpo_lines):
    """Parse HPO gene information.

    Args:
        hpo_lines(iterable(str))

    Returns:
        genes(dict): hgnc symbol -> dict with 'hgnc_symbol' and boolean
            inheritance-model flags ('ad', 'ar', 'mt', 'xd', 'xr', 'x', 'y')
            plus 'incomplete_penetrance'
    """
    LOG.info("Parsing HPO genes ...")
    # Map inheritance-model descriptions to the flag they set on a gene
    inheritance_flags = {
        'Autosomal dominant inheritance': 'ad',
        'Autosomal recessive inheritance': 'ar',
        # Accept the correct spelling as well as the historical misspelling
        # that was previously checked (and could never match real HPO data)
        'Mitochondrial inheritance': 'mt',
        'Mithochondrial inheritance': 'mt',
        'X-linked dominant inheritance': 'xd',
        'X-linked recessive inheritance': 'xr',
        # BUGFIX: the 'x' and 'y' flags were previously swapped
        # (Y-linked set 'x' and X-linked set 'y')
        'X-linked inheritance': 'x',
        'Y-linked inheritance': 'y',
    }
    genes = {}
    for index, line in enumerate(hpo_lines):
        # First line is header
        if index == 0:
            continue
        if len(line) < 5:
            continue
        gene_info = parse_hpo_gene(line)
        hgnc_symbol = gene_info['hgnc_symbol']
        description = gene_info['description']
        gene = genes.setdefault(hgnc_symbol, {'hgnc_symbol': hgnc_symbol})
        if description == 'Incomplete penetrance':
            gene['incomplete_penetrance'] = True
        flag = inheritance_flags.get(description)
        if flag:
            gene[flag] = True
    LOG.info("Parsing done.")
    return genes
Get a set with all genes that have incomplete penetrance according to HPO Args: hpo_lines ( iterable ( str )) Returns: incomplete_penetrance_genes ( set ): A set with the hgnc symbols of all genes with incomplete penetrance
def get_incomplete_penetrance_genes(hpo_lines):
    """Collect all genes flagged with incomplete penetrance by HPO.

    Args:
        hpo_lines(iterable(str))

    Returns:
        incomplete_penetrance_genes(set): hgnc symbols of all genes
            with incomplete penetrance
    """
    genes = parse_hpo_genes(hpo_lines)
    return {
        symbol
        for symbol, info in genes.items()
        if info.get('incomplete_penetrance')
    }
Parse a .obo formatted HPO line
def parse_hpo_obo(hpo_lines):
    """Parse .obo formatted HPO lines, yielding one dict per [Term] stanza.

    Args:
        hpo_lines(iterable(str))

    Yields:
        term(dict): With 'hpo_id', 'description' and optionally
            'aliases' and 'ancestors'
    """
    term = {}
    for raw_line in hpo_lines:
        if not raw_line:
            continue
        line = raw_line.rstrip()
        if line == '[Term]':
            # A new stanza starts: flush the previous term first
            if term:
                yield term
            term = {}
        elif line.startswith('id'):
            term['hpo_id'] = line[4:]
        elif line.startswith('name'):
            term['description'] = line[6:]
        elif line.startswith('alt_id'):
            term.setdefault('aliases', []).append(line[8:])
        elif line.startswith('is_a'):
            # Only the HP identifier (10 chars) is kept from the is_a line
            term.setdefault('ancestors', []).append(line[6:16])
    # Flush the final term
    if term:
        yield term
Render search box for genes.
def genes():
    """Render the gene search box."""
    query = request.args.get('query', '')
    if '|' in query:
        # Autocomplete entries look like "<hgnc_id> | <symbol>"
        hgnc_id = int(query.split(' | ', 1)[0])
        return redirect(url_for('.gene', hgnc_id=hgnc_id))
    gene_query = store.all_genes().limit(20)
    return dict(genes=gene_query)
Render information about a gene.
def gene(hgnc_id=None, hgnc_symbol=None):
    """Render information about a gene."""
    if hgnc_symbol:
        matches = store.hgnc_genes(hgnc_symbol)
        if matches.count() != 1:
            # Ambiguous or unknown symbol: fall back to the search page
            return redirect(url_for('.genes', query=hgnc_symbol))
        hgnc_id = matches.first()['hgnc_id']
    try:
        data = controllers.gene(store, hgnc_id)
    except ValueError:
        return abort(404)
    return data
Return JSON data about genes.
def api_genes():
    """Return JSON data about genes."""
    search_term = request.args.get('query')
    return jsonify(controllers.genes_to_json(store, search_term))
Make sure that the gene panels exist in the database Also check if the default panels are defined in gene panels
def check_panels(adapter, panels, default_panels=None):
    """Verify that all gene panels exist in the database.

    Also check that every default panel is listed among the panels.

    Args:
        adapter(MongoAdapter)
        panels(list(str)): A list with panel names
        default_panels(list(str)): Panels that must be a subset of panels

    Returns:
        panels_exist(bool): True only when all checks pass
    """
    panels_exist = True
    for panel_name in (default_panels or []):
        if panel_name not in panels:
            log.warning("Default panels have to be defined in panels")
            panels_exist = False
    for panel_name in panels:
        # Every panel must be present in the database
        if not adapter.gene_panel(panel_name):
            log.warning("Panel {} does not exist in database".format(panel_name))
            panels_exist = False
    return panels_exist
Load all variants in a region defined by a HGNC id
def load_region(adapter, case_id, hgnc_id=None, chrom=None, start=None, end=None):
    """Load all variants in a region defined by a HGNC id or coordinates.

    Args:
        adapter (MongoAdapter)
        case_id (str): Case id
        hgnc_id (int): If all variants from a gene should be uploaded
        chrom (str): If variants from coordinates should be uploaded
        start (int): Start position for region
        end (int): Stop position for region

    Raises:
        ValueError: If the gene or the case does not exist in the database
    """
    if hgnc_id:
        gene_obj = adapter.hgnc_gene(hgnc_id)
        if not gene_obj:
            # BUGFIX: the ValueError was previously created but never raised
            raise ValueError("Gene {} does not exist in database".format(hgnc_id))
        chrom = gene_obj['chromosome']
        start = gene_obj['start']
        end = gene_obj['end']

    case_obj = adapter.case(case_id=case_id)
    if not case_obj:
        raise ValueError("Case {} does not exist in database".format(case_id))

    def _load(variant_type, category):
        """Log and load one variant category for the region."""
        log.info("Load {0} {1} variants for case: {2} region: chr {3}, start"
                 " {4}, end {5}".format(variant_type, category.upper(),
                                        case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type=variant_type,
                              category=category, chrom=chrom, start=start,
                              end=end)

    _load('clinical', 'snv')

    if case_obj['vcf_files'].get('vcf_sv'):
        _load('clinical', 'sv')

    if case_obj['vcf_files'].get('vcf_str'):
        _load('clinical', 'str')

    if case_obj['is_research']:
        _load('research', 'snv')
        if case_obj['vcf_files'].get('vcf_sv_research'):
            _load('research', 'sv')
Load a new case from a Scout config.
def load_scout(adapter, config, ped=None, update=False):
    """Load a new case from a Scout config.

    Args:
        adapter(MongoAdapter)
        config(dict): loading info
        ped(Iterable(str)): Pedigree information
        update(bool): If existing case should be updated

    Raises:
        ConfigError: When a configured panel is missing from the database
    """
    log.info("Check that the panels exists")
    panels_ok = check_panels(adapter, config.get('gene_panels', []),
                             config.get('default_gene_panels'))
    if not panels_ok:
        raise ConfigError("Some panel(s) does not exist in the database")
    return adapter.load_case(config, update=update)
Template decorator.
def templated(template=None):
    """Template decorator.

    Renders the wrapped view's returned dict into a template; a non-dict
    return value is passed through untouched.

    Ref: http://flask.pocoo.org/docs/patterns/viewdecorators/
    """
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(*args, **kwargs):
            name = template
            if name is None:
                # Derive the template path from the endpoint name
                name = request.endpoint.replace('.', '/') + '.html'
            ctx = view_func(*args, **kwargs)
            if ctx is None:
                ctx = {}
            elif not isinstance(ctx, dict):
                # Responses (redirects etc.) pass straight through
                return ctx
            return render_template(name, **ctx)
        return wrapper
    return decorator
Fetch institute and case objects.
def institute_and_case(store, institute_id, case_name=None):
    """Fetch institute and (optionally) case objects.

    Aborts with 404 when institute/case is missing and 403 when the
    current user lacks access to the institute.

    Returns:
        (institute_obj, case_obj) when case_name is given,
        otherwise institute_obj alone.
    """
    institute_obj = store.institute(institute_id)
    if institute_obj is None and institute_id != 'favicon.ico':
        flash("Can't find institute: {}".format(institute_id), 'warning')
        return abort(404)

    # BUGFIX/cleanup: removed a duplicated nested "if case_name" check and
    # made sure case_obj is always defined before the access check below
    case_obj = None
    if case_name:
        case_obj = store.case(institute_id=institute_id, display_name=case_name)
        if case_obj is None:
            return abort(404)

    # validate that user has access to the institute
    if not current_user.is_admin:
        if institute_id not in current_user.institutes:
            if not case_name or not any(inst_id in case_obj['collaborators']
                                        for inst_id in current_user.institutes):
                # you don't have access!!
                # BUGFIX: corrected "acccess" typo in the user-facing message
                flash("You don't have access to: {}".format(institute_id), 'danger')
                return abort(403)

    # you have access!
    if case_name:
        return institute_obj, case_obj
    return institute_obj
Preprocess institute objects.
def user_institutes(store, login_user):
    """Return the institute objects a user may access.

    Admins see every institute; other users see only their own.
    """
    if login_user.is_admin:
        return store.institutes()
    return [store.institute(inst_id) for inst_id in login_user.institutes]
Get the hgnc id for a gene
def get_hgnc_id(gene_info, adapter):
    """Get the hgnc id for a gene.

    The priority order is:

        1. If there is a hgnc id, that one is chosen
        2. If the hgnc symbol matches a gene's proper hgnc symbol,
           that gene is chosen
        3. If the symbol only matches aliases on several genes, one
           of them is chosen

    Args:
        gene_info(dict)
        adapter

    Returns:
        true_id(int)

    Raises:
        Exception: When no gene can be found for the symbol
    """
    hgnc_id = gene_info.get('hgnc_id')
    hgnc_symbol = gene_info.get('hgnc_symbol')

    if hgnc_id:
        return int(hgnc_id)

    gene_result = adapter.hgnc_genes(hgnc_symbol)
    if gene_result.count() == 0:
        raise Exception("No gene could be found for {}".format(hgnc_symbol))

    exact_id = None
    alias_id = None
    for gene in gene_result:
        if hgnc_symbol.upper() == gene.hgnc_symbol.upper():
            exact_id = gene.hgnc_id
        # Remember some matching gene as a fallback (alias-only match)
        alias_id = gene.hgnc_id

    # BUGFIX: the exact-symbol match was previously overwritten by the
    # alias fallback inside the loop (and gene_info['hgnc_id'] could
    # raise KeyError); now the documented priority order is honoured
    return exact_id if exact_id is not None else alias_id
Update a panel in the database
def panel(context, panel, version, update_date, update_version):
    """Update a panel in the database."""
    adapter = context.obj['adapter']

    # The panel must exist before it can be updated
    existing_panel = adapter.gene_panel(panel, version=version)
    if not existing_panel:
        LOG.warning("Panel %s (version %s) could not be found" % (panel, version))
        context.abort()

    parsed_date = None
    if update_date:
        try:
            parsed_date = get_date(update_date)
        except Exception as error:
            LOG.warning(error)
            context.abort()

    update_panel(adapter, panel, panel_version=existing_panel['version'],
                 new_version=update_version, new_date=parsed_date)
Update disease terms in mongo database.
def diseases(context, api_key):
    """Update disease terms in mongo database."""
    adapter = context.obj['adapter']

    # Prefer an explicitly supplied key, fall back on the configured one
    omim_key = api_key or context.obj.get('omim_api_key')
    if not omim_key:
        LOG.warning("Please provide a omim api key to load the omim gene panel")
        context.abort()

    try:
        mim_files = fetch_mim_files(omim_key, genemap2=True)
    except Exception as error:
        LOG.warning(error)
        context.abort()

    LOG.info("Dropping DiseaseTerms")
    adapter.disease_term_collection.drop()
    LOG.debug("DiseaseTerms dropped")

    load_disease_terms(adapter=adapter, genemap_lines=mim_files['genemap2'])
    LOG.info("Successfully loaded all disease terms")
Load the HPO terms and HPO diseases into the database. Args: adapter (MongoAdapter); disease_lines (iterable(str)): the OMIM genemap2 information; hpo_disease_lines (iterable(str)); hpo_lines (iterable(str)); hpo_gene_lines (iterable(str))
def load_hpo(adapter, disease_lines, hpo_disease_lines=None, hpo_lines=None,
             hpo_gene_lines=None):
    """Load the hpo terms and hpo diseases into database.

    Args:
        adapter(MongoAdapter)
        disease_lines(iterable(str)): The omim genemap2 information
        hpo_disease_lines(iterable(str)): HPO phenotype-to-disease lines
        hpo_lines(iterable(str)): HPO obo term lines
        hpo_gene_lines(iterable(str)): HPO term-to-gene lines
    """
    # Map gene aliases to gene objects once; both loaders need it
    alias_genes = adapter.genes_by_alias()

    # Fetch each resource when no file was provided
    hpo_lines = hpo_lines or fetch_hpo_terms()
    hpo_gene_lines = hpo_gene_lines or fetch_hpo_to_genes()
    hpo_disease_lines = hpo_disease_lines or fetch_hpo_phenotype_to_terms()

    load_hpo_terms(adapter, hpo_lines, hpo_gene_lines, alias_genes)
    load_disease_terms(adapter, disease_lines, alias_genes, hpo_disease_lines)
Load the hpo terms into the database Parse the hpo lines build the objects and add them to the database Args: adapter ( MongoAdapter ) hpo_lines ( iterable ( str )) hpo_gene_lines ( iterable ( str ))
def load_hpo_terms(adapter, hpo_lines=None, hpo_gene_lines=None, alias_genes=None):
    """Load the hpo terms into the database.

    Parse the hpo lines, build the objects and add them to the database.

    Args:
        adapter(MongoAdapter)
        hpo_lines(iterable(str)): HPO obo lines; fetched when not provided
        hpo_gene_lines(iterable(str)): HPO term-to-gene lines; fetched when
            not provided
        alias_genes(dict): Map from gene alias to gene info; fetched from
            the adapter when not provided
    """
    # Store the hpo terms, keyed on hpo_id
    hpo_terms = {}

    # Fetch the hpo terms if no file
    if not hpo_lines:
        hpo_lines = fetch_hpo_terms()

    # Fetch the hpo gene information if no file
    if not hpo_gene_lines:
        hpo_gene_lines = fetch_hpo_to_genes()

    # Parse the terms: yields dictionaries with information about each term
    LOG.info("Parsing hpo terms")
    for term in parse_hpo_obo(hpo_lines):
        hpo_terms[term['hpo_id']] = term

    # Get a map with hgnc symbols to hgnc ids from scout
    if not alias_genes:
        alias_genes = adapter.genes_by_alias()

    LOG.info("Adding gene information to hpo terms ...")
    for hpo_to_symbol in parse_hpo_to_genes(hpo_gene_lines):
        hgnc_symbol = hpo_to_symbol['hgnc_symbol']
        hpo_id = hpo_to_symbol['hpo_id']

        # Fetch gene info to get correct hgnc id; skip unknown symbols
        gene_info = alias_genes.get(hgnc_symbol)
        if not gene_info:
            continue
        hgnc_id = gene_info['true']

        if hpo_id not in hpo_terms:
            continue
        hpo_term = hpo_terms[hpo_id]
        # Idiom fix: was "if not 'genes' in hpo_term"
        if 'genes' not in hpo_term:
            hpo_term['genes'] = set()
        hpo_term['genes'].add(hgnc_id)

    start_time = datetime.now()
    LOG.info("Loading the hpo terms...")
    nr_terms = len(hpo_terms)
    hpo_bulk = []
    with progressbar(hpo_terms.values(), label="Loading hpo terms",
                     length=nr_terms) as bar:
        for hpo_info in bar:
            hpo_bulk.append(build_hpo_term(hpo_info))
            # Flush in chunks to keep memory usage bounded
            if len(hpo_bulk) > 10000:
                adapter.load_hpo_bulk(hpo_bulk)
                hpo_bulk = []

    if hpo_bulk:
        adapter.load_hpo_bulk(hpo_bulk)

    LOG.info("Loading done. Nr of terms loaded {0}".format(nr_terms))
    LOG.info("Time to load terms: {0}".format(datetime.now() - start_time))
Load the omim phenotypes into the database Parse the phenotypes from genemap2. txt and find the associated hpo terms from ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes. txt.
def load_disease_terms(adapter, genemap_lines, genes=None, hpo_disease_lines=None):
    """Load the omim phenotypes into the database.

    Parse the phenotypes from genemap2.txt and find the associated hpo terms
    from ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt.

    Args:
        adapter(MongoAdapter)
        genemap_lines(iterable(str))
        genes(dict): Dictionary with all genes found in database
        hpo_disease_lines(iterable(str))
    """
    # Get a map with hgnc symbols to hgnc ids from scout
    if not genes:
        genes = adapter.genes_by_alias()

    # Fetch the disease terms from omim
    disease_terms = get_mim_phenotypes(genemap_lines=genemap_lines)

    if not hpo_disease_lines:
        hpo_disease_lines = fetch_hpo_phenotype_to_terms()
    hpo_diseases = parse_hpo_diseases(hpo_disease_lines)

    start_time = datetime.now()
    nr_diseases = 0
    LOG.info("Loading the hpo disease...")
    # BUGFIX: enumerate from 1 so the final log line reports the true count
    # (previously it printed the last index, i.e. count - 1, or None when
    # there were no diseases at all)
    for nr_diseases, disease_number in enumerate(disease_terms, 1):
        disease_info = disease_terms[disease_number]
        disease_id = "OMIM:{0}".format(disease_number)

        # Attach the hpo terms associated with this disease, when present
        if disease_id in hpo_diseases:
            hpo_terms = hpo_diseases[disease_id]['hpo_terms']
            if hpo_terms:
                disease_info['hpo_terms'] = hpo_terms

        disease_obj = build_disease_term(disease_info, genes)
        adapter.load_disease_term(disease_obj)

    LOG.info("Loading done. Nr of diseases loaded {0}".format(nr_diseases))
    LOG.info("Time to load diseases: {0}".format(datetime.now() - start_time))
Add the frequencies to a variant
def parse_frequencies(variant, transcripts):
    """Add the frequencies to a variant.

    Frequencies are parsed either directly from keys in the INFO field or,
    as a fallback, from the transcripts if they are annotated there.

    Args:
        variant(cyvcf2.Variant): A parsed vcf variant
        transcripts(iterable(dict)): Parsed transcripts

    Returns:
        frequencies(dict): A dictionary with the relevant frequencies
    """
    frequencies = {}

    # Candidate INFO keys per frequency, probed in priority order.
    # These lists could be extended...
    info_keys = [
        ('thousand_g', ['1000GAF']),
        ('thousand_g_max', ['1000G_MAX_AF']),
        ('exac', ['EXACAF']),
        ('exac_max', ['ExAC_MAX_AF', 'EXAC_MAX_AF']),
        ('gnomad', ['GNOMADAF', 'GNOMAD_AF']),
        ('gnomad_max', ['GNOMADAF_POPMAX', 'GNOMADAF_MAX']),
    ]
    for freq_name, keys in info_keys:
        for test_key in keys:
            value = parse_frequency(variant, test_key)
            if value:
                frequencies[freq_name] = value
                break

    # Search transcripts if not found in VCF
    if not frequencies:
        transcript_keys = [
            ('exac', 'exac_maf'),
            ('exac_max', 'exac_max'),
            ('thousand_g', 'thousand_g_maf'),
            ('thousand_g_max', 'thousandg_max'),
            ('gnomad', 'gnomad_maf'),
            ('gnomad_max', 'gnomad_max'),
        ]
        for transcript in transcripts:
            for freq_name, key in transcript_keys:
                value = transcript.get(key)
                if value:
                    frequencies[freq_name] = value

    # These are SV-specific frequencies
    thousand_g_left = parse_frequency(variant, 'left_1000GAF')
    if thousand_g_left:
        frequencies['thousand_g_left'] = thousand_g_left

    thousand_g_right = parse_frequency(variant, 'right_1000GAF')
    if thousand_g_right:
        frequencies['thousand_g_right'] = thousand_g_right

    return frequencies
Parse any frequency from the info dict
def parse_frequency(variant, info_key):
    """Parse any frequency from the INFO dict.

    Args:
        variant(cyvcf2.Variant)
        info_key(str)

    Returns:
        frequency(float): or None if the frequency does not exist
    """
    raw_value = variant.INFO.get(info_key)
    # A '.' means the annotation is missing
    if raw_value == '.':
        raw_value = None
    return float(raw_value) if raw_value else None