Show all indexes in the database
|
def index(context, collection_name):
"""Show all indexes in the database"""
LOG.info("Running scout view index")
adapter = context.obj['adapter']
i = 0
click.echo("collection\tindex")
# If a collection name was given, restrict to it; otherwise show all
collections = [collection_name] if collection_name else adapter.collections()
for collection in collections:
for index in adapter.indexes(collection):
click.echo("{0}\t{1}".format(collection, index))
i += 1
if i == 0:
LOG.info("No indexes found")
|
Update the phenotype groups for an institute. If --add, the groups will be added to the default groups; otherwise the groups will be replaced.
|
def groups(context, institute_id, phenotype_group, group_abbreviation, group_file, add):
"""
Update the phenotype groups for an institute.
If --add, the groups will be added to the default groups; otherwise the groups will be replaced.
"""
adapter = context.obj['adapter']
LOG.info("Running scout update institute")
if group_file:
phenotype_group = []
group_abbreviation = []
for line in group_file:
if line.startswith('#'):
continue
if len(line) < 7:
continue
line = line.rstrip().split('\t')
phenotype_group.append(line[0])
if line[1]:
group_abbreviation.append(line[1])
if not phenotype_group:
LOG.info("Please provide some groups")
return
if (phenotype_group and group_abbreviation):
if len(phenotype_group) != len(group_abbreviation):
LOG.warning("Specify same number of groups and abbreviations")
return
adapter.update_institute(
internal_id=institute_id,
phenotype_groups=phenotype_group,
group_abbreviations=group_abbreviation,
add_groups=add,
)
|
Get a list of compound objects for this variant.
|
def parse_compounds(compound_info, case_id, variant_type):
"""Get a list with compounds objects for this variant.
Arguments:
compound_info(str): A Variant dictionary
case_id (str): unique family id
variant_type(str): 'research' or 'clinical'
Returns:
compounds(list(dict)): A list of compounds
"""
# We need the case to construct the correct id
compounds = []
if compound_info:
for family_info in compound_info.split(','):
splitted_entry = family_info.split(':')
# This is the family id
if splitted_entry[0] == case_id:
for compound in splitted_entry[1].split('|'):
splitted_compound = compound.split('>')
compound_obj = {}
compound_name = splitted_compound[0]
compound_obj['variant'] = generate_md5_key(compound_name.split('_') +
[variant_type, case_id])
try:
compound_score = float(splitted_compound[1])
except (TypeError, IndexError):
compound_score = 0.0
compound_obj['score'] = compound_score
compound_obj['display_name'] = compound_name
compounds.append(compound_obj)
return compounds
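A minimal usage sketch of the expected input format; the compound string and case id below are hypothetical, and generate_md5_key is the helper already used above:
# compound_info encodes per-family compounds as
# "<case_id>:<name>><score>|<name>><score>,..."
compound_info = "643594:1_880086_T_C>24.0|1_880398_C_T>19.5"  # hypothetical
compounds = parse_compounds(compound_info, case_id="643594", variant_type="clinical")
for comp in compounds:
    print(comp["display_name"], comp["score"])  # 1_880086_T_C 24.0 etc.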
|
Export all genes from a build
|
def genes(context, build, json):
"""Export all genes from a build"""
LOG.info("Running scout export genes")
adapter = context.obj['adapter']
result = adapter.all_genes(build=build)
if json:
click.echo(dumps(result))
return
gene_string = ("{0}\t{1}\t{2}\t{3}\t{4}")
click.echo("#Chromosom\tStart\tEnd\tHgnc_id\tHgnc_symbol")
for gene_obj in result:
click.echo(gene_string.format(
gene_obj['chromosome'],
gene_obj['start'],
gene_obj['end'],
gene_obj['hgnc_id'],
gene_obj['hgnc_symbol'],
))
|
Build an Individual object
|
def build_individual(ind):
"""Build a Individual object
Args:
ind (dict): A dictionary with individual information
Returns:
ind_obj (dict): An Individual object
dict(
individual_id = str, # required
display_name = str,
sex = str,
phenotype = int,
father = str, # Individual id of father
mother = str, # Individual id of mother
capture_kits = list, # List of names of capture kits
bam_file = str, # Path to bam file
vcf2cytosure = str, # Path to CGH file
analysis_type = str, # choices=ANALYSIS_TYPES
)
"""
try:
ind_obj = dict(
individual_id=ind['individual_id']
)
log.info("Building Individual with id:{0}".format(ind['individual_id']))
except KeyError as err:
raise PedigreeError("Individual is missing individual_id")
ind_obj['display_name'] = ind.get('display_name', ind_obj['individual_id'])
sex = ind.get('sex', 'unknown')
# Convert sex to .ped
try:
# Check if sex is coded as an integer
int(sex)
ind_obj['sex'] = str(sex)
except ValueError as err:
try:
# Otherwise map the textual sex to its ped number
ind_obj['sex'] = REV_SEX_MAP[sex]
except KeyError as err:
raise PedigreeError("Unknown sex: %s" % sex)
phenotype = ind.get('phenotype', 'unknown')
# Make the phenotype integers
try:
ped_phenotype = REV_PHENOTYPE_MAP[phenotype]
if ped_phenotype == -9:
ped_phenotype = 0
ind_obj['phenotype'] = ped_phenotype
except KeyError as err:
raise PedigreeError("Unknown phenotype: %s" % phenotype)
ind_obj['father'] = ind.get('father')
ind_obj['mother'] = ind.get('mother')
ind_obj['capture_kits'] = ind.get('capture_kits', [])
ind_obj['bam_file'] = ind.get('bam_file')
ind_obj['mt_bam'] = ind.get('mt_bam')
ind_obj['vcf2cytosure'] = ind.get('vcf2cytosure')
ind_obj['confirmed_sex'] = ind.get('confirmed_sex')
ind_obj['confirmed_parent'] = ind.get('confirmed_parent')
ind_obj['predicted_ancestry'] = ind.get('predicted_ancestry')
# Check if the analysis type is ok
# Can be anyone of ('wgs', 'wes', 'mixed', 'unknown')
analysis_type = ind.get('analysis_type', 'unknown')
if analysis_type not in ANALYSIS_TYPES:
raise PedigreeError("Analysis type %s not allowed" % analysis_type)
ind_obj['analysis_type'] = analysis_type
if 'tmb' in ind:
ind_obj['tmb'] = ind['tmb']
if 'msi' in ind:
ind_obj['msi'] = ind['msi']
if 'tumor_purity' in ind:
ind_obj['tumor_purity'] = ind['tumor_purity']
if 'tumor_type' in ind:
ind_obj['tumor_type'] = ind['tumor_type']
return ind_obj
|
Upload variants to a case
|
def variants(context, case_id, institute, force, cancer, cancer_research, sv,
sv_research, snv, snv_research, str_clinical, chrom, start, end, hgnc_id,
hgnc_symbol, rank_threshold):
"""Upload variants to a case
Note that the files have to be linked with the case;
if they are not, use 'scout update case'.
"""
LOG.info("Running scout load variants")
adapter = context.obj['adapter']
if institute:
case_id = "{0}-{1}".format(institute, case_id)
else:
institute = case_id.split('-')[0]
case_obj = adapter.case(case_id=case_id)
if case_obj is None:
LOG.info("No matching case found")
context.abort()
files = [
{'category': 'cancer', 'variant_type': 'clinical', 'upload': cancer},
{'category': 'cancer', 'variant_type': 'research', 'upload': cancer_research},
{'category': 'sv', 'variant_type': 'clinical', 'upload': sv},
{'category': 'sv', 'variant_type': 'research', 'upload': sv_research},
{'category': 'snv', 'variant_type': 'clinical', 'upload': snv},
{'category': 'snv', 'variant_type': 'research', 'upload': snv_research},
{'category': 'str', 'variant_type': 'clinical', 'upload': str_clinical},
]
gene_obj = None
if (hgnc_id or hgnc_symbol):
if hgnc_id:
gene_obj = adapter.hgnc_gene(hgnc_id)
if hgnc_symbol:
for res in adapter.gene_by_alias(hgnc_symbol):
gene_obj = res
if not gene_obj:
LOG.warning("The gene could not be found")
context.abort()
i = 0
for file_type in files:
variant_type = file_type['variant_type']
category = file_type['category']
if file_type['upload']:
i += 1
if variant_type == 'research':
if not (force or case_obj['research_requested']):
LOG.warning("research not requested, use '--force'")
context.abort()
LOG.info("Delete {0} {1} variants for case {2}".format(
variant_type, category, case_id))
adapter.delete_variants(case_id=case_obj['_id'],
variant_type=variant_type,
category=category)
LOG.info("Load {0} {1} variants for case {2}".format(
variant_type, category, case_id))
try:
adapter.load_variants(
case_obj=case_obj,
variant_type=variant_type,
category=category,
rank_threshold=rank_threshold,
chrom=chrom,
start=start,
end=end,
gene_obj=gene_obj
)
except Exception as e:
LOG.warning(e)
context.abort()
if i == 0:
LOG.info("No files where specified to upload variants from")
|
Return a case.
|
def case(institute_id, case_name):
"""Return a variant."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if case_obj is None:
return abort(404)
return Response(json_util.dumps(case_obj), mimetype='application/json')
|
Display a specific SNV variant.
|
def variant(institute_id, case_name, variant_id):
"""Display a specific SNV variant."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
variant_obj = store.variant(variant_id)
return Response(json_util.dumps(variant_obj), mimetype='application/json')
|
Show all collections in the database
|
def collections(context):
"""Show all collections in the database"""
LOG.info("Running scout view collections")
adapter = context.obj['adapter']
for collection_name in adapter.collections():
click.echo(collection_name)
|
Create a new institute and add it to the database
|
def institute(ctx, internal_id, display_name, sanger_recipients):
"""
Create a new institute and add it to the database
"""
adapter = ctx.obj['adapter']
if not internal_id:
logger.warning("A institute has to have an internal id")
ctx.abort()
if not display_name:
display_name = internal_id
if sanger_recipients:
sanger_recipients = list(sanger_recipients)
try:
load_institute(
adapter=adapter,
internal_id=internal_id,
display_name=display_name,
sanger_recipients=sanger_recipients
)
except Exception as e:
logger.warning(e)
ctx.abort()
|
Update an institute
|
def institute(context, institute_id, sanger_recipient, coverage_cutoff, frequency_cutoff,
display_name, remove_sanger):
"""
Update an institute
"""
adapter = context.obj['adapter']
LOG.info("Running scout update institute")
try:
adapter.update_institute(
internal_id=institute_id,
sanger_recipient=sanger_recipient,
coverage_cutoff=coverage_cutoff,
frequency_cutoff=frequency_cutoff,
display_name=display_name,
remove_sanger=remove_sanger,
)
except Exception as err:
LOG.warning(err)
context.abort()
|
Return an opened file
|
def get_file_handle(file_path):
"""Return a opened file"""
if file_path.endswith('.gz'):
file_handle = getreader('utf-8')(gzip.open(file_path, 'r'), errors='replace')
else:
file_handle = open(file_path, 'r', encoding='utf-8')
return file_handle
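Both branches return a text-mode handle, so callers can iterate lines the same way regardless of compression; a short sketch with hypothetical paths:
# Plain and gzipped files are read through the same interface
for path in ("genes.txt", "genes.txt.gz"):  # hypothetical files
    handle = get_file_handle(path)
    first_line = next(handle)
    handle.close()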
|
Increments the day by converting to a datetime.date().
|
def _inc_day(year, month, day, net):
"""Increments the day by converting to a datetime.date()."""
d = date(year, month, day)
new_d = d + timezone.timedelta(days=net)
return new_d.year, new_d.month, new_d.day
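Since the arithmetic goes through datetime.date, month and year boundaries roll over correctly:
_inc_day(2018, 12, 31, 1)   # -> (2019, 1, 1)
_inc_day(2018, 3, 1, -1)    # -> (2018, 2, 28)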
|
Get the net of any next and prev querystrings.
|
def get_net(req):
"""Get the net of any 'next' and 'prev' querystrings."""
try:
nxt, prev = map(
int, (req.GET.get('cal_next', 0), req.GET.get('cal_prev', 0))
)
net = nxt - prev
except Exception:
net = 0
return net
|
Group events that occur on the same day, then sort them alphabetically by title, then sort by day. Returns a list of tuples that looks like [(day: [events])], where day is the day of the event(s) and [events] is an alphabetically sorted list of the events for the day.
|
def order_events(events, d=False):
"""
Group events that occur on the same day, then sort them alphabetically
by title, then sort by day. Returns a list of tuples that looks like
[(day: [events])], where day is the day of the event(s), and [events]
is an alphabetically sorted list of the events for the day.
"""
ordered_events = {}
for event in events:
try:
for occ in event.occurrence:
try:
ordered_events[occ].append(event)
except Exception:
ordered_events[occ] = [event]
except AttributeError: # no occurrence for this event
# This shouldn't happen, since an event w/o an occurrence
# shouldn't get this far, but if it does, just skip it since
# it shouldn't be displayed on the calendar anyway.
pass
if d:
# return as a dict without sorting by date
return ordered_events
else:
# return ordered_events as a list of tuples sorted by date
return sorted(ordered_events.items())
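A small sketch with a stand-in Event type; the real model comes from the calendar app, and `occurrence` is assumed here to be an iterable of dates:
from collections import namedtuple
from datetime import date

Event = namedtuple('Event', ['title', 'occurrence'])  # hypothetical stand-in
retro = Event('Retro', [date(2018, 5, 1), date(2018, 5, 2)])
standup = Event('Standup', [date(2018, 5, 1)])
# Events are grouped per day and the returned list is sorted by date
order_events([retro, standup])
# -> [(date(2018, 5, 1), [retro, standup]), (date(2018, 5, 2), [retro])]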
|
Returns what the next and prev querystrings should be.
|
def get_next_and_prev(net):
"""Returns what the next and prev querystrings should be."""
if net == 0:
nxt = prev = 1
elif net > 0:
nxt = net + 1
prev = -(net - 1)
else:
nxt = net + 1
prev = abs(net) + 1
return nxt, prev
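The returned querystrings always move one month in either direction relative to the current offset:
get_next_and_prev(0)    # -> (1, 1)
get_next_and_prev(2)    # -> (3, -1)
get_next_and_prev(-2)   # -> (-1, 3)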
|
Checks that the year is within 50 years from now.
|
def _check_year(year, month, error, error_msg):
"""Checks that the year is within 50 years from now."""
if year not in xrange((now.year - 50), (now.year + 51)):
year = now.year
month = now.month
error = error_msg
return year, month, error
|
If month_orig, which is the month given in the url BEFORE any next/prev query strings have been applied, is out of range, sets month to the current month and returns an error message. Also returns an error message if the year given is +/- 50 years from now. If month, which is the month given in the url AFTER any next/prev query strings have been applied, is out of range, adjusts it to be in range (by also adjusting the year).
|
def clean_year_month(year, month, month_orig):
"""
If 'month_orig', which is the month given in the url BEFORE any next/prev
query strings have been applied, is out of range, sets month to the
current month and returns an error message. Also returns an error
message if the year given is +/- 50 years from now.
If 'month', which is the month given in the url AFTER any next/prev
query strings have been applied, is out of range, adjusts it to be
in range (by also adjusting the year).
"""
error = False
error_msg = "The date given was invalid."
if month_orig not in xrange(1, 13) and month_orig is not None:
month = now.month
error = error_msg
# This takes care of 'next' query strings making month > 12
while month > 12:
month -= 12
year += 1
# This takes care of 'prev' query strings making month < 1
while month < 1:
month += 12
year -= 1
year, month, error = _check_year(year, month, error, error_msg)
return year, month, error
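A sketch of the rollover behaviour, assuming the module-level `now` holds the current datetime and the year stays inside the ±50-year window:
# 'next' querystrings can push month past 12; it is folded back into range
clean_year_month(2018, 14, 2)   # -> (2019, 2, False)
# an out-of-range month_orig falls back to the current month with an error
clean_year_month(2018, 5, 13)   # -> (2018, now.month, "The date given was invalid.")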
|
Make sure any event day we send back for weekday repeating events is not a weekend.
|
def check_weekday(year, month, day, reverse=False):
"""
Make sure any event day we send back for weekday repeating
events is not a weekend.
"""
d = date(year, month, day)
while d.weekday() in (5, 6):
if reverse:
d -= timedelta(days=1)
else:
d += timedelta(days=1)
return d.year, d.month, d.day
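For example, an occurrence falling on a weekend is pushed to the next Monday, or back to the previous Friday with reverse=True:
check_weekday(2018, 9, 1)                 # Saturday -> (2018, 9, 3), Monday
check_weekday(2018, 9, 1, reverse=True)   # Saturday -> (2018, 8, 31), Friday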
|
Parse all data necessary for loading a case into scout
|
def parse_case_data(config=None, ped=None, owner=None, vcf_snv=None,
vcf_sv=None, vcf_cancer=None, vcf_str=None, peddy_ped=None,
peddy_sex=None, peddy_check=None, delivery_report=None, multiqc=None):
"""Parse all data necessary for loading a case into scout
This can be done either by providing a VCF file and other information
on the command line, or by specifying all the information in a config file.
Please see Scout documentation for further instructions.
Args:
config(dict): A yaml formatted config file
ped(iterable(str)): A ped formatted family file
owner(str): The institute that owns a case
vcf_snv(str): Path to a vcf file
vcf_str(str): Path to a VCF file
vcf_sv(str): Path to a vcf file
vcf_cancer(str): Path to a vcf file
peddy_ped(str): Path to a peddy ped
multiqc(str): Path to dir with multiqc information
Returns:
config_data(dict): Holds all the necessary information for loading
Scout
"""
config_data = copy.deepcopy(config) or {}
# Default the analysis date to now if not specified in load config
if 'analysis_date' not in config_data:
config_data['analysis_date'] = datetime.datetime.now()
# If the family information is in a ped file we need to parse that
if ped:
family_id, samples = parse_ped(ped)
config_data['family'] = family_id
config_data['samples'] = samples
# Each case has to have an owner. If not provided in the config file it
# needs to be given as an argument
if 'owner' not in config_data:
if not owner:
raise SyntaxError("Case has no owner")
else:
config_data['owner'] = owner
if 'gene_panels' in config_data:
# handle whitespace in gene panel names
config_data['gene_panels'] = [panel.strip() for panel in
config_data['gene_panels']]
config_data['default_gene_panels'] = [panel.strip() for panel in
config_data['default_gene_panels']]
##################### Add information from peddy if existing #####################
config_data['peddy_ped'] = peddy_ped or config_data.get('peddy_ped')
config_data['peddy_sex_check'] = peddy_sex or config_data.get('peddy_sex')
config_data['peddy_ped_check'] = peddy_check or config_data.get('peddy_check')
# This will add information from peddy to the individuals
add_peddy_information(config_data)
##################### Add multiqc information #####################
config_data['multiqc'] = multiqc or config_data.get('multiqc')
config_data['vcf_snv'] = vcf_snv if vcf_snv else config_data.get('vcf_snv')
config_data['vcf_sv'] = vcf_sv if vcf_sv else config_data.get('vcf_sv')
config_data['vcf_str'] = vcf_str if vcf_str else config_data.get('vcf_str')
log.debug("Config vcf_str set to {0}".format(config_data['vcf_str']))
config_data['vcf_cancer'] = vcf_cancer if vcf_cancer else config_data.get('vcf_cancer')
config_data['delivery_report'] = delivery_report if delivery_report else config_data.get('delivery_report')
config_data['rank_model_version'] = config_data.get('rank_model_version')
config_data['rank_score_threshold'] = config_data.get('rank_score_threshold', 0)
config_data['track'] = config_data.get('track', 'rare')
if config_data['vcf_cancer']:
config_data['track'] = 'cancer'
return config_data
|
Add information from peddy outfiles to the individuals
|
def add_peddy_information(config_data):
"""Add information from peddy outfiles to the individuals"""
ped_info = {}
ped_check = {}
sex_check = {}
relations = []
if config_data.get('peddy_ped'):
file_handle = open(config_data['peddy_ped'], 'r')
for ind_info in parse_peddy_ped(file_handle):
ped_info[ind_info['sample_id']] = ind_info
if config_data.get('peddy_ped_check'):
file_handle = open(config_data['peddy_ped_check'], 'r')
for pair_info in parse_peddy_ped_check(file_handle):
ped_check[(pair_info['sample_a'], pair_info['sample_b'])] = pair_info
if config_data.get('peddy_sex_check'):
file_handle = open(config_data['peddy_sex_check'], 'r')
for ind_info in parse_peddy_sex_check(file_handle):
sex_check[ind_info['sample_id']] = ind_info
if not ped_info:
return
analysis_inds = {}
for ind in config_data['samples']:
ind_id = ind['sample_id']
analysis_inds[ind_id] = ind
for ind_id in analysis_inds:
ind = analysis_inds[ind_id]
# Check if peddy has inferred the ancestry
if ind_id in ped_info:
ind['predicted_ancestry'] = ped_info[ind_id].get('ancestry-prediction', 'UNKNOWN')
# Check if peddy has inferred the sex
if ind_id in sex_check:
if sex_check[ind_id]['error']:
ind['confirmed_sex'] = False
else:
ind['confirmed_sex'] = True
# Check if peddy has confirmed parental relations
for parent in ['mother', 'father']:
# If we are looking at individual with parents
if ind[parent] != '0':
# Check if the child/parent pair is in peddy data
for pair in ped_check:
if (ind_id in pair and ind[parent] in pair):
# If there is a parent error we mark that
if ped_check[pair]['parent_error']:
analysis_inds[ind[parent]]['confirmed_parent'] = False
else:
# Else if parent confirmation has not been done
if 'confirmed_parent' not in analysis_inds[ind[parent]]:
# Set confirmation to True
analysis_inds[ind[parent]]['confirmed_parent'] = True
|
Parse individual information
|
def parse_individual(sample):
"""Parse individual information
Args:
sample (dict)
Returns:
{
'individual_id': str,
'father': str,
'mother': str,
'display_name': str,
'sex': str,
'phenotype': str,
'bam_file': str,
'vcf2cytosure': str,
'analysis_type': str,
'capture_kits': list(str),
}
"""
ind_info = {}
if 'sample_id' not in sample:
raise PedigreeError("One sample is missing 'sample_id'")
sample_id = sample['sample_id']
# Check the sex
if 'sex' not in sample:
raise PedigreeError("Sample %s is missing 'sex'" % sample_id)
sex = sample['sex']
if sex not in REV_SEX_MAP:
log.warning("'sex' is only allowed to have values from {}"
.format(', '.join(list(REV_SEX_MAP.keys()))))
raise PedigreeError("Individual %s has wrong formated sex" % sample_id)
# Check the phenotype
if 'phenotype' not in sample:
raise PedigreeError("Sample %s is missing 'phenotype'"
% sample_id)
phenotype = sample['phenotype']
if phenotype not in REV_PHENOTYPE_MAP:
log.warning("'phenotype' is only allowed to have values from {}"
.format(', '.join(list(REV_PHENOTYPE_MAP.keys()))))
raise PedigreeError("Individual %s has wrong formated phenotype" % sample_id)
ind_info['individual_id'] = sample_id
ind_info['display_name'] = sample.get('sample_name', sample['sample_id'])
ind_info['sex'] = sex
ind_info['phenotype'] = phenotype
ind_info['father'] = sample.get('father')
ind_info['mother'] = sample.get('mother')
ind_info['confirmed_parent'] = sample.get('confirmed_parent')
ind_info['confirmed_sex'] = sample.get('confirmed_sex')
ind_info['predicted_ancestry'] = sample.get('predicted_ancestry')
bam_file = sample.get('bam_path')
if bam_file:
ind_info['bam_file'] = bam_file
mt_bam = sample.get('mt_bam')
if mt_bam:
ind_info['mt_bam'] = mt_bam
analysis_type = sample.get('analysis_type')
if analysis_type:
ind_info['analysis_type'] = analysis_type
ind_info['capture_kits'] = ([sample.get('capture_kit')]
if 'capture_kit' in sample else [])
# Path to downloadable vcf2cytosure file
vcf2cytosure = sample.get('vcf2cytosure')
if vcf2cytosure:
ind_info['vcf2cytosure'] = vcf2cytosure
# Cancer specific values
tumor_type = sample.get('tumor_type')
if tumor_type:
ind_info['tumor_type'] = tumor_type
tumor_mutational_burden = sample.get('tmb')
if tumor_mutational_burden:
ind_info['tmb'] = tumor_mutational_burden
msi = sample.get('msi')
if msi:
ind_info['msi'] = msi
tumor_purity = sample.get('tumor_purity')
if tumor_purity:
ind_info['tumor_purity'] = tumor_purity
return ind_info
|
Parse the individual information
|
def parse_individuals(samples):
"""Parse the individual information
Reformat sample information to proper individuals
Args:
samples(list(dict))
Returns:
individuals(list(dict))
"""
individuals = []
if len(samples) == 0:
raise PedigreeError("No samples could be found")
ind_ids = set()
for sample_info in samples:
parsed_ind = parse_individual(sample_info)
individuals.append(parsed_ind)
ind_ids.add(parsed_ind['individual_id'])
# Check if relations are correct
for parsed_ind in individuals:
father = parsed_ind['father']
if (father and father != '0'):
if father not in ind_ids:
raise PedigreeError('father %s does not exist in family' % father)
mother = parsed_ind['mother']
if (mother and mother != '0'):
if mother not in ind_ids:
raise PedigreeError('mother %s does not exist in family' % mother)
return individuals
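A sketch with a hypothetical trio; the sex and phenotype values are assumed to be valid keys of REV_SEX_MAP and REV_PHENOTYPE_MAP:
samples = [
    {'sample_id': 'child', 'sex': 'male', 'phenotype': 'affected',
     'father': 'dad', 'mother': 'mum'},
    {'sample_id': 'dad', 'sex': 'male', 'phenotype': 'unaffected',
     'father': '0', 'mother': '0'},
    {'sample_id': 'mum', 'sex': 'female', 'phenotype': 'unaffected',
     'father': '0', 'mother': '0'},
]
individuals = parse_individuals(samples)
# raises PedigreeError if a referenced parent is not among the samples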
|
Parse case information from config or PED files.
|
def parse_case(config):
"""Parse case information from config or PED files.
Args:
config (dict): case config with detailed information
Returns:
dict: parsed case data
"""
if 'owner' not in config:
raise ConfigError("A case has to have a owner")
if 'family' not in config:
raise ConfigError("A case has to have a 'family'")
individuals = parse_individuals(config['samples'])
case_data = {
'owner': config['owner'],
'collaborators': [config['owner']],
'case_id': config['family'],
'display_name': config.get('family_name', config['family']),
'genome_build': config.get('human_genome_build'),
'rank_model_version': config.get('rank_model_version'),
'rank_score_threshold': config.get('rank_score_threshold', 0),
'analysis_date': config['analysis_date'],
'individuals': individuals,
'vcf_files': {
'vcf_snv': config.get('vcf_snv'),
'vcf_sv': config.get('vcf_sv'),
'vcf_str': config.get('vcf_str'),
'vcf_cancer': config.get('vcf_cancer'),
'vcf_snv_research': config.get('vcf_snv_research'),
'vcf_sv_research': config.get('vcf_sv_research'),
'vcf_cancer_research': config.get('vcf_cancer_research'),
},
'default_panels': config.get('default_gene_panels', []),
'gene_panels': config.get('gene_panels', []),
'assignee': config.get('assignee'),
'peddy_ped': config.get('peddy_ped'),
'peddy_sex': config.get('peddy_sex'),
'peddy_check': config.get('peddy_check'),
'delivery_report': config.get('delivery_report'),
'multiqc': config.get('multiqc'),
'track': config.get('track', 'rare'),
}
# add the pedigree figure, this is a xml file which is dumped in the db
if 'madeline' in config:
mad_path = Path(config['madeline'])
if not mad_path.exists():
raise ValueError("madeline path not found: {}".format(mad_path))
with mad_path.open('r') as in_handle:
case_data['madeline_info'] = in_handle.read()
if (case_data['vcf_files']['vcf_cancer'] or case_data['vcf_files']['vcf_cancer_research']):
case_data['track'] = 'cancer'
return case_data
|
Parse out minimal family information from a PED file.
|
def parse_ped(ped_stream, family_type='ped'):
"""Parse out minimal family information from a PED file.
Args:
ped_stream(iterable(str))
family_type(str): Format of the pedigree information
Returns:
family_id(str), samples(list[dict])
"""
pedigree = FamilyParser(ped_stream, family_type=family_type)
if len(pedigree.families) != 1:
raise PedigreeError("Only one case per ped file is allowed")
family_id = list(pedigree.families.keys())[0]
family = pedigree.families[family_id]
samples = [{
'sample_id': ind_id,
'father': individual.father,
'mother': individual.mother,
# Convert sex to human readable
'sex': SEX_MAP[individual.sex],
'phenotype': PHENOTYPE_MAP[int(individual.phenotype)],
} for ind_id, individual in family.individuals.items()]
return family_id, samples
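A minimal sketch parsing an in-memory PED stream; FamilyParser comes from the ped_parser package, and the trio below is hypothetical:
from io import StringIO

PED = ("#family_id\tsample_id\tfather\tmother\tsex\tphenotype\n"
       "fam1\tchild\tdad\tmum\t1\t2\n"
       "fam1\tdad\t0\t0\t1\t1\n"
       "fam1\tmum\t0\t0\t2\t1\n")
family_id, samples = parse_ped(StringIO(PED))
# family_id == 'fam1'; each sample dict carries human-readable
# 'sex' and 'phenotype' values via SEX_MAP and PHENOTYPE_MAP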
|
Build an evaluation object ready to be inserted into the database
|
def build_evaluation(variant_specific, variant_id, user_id, user_name,
institute_id, case_id, classification, criteria):
"""Build a evaluation object ready to be inserted to database
Args:
variant_specific(str): md5 string for the specific variant
variant_id(str): md5 string for the common variant
user_id(str)
user_name(str)
institute_id(str)
case_id(str)
classification(str): The ACMG classification
criteria(list(dict)): A list of dictionaries with ACMG criteria
Returns:
evaluation_obj(dict): Correctly formatted evaluation object
"""
criteria = criteria or []
evaluation_obj = dict(
variant_specific = variant_specific,
variant_id = variant_id,
institute_id = institute_id,
case_id = case_id,
classification = classification,
user_id = user_id,
user_name = user_name,
created_at = datetime.datetime.now(),
)
criteria_objs = []
for info in criteria:
criteria_obj = {}
# This always has to exist
# We might want to check if the term is valid here...
criteria_obj['term'] = info['term']
if 'comment' in info:
criteria_obj['comment'] = info['comment']
if 'links' in info:
criteria_obj['links'] = info['links']
criteria_objs.append(criteria_obj)
evaluation_obj['criteria'] = criteria_objs
return evaluation_obj
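A usage sketch with hypothetical identifiers:
evaluation = build_evaluation(
    variant_specific='specific_md5',   # hypothetical md5 keys
    variant_id='common_md5',
    user_id='user@clinic.se',
    user_name='A User',
    institute_id='cust000',
    case_id='643594',
    classification='likely_pathogenic',
    criteria=[{'term': 'PS1', 'comment': 'Same amino acid change'}],
)
# evaluation['criteria'] == [{'term': 'PS1', 'comment': 'Same amino acid change'}]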
|
Export all mitochondrial variants for each sample of a case and write them to an excel file
|
def mt_report(context, case_id, test, outpath=None):
"""Export all mitochondrial variants for each sample of a case
and write them to an excel file
Args:
adapter(MongoAdapter)
case_id(str)
test(bool): True if the function is called for testing purposes
outpath(str): path to output file
Returns:
written_files(int): number of written or simulated files
"""
LOG.info('exporting mitochondrial variants for case "{}"'.format(case_id))
adapter = context.obj['adapter']
query = {'chrom':'MT'}
case_obj = adapter.case(case_id=case_id)
if not case_obj:
LOG.warning('Could not find a scout case with id "{}". No report was created.'.format(case_id))
context.abort()
samples = case_obj.get('individuals')
mt_variants = list(adapter.variants(case_id=case_id, query=query, nr_of_variants= -1, sort_key='position'))
if not mt_variants:
LOG.warning('There are no MT variants associated to case {} in database!'.format(case_id))
context.abort()
today = datetime.datetime.now().strftime('%Y-%m-%d')
# set up outfolder
if not outpath:
outpath = str(os.getcwd())
# get document lines for each of the case's individuals
# Write excel document for each sample in case
written_files = 0
for sample in samples:
sample_id = sample['individual_id']
sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)
# set up document name
document_name = '.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx'
workbook = Workbook(os.path.join(outpath,document_name))
Report_Sheet = workbook.add_worksheet()
if test and sample_lines and workbook:
written_files +=1
continue
# Write the column header
row = 0
for col,field in enumerate(MT_EXPORT_HEADER):
Report_Sheet.write(row,col,field)
# Write variant lines, after header (start at line 1)
for row, line in enumerate(sample_lines,1): # each line becomes a row in the document
for col, field in enumerate(line): # each field in line becomes a cell
Report_Sheet.write(row,col,field)
workbook.close()
if os.path.exists(os.path.join(outpath,document_name)):
written_files += 1
if test:
LOG.info("Number of excel files that can be written to folder {0}: {1}".format(outpath, written_files))
else:
LOG.info("Number of excel files written to folder {0}: {1}".format(outpath, written_files))
return written_files
|
Build a genotype call
|
def build_genotype(gt_call):
"""Build a genotype call
Args:
gt_call(dict)
Returns:
gt_obj(dict)
gt_call = dict(
sample_id = str,
display_name = str,
genotype_call = str,
allele_depths = list, # int
read_depth = int,
genotype_quality = int,
)
"""
gt_obj = dict(
sample_id = gt_call['individual_id'],
display_name = gt_call['display_name'],
genotype_call = gt_call['genotype_call'],
allele_depths = [gt_call['ref_depth'], gt_call['alt_depth']],
read_depth = gt_call['read_depth'],
genotype_quality = gt_call['genotype_quality']
)
return gt_obj
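A sketch of the expected input, matching the docstring above (values hypothetical):
gt_call = {
    'individual_id': 'ADM1059A2',   # hypothetical sample
    'display_name': 'NA12882',
    'genotype_call': '0/1',
    'ref_depth': 24,
    'alt_depth': 19,
    'read_depth': 43,
    'genotype_quality': 99,
}
gt_obj = build_genotype(gt_call)
# gt_obj['allele_depths'] == [24, 19]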
|
Check if the criteria for Pathogenic are fulfilled
|
def is_pathogenic(pvs, ps_terms, pm_terms, pp_terms):
"""Check if the criterias for Pathogenic is fullfilled
The following are descriptions of Pathogenic clasification from ACMG paper:
Pathogenic
(i) 1 Very strong (PVS1) AND
(a) ≥1 Strong (PS1–PS4) OR
(b) ≥2 Moderate (PM1–PM6) OR
(c) 1 Moderate (PM1–PM6) and 1 supporting (PP1–PP5) OR
(d) ≥2 Supporting (PP1–PP5)
(ii) ≥2 Strong (PS1–PS4) OR
(iii) 1 Strong (PS1–PS4) AND
(a)≥3 Moderate (PM1–PM6) OR
(b)2 Moderate (PM1–PM6) AND ≥2 Supporting (PP1–PP5) OR
(c)1 Moderate (PM1–PM6) AND ≥4 supporting (PP1–PP5)
Args:
pvs(bool): Pathogenic Very Strong
ps_terms(list(str)): Pathogenic Strong terms
pm_terms(list(str)): Pathogenic Moderate terms
pp_terms(list(str)): Pathogenic Supporting terms
Returns:
bool: if classification indicates Pathogenic level
"""
if pvs:
# Pathogenic (i)(a):
if ps_terms:
return True
if pm_terms:
# Pathogenic (i)(c):
if pp_terms:
return True
# Pathogenic (i)(b):
if len(pm_terms) >= 2:
return True
# Pathogenic (i)(d):
if len(pp_terms) >= 2:
return True
if ps_terms:
# Pathogenic (ii):
if len(ps_terms) >= 2:
return True
# Pathogenic (iii)(a):
if pm_terms:
if len(pm_terms) >= 3:
return True
elif len(pm_terms) >= 2:
if len(pp_terms) >= 2:
return True
elif len(pp_terms) >= 4:
return True
return False
|
Check if the criteria for Likely Pathogenic are fulfilled
|
def is_likely_pathogenic(pvs, ps_terms, pm_terms, pp_terms):
"""Check if the criterias for Likely Pathogenic is fullfilled
The following are descriptions of Likely Pathogenic clasification from ACMG paper:
Likely pathogenic
(i) 1 Very strong (PVS1) AND 1 moderate (PM1– PM6) OR
(ii) 1 Strong (PS1–PS4) AND 1–2 moderate (PM1–PM6) OR
(iii) 1 Strong (PS1–PS4) AND ≥2 supporting (PP1–PP5) OR
(iv) ≥3 Moderate (PM1–PM6) OR
(v) 2 Moderate (PM1–PM6) AND ≥2 supporting (PP1–PP5) OR
(vi) 1 Moderate (PM1–PM6) AND ≥4 supporting (PP1–PP5)
Args:
pvs(bool): Pathogenic Very Strong
ps_terms(list(str)): Pathogenic Strong terms
pm_terms(list(str)): Pathogenic Moderate terms
pp_terms(list(str)): Pathogenic Supporting terms
Returns:
bool: if classification indicates Likely Pathogenic level
"""
if pvs:
# Likely Pathogenic (i):
if pm_terms:
return True
if ps_terms:
# Likely Pathogenic (ii):
if pm_terms:
return True
# Likely Pathogenic (iii):
if len(pp_terms) >= 2:
return True
if pm_terms:
# Likely Pathogenic (iv):
if len(pm_terms) >= 3:
return True
# Likely Pathogenic (v):
elif len(pm_terms) >= 2:
if len(pp_terms) >= 2:
return True
# Likely Pathogenic (vi):
elif len(pp_terms) >= 4:
return True
return False
|
Check if the criteria for Likely Benign are fulfilled
|
def is_likely_benign(bs_terms, bp_terms):
"""Check if criterias for Likely Benign are fullfilled
The following are descriptions of Likely Benign clasification from ACMG paper:
Likely Benign
(i) 1 Strong (BS1–BS4) and 1 supporting (BP1– BP7) OR
(ii) ≥2 Supporting (BP1–BP7)
Args:
bs_terms(list(str)): Terms that indicate strong evidence for benign variant
bp_terms(list(str)): Terms that indicate supporting evidence for benign variant
Returns:
bool: if classification indicates Benign level
"""
if bs_terms:
# Likely Benign (i)
if bp_terms:
return True
# Likely Benign (ii)
if len(bp_terms) >= 2:
return True
return False
|
Use the algorithm described in the ACMG paper to get an ACMG classification
|
def get_acmg(acmg_terms):
"""Use the algorithm described in ACMG paper to get a ACMG calssification
Args:
acmg_terms(set(str)): A collection of prediction terms
Returns:
prediction(str): One of 'uncertain_significance', 'benign',
'likely_benign', 'likely_pathogenic' or 'pathogenic'
"""
prediction = 'uncertain_significance'
# This variable indicates if Pathogenicity Very Strong exists
pvs = False
# Collection of terms with Pathogenicity Strong
ps_terms = []
# Collection of terms with Pathogenicity Moderate
pm_terms = []
# Collection of terms with Pathogenicity Supporting
pp_terms = []
# This variable indicates if Benign impact stand-alone exists
ba = False
# Collection of terms with Benign evidence Strong
bs_terms = []
# Collection of terms with supporting Benign evidence
bp_terms = []
for term in acmg_terms:
if term.startswith('PVS'):
pvs = True
elif term.startswith('PS'):
ps_terms.append(term)
elif term.startswith('PM'):
pm_terms.append(term)
elif term.startswith('PP'):
pp_terms.append(term)
elif term.startswith('BA'):
ba = True
elif term.startswith('BS'):
bs_terms.append(term)
elif term.startswith('BP'):
bp_terms.append(term)
# We need to start by checking for Pathogenicity
pathogenic = is_pathogenic(pvs, ps_terms, pm_terms, pp_terms)
likely_pathogenic = is_likely_pathogenic(pvs, ps_terms, pm_terms, pp_terms)
benign = is_benign(ba, bs_terms)
likely_benign = is_likely_benign(bs_terms, bp_terms)
if (pathogenic or likely_pathogenic):
if (benign or likely_benign):
prediction = 'uncertain_significance'
elif pathogenic:
prediction = 'pathogenic'
else:
prediction = 'likely_pathogenic'
else:
if benign:
prediction = 'benign'
if likely_benign:
prediction = 'likely_benign'
return prediction
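A few sample classifications, following the ACMG combining rules implemented above (is_benign is assumed to implement the standard stand-alone / ≥2-Strong rule):
get_acmg({'PVS1', 'PS1'})                 # -> 'pathogenic' (rule (i)(a))
get_acmg({'PM1', 'PM2', 'PP1', 'PP2'})    # -> 'likely_pathogenic' (rule (v))
get_acmg({'PVS1', 'PM1', 'BS1', 'BP1'})   # conflicting -> 'uncertain_significance'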
|
Add extra information about genes from gene panels
|
def add_gene_info(self, variant_obj, gene_panels=None):
"""Add extra information about genes from gene panels
Args:
variant_obj(dict): A variant from the database
gene_panels(list(dict)): List of panels from database
"""
gene_panels = gene_panels or []
# Add a variable that checks if there are any refseq transcripts
variant_obj['has_refseq'] = False
# We need to check if there is any additional information in the gene panels
# extra_info will hold information from gene panels
# Collect all extra info from the panels in a dictionary with hgnc_id as keys
extra_info = {}
for panel_obj in gene_panels:
for gene_info in panel_obj['genes']:
hgnc_id = gene_info['hgnc_id']
if hgnc_id not in extra_info:
extra_info[hgnc_id] = []
extra_info[hgnc_id].append(gene_info)
# Loop over the genes in the variant object to add information
# from hgnc_genes and panel genes to the variant object
for variant_gene in variant_obj.get('genes', []):
hgnc_id = variant_gene['hgnc_id']
# Get the hgnc_gene
hgnc_gene = self.hgnc_gene(hgnc_id)
if not hgnc_gene:
continue
# Create a dictionary with transcripts information
# Use ensembl transcript id as keys
transcripts_dict = {}
# Add transcript information from the hgnc gene
for transcript in hgnc_gene.get('transcripts', []):
tx_id = transcript['ensembl_transcript_id']
transcripts_dict[tx_id] = transcript
# Add the transcripts to the gene object
hgnc_gene['transcripts_dict'] = transcripts_dict
if hgnc_gene.get('incomplete_penetrance'):
variant_gene['omim_penetrance'] = True
############# PANEL SPECIFIC INFORMATION #############
# Panels can have extra information about genes and transcripts
panel_info = extra_info.get(hgnc_id, [])
# Manually annotated disease associated transcripts
disease_associated = set()
# We need to strip the version to compare against others
disease_associated_no_version = set()
manual_penetrance = False
mosaicism = False
manual_inheritance = set()
# We need to loop since there can be information from multiple panels
for gene_info in panel_info:
# Check if there are manually annotated disease transcripts
for tx in gene_info.get('disease_associated_transcripts', []):
# We remove the version of the transcript at this stage
stripped = re.sub(r'\.[0-9]+', '', tx)
disease_associated_no_version.add(stripped)
disease_associated.add(tx)
if gene_info.get('reduced_penetrance'):
manual_penetrance = True
if gene_info.get('mosaicism'):
mosaicism = True
manual_inheritance.update(gene_info.get('inheritance_models', []))
variant_gene['disease_associated_transcripts'] = list(disease_associated)
variant_gene['manual_penetrance'] = manual_penetrance
variant_gene['mosaicism'] = mosaicism
variant_gene['manual_inheritance'] = list(manual_inheritance)
# Now add the information from hgnc and panels
# to the transcripts on the variant
# First loop over the variants transcripts
for transcript in variant_gene.get('transcripts', []):
tx_id = transcript['transcript_id']
if not tx_id in transcripts_dict:
continue
# This is the common information about the transcript
hgnc_transcript = transcripts_dict[tx_id]
# Check in the common information if it is a primary transcript
if hgnc_transcript.get('is_primary'):
transcript['is_primary'] = True
# If the transcript has a ref seq identifier we add that
# to the variants transcript
if not hgnc_transcript.get('refseq_id'):
continue
refseq_id = hgnc_transcript['refseq_id']
transcript['refseq_id'] = refseq_id
variant_obj['has_refseq'] = True
# Check if the refseq id is disease associated
if refseq_id in disease_associated_no_version:
transcript['is_disease_associated'] = True
# Since an Ensembl transcript can have multiple refseq identifiers we add
# all of those
transcript['refseq_identifiers'] = hgnc_transcript.get('refseq_identifiers',[])
variant_gene['common'] = hgnc_gene
# Add the associated disease terms
variant_gene['disease_terms'] = self.disease_terms(hgnc_id)
return variant_obj
|
Returns variants specified in question for a specific case.
|
def variants(self, case_id, query=None, variant_ids=None, category='snv',
nr_of_variants=10, skip=0, sort_key='variant_rank'):
"""Returns variants specified in question for a specific case.
If skip is not equal to 0, skip the first n variants.
Arguments:
case_id(str): A string that represents the case
query(dict): A dictionary with queries for the database
variant_ids(List[str])
category(str): 'sv', 'str', 'snv' or 'cancer'
nr_of_variants(int): if -1 return all variants
skip(int): How many variants to skip
sort_key: ['variant_rank', 'rank_score', 'position']
Yields:
result(Iterable[Variant])
"""
LOG.debug("Fetching variants from {0}".format(case_id))
if variant_ids:
nr_of_variants = len(variant_ids)
elif nr_of_variants == -1:
nr_of_variants = 0 # This will return all variants
else:
nr_of_variants = skip + nr_of_variants
mongo_query = self.build_query(case_id, query=query,
variant_ids=variant_ids,
category=category)
sorting = []
if sort_key == 'variant_rank':
sorting = [('variant_rank', pymongo.ASCENDING)]
if sort_key == 'rank_score':
sorting = [('rank_score', pymongo.DESCENDING)]
if sort_key == 'position':
sorting = [('position', pymongo.ASCENDING)]
result = self.variant_collection.find(
mongo_query,
skip=skip,
limit=nr_of_variants
).sort(sorting)
return result
|
Return all variants with sanger information
|
def sanger_variants(self, institute_id=None, case_id=None):
"""Return all variants with sanger information
Args:
institute_id(str)
case_id(str)
Returns:
res(pymongo.Cursor): A Cursor with all variants with sanger activity
"""
query = {'validation': {'$exists': True}}
if institute_id:
query['institute_id'] = institute_id
if case_id:
query['case_id'] = case_id
return self.variant_collection.find(query)
|
Returns the specified variant.
|
def variant(self, document_id, gene_panels=None, case_id=None):
"""Returns the specified variant.
Arguments:
document_id : A md5 key that represents the variant or "variant_id"
gene_panels(List[GenePanel])
case_id (str): case id (will search with "variant_id")
Returns:
variant_object(Variant): An ODM variant object
"""
query = {}
if case_id:
# search for a variant in a case
query['case_id'] = case_id
query['variant_id'] = document_id
else:
# search with a unique id
query['_id'] = document_id
variant_obj = self.variant_collection.find_one(query)
if variant_obj:
variant_obj = self.add_gene_info(variant_obj, gene_panels)
if variant_obj['chromosome'] in ['X', 'Y']:
## TODO add the build here
variant_obj['is_par'] = is_par(variant_obj['chromosome'],
variant_obj['position'])
return variant_obj
|
Return all variants seen in a given gene.
|
def gene_variants(self, query=None,
category='snv', variant_type=['clinical'],
nr_of_variants=50, skip=0):
"""Return all variants seen in a given gene.
If skip is not equal to 0, skip the first n variants.
Arguments:
query(dict): A dictionary with queries for the database, including
variant_type: 'clinical', 'research'
category(str): 'sv', 'str', 'snv' or 'cancer'
nr_of_variants(int): if -1 return all variants
skip(int): How many variants to skip
"""
mongo_variant_query = self.build_variant_query(query=query,
category=category, variant_type=variant_type)
sorting = [('rank_score', pymongo.DESCENDING)]
if nr_of_variants == -1:
nr_of_variants = 0 # This will return all variants
else:
nr_of_variants = skip + nr_of_variants
result = self.variant_collection.find(
mongo_variant_query
).sort(sorting).skip(skip).limit(nr_of_variants)
return result
|
Return all verified variants for a given institute
|
def verified(self, institute_id):
"""Return all verified variants for a given institute
Args:
institute_id(str): institute id
Returns:
res(list): a list with validated variants
"""
query = {
'verb' : 'validate',
'institute' : institute_id,
}
res = []
validate_events = self.event_collection.find(query)
for validated in list(validate_events):
case_id = validated['case']
var_obj = self.variant(case_id=case_id, document_id=validated['variant_id'])
case_obj = self.case(case_id=case_id)
if not case_obj or not var_obj:
continue # Take into account that stuff might have been removed from database
var_obj['case_obj'] = {
'display_name' : case_obj['display_name'],
'individuals' : case_obj['individuals']
}
res.append(var_obj)
return res
|
Return all causative variants for an institute
|
def get_causatives(self, institute_id, case_id=None):
"""Return all causative variants for an institute
Args:
institute_id(str)
case_id(str)
Returns:
causatives(list): variant document ids
"""
causatives = []
if case_id:
case_obj = self.case_collection.find_one(
{"_id": case_id}
)
causatives = [causative for causative in case_obj['causatives']]
elif institute_id:
query = self.case_collection.aggregate([
{'$match': {'collaborators': institute_id, 'causatives': {'$exists': True}}},
{'$unwind': '$causatives'},
{'$group': {'_id': '$causatives'}}
])
causatives = [item['_id'] for item in query]
return causatives
|
Check if there are any variants that are previously marked causative
|
def check_causatives(self, case_obj=None, institute_obj=None):
"""Check if there are any variants that are previously marked causative
Loop through all variants that are marked 'causative' for an
institute and check if any of the variants are present in the
current case.
Args:
case_obj (dict): A Case object
institute_obj (dict): check across the whole institute
Returns:
causatives(iterable(Variant))
"""
institute_id = case_obj['owner'] if case_obj else institute_obj['_id']
institute_causative_variant_ids = self.get_causatives(institute_id)
if len(institute_causative_variant_ids) == 0:
return []
if case_obj:
# exclude variants that are marked causative in "case_obj"
case_causative_ids = set(case_obj.get('causatives', []))
institute_causative_variant_ids = list(
set(institute_causative_variant_ids).difference(case_causative_ids)
)
# convert from unique ids to general "variant_id"
query = self.variant_collection.find(
{'_id': {'$in': institute_causative_variant_ids}},
{'variant_id': 1}
)
positional_variant_ids = [item['variant_id'] for item in query]
filters = {'variant_id': {'$in': positional_variant_ids}}
if case_obj:
filters['case_id'] = case_obj['_id']
else:
filters['institute'] = institute_obj['_id']
return self.variant_collection.find(filters)
|
Find the same variant in other cases marked causative.
|
def other_causatives(self, case_obj, variant_obj):
"""Find the same variant in other cases marked causative.
Args:
case_obj(dict)
variant_obj(dict)
Yields:
other_variant(dict)
"""
# variant id without "*_[variant_type]"
variant_id = variant_obj['display_name'].rsplit('_', 1)[0]
institute_causatives = self.get_causatives(variant_obj['institute'])
for causative_id in institute_causatives:
other_variant = self.variant(causative_id)
if not other_variant:
continue
not_same_case = other_variant['case_id'] != case_obj['_id']
same_variant = other_variant['display_name'].startswith(variant_id)
if not_same_case and same_variant:
yield other_variant
|
Delete variants of one type for a case
|
def delete_variants(self, case_id, variant_type, category=None):
"""Delete variants of one type for a case
This is used when a case is reanalyzed
Args:
case_id(str): The case id
variant_type(str): 'research' or 'clinical'
category(str): 'snv', 'sv' or 'cancer'
"""
category = category or ''
LOG.info("Deleting old {0} {1} variants for case {2}".format(
variant_type, category, case_id))
query = {'case_id': case_id, 'variant_type': variant_type}
if category:
query['category'] = category
result = self.variant_collection.delete_many(query)
LOG.info("{0} variants deleted".format(result.deleted_count))
|
Return overlapping variants.
|
def overlapping(self, variant_obj):
"""Return overlapping variants.
Look at the genes that a variant overlaps to.
Then return all variants that overlap these genes.
If variant_obj is an SV it will return the overlapping SNVs, and vice versa.
There is a problem when SVs are huge since there are too many overlapping variants.
Args:
variant_obj(dict)
Returns:
variants(iterable(dict))
"""
# This is the category of the variants that we want to collect
category = 'snv' if variant_obj['category'] == 'sv' else 'sv'
query = {
'$and': [
{'case_id': variant_obj['case_id']},
{'category': category},
{'hgnc_ids' : { '$in' : variant_obj['hgnc_ids']}}
]
}
sort_key = [('rank_score', pymongo.DESCENDING)]
# We collect the 30 most severe overlapping variants
variants = self.variant_collection.find(query).sort(sort_key).limit(30)
return variants
|
Returns variants that have been evaluated
|
def evaluated_variants(self, case_id):
"""Returns variants that has been evaluated
Return all variants, snvs/indels and svs from case case_id
which have a entry for 'acmg_classification', 'manual_rank', 'dismiss_variant'
or if they are commented.
Args:
case_id(str)
Returns:
variants(iterable(Variant))
"""
# Get all variants that have been evaluated in some way for a case
query = {
'$and': [
{'case_id': case_id},
{
'$or': [
{'acmg_classification': {'$exists': True}},
{'manual_rank': {'$exists': True}},
{'dismiss_variant': {'$exists': True}},
]
}
],
}
# Collect the result in a dictionary
variants = {}
for var in self.variant_collection.find(query):
variants[var['variant_id']] = self.add_gene_info(var)
# Collect all variant comments from the case
event_query = {
'$and': [
{'case': case_id},
{'category': 'variant'},
{'verb': 'comment'},
]
}
# Get all variantids for commented variants
comment_variants = {event['variant_id'] for event in self.event_collection.find(event_query)}
# Get the variant objects for commented variants, if they exist
for var_id in comment_variants:
# Skip if we already added the variant
if var_id in variants:
continue
# Get the variant with variant_id (not _id!)
variant_obj = self.variant(var_id, case_id=case_id)
# There could be cases with comments that refer to non-existing variants
# if a case has been reanalysed
if not variant_obj:
continue
variant_obj['is_commented'] = True
variants[var_id] = variant_obj
# Return a list with the variant objects
return variants.values()
|
Produce a reduced vcf with variants from the specified coordinates. This is used for the alignment viewer.
|
def get_region_vcf(self, case_obj, chrom=None, start=None, end=None,
gene_obj=None, variant_type='clinical', category='snv',
rank_threshold=None):
"""Produce a reduced vcf with variants from the specified coordinates
This is used for the alignment viewer.
Args:
case_obj(dict): A case from the scout database
variant_type(str): 'clinical' or 'research'. Default: 'clinical'
category(str): 'snv' or 'sv'. Default: 'snv'
rank_threshold(float): Only load variants above this score. Default: -100
chrom(str): Load variants from a certain chromosome
start(int): Specify the start position
end(int): Specify the end position
gene_obj(dict): A gene object from the database
Returns:
file_name(str): Path to the temporary file
"""
rank_threshold = rank_threshold or -100
variant_file = None
if variant_type == 'clinical':
if category == 'snv':
variant_file = case_obj['vcf_files'].get('vcf_snv')
elif category == 'sv':
variant_file = case_obj['vcf_files'].get('vcf_sv')
elif category == 'str':
variant_file = case_obj['vcf_files'].get('vcf_str')
elif variant_type == 'research':
if category == 'snv':
variant_file = case_obj['vcf_files'].get('vcf_snv_research')
elif category == 'sv':
variant_file = case_obj['vcf_files'].get('vcf_sv_research')
if not variant_file:
raise SyntaxError("Vcf file does not seem to exist")
vcf_obj = VCF(variant_file)
region = ""
if gene_obj:
chrom = gene_obj['chromosome']
start = gene_obj['start']
end = gene_obj['end']
if chrom:
if (start and end):
region = "{0}:{1}-{2}".format(chrom, start, end)
else:
region = "{0}".format(chrom)
else:
rank_threshold = rank_threshold or 5
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:
file_name = str(pathlib.Path(temp.name))
for header_line in vcf_obj.raw_header.split('\n'):
if len(header_line) > 3:
temp.write(header_line + '\n')
for variant in vcf_obj(region):
temp.write(str(variant))
return file_name
|
Given a list of variants get variant objects found in a specific patient
|
def sample_variants(self, variants, sample_name, category='snv'):
"""Given a list of variants get variant objects found in a specific patient
Args:
variants(list): a list of variant ids
sample_name(str): a sample display name
category(str): 'snv', 'sv' ..
Returns:
result(iterable(Variant))
"""
LOG.info('Retrieving variants for subject : {0}'.format(sample_name))
has_allele = re.compile('1|2') # a non wild-type allele is called at least once in this sample
query = {
'$and': [
{'_id' : { '$in' : variants}},
{'category' : category},
{'samples': {
'$elemMatch': { 'display_name' : sample_name, 'genotype_call': { '$regex' : has_allele } }
}}
]
}
result = self.variant_collection.find(query)
return result
|
Get a client to the mongo database
|
def get_connection(host='localhost', port=27017, username=None, password=None,
uri=None, mongodb=None, authdb=None, timeout=20, *args, **kwargs):
"""Get a client to the mongo database
host(str): Host of database
port(int): Port of database
username(str)
password(str)
uri(str)
authdb (str): database to use for authentication
timeout(int): How long should the client try to connect
"""
authdb = authdb or mongodb
if uri is None:
if username and password:
uri = ("mongodb://{}:{}@{}:{}/{}"
.format(quote_plus(username), quote_plus(password), host, port, authdb))
log_uri = ("mongodb://{}:****@{}:{}/{}"
.format(quote_plus(username), host, port, authdb))
else:
log_uri = uri = "mongodb://%s:%s" % (host, port)
LOG.info("Try to connect to %s" % log_uri)
try:
client = MongoClient(uri, serverSelectionTimeoutMS=timeout)
except ServerSelectionTimeoutError as err:
LOG.warning("Connection Refused")
raise ConnectionFailure
LOG.info("Connection established")
return client
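A usage sketch with hypothetical connection details:
client = get_connection(host='localhost', port=27017, mongodb='scout')
db = client['scout']   # pick a database from the returned MongoClient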
|
Creates a list of submission objects (variant and case-data) from the clinvar submission form in blueprints/variants/clinvar.html.
|
def set_submission_objects(form_fields):
"""Creates a list of submission objects (variant and case-data) from the clinvar submission form in blueprints/variants/clinvar.html.
Args:
form_fields(dict): the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER
Returns:
(variant_objs, casedata_objs): a tuple with the variant and case-data submission objects, ready to be included in the clinvar database collection
"""
variant_ids = get_submission_variants(form_fields) # A list of variant IDs present in the submitted form
# Extract list of variant objects to be submitted
variant_objs = get_objects_from_form(variant_ids, form_fields, 'variant')
# Extract list of casedata objects to be submitted
casedata_objs = get_objects_from_form(variant_ids, form_fields, 'casedata')
return (variant_objs, casedata_objs)
|
Extract the objects to be saved in the clinvar database collection. object_type param specifies if these objects are variant or casedata objects
|
def get_objects_from_form(variant_ids, form_fields, object_type):
"""Extract the objects to be saved in the clinvar database collection.
object_type param specifies if these objects are variant or casedata objects
Args:
variant_ids(list): list of database variant ids
form_fields(dict): the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER
object_type(str): either 'variant' or 'casedata'
Returns:
submission_objects(list): list of submission objects of either type 'variant' or 'casedata'
"""
submission_fields = []
if object_type == 'variant':
submission_fields = CLINVAR_HEADER
else: #collect casedata objects
submission_fields = CASEDATA_HEADER
# A list of objects (variant or casedata info) to be saved into the clinvar database collection
submission_objects = []
# Loop over the form fields and collect the data:
for variant_id in variant_ids: # loop over the variants
subm_obj = {} # A new submission object for each
# Don't include casedata for a variant unless specified by user
if object_type == 'casedata' and 'casedata_'+variant_id not in form_fields:
continue
subm_obj['csv_type'] = object_type
subm_obj['case_id'] = form_fields.get('case_id')
subm_obj['category'] = form_fields.get('category@'+variant_id)
for key, values in submission_fields.items(): # loop over the form info fields
field_value = form_fields.get(key+'@'+variant_id)
if field_value and not field_value == '-':
if key == 'ref_seq': # split this field into ref_seq and hgvs
refseq_raw = field_value.split('|')
subm_obj['ref_seq'] = refseq_raw[0]
subm_obj['hgvs'] = refseq_raw[1]
else:
subm_obj[key] = field_value
# Create a unique ID for the database
# For casedata : = caseID_sampleID_variantID
# For variants : ID = caseID_variantID
if object_type == 'casedata':
subm_obj['_id'] = str(subm_obj['case_id']) + '_' + variant_id + '_' + str(subm_obj['individual_id'])
else:
subm_obj['_id'] = str(subm_obj['case_id']) + '_' + variant_id
submission_objects.append(subm_obj)
return submission_objects
|
Extracts a list of variant ids from the clinvar submission form in blueprints/variants/clinvar.html (creation of a new clinvar submission).
|
def get_submission_variants(form_fields):
"""Extracts a list of variant ids from the clinvar submission form in blueprints/variants/clinvar.html (creation of a new clinvar submission).
Args:
form_fields(dict): the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER
Returns:
clinvars: A list of variant IDs
"""
clinvars = []
# if the html checkbox named 'all_vars' is checked in the html form, then all pinned variants from a case should be included in the clinvar submission file,
# otherwise just the selected one.
if 'all_vars' in form_fields:
for field, value in form_fields.items():
if field.startswith('local_id'):
clinvars.append(form_fields[field].replace('local_id@',''))
else:
clinvars = [form_fields['main_var']] #also a list, but has one element
return clinvars
|
Determine which fields to include in csv header by checking a list of submission objects
|
def clinvar_submission_header(submission_objs, csv_type):
"""Determine which fields to include in csv header by checking a list of submission objects
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
csv_type(str) : 'variant_data' or 'case_data'
Returns:
custom_header(dict): A dictionary with the fields required in the csv header. Keys and values are specified in CLINVAR_HEADER and CASEDATA_HEADER
"""
complete_header = {} # header containing all available fields
custom_header = {} # header reflecting the real data included in the submission objects
if csv_type == 'variant_data' :
complete_header = CLINVAR_HEADER
else:
complete_header = CASEDATA_HEADER
for header_key, header_value in complete_header.items(): # loop over the info fields provided in each submission object
for clinvar_obj in submission_objs: # loop over the submission objects
for key, value in clinvar_obj.items(): # loop over the keys and values of the clinvar objects
if not header_key in custom_header and header_key == key: # add to custom header if missing and specified in submission object
custom_header[header_key] = header_value
return custom_header
|
Create the lines to include in a Clinvar submission csv file from a list of submission objects and a custom document header
|
def clinvar_submission_lines(submission_objs, submission_header):
"""Create the lines to include in a Clinvar submission csv file from a list of submission objects and a custom document header
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
submission_header(dict) : as in constants CLINVAR_HEADER and CASEDATA_HEADER, but with required fields only
Returns:
submission_lines(list): a list of strings, each string represents a line of the clinvar csv file to be downloaded
"""
submission_lines = []
for submission_obj in submission_objs: # Loop over the submission objects. Each of these is a line
csv_line = []
for header_key, header_value in submission_header.items(): # header_keys are the same keys as in submission_objs
if header_key in submission_obj: # The field is filled in for this variant/casedata object
csv_line.append('"'+submission_obj.get(header_key)+'"')
else: # Empty field for this variant/casedata object
csv_line.append('""')
submission_lines.append(','.join(csv_line))
return submission_lines
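# Sketch of how the two helpers above could be combined into downloadable CSV text,
# assuming variant_objs is a list of variant submission objects:
header = clinvar_submission_header(variant_objs, 'variant_data')
lines = clinvar_submission_lines(variant_objs, header)
header_line = ','.join('"{}"'.format(value) for value in header.values())
csv_content = '\n'.join([header_line] + lines)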
|
Load all the transcripts
|
def load_transcripts(adapter, transcripts_lines=None, build='37', ensembl_genes=None):
"""Load all the transcripts
Transcript information is from ensembl.
Args:
adapter(MongoAdapter)
transcripts_lines(iterable): iterable with ensembl transcript lines
build(str)
ensembl_genes(dict): Map from ensembl_id -> HgncGene
Returns:
transcript_objs(list): A list with all transcript objects
"""
# Fetch all genes with ensemblid as keys
ensembl_genes = ensembl_genes or adapter.ensembl_genes(build)
if transcripts_lines is None:
transcripts_lines = fetch_ensembl_transcripts(build=build)
# Map with all transcripts enstid -> parsed transcript
transcripts_dict = parse_transcripts(transcripts_lines)
for ens_tx_id in list(transcripts_dict):
parsed_tx = transcripts_dict[ens_tx_id]
# Get the ens gene id
ens_gene_id = parsed_tx['ensembl_gene_id']
# Fetch the internal gene object to find out the correct hgnc id
gene_obj = ensembl_genes.get(ens_gene_id)
# If the gene is non existing in scout we skip the transcript
if not gene_obj:
transcripts_dict.pop(ens_tx_id)
LOG.debug("Gene %s does not exist in build %s", ens_gene_id, build)
continue
# Add the correct hgnc id
parsed_tx['hgnc_id'] = gene_obj['hgnc_id']
# Primary transcript information is collected from HGNC
parsed_tx['primary_transcripts'] = set(gene_obj.get('primary_transcripts', []))
ref_seq_transcripts = 0
nr_primary_transcripts = 0
nr_transcripts = len(transcripts_dict)
transcript_objs = []
with progressbar(transcripts_dict.values(), label="Building transcripts", length=nr_transcripts) as bar:
for tx_data in bar:
#################### Get the correct refseq identifier ####################
# We need to decide one refseq identifier for each transcript, if there are any to
# choose from. The algorithm is as follows:
# If there is ONE mrna, this one is chosen
# If there are several mrna, the one that is in 'primary_transcripts' is chosen
# Else one is chosen at random
# The same follows for the other categories where nc_rna has precedence over mrna_predicted
# We will store all refseq identifiers in a "refseq_identifiers" list as well
tx_data['is_primary'] = False
primary_transcripts = tx_data['primary_transcripts']
refseq_identifier = None
refseq_identifiers = []
for category in TRANSCRIPT_CATEGORIES:
identifiers = tx_data[category]
if not identifiers:
continue
for refseq_id in identifiers:
# Add all refseq identifiers to refseq_identifiers
refseq_identifiers.append(refseq_id)
ref_seq_transcripts += 1
if refseq_id in primary_transcripts:
refseq_identifier = refseq_id
tx_data['is_primary'] = True
nr_primary_transcripts += 1
if not refseq_identifier:
refseq_identifier = refseq_id
if refseq_identifier:
tx_data['refseq_id'] = refseq_identifier
if refseq_identifiers:
tx_data['refseq_identifiers'] = refseq_identifiers
#################### #################### ####################
# Build the transcript object
tx_obj = build_transcript(tx_data, build)
transcript_objs.append(tx_obj)
# Load all transcripts
LOG.info("Loading transcripts...")
if len(transcript_objs) > 0:
adapter.load_transcript_bulk(transcript_objs)
LOG.info('Number of transcripts in build %s: %s', build, nr_transcripts)
LOG.info('Number of transcripts with refseq identifier: %s', ref_seq_transcripts)
LOG.info('Number of primary transcripts: %s', nr_primary_transcripts)
return transcript_objs
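# Usage sketch, assuming adapter is a connected MongoAdapter;
# transcript lines are fetched from Ensembl when none are supplied
transcript_objs = load_transcripts(adapter, build='37')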
|
Add a gene panel to the database.
|
def panel(context, path, date, display_name, version, panel_type, panel_id, institute, omim, api_key, panel_app):
"""Add a gene panel to the database."""
adapter = context.obj['adapter']
institute = institute or 'cust000'
if omim:
api_key = api_key or context.obj.get('omim_api_key')
if not api_key:
LOG.warning("Please provide a omim api key to load the omim gene panel")
context.abort()
#Check if OMIM-AUTO exists
if adapter.gene_panel(panel_id='OMIM-AUTO'):
LOG.warning("OMIM-AUTO already exists in database")
LOG.info("To create a new version use scout update omim")
return
# Here we know that there is no panel loaded
try:
adapter.load_omim_panel(api_key, institute=institute)
except Exception as err:
LOG.error(err)
context.abort()
if panel_app:
# try:
load_panel_app(adapter, panel_id, institute=institute)
# except Exception as err:
# LOG.warning(err)
# context.abort()
if (omim or panel_app):
return
if path is None:
LOG.info("Please provide a panel")
return
try:
load_panel(path, adapter, date, display_name, version, panel_type, panel_id, institute)
except Exception as err:
LOG.warning(err)
context.abort()
|
Build an Exon object
|
def build_exon(exon_info, build='37'):
"""Build a Exon object object
Args:
exon_info(dict): Exon information
Returns:
exon_obj(Exon)
"exon_id": str, # str(chrom-start-end)
"chrom": str,
"start": int,
"end": int,
"transcript": str, # ENST ID
"hgnc_id": int, # HGNC_id
"rank": int, # Order of exon in transcript
"build": str, # Genome build
"""
try:
chrom = exon_info['chrom']
except KeyError:
raise KeyError("Exons has to have a chromosome")
try:
start = int(exon_info['start'])
except KeyError:
raise KeyError("Exon has to have a start")
except TypeError:
raise TypeError("Exon start has to be integer")
try:
end = int(exon_info['end'])
except KeyError:
raise KeyError("Exon has to have a end")
except TypeError:
raise TypeError("Exon end has to be integer")
try:
rank = int(exon_info['rank'])
except KeyError:
raise KeyError("Exon has to have a rank")
except TypeError:
raise TypeError("Exon rank has to be integer")
try:
exon_id = exon_info['exon_id']
except KeyError:
raise KeyError("Exons has to have a id")
try:
transcript = exon_info['transcript']
except KeyError:
raise KeyError("Exons has to have a transcript")
try:
hgnc_id = int(exon_info['hgnc_id'])
except KeyError:
raise KeyError("Exons has to have a hgnc_id")
except TypeError:
raise TypeError("hgnc_id has to be integer")
exon_obj = Exon(
exon_id = exon_id,
chrom = chrom,
start = start,
end = end,
rank = rank,
transcript = transcript,
hgnc_id = hgnc_id,
build = build,
)
return exon_obj
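# Minimal usage sketch of the expected input; coordinates and identifiers are illustrative
exon_info = {
    'exon_id': '1-1167658-1170421',
    'chrom': '1',
    'start': 1167658,
    'end': 1170421,
    'transcript': 'ENST00000379198',
    'hgnc_id': 17978,
    'rank': 1,
}
exon_obj = build_exon(exon_info, build='37')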
|
Delete a version of a gene panel or all versions of a gene panel
|
def panel(context, panel_id, version):
"""Delete a version of a gene panel or all versions of a gene panel"""
LOG.info("Running scout delete panel")
adapter = context.obj['adapter']
panel_objs = adapter.gene_panels(panel_id=panel_id, version=version)
if panel_objs.count() == 0:
LOG.info("No panels found")
for panel_obj in panel_objs:
adapter.delete_panel(panel_obj)
|
Delete all indexes in the database
|
def index(context):
"""Delete all indexes in the database"""
LOG.info("Running scout delete index")
adapter = context.obj['adapter']
for collection in adapter.db.collection_names():
adapter.db[collection].drop_indexes()
LOG.info("All indexes deleted")
|
Delete a user from the database
|
def user(context, mail):
"""Delete a user from the database"""
LOG.info("Running scout delete user")
adapter = context.obj['adapter']
user_obj = adapter.user(mail)
if not user_obj:
LOG.warning("User {0} could not be found in database".format(mail))
else:
adapter.delete_user(mail)
|
Delete all genes in the database
|
def genes(context, build):
"""Delete all genes in the database"""
LOG.info("Running scout delete genes")
adapter = context.obj['adapter']
if build:
LOG.info("Dropping genes collection for build: %s", build)
else:
LOG.info("Dropping genes collection")
adapter.drop_genes(build)
|
Delete all exons in the database
|
def exons(context, build):
"""Delete all exons in the database"""
LOG.info("Running scout delete exons")
adapter = context.obj['adapter']
adapter.drop_exons(build)
|
Delete a case and its variants from the database
|
def case(context, institute, case_id, display_name):
"""Delete a case and it's variants from the database"""
adapter = context.obj['adapter']
if not (case_id or display_name):
click.echo("Please specify what case to delete")
context.abort()
if display_name:
if not institute:
click.echo("Please specify the owner of the case that should be "
"deleted with flag '-i/--institute'.")
context.abort()
case_id = "{0}-{1}".format(institute, display_name)
LOG.info("Running deleting case {0}".format(case_id))
case = adapter.delete_case(
case_id=case_id,
institute_id=institute,
display_name=display_name
)
if case.deleted_count == 1:
adapter.delete_variants(case_id=case_id, variant_type='clinical')
adapter.delete_variants(case_id=case_id, variant_type='research')
else:
LOG.warning("Case does not exist in database")
context.abort()
|
Show all individuals from all cases in the database
|
def individuals(context, institute, causatives, case_id):
"""Show all individuals from all cases in the database"""
LOG.info("Running scout view individuals")
adapter = context.obj['adapter']
individuals = []
if case_id:
case = adapter.case(case_id=case_id)
if case:
cases = [case]
else:
LOG.info("Could not find case %s", case_id)
return
else:
cases = [case_obj for case_obj in
adapter.cases(
collaborator=institute,
has_causatives=causatives)]
if len(cases) == 0:
LOG.info("Could not find cases that match criteria")
return
individuals = (ind_obj for case_obj in cases for ind_obj in case_obj['individuals'])
click.echo("#case_id\tind_id\tdisplay_name\tsex\tphenotype\tmother\tfather")
for case in cases:
for ind_obj in case['individuals']:
ind_info = [
case['_id'], ind_obj['individual_id'],
ind_obj['display_name'], SEX_MAP[int(ind_obj['sex'])],
PHENOTYPE_MAP[ind_obj['phenotype']], ind_obj['mother'],
ind_obj['father']
]
click.echo('\t'.join(ind_info))
|
Extract all phenotype-associated terms for a case. Drawback of this method is that it returns the same phenotype terms for each affected individual of the case. Args: case_obj(dict): a scout case object Returns: features(list): a list of phenotype objects that looks like this: [{id: HP:0001644, label: Dilated cardiomyopathy, observed: yes}, ...]
|
def hpo_terms(case_obj):
"""Extract all phenotype-associated terms for a case. Drawback of this method is that
it returns the same phenotype terms for each affected individual
of the case.
Args:
case_obj(dict): a scout case object
Returns:
features(list): a list of phenotype objects that looks like this:
[
{
"id": "HP:0001644",
"label": "Dilated cardiomyopathy",
"observed": "yes"
},
...
]
"""
LOG.info('Collecting phenotype terms for case {}'.format(case_obj.get('display_name')))
features = []
case_features = case_obj.get('phenotype_terms')
if case_features:
# re-structure case features to mirror matchmaker feature fields:
for feature in case_features:
feature_obj = {
"id" : feature.get('phenotype_id'),
"label" : feature.get('feature'),
"observed" : "yes"
}
features.append(feature_obj)
return features
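# Usage sketch with a minimal, hypothetical case object
case_obj = {
    'display_name': 'case_1',
    'phenotype_terms': [{'phenotype_id': 'HP:0001644', 'feature': 'Dilated cardiomyopathy'}],
}
hpo_terms(case_obj)
# -> [{'id': 'HP:0001644', 'label': 'Dilated cardiomyopathy', 'observed': 'yes'}]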
|
Extract all OMIM phenotypes available for the case Args: case_obj(dict): a scout case object Returns: disorders(list): a list of OMIM disorder objects
|
def omim_terms(case_obj):
"""Extract all OMIM phenotypes available for the case
Args:
case_obj(dict): a scout case object
Returns:
disorders(list): a list of OMIM disorder objects
"""
LOG.info("Collecting OMIM disorders for case {}".format(case_obj.get('display_name')))
disorders = []
case_disorders = case_obj.get('diagnosis_phenotypes') # array of OMIM terms
if case_disorders:
for disorder in case_disorders:
disorder_obj = {
"id" : ':'.join([ 'MIM', str(disorder)])
}
disorders.append(disorder_obj)
return disorders
|
Extract and parse matchmaker-like genomic features from pinned variants of a patient Args: store(MongoAdapter): connection to the database case_obj(dict): a scout case object sample_name(str): sample display name genes_only(bool): if True only gene names will be included in genomic features
|
def genomic_features(store, case_obj, sample_name, genes_only):
"""Extract and parse matchmaker-like genomic features from pinned variants
of a patient
Args:
store(MongoAdapter) : connection to the database
case_obj(dict): a scout case object
sample_name(str): sample display name
genes_only(bool): if True only gene names will be included in genomic features
Returns:
g_features(list): a list of genomic feature objects that looks like this:
[
{
"gene": {
"id": "LIMS2"
},
"variant": {
"alternateBases": "C",
"assembly": "GRCh37",
"end": 128412081,
"referenceBases": "G",
"referenceName": "2",
"start": 128412080
},
"zygosity": 1
},
....
]
"""
g_features = []
# genome build is required
build = case_obj['genome_build']
if not build in ['37', '38']:
build = 'GRCh37'
else:
build = 'GRCh'+build
individual_pinned_snvs = list(store.sample_variants( variants=case_obj.get('suspects'),
sample_name=sample_name))
# if genes_only is True don't add duplicated genes
gene_set = set()
for var in individual_pinned_snvs:
# a variant could hit one or several genes so create a genomic feature for each of these genes
hgnc_genes = var.get('hgnc_ids')
# Looks like MatchMaker Exchange API accepts only variants that hit genes :(
if not hgnc_genes:
continue
for hgnc_id in hgnc_genes:
gene_obj = store.hgnc_gene(hgnc_id)
if not gene_obj:
continue
g_feature = {
'gene': {'id': gene_obj.get('hgnc_symbol')}
}
if genes_only and not hgnc_id in gene_set: # if only gene names should be shared
gene_set.add(hgnc_id)
g_features.append(g_feature)
continue
# if also variants should be shared:
g_feature['variant'] = {
'referenceName' : var['chromosome'],
'start' : var['position'],
'end' : var['end'],
'assembly' : build,
'referenceBases' :var['reference'],
'alternateBases' : var['alternative'],
'shareVariantLevelData' : True
}
zygosity = None
# collect zygosity for the given sample
zygosities = var['samples'] # it's a list with zygosity situation for each sample of the case
for zyg in zygosities:
if zyg.get('display_name') == sample_name: # sample of interest
zygosity = zyg['genotype_call'].count('1') + zyg['genotype_call'].count('2')
g_feature['zygosity'] = zygosity
g_features.append(g_feature)
return g_features
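# The zygosity above is simply a count of alternative alleles in the genotype call string:
# '0/1' -> 1 (heterozygous), '1/1' -> 2 (homozygous alternative), '0/0' -> 0
for call in ['0/1', '1/1', '0/0']:
    print(call, call.count('1') + call.count('2'))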
|
Parse a list of MatchMaker match objects and return a readable list of matches to display in the matchmaker matches view.
|
def parse_matches(patient_id, match_objs):
"""Parse a list of matchmaker matches objects and returns
a readable list of matches to display in matchmaker matches view.
Args:
patient_id(str): id of a mme patient
match_objs(list): list of match objs returned by MME server for the patient
# match_objs looks like this:
[
{
'node' : { id : node_id , label: node_label},
'patients' : [
{ 'patient': {patient1_data} },
{ 'patient': {patient2_data} },
..
]
},
..
]
Returns:
parsed_matches(list): a list of parsed match objects
"""
LOG.info('Parsing MatchMaker matches for patient {}'.format(patient_id))
parsed_matches = []
for match_obj in match_objs:
# convert match date from millisecond to readable date
milliseconds_date = match_obj['created']['$date']
mdate = datetime.datetime.fromtimestamp(milliseconds_date/1000.0)
match_type = 'external'
matching_patients = []
parsed_match = {
'match_oid' : match_obj['_id']['$oid'],# save match object ID
'match_date' : mdate
}
# if patient was used as query patient:
if match_obj['data']['patient']['id'] == patient_id:
match_results = match_obj['results'] # List of matching patients
for node_result in match_results:
if match_obj['match_type'] == 'internal':
match_type = 'internal'
for patient in node_result['patients']:
match_patient = {
'patient_id' : patient['patient']['id'],
'score' : patient['score'],
'patient' : patient['patient'],
'node' : node_result['node']
}
matching_patients.append(match_patient)
else: # else if patient was returned as a match result for another patient
m_patient = match_obj['data']['patient']
contact_institution = m_patient['contact'].get('institution')
if contact_institution and 'Scout software user' in contact_institution:
match_type = 'internal'
# loop over match results to capture score for matching
score = None
for res in match_obj['results']:
for patient in res['patients']:
LOG.info('Looping in else, patient:{}'.format(patient['patient']['id']))
if patient['patient']['id'] == patient_id:
score = patient['score']
match_patient = {
'patient_id' : m_patient['id'],
'score' : score,
'patient' : m_patient,
'node' : res['node']
}
matching_patients.append(match_patient)
parsed_match['match_type'] = match_type
parsed_match['patients'] = matching_patients
parsed_matches.append(parsed_match)
# sort results by descending score
parsed_matches = sorted(parsed_matches, key=lambda k: k['match_date'], reverse=True)
return parsed_matches
|
Display cases from the database
|
def cases(context, institute, display_name, case_id, nr_variants, variants_treshold):
"""Display cases from the database"""
LOG.info("Running scout view institutes")
adapter = context.obj['adapter']
models = []
if case_id:
case_obj = adapter.case(case_id=case_id)
if case_obj:
models.append(case_obj)
else:
models = adapter.cases(collaborator=institute, name_query=display_name)
models = [case_obj for case_obj in models]
if not models:
LOG.info("No cases could be found")
return
header = ['case_id', 'display_name', 'institute']
if variants_treshold:
LOG.info("Only show cases with more than %s variants", variants_treshold)
nr_variants = True
if nr_variants:
LOG.info("Displaying number of variants for each case")
header.append('clinical')
header.append('research')
click.echo("#"+'\t'.join(header))
for model in models:
output_str = "{:<12}\t{:<12}\t{:<12}"
output_values = [model['_id'],model['display_name'],model['owner']]
if nr_variants:
output_str += "\t{:<12}\t{:<12}"
nr_clinical = 0
nr_research = 0
variants = adapter.variant_collection.find({'case_id':model['_id']})
i = 0
for i, var in enumerate(variants, 1):
if var['variant_type'] == 'clinical':
nr_clinical += 1
else:
nr_research += 1
output_values.extend([nr_clinical, nr_research])
if variants_treshold and i < variants_treshold:
LOG.debug("Case %s had to few variants, skipping", model['_id'])
continue
click.echo(output_str.format(*output_values))
|
Returns the currently active user as an object.
|
def load_user(user_email):
"""Returns the currently active user as an object."""
user_obj = store.user(user_email)
user_inst = LoginUser(user_obj) if user_obj else None
return user_inst
|
Login a user if they have access.
|
def login():
"""Login a user if they have access."""
# store potential next param URL in the session
if 'next' in request.args:
session['next_url'] = request.args['next']
if current_app.config.get('GOOGLE'):
callback_url = url_for('.authorized', _external=True)
return google.authorize(callback=callback_url)
user_email = request.args.get('email')
user_obj = store.user(user_email)
if user_obj is None:
flash("email not whitelisted: {}".format(user_email), 'warning')
return redirect(url_for('public.index'))
return perform_login(user_obj)
|
Updates a case after a submission to MatchMaker Exchange Args: case_obj(dict): a scout case object user_obj(dict): a scout user object mme_subm_obj(dict): contains MME submission params and server response Returns: updated_case(dict): the updated scout case
|
def case_mme_update(self, case_obj, user_obj, mme_subm_obj):
"""Updates a case after a submission to MatchMaker Exchange
Args:
case_obj(dict): a scout case object
user_obj(dict): a scout user object
mme_subm_obj(dict): contains MME submission params and server response
Returns:
updated_case(dict): the updated scout case
"""
created = None
patient_ids = []
updated = datetime.now()
if 'mme_submission' in case_obj and case_obj['mme_submission']:
created = case_obj['mme_submission']['created_at']
else:
created = updated
patients = [ resp['patient'] for resp in mme_subm_obj.get('server_responses')]
subm_obj = {
'created_at' : created,
'updated_at' : updated,
'patients' : patients, # list of submitted patient data
'subm_user' : user_obj['_id'], # submitting user
'sex' : mme_subm_obj['sex'],
'features' : mme_subm_obj['features'],
'disorders' : mme_subm_obj['disorders'],
'genes_only' : mme_subm_obj['genes_only']
}
case_obj['mme_submission'] = subm_obj
updated_case = self.update_case(case_obj)
# create events for subjects added in MatchMaker for this case
institute_obj = self.institute(case_obj['owner'])
for individual in case_obj['individuals']:
if individual['phenotype'] == 2: # affected
# create event for patient
self.create_event(institute=institute_obj, case=case_obj, user=user_obj,
link='', category='case', verb='mme_add', subject=individual['display_name'],
level='specific')
return updated_case
|
Delete a MatchMaker submission from a case record and create the related event. Args: case_obj(dict): a scout case object user_obj(dict): a scout user object Returns: updated_case(dict): the updated scout case
|
def case_mme_delete(self, case_obj, user_obj):
"""Delete a MatchMaker submission from a case record
and creates the related event.
Args:
case_obj(dict): a scout case object
user_obj(dict): a scout user object
Returns:
updated_case(dict): the updated scout case
"""
institute_obj = self.institute(case_obj['owner'])
# create events for subjects' removal from MatchMaker for this case
for individual in case_obj['individuals']:
if individual['phenotype'] == 2: # affected
# create event for patient removal
self.create_event(institute=institute_obj, case=case_obj, user=user_obj,
link='', category='case', verb='mme_remove', subject=individual['display_name'],
level='specific')
# Reset mme_submission field for this case
case_obj['mme_submission'] = None
updated_case = self.update_case(case_obj)
return updated_case
|
Build an institute object
|
def build_institute(internal_id, display_name, sanger_recipients=None,
coverage_cutoff=None, frequency_cutoff=None):
"""Build a institute object
Args:
internal_id(str)
display_name(str)
sanger_recipients(list(str)): List with email addresses
Returns:
institute_obj(scout.models.Institute)
"""
LOG.info("Building institute %s with display name %s", internal_id,display_name)
institute_obj = Institute(
internal_id=internal_id,
display_name=display_name,
sanger_recipients=sanger_recipients,
coverage_cutoff = coverage_cutoff,
frequency_cutoff = frequency_cutoff
)
for key in list(institute_obj):
if institute_obj[key] is None:
institute_obj.pop(key)
return institute_obj
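# Usage sketch; values are illustrative and None-valued fields are stripped
institute_obj = build_institute(
    internal_id='cust000',
    display_name='Institute 0',
    sanger_recipients=['user@mail.com'],
)
# coverage_cutoff and frequency_cutoff are popped since they were left as None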
|
Delete an event
|
def delete_event(self, event_id):
"""Delete a event
Arguments:
event_id (str): The database key for the event
"""
LOG.info("Deleting event{0}".format(event_id))
if not isinstance(event_id, ObjectId):
event_id = ObjectId(event_id)
self.event_collection.delete_one({'_id': event_id})
LOG.debug("Event {0} deleted".format(event_id))
|
Create an Event with the parameters given.
|
def create_event(self, institute, case, user, link, category, verb,
subject, level='specific', variant=None, content=None,
panel=None):
"""Create a Event with the parameters given.
Arguments:
institute (dict): An institute
case (dict): A case
user (dict): A User
link (str): The url to be used in the event
category (str): case or variant
verb (str): What type of event
subject (str): What is operated on
level (str): 'specific' or 'global'. Default is 'specific'
variant (dict): A variant
content (str): The content of the comment
Returns:
event(dict): The inserted event
"""
variant = variant or {}
event = dict(
institute=institute['_id'],
case=case['_id'],
user_id=user['_id'],
user_name=user['name'],
link=link,
category=category,
verb=verb,
subject=subject,
level=level,
variant_id=variant.get('variant_id'),
content=content,
panel=panel,
created_at=datetime.now(),
updated_at=datetime.now(),
)
LOG.debug("Saving Event")
self.event_collection.insert_one(event)
LOG.debug("Event Saved")
return event
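# Usage sketch, assuming adapter is a connected MongoAdapter and that the
# institute, case and user dicts were fetched from the database
event = adapter.create_event(
    institute=institute_obj,
    case=case_obj,
    user=user_obj,
    link='/cust000/case_1',
    category='case',
    verb='comment',
    subject=case_obj['display_name'],
    content='A case level comment',
)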
|
Fetch events from the database.
|
def events(self, institute, case=None, variant_id=None, level=None,
comments=False, panel=None):
"""Fetch events from the database.
Args:
institute (dict): An institute
case (dict): A case
variant_id (str, optional): global variant id
level (str, optional): restrict comments to 'specific' or 'global'
comments (bool, optional): restrict events to include only comments
panel (str): A panel name
Returns:
pymongo.Cursor: Query result
"""
query = {}
if variant_id:
if comments:
# If it's comment-related event collect global and variant-specific comment events
LOG.debug("Fetching all comments for institute {0} case {1} variant {2}".format(
institute['_id'], case['_id'], variant_id))
query = {
'$or': [
{
'category' : 'variant',
'variant_id' : variant_id,
'verb' : 'comment',
'level' : 'global'
},
{
'category' : 'variant',
'variant_id' : variant_id,
'institute' : institute['_id'],
'case' : case['_id'],
'verb' : 'comment',
'level' : 'specific'
}
]
}
else: # Collect other variant-specific events which are not comments
query['institute'] = institute['_id']
query['category'] = 'variant'
query['variant_id'] = variant_id
query['case'] = case['_id']
else:
query['institute'] = institute['_id']
if panel:
query['panel'] = panel
# If no variant_id or panel we know that it is a case level comment
else:
query['category'] = 'case'
if case:
query['case'] = case['_id']
if comments:
query['verb'] = 'comment'
return self.event_collection.find(query).sort('created_at', pymongo.DESCENDING)
|
Fetch all events by a specific user.
|
def user_events(self, user_obj=None):
"""Fetch all events by a specific user."""
query = dict(user_id=user_obj['_id']) if user_obj else dict()
return self.event_collection.find(query)
|
Add a new phenotype term to a case
|
def add_phenotype(self, institute, case, user, link, hpo_term=None,
omim_term=None, is_group=False):
"""Add a new phenotype term to a case
Create a phenotype term and event with the given information
Args:
institute (Institute): An Institute object
case (Case): Case object
user (User): A User object
link (str): The url to be used in the event
hpo_term (str): A hpo id
omim_term (str): A omim id
is_group (bool): is phenotype term a group?
"""
hpo_results = []
try:
if hpo_term:
hpo_results = [hpo_term]
elif omim_term:
LOG.debug("Fetching info for mim term {0}".format(omim_term))
disease_obj = self.disease_term(omim_term)
if disease_obj:
for hpo_term in disease_obj.get('hpo_terms', []):
hpo_results.append(hpo_term)
else:
raise ValueError('Must supply either hpo or omim term')
except ValueError as e:
## TODO Should we raise a more proper exception here?
raise e
existing_terms = set(term['phenotype_id'] for term in
case.get('phenotype_terms', []))
updated_case = case
phenotype_terms = []
for hpo_term in hpo_results:
LOG.debug("Fetching info for hpo term {0}".format(hpo_term))
hpo_obj = self.hpo_term(hpo_term)
if hpo_obj is None:
raise ValueError("Hpo term: %s does not exist in database" % hpo_term)
phenotype_id = hpo_obj['_id']
description = hpo_obj['description']
if phenotype_id not in existing_terms:
phenotype_term = dict(phenotype_id=phenotype_id, feature=description)
phenotype_terms.append(phenotype_term)
LOG.info("Creating event for adding phenotype term for case"
" {0}".format(case['display_name']))
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='add_phenotype',
subject=case['display_name'],
content=phenotype_id
)
if is_group:
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$addToSet': {
'phenotype_terms': {'$each': phenotype_terms},
'phenotype_groups': {'$each': phenotype_terms},
},
},
return_document=pymongo.ReturnDocument.AFTER
)
else:
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$addToSet': {
'phenotype_terms': {'$each': phenotype_terms},
},
},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.debug("Case updated")
return updated_case
|
Remove an existing phenotype from a case
|
def remove_phenotype(self, institute, case, user, link, phenotype_id,
is_group=False):
"""Remove an existing phenotype from a case
Args:
institute (dict): An Institute object
case (dict): Case object
user (dict): A User object
link (dict): The url to be used in the event
phenotype_id (str): A phenotype id
Returns:
updated_case(dict)
"""
LOG.info("Removing HPO term from case {0}".format(case['display_name']))
if is_group:
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$pull': {
'phenotype_terms': {'phenotype_id': phenotype_id},
'phenotype_groups': {'phenotype_id': phenotype_id},
},
},
return_document=pymongo.ReturnDocument.AFTER
)
else:
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$pull': {
'phenotype_terms': {'phenotype_id': phenotype_id},
},
},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.info("Creating event for removing phenotype term {0}" \
" from case {1}".format(phenotype_id, case['display_name']))
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='remove_phenotype',
subject=case['display_name']
)
LOG.debug("Case updated")
return updated_case
|
Add a comment to a variant or a case.
|
def comment(self, institute, case, user, link, variant=None,
content="", comment_level="specific"):
"""Add a comment to a variant or a case.
This function will create an Event to log that a user has commented on
a variant. If a variant id is given it will be a variant comment.
A variant comment can be 'global' or 'specific'. The global comments will
be shown for this variation in all cases while the specific comments
will only be shown for a specific case.
Arguments:
institute (dict): An Institute object
case (dict): A Case object
user (dict): A User object
link (str): The url to be used in the event
variant (dict): A variant object
content (str): The content of the comment
comment_level (str): Any one of 'specific' or 'global'.
Default is 'specific'
Return:
comment(dict): The comment event that was inserted
"""
if not comment_level in COMMENT_LEVELS:
raise SyntaxError("Comment levels can only be in {}".format(','.join(COMMENT_LEVELS)))
if variant:
LOG.info("Creating event for a {0} comment on variant {1}".format(
comment_level, variant['display_name']))
comment = self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='variant',
verb='comment',
level=comment_level,
variant=variant,
subject=variant['display_name'],
content=content
)
else:
LOG.info("Creating event for a comment on case {0}".format(
case['display_name']))
comment = self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='comment',
subject=case['display_name'],
content=content
)
return comment
|
Parse the genotype calls for a variant
|
def parse_genotypes(variant, individuals, individual_positions):
"""Parse the genotype calls for a variant
Args:
variant(cyvcf2.Variant)
individuals: List[dict]
individual_positions(dict)
Returns:
genotypes(list(dict)): A list of genotypes
"""
genotypes = []
for ind in individuals:
pos = individual_positions[ind['individual_id']]
genotypes.append(parse_genotype(variant, ind, pos))
return genotypes
|
Get the genotype information in the proper format
|
def parse_genotype(variant, ind, pos):
"""Get the genotype information in the proper format
Sv specific format fields:
##FORMAT=<ID=DV,Number=1,Type=Integer,
Description="Number of paired-ends that support the event">
##FORMAT=<ID=PE,Number=1,Type=Integer,
Description="Number of paired-ends that support the event">
##FORMAT=<ID=PR,Number=.,Type=Integer,
Description="Spanning paired-read support for the ref and alt alleles
in the order listed">
##FORMAT=<ID=RC,Number=1,Type=Integer,
Description="Raw high-quality read counts for the SV">
##FORMAT=<ID=RCL,Number=1,Type=Integer,
Description="Raw high-quality read counts for the left control region">
##FORMAT=<ID=RCR,Number=1,Type=Integer,
Description="Raw high-quality read counts for the right control region">
##FORMAT=<ID=RR,Number=1,Type=Integer,
Description="# high-quality reference junction reads">
##FORMAT=<ID=RV,Number=1,Type=Integer,
Description="# high-quality variant junction reads">
##FORMAT=<ID=SR,Number=1,Type=Integer,
Description="Number of split reads that support the event">
Args:
variant(cyvcf2.Variant)
ind(dict): A dictionary with individual information
pos(int): What position the ind has in vcf
Returns:
gt_call(dict)
"""
gt_call = {}
ind_id = ind['individual_id']
gt_call['individual_id'] = ind_id
gt_call['display_name'] = ind['display_name']
# Fill the object with the relevant information:
genotype = variant.genotypes[pos]
ref_call = genotype[0]
alt_call = genotype[1]
gt_call['genotype_call'] = '/'.join([GENOTYPE_MAP[ref_call],
GENOTYPE_MAP[alt_call]])
paired_end_alt = None
paired_end_ref = None
split_read_alt = None
split_read_ref = None
# Check if PE is annotated
# This is the number of paired end reads that supports the variant
if 'PE' in variant.FORMAT:
try:
value = int(variant.format('PE')[pos])
if not value < 0:
paired_end_alt = value
except ValueError as e:
pass
# Check if PR is annotated
# Number of paired end reads that supports ref and alt
if 'PR' in variant.FORMAT:
values = variant.format('PR')[pos]
try:
alt_value = int(values[1])
ref_value = int(values[0])
if not alt_value < 0:
paired_end_alt = alt_value
if not ref_value < 0:
paired_end_ref = ref_value
except ValueError as r:
pass
# Check if 'SR' is annotated
if 'SR' in variant.FORMAT:
values = variant.format('SR')[pos]
alt_value = 0
ref_value = 0
if len(values) == 1:
alt_value = int(values[0])
elif len(values) == 2:
alt_value = int(values[1])
ref_value = int(values[0])
if not alt_value < 0:
split_read_alt = alt_value
if not ref_value < 0:
split_read_ref = ref_value
# Number of paired ends that supports the event
if 'DV' in variant.FORMAT:
values = variant.format('DV')[pos]
alt_value = int(values[0])
if not alt_value < 0:
paired_end_alt = alt_value
# Number of paired ends that supports the reference
if 'DR' in variant.FORMAT:
values = variant.format('DR')[pos]
ref_value = int(values[0])
if not ref_value < 0:
paired_end_ref = ref_value
# Number of split reads that supports the event
if 'RV' in variant.FORMAT:
values = variant.format('RV')[pos]
alt_value = int(values[0])
if not alt_value < 0:
split_read_alt = alt_value
# Number of split reads that supports the reference
if 'RR' in variant.FORMAT:
values = variant.format('RR')[pos]
ref_value = int(values[0])
if not ref_value < 0:
split_read_ref = ref_value
alt_depth = int(variant.gt_alt_depths[pos])
if alt_depth == -1:
if 'VD' in variant.FORMAT:
alt_depth = int(variant.format('VD')[pos][0])
if (paired_end_alt != None or split_read_alt != None):
alt_depth = 0
if paired_end_alt:
alt_depth += paired_end_alt
if split_read_alt:
alt_depth += split_read_alt
gt_call['alt_depth'] = alt_depth
ref_depth = int(variant.gt_ref_depths[pos])
if ref_depth == -1:
if (paired_end_ref != None or split_read_ref != None):
ref_depth = 0
if paired_end_ref:
ref_depth += paired_end_ref
if split_read_ref:
ref_depth += split_read_ref
gt_call['ref_depth'] = ref_depth
alt_frequency = float(variant.gt_alt_freqs[pos])
if alt_frequency == -1:
if 'AF' in variant.FORMAT:
alt_frequency = float(variant.format('AF')[pos][0])
read_depth = int(variant.gt_depths[pos])
if read_depth == -1:
# If read depth could not be parsed by cyvcf2, try to get it manually
if 'DP' in variant.FORMAT:
read_depth = int(variant.format('DP')[pos][0])
elif (alt_depth != -1 or ref_depth != -1):
read_depth = 0
if alt_depth != -1:
read_depth += alt_depth
if ref_depth != -1:
read_depth += ref_depth
gt_call['read_depth'] = read_depth
gt_call['alt_frequency'] = alt_frequency
gt_call['genotype_quality'] = int(variant.gt_quals[pos])
return gt_call
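# Usage sketch, assuming variant is a cyvcf2.Variant and GENOTYPE_MAP translates
# cyvcf2 allele codes to strings, roughly {0: '0', 1: '1', -1: '.'}
ind = {'individual_id': 'ADM1059A2', 'display_name': 'NA12882'}
gt_call = parse_genotype(variant, ind, pos=0)
# e.g. gt_call['genotype_call'] == '0/1' for a heterozygous call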
|
Check if a variant is in the Pseudo Autosomal Region or not Args: chromosome(str) position(int) build(str): The genome build Returns: bool
|
def is_par(chromosome, position, build='37'):
"""Check if a variant is in the Pseudo Autosomal Region or not
Args:
chromosome(str)
position(int)
build(str): The genome build
Returns:
bool
"""
chrom_match = CHR_PATTERN.match(chromosome)
chrom = chrom_match.group(2)
# PAR regions are only on X and Y
if not chrom in ['X','Y']:
return False
# Check if the position falls within one of the PAR regions
if PAR_COORDINATES[build][chrom].search(position):
return True
return False
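# PAR_COORDINATES is assumed to hold one interval tree per build and chromosome;
# a sketch with approximate GRCh37 PAR coordinates:
from intervaltree import IntervalTree
PAR_COORDINATES = {
    '37': {
        'X': IntervalTree.from_tuples([(60001, 2699521), (154931044, 155260561)]),
        'Y': IntervalTree.from_tuples([(10001, 2649521), (59034050, 59363567)]),
    },
}
# is_par('chrX', 61000, '37') would then return True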
|
Check if the variant is in the interval given by the coordinates
|
def check_coordinates(chromosome, pos, coordinates):
"""Check if the variant is in the interval given by the coordinates
Args:
chromosome(str): Variant chromosome
pos(int): Variant position
coordinates(dict): Dictionary with the region of interest
Returns:
bool
"""
chrom_match = CHR_PATTERN.match(chromosome)
chrom = chrom_match.group(2)
if chrom != coordinates['chrom']:
return False
if (pos >= coordinates['start'] and pos <= coordinates['end']):
return True
return False
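# Usage sketch with a hypothetical region; the 'chr' prefix is stripped by CHR_PATTERN
region = {'chrom': '7', 'start': 55086725, 'end': 55275031}
check_coordinates('chr7', 55242465, region)  # True, position inside the region
check_coordinates('7', 55000000, region)  # False, position outside the region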
|
Export all genes in gene panels Exports the union of genes in one or several gene panels to a bed like format with coordinates. Args: adapter(scout.adapter.MongoAdapter) panels(iterable(str)): Iterable with panel ids versions(list(str)): Optional list with one version for each panel build(str): Genome build
|
def export_panels(adapter, panels, versions=None, build='37'):
"""Export all genes in gene panels
Exports the union of genes in one or several gene panels to a bed like format with coordinates.
Args:
adapter(scout.adapter.MongoAdapter)
panels(iterable(str)): Iterable with panel ids
versions(list(str)): Optional list with one version for each panel
build(str): Genome build
"""
if versions and (len(versions) != len(panels)):
raise SyntaxError("If version specify for each panel")
headers = []
build_string = ("##genome_build={}")
headers.append(build_string.format(build))
header_string = ("##gene_panel={0},version={1},updated_at={2},display_name={3}")
contig_string = ("##contig={0}")
bed_string = ("{0}\t{1}\t{2}\t{3}\t{4}")
# Save all gene ids found in the collection of panels
panel_geneids = set()
# Save all chromosomes found in the collection of panels
chromosomes_found = set()
# Store all hgnc geneobjs
hgnc_geneobjs = []
# Loop over the panels
for i,panel_id in enumerate(panels):
version = None
if versions:
version = versions[i]
panel_obj = adapter.gene_panel(panel_id, version=version)
if not panel_obj:
LOG.warning("Panel {0} version {1} could not be found".format(panel_id, version))
continue
headers.append(header_string.format(
panel_obj['panel_name'],
panel_obj['version'],
panel_obj['date'].date(),
panel_obj['display_name'],
))
# Collect the hgnc ids from all genes found
for gene_obj in panel_obj['genes']:
panel_geneids.add(gene_obj['hgnc_id'])
gene_objs = adapter.hgncid_to_gene(build=build)
for hgnc_id in panel_geneids:
hgnc_geneobj = gene_objs.get(hgnc_id)
if hgnc_geneobj is None:
LOG.warn("missing HGNC gene: %s", hgnc_id)
continue
chrom = hgnc_geneobj['chromosome']
start = hgnc_geneobj['start']
chrom_int = CHROMOSOME_INTEGERS.get(chrom)
if not chrom_int:
LOG.warn("Chromosome %s out of scope", chrom)
continue
hgnc_geneobjs.append((chrom_int, start, hgnc_geneobj))
chromosomes_found.add(chrom)
# Sort the genes:
hgnc_geneobjs.sort(key=lambda tup: (tup[0], tup[1]))
for chrom in CHROMOSOMES:
if chrom in chromosomes_found:
headers.append(contig_string.format(chrom))
headers.append("#chromosome\tgene_start\tgene_stop\thgnc_id\thgnc_symbol")
for header in headers:
yield header
for hgnc_gene in hgnc_geneobjs:
gene_obj = hgnc_gene[-1]
gene_line = bed_string.format(gene_obj['chromosome'], gene_obj['start'],
gene_obj['end'], gene_obj['hgnc_id'],
gene_obj['hgnc_symbol'])
yield gene_line
|
Export the genes of a gene panel Takes a list of gene panel names and returns the lines of the gene panels. Unlike export_panels this function only exports the genes and extra information, not the coordinates. Args: adapter(MongoAdapter) panels(list(str)) version(float): Version number, only works when one panel Yields: gene panel lines
|
def export_gene_panels(adapter, panels, version=None):
"""Export the genes of a gene panel
Takes a list of gene panel names and returns the lines of the gene panels.
Unlike export_panels this function only exports the genes and extra information,
not the coordinates.
Args:
adapter(MongoAdapter)
panels(list(str))
version(float): Version number, only works when one panel
Yields:
gene panel lines
"""
if version and len(panels) > 1:
raise SyntaxError("Version only possible with one panel")
bed_string = ("{0}\t{1}\t{2}\t{3}\t{4}\t{5}")
headers = []
# Dictionary with hgnc ids as keys and panel gene information as value.
panel_geneobjs = dict()
for panel_id in panels:
panel_obj = adapter.gene_panel(panel_id, version=version)
if not panel_obj:
LOG.warning("Panel %s could not be found", panel_id)
continue
for gene_obj in panel_obj['genes']:
panel_geneobjs[gene_obj['hgnc_id']] = gene_obj
if len(panel_geneobjs) == 0:
return
headers.append('#hgnc_id\thgnc_symbol\tdisease_associated_transcripts\t'
'reduced_penetrance\tmosaicism\tdatabase_entry_version')
for header in headers:
yield header
for hgnc_id in panel_geneobjs:
gene_obj = panel_geneobjs[hgnc_id]
gene_line = bed_string.format(
gene_obj['hgnc_id'], gene_obj['symbol'],
','.join(gene_obj.get('disease_associated_transcripts', [])),
gene_obj.get('reduced_penetrance', ''),
gene_obj.get('mosaicism', ''),
gene_obj.get('database_entry_version', ''),
)
yield gene_line
|
Render search box and view for HPO phenotype terms
|
def hpo_terms():
"""Render search box and view for HPO phenotype terms"""
if request.method == 'GET':
data = controllers.hpo_terms(store=store, limit=100)
return data
else: # POST. user is searching for a specific term or phenotype
search_term = request.form.get('hpo_term')
limit = request.form.get('limit')
data = controllers.hpo_terms(store=store, query=search_term, limit=limit)
return dict(data, query=search_term, limit=limit)
|
Export all transcripts to a .bed like format
|
def transcripts(context, build):
"""Export all transcripts to .bed like format"""
LOG.info("Running scout export transcripts")
adapter = context.obj['adapter']
header = ["#Chrom\tStart\tEnd\tTranscript\tRefSeq\tHgncID"]
for line in header:
click.echo(line)
transcript_string = ("{0}\t{1}\t{2}\t{3}\t{4}\t{5}")
for tx_obj in export_transcripts(adapter):
click.echo(transcript_string.format(
tx_obj['chrom'],
tx_obj['start'],
tx_obj['end'],
tx_obj['ensembl_transcript_id'],
tx_obj.get('refseq_id',''),
tx_obj['hgnc_id'],
)
)
|
Load exons into the scout database
|
def exons(context, build):
"""Load exons into the scout database"""
adapter = context.obj['adapter']
start = datetime.now()
# Test if there are any exons loaded
nr_exons = adapter.exons(build=build).count()
if nr_exons:
LOG.warning("Dropping all exons ")
adapter.drop_exons(build=build)
LOG.info("Exons dropped")
# Load the exons
ensembl_exons = fetch_ensembl_exons(build=build)
load_exons(adapter, ensembl_exons, build)
adapter.update_indexes()
LOG.info("Time to load exons: {0}".format(datetime.now() - start))
|
Show all coding intervals in the database
|
def intervals(context, build):
"""Show all indexes in the database"""
LOG.info("Running scout view index")
adapter = context.obj['adapter']
intervals = adapter.get_coding_intervals(build)
nr_intervals = 0
longest = 0
for chrom in CHROMOSOMES:
for iv in intervals.get(chrom, []):
iv_len = iv.end - iv.begin
if iv_len > longest:
longest = iv_len
int_nr = len(intervals.get(chrom, []))
click.echo("{0}\t{1}".format(chrom, int_nr))
nr_intervals += int_nr
LOG.info("Total nr intervals:%s", nr_intervals)
LOG.info("Total nr genes:%s", adapter.all_genes(build).count())
LOG.info("Longest interval:%s", longest)
|
Load all variants in a region to a existing case
|
def region(context, hgnc_id, case_id, chromosome, start, end):
"""Load all variants in a region to a existing case"""
adapter = context.obj['adapter']
load_region(
adapter=adapter, case_id=case_id, hgnc_id=hgnc_id, chrom=chromosome, start=start, end=end
)
|
Helper function for getting category/tag kwargs.
|
def _get_kwargs(self, category, tag):
"""Helper function for getting category/tag kwargs."""
vals = {
'categories__title__iexact': category,
'tags__name__iexact': tag
}
kwargs = {}
for k, v in vals.items():
if v:
kwargs[k] = v
return kwargs
|
Returns two datetimes: first day and last day of given year&month
|
def get_first_and_last(year, month):
"""Returns two datetimes: first day and last day of given year&month"""
ym_first = make_aware(
datetime.datetime(year, month, 1),
get_default_timezone()
)
ym_last = make_aware(
datetime.datetime(year, month, monthrange(year, month)[1], 23, 59, 59, 1000000-1),
get_default_timezone()
)
return ym_first, ym_last
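# Usage sketch: timezone-aware bounds for March 2019
ym_first, ym_last = get_first_and_last(2019, 3)
# ym_first -> 2019-03-01 00:00:00, ym_last -> 2019-03-31 23:59:59.999999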
|
Returns all events that have an occurrence within the given month & year.
|
def all_month_events(self, year, month, category=None, tag=None,
loc=False, cncl=False):
"""
Returns all events that have an occurrence within the given
month & year.
"""
kwargs = self._get_kwargs(category, tag)
ym_first, ym_last = self.get_first_and_last(year, month)
pref = []
if loc:
pref.append("location")
if cncl:
pref.append("cancellations")
# for yearly repeat, we need to check the start and end date months
# b/c yearly events should occur every year in the same month
r = Q(repeat="YEARLY")
dstart_mo = Q(start_date__month=month)
dend_mo = Q(end_date__month=month)
dstart_yr = Q(start_date__year=year)
dend_yr = Q(end_date__year=year)
return self.model.objects.filter(
# only events that are still repeating
r & (dstart_mo | dend_mo) | # yearly repeat
(~Q(repeat="NEVER")) | # all other repeats
((dstart_yr | dend_yr) & (dstart_mo | dend_yr)), # non-repeating
Q(end_repeat=None) | Q(end_repeat__gte=ym_first),
start_date__lte=ym_last # no events that haven't started yet
).filter(**kwargs).prefetch_related(*pref).order_by('start_date').distinct()
|
Returns a queryset of events that will occur again after now. Used to help generate a list of upcoming events.
|
def live(self, now):
"""
Returns a queryset of events that will occur again after 'now'.
Used to help generate a list of upcoming events.
"""
return self.model.objects.filter(
Q(end_repeat=None) | Q(end_repeat__gte=now) |
Q(start_date__gte=now) | Q(end_date__gte=now)
).exclude( # exclude single day events that won't occur again
start_date__lt=now, end_date__lt=now,
repeat="NEVER", end_repeat=None,
).prefetch_related('cancellations')
|
Build a user object Args: user_info(dict): A dictionary with user information Returns: user_obj(scout.models.User)
|
def build_user(user_info):
"""Build a user object
Args:
user_info(dict): A dictionary with user information
Returns:
user_obj(scout.models.User)
"""
try:
email = user_info['email']
except KeyError as err:
raise KeyError("A user has to have a email")
try:
name = user_info['name']
except KeyError as err:
raise KeyError("A user has to have a name")
user_obj = User(email=email, name=name)
##TODO check that these are on the correct format
if 'roles' in user_info:
user_obj['roles'] = user_info['roles']
if 'location' in user_info:
user_obj['location'] = user_info['location']
if 'institutes' in user_info:
user_obj['institutes'] = user_info['institutes']
return user_obj
|
Recursively parse requirements from nested pip files.
|
def parse_reqs(req_path='./requirements.txt'):
"""Recursively parse requirements from nested pip files."""
install_requires = []
with io.open(os.path.join(here, req_path), encoding='utf-8') as handle:
# remove comments and empty lines
lines = (line.strip() for line in handle
if line.strip() and not line.startswith('#'))
for line in lines:
# check for nested requirements files
if line.startswith('-r'):
# recursively call this function
install_requires += parse_reqs(req_path=line[3:])
else:
# add the line as a new requirement
install_requires.append(line)
return install_requires
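# Typical call site in setup.py (a sketch; the package name is illustrative)
from setuptools import setup
setup(
    name='example-package',
    install_requires=parse_reqs(),
)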
|
Check if gene is already added to a panel.
|
def existing_gene(store, panel_obj, hgnc_id):
"""Check if gene is already added to a panel."""
existing_genes = {gene['hgnc_id']: gene for gene in panel_obj['genes']}
return existing_genes.get(hgnc_id)
|
Update an existing gene panel with genes.
|
def update_panel(store, panel_name, csv_lines, option):
"""Update an existing gene panel with genes.
Args:
store(scout.adapter.MongoAdapter)
panel_name(str)
csv_lines(iterable(str)): Stream with genes
option(str): 'add' or 'replace'
Returns:
panel_obj(dict)
"""
new_genes = []
panel_obj = store.gene_panel(panel_name)
if panel_obj is None:
return None
try:
new_genes = parse_genes(csv_lines) # a list of gene dictionaries containing gene info
except SyntaxError as error:
flash(error.args[0], 'danger')
return None
# if existing genes are to be replaced by those in csv_lines
if option == 'replace':
# all existing genes should be deleted
for gene in panel_obj['genes']:
#create extra key to use in pending actions:
gene['hgnc_symbol'] = gene['symbol']
store.add_pending(panel_obj, gene, action='delete', info=None)
for new_gene in new_genes:
if not new_gene['hgnc_id']:
flash("gene missing hgnc id: {}".format(new_gene['hgnc_symbol']),'danger')
continue
gene_obj = store.hgnc_gene(new_gene['hgnc_id'])
if gene_obj is None:
flash("gene not found: {} - {}".format(new_gene['hgnc_id'], new_gene['hgnc_symbol']),'danger')
continue
if new_gene['hgnc_symbol'] and gene_obj['hgnc_symbol'] != new_gene['hgnc_symbol']:
flash("symbol mis-match: {0} | {1}".format(
gene_obj['hgnc_symbol'], new_gene['hgnc_symbol']), 'warning')
info_data = {
'disease_associated_transcripts': new_gene['transcripts'],
'reduced_penetrance': new_gene['reduced_penetrance'],
'mosaicism': new_gene['mosaicism'],
'inheritance_models': new_gene['inheritance_models'],
'database_entry_version': new_gene['database_entry_version'],
}
if option == 'replace': # there will be no existing genes for sure, because we're replacing them all
action = 'add'
else: # add option: add the gene if it is not already in the panel, otherwise edit it
existing_genes = {gene['hgnc_id'] for gene in panel_obj['genes']}
action = 'edit' if gene_obj['hgnc_id'] in existing_genes else 'add'
store.add_pending(panel_obj, gene_obj, action=action, info=info_data)
return panel_obj
|
Create a new gene panel.
|
def new_panel(store, institute_id, panel_name, display_name, csv_lines):
"""Create a new gene panel.
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
panel_name(str)
display_name(str)
csv_lines(iterable(str)): Stream with genes
Returns:
panel_id: the ID of the new panel document created or None
"""
institute_obj = store.institute(institute_id)
if institute_obj is None:
flash("{}: institute not found".format(institute_id))
return None
panel_obj = store.gene_panel(panel_name)
if panel_obj:
flash("panel already exists: {} - {}".format(panel_obj['panel_name'],
panel_obj['display_name']))
return None
log.debug("parse genes from CSV input")
try:
new_genes = parse_genes(csv_lines)
except SyntaxError as error:
flash(error.args[0], 'danger')
return None
log.debug("build new gene panel")
panel_id = None
try:
panel_data = build_panel(dict(
panel_name=panel_name,
institute=institute_obj['_id'],
version=1.0,
date=dt.datetime.now(),
display_name=display_name,
genes=new_genes,
), store)
panel_id = store.add_gene_panel(panel_data)
except Exception as err:
log.error('An error occurred while adding the gene panel {}'.format(err))
return panel_id
|
Preprocess a panel of genes.
|
def panel_export(store, panel_obj):
"""Preprocess a panel of genes."""
panel_obj['institute'] = store.institute(panel_obj['institute'])
full_name = "{}({})".format(panel_obj['display_name'], panel_obj['version'])
panel_obj['name_and_version'] = full_name
return dict(panel=panel_obj)
|