field              dtype            range
partition          stringclasses    3 values
func_name          stringlengths    1 .. 134
docstring          stringlengths    1 .. 46.9k
path               stringlengths    4 .. 223
original_string    stringlengths    75 .. 104k
code               stringlengths    75 .. 104k
docstring_tokens   listlengths      1 .. 1.97k
repo               stringlengths    7 .. 55
language           stringclasses    1 value
url                stringlengths    87 .. 315
code_tokens        listlengths      19 .. 28.4k
sha                stringlengths    40 .. 40
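Programmatically, each record is easiest to handle as one JSON object per line. A minimal sketch, assuming the partition has been exported as JSON lines with exactly the fields above (the file name test.jsonl is hypothetical):

    import json

    # Hypothetical export of this partition: one JSON object per record,
    # carrying the fields listed in the schema above.
    with open("test.jsonl") as fh:
        for line in fh:
            row = json.loads(line)
            # 'code' holds the full function source, 'docstring' its documentation
            if row["partition"] == "test" and row["repo"] == "neherlab/treetime":
                print(row["func_name"], row["path"], row["url"])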
partition: test
func_name: ClockTree.date_uncertainty_due_to_rate
docstring:
    use previously calculated variation of the rate to estimate
    the uncertainty in a particular numdate due to rate variation.

    Parameters
    ----------
    node : PhyloTree.Clade
        node for which the confidence interval is to be calculated
    interval : tuple, optional
        Array of length two, or tuple, defining the bounds of the confidence interval
path: treetime/clock_tree.py
code:
    def date_uncertainty_due_to_rate(self, node, interval=(0.05, 0.095)):
        """use previously calculated variation of the rate to estimate
        the uncertainty in a particular numdate due to rate variation.

        Parameters
        ----------
        node : PhyloTree.Clade
            node for which the confidence interval is to be calculated
        interval : tuple, optional
            Array of length two, or tuple, defining the bounds of the confidence interval
        """
        if hasattr(node, "numdate_rate_variation"):
            from scipy.special import erfinv
            nsig = [np.sqrt(2.0)*erfinv(-1.0 + 2.0*x) if x*(1.0-x) else 0
                    for x in interval]
            l, c, u = [x[1] for x in node.numdate_rate_variation]
            return np.array([c + x*np.abs(y-c) for x, y in zip(nsig, (l, u))])
        else:
            return None
[ "use", "previously", "calculated", "variation", "of", "the", "rate", "to", "estimate", "the", "uncertainty", "in", "a", "particular", "numdate", "due", "to", "rate", "variation", "." ]
repo: neherlab/treetime
language: python
url: https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/clock_tree.py#L760-L780
[ "def", "date_uncertainty_due_to_rate", "(", "self", ",", "node", ",", "interval", "=", "(", "0.05", ",", "0.095", ")", ")", ":", "if", "hasattr", "(", "node", ",", "\"numdate_rate_variation\"", ")", ":", "from", "scipy", ".", "special", "import", "erfinv", "nsig", "=", "[", "np", ".", "sqrt", "(", "2.0", ")", "*", "erfinv", "(", "-", "1.0", "+", "2.0", "*", "x", ")", "if", "x", "*", "(", "1.0", "-", "x", ")", "else", "0", "for", "x", "in", "interval", "]", "l", ",", "c", ",", "u", "=", "[", "x", "[", "1", "]", "for", "x", "in", "node", ".", "numdate_rate_variation", "]", "return", "np", ".", "array", "(", "[", "c", "+", "x", "*", "np", ".", "abs", "(", "y", "-", "c", ")", "for", "x", ",", "y", "in", "zip", "(", "nsig", ",", "(", "l", ",", "u", ")", ")", "]", ")", "else", ":", "return", "None" ]
sha: f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
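The nsig list above converts each quantile q into the matching normal z-score, z = sqrt(2)*erfinv(2q - 1), which is then scaled by the distance between the central estimate and the stored lower/upper rate-variation dates. A hedged usage sketch (tt is assumed to be an already-fitted treetime ClockTree; nodes only carry numdate_rate_variation after rate-variation estimation has run):

    # `tt` is an assumed, already-fitted ClockTree; the method returns None
    # when the node lacks the `numdate_rate_variation` attribute.
    node = tt.tree.root
    ci = tt.date_uncertainty_due_to_rate(node, interval=(0.05, 0.95))
    if ci is not None:
        lower, upper = ci  # numerical dates bracketing node.numdate
        print("root date within [{:.2f}, {:.2f}]".format(lower, upper))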
partition: test
func_name: ClockTree.get_confidence_interval
docstring:
    If temporal reconstruction was done using the marginal ML mode, the entire
    distribution of times is available. This function determines the 90% (or
    other) confidence interval, defined as the range where 5% of probability is
    below and above. Note that this does not necessarily contain the highest
    probability position. In the absence of marginal reconstruction, it will
    return the uncertainty based on rate variation. If both are present, the
    wider interval will be returned.

    Parameters
    ----------
    node : PhyloTree.Clade
        The node for which the confidence interval is to be calculated
    interval : tuple, list
        Array of length two, or tuple, defining the bounds of the confidence interval

    Returns
    -------
    confidence_interval : numpy array
        Array with two numerical dates delineating the confidence interval
path: treetime/clock_tree.py
code:
    def get_confidence_interval(self, node, interval=(0.05, 0.95)):
        '''
        If temporal reconstruction was done using the marginal ML mode, the
        entire distribution of times is available. This function determines
        the 90% (or other) confidence interval, defined as the range where 5%
        of probability is below and above. Note that this does not necessarily
        contain the highest probability position. In the absence of marginal
        reconstruction, it will return the uncertainty based on rate variation.
        If both are present, the wider interval will be returned.

        Parameters
        ----------
        node : PhyloTree.Clade
            The node for which the confidence interval is to be calculated
        interval : tuple, list
            Array of length two, or tuple, defining the bounds of the confidence interval

        Returns
        -------
        confidence_interval : numpy array
            Array with two numerical dates delineating the confidence interval
        '''
        rate_contribution = self.date_uncertainty_due_to_rate(node, interval)

        #ensure mutation_contribution is defined when no marginal reconstruction exists
        mutation_contribution = None
        if hasattr(node, "marginal_inverse_cdf"):
            min_date, max_date = [self.date2dist.to_numdate(x) for x in
                                  (node.marginal_pos_LH.xmax, node.marginal_pos_LH.xmin)]
            if node.marginal_inverse_cdf == "delta":
                return np.array([node.numdate, node.numdate])
            else:
                mutation_contribution = self.date2dist.to_numdate(
                        node.marginal_inverse_cdf(np.array(interval))[::-1])
        else:
            min_date, max_date = [-np.inf, np.inf]

        return self.combine_confidence(node.numdate, (min_date, max_date),
                                       c1=rate_contribution, c2=mutation_contribution)
[ "If", "temporal", "reconstruction", "was", "done", "using", "the", "marginal", "ML", "mode", "the", "entire", "distribution", "of", "times", "is", "available", ".", "This", "function", "determines", "the", "90%", "(", "or", "other", ")", "confidence", "interval", "defined", "as", "the", "range", "where", "5%", "of", "probability", "is", "below", "and", "above", ".", "Note", "that", "this", "does", "not", "necessarily", "contain", "the", "highest", "probability", "position", ".", "In", "absense", "of", "marginal", "reconstruction", "it", "will", "return", "uncertainty", "based", "on", "rate", "variation", ".", "If", "both", "are", "present", "the", "wider", "interval", "will", "be", "returned", "." ]
repo: neherlab/treetime
language: python
url: https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/clock_tree.py#L798-L836
[ "def", "get_confidence_interval", "(", "self", ",", "node", ",", "interval", "=", "(", "0.05", ",", "0.95", ")", ")", ":", "rate_contribution", "=", "self", ".", "date_uncertainty_due_to_rate", "(", "node", ",", "interval", ")", "if", "hasattr", "(", "node", ",", "\"marginal_inverse_cdf\"", ")", ":", "min_date", ",", "max_date", "=", "[", "self", ".", "date2dist", ".", "to_numdate", "(", "x", ")", "for", "x", "in", "(", "node", ".", "marginal_pos_LH", ".", "xmax", ",", "node", ".", "marginal_pos_LH", ".", "xmin", ")", "]", "if", "node", ".", "marginal_inverse_cdf", "==", "\"delta\"", ":", "return", "np", ".", "array", "(", "[", "node", ".", "numdate", ",", "node", ".", "numdate", "]", ")", "else", ":", "mutation_contribution", "=", "self", ".", "date2dist", ".", "to_numdate", "(", "node", ".", "marginal_inverse_cdf", "(", "np", ".", "array", "(", "interval", ")", ")", "[", ":", ":", "-", "1", "]", ")", "else", ":", "min_date", ",", "max_date", "=", "[", "-", "np", ".", "inf", ",", "np", ".", "inf", "]", "return", "self", ".", "combine_confidence", "(", "node", ".", "numdate", ",", "(", "min_date", ",", "max_date", ")", ",", "c1", "=", "rate_contribution", ",", "c2", "=", "mutation_contribution", ")" ]
sha: f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
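A hedged usage sketch (same assumed tt as above; marginal ML reconstruction must have populated marginal_pos_LH / marginal_inverse_cdf on the nodes for the mutation contribution to be used):

    # 95% equal-tail confidence intervals for every node date; `tt` is an
    # assumed, already-fitted ClockTree and tt.tree a Bio.Phylo tree.
    for node in tt.tree.find_clades():
        lo, hi = tt.get_confidence_interval(node, interval=(0.025, 0.975))
        print(node.name, node.numdate, lo, hi)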
partition: test
func_name: ClockTree.get_max_posterior_region
docstring:
    If temporal reconstruction was done using the marginal ML mode, the entire
    distribution of times is available. This function determines the interval
    around the highest posterior probability region that contains the specified
    fraction of the probability mass. In the absence of marginal reconstruction,
    it will return the uncertainty based on rate variation. If both are present,
    the wider interval will be returned.

    Parameters
    ----------
    node : PhyloTree.Clade
        The node for which the posterior region is to be calculated
    fraction : float
        Float specifying how much of the posterior probability is to be
        contained in the region

    Returns
    -------
    max_posterior_region : numpy array
        Array with two numerical dates delineating the high posterior region
path: treetime/clock_tree.py
code:
    def get_max_posterior_region(self, node, fraction=0.9):
        '''
        If temporal reconstruction was done using the marginal ML mode, the
        entire distribution of times is available. This function determines
        the interval around the highest posterior probability region that
        contains the specified fraction of the probability mass. In the absence
        of marginal reconstruction, it will return the uncertainty based on
        rate variation. If both are present, the wider interval will be returned.

        Parameters
        ----------
        node : PhyloTree.Clade
            The node for which the posterior region is to be calculated
        fraction : float
            Float specifying how much of the posterior probability is to be
            contained in the region

        Returns
        -------
        max_posterior_region : numpy array
            Array with two numerical dates delineating the high posterior region
        '''
        if node.marginal_inverse_cdf == "delta":
            return np.array([node.numdate, node.numdate])

        min_max = (node.marginal_pos_LH.xmin, node.marginal_pos_LH.xmax)
        min_date, max_date = [self.date2dist.to_numdate(x) for x in min_max][::-1]
        if node.marginal_pos_LH.peak_pos == min_max[0]: #peak on the left
            return self.get_confidence_interval(node, (0, fraction))
        elif node.marginal_pos_LH.peak_pos == min_max[1]: #peak on the right
            return self.get_confidence_interval(node, (1.0-fraction, 1.0))
        else: # peak in the center of the distribution
            rate_contribution = self.date_uncertainty_due_to_rate(node,
                                    ((1-fraction)*0.5, 1.0-(1.0-fraction)*0.5))

            # construct height to position interpolators left and right of the peak
            # this assumes there is only one peak --- might fail in odd cases
            from scipy.interpolate import interp1d
            from scipy.optimize import minimize_scalar as minimize

            pidx = np.argmin(node.marginal_pos_LH.y)
            pval = np.min(node.marginal_pos_LH.y)

            left = interp1d(node.marginal_pos_LH.y[:(pidx+1)]-pval,
                            node.marginal_pos_LH.x[:(pidx+1)],
                            kind='linear', fill_value=min_max[0], bounds_error=False)
            right = interp1d(node.marginal_pos_LH.y[pidx:]-pval,
                             node.marginal_pos_LH.x[pidx:],
                             kind='linear', fill_value=min_max[1], bounds_error=False)

            # function to minimize -- squared difference between prob mass and desired fraction
            def func(x, thres):
                interval = np.array([left(x), right(x)]).squeeze()
                return (thres - np.diff(node.marginal_cdf(np.array(interval))))**2

            # minimize and determine success
            sol = minimize(func, bracket=[0, 10], args=(fraction,))
            if sol['success']:
                mutation_contribution = self.date2dist.to_numdate(
                        np.array([right(sol['x']), left(sol['x'])]).squeeze())
            else: # on failure, return standard confidence interval
                mutation_contribution = None

            return self.combine_confidence(node.numdate, (min_date, max_date),
                                           c1=rate_contribution, c2=mutation_contribution)
[ "If", "temporal", "reconstruction", "was", "done", "using", "the", "marginal", "ML", "mode", "the", "entire", "distribution", "of", "times", "is", "available", ".", "This", "function", "determines", "the", "interval", "around", "the", "highest", "posterior", "probability", "region", "that", "contains", "the", "specified", "fraction", "of", "the", "probability", "mass", ".", "In", "absense", "of", "marginal", "reconstruction", "it", "will", "return", "uncertainty", "based", "on", "rate", "variation", ".", "If", "both", "are", "present", "the", "wider", "interval", "will", "be", "returned", "." ]
repo: neherlab/treetime
language: python
url: https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/clock_tree.py#L838-L899
[ "def", "get_max_posterior_region", "(", "self", ",", "node", ",", "fraction", "=", "0.9", ")", ":", "if", "node", ".", "marginal_inverse_cdf", "==", "\"delta\"", ":", "return", "np", ".", "array", "(", "[", "node", ".", "numdate", ",", "node", ".", "numdate", "]", ")", "min_max", "=", "(", "node", ".", "marginal_pos_LH", ".", "xmin", ",", "node", ".", "marginal_pos_LH", ".", "xmax", ")", "min_date", ",", "max_date", "=", "[", "self", ".", "date2dist", ".", "to_numdate", "(", "x", ")", "for", "x", "in", "min_max", "]", "[", ":", ":", "-", "1", "]", "if", "node", ".", "marginal_pos_LH", ".", "peak_pos", "==", "min_max", "[", "0", "]", ":", "#peak on the left", "return", "self", ".", "get_confidence_interval", "(", "node", ",", "(", "0", ",", "fraction", ")", ")", "elif", "node", ".", "marginal_pos_LH", ".", "peak_pos", "==", "min_max", "[", "1", "]", ":", "#peak on the right", "return", "self", ".", "get_confidence_interval", "(", "node", ",", "(", "1.0", "-", "fraction", ",", "1.0", ")", ")", "else", ":", "# peak in the center of the distribution", "rate_contribution", "=", "self", ".", "date_uncertainty_due_to_rate", "(", "node", ",", "(", "(", "1", "-", "fraction", ")", "*", "0.5", ",", "1.0", "-", "(", "1.0", "-", "fraction", ")", "*", "0.5", ")", ")", "# construct height to position interpolators left and right of the peak", "# this assumes there is only one peak --- might fail in odd cases", "from", "scipy", ".", "interpolate", "import", "interp1d", "from", "scipy", ".", "optimize", "import", "minimize_scalar", "as", "minimize", "pidx", "=", "np", ".", "argmin", "(", "node", ".", "marginal_pos_LH", ".", "y", ")", "pval", "=", "np", ".", "min", "(", "node", ".", "marginal_pos_LH", ".", "y", ")", "left", "=", "interp1d", "(", "node", ".", "marginal_pos_LH", ".", "y", "[", ":", "(", "pidx", "+", "1", ")", "]", "-", "pval", ",", "node", ".", "marginal_pos_LH", ".", "x", "[", ":", "(", "pidx", "+", "1", ")", "]", ",", "kind", "=", "'linear'", ",", "fill_value", "=", "min_max", "[", "0", "]", ",", "bounds_error", "=", "False", ")", "right", "=", "interp1d", "(", "node", ".", "marginal_pos_LH", ".", "y", "[", "pidx", ":", "]", "-", "pval", ",", "node", ".", "marginal_pos_LH", ".", "x", "[", "pidx", ":", "]", ",", "kind", "=", "'linear'", ",", "fill_value", "=", "min_max", "[", "1", "]", ",", "bounds_error", "=", "False", ")", "# function to minimize -- squared difference between prob mass and desired fracion", "def", "func", "(", "x", ",", "thres", ")", ":", "interval", "=", "np", ".", "array", "(", "[", "left", "(", "x", ")", ",", "right", "(", "x", ")", "]", ")", ".", "squeeze", "(", ")", "return", "(", "thres", "-", "np", ".", "diff", "(", "node", ".", "marginal_cdf", "(", "np", ".", "array", "(", "interval", ")", ")", ")", ")", "**", "2", "# minimza and determine success", "sol", "=", "minimize", "(", "func", ",", "bracket", "=", "[", "0", ",", "10", "]", ",", "args", "=", "(", "fraction", ",", ")", ")", "if", "sol", "[", "'success'", "]", ":", "mutation_contribution", "=", "self", ".", "date2dist", ".", "to_numdate", "(", "np", ".", "array", "(", "[", "right", "(", "sol", "[", "'x'", "]", ")", ",", "left", "(", "sol", "[", "'x'", "]", ")", "]", ")", ".", "squeeze", "(", ")", ")", "else", ":", "# on failure, return standard confidence interval", "mutation_contribution", "=", "None", "return", "self", ".", "combine_confidence", "(", "node", ".", "numdate", ",", "(", "min_date", ",", "max_date", ")", ",", "c1", "=", "rate_contribution", ",", "c2", "=", "mutation_contribution", ")" ]
sha: f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
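Unlike get_confidence_interval, which cuts off equal probability tails, this method searches for the interval around the posterior peak that holds the requested mass. A hedged sketch (same assumed tt):

    # 90% highest-posterior region around the root's date estimate;
    # `tt` is an assumed, already-fitted ClockTree.
    region = tt.get_max_posterior_region(tt.tree.root, fraction=0.9)
    print("90% max-posterior region:", region)  # two numerical dates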
partition: test
func_name: read_vcf
docstring:
    Reads in a vcf/vcf.gz file and associated reference sequence fasta (to
    which the VCF file is mapped). Parses mutations, insertions, and deletions
    and stores them in a nested dict, see 'returns' for the dict structure.
    Calls with heterozygous values 0/1, 0/2, etc and no-calls (./.) are
    replaced with Ns at the associated sites. Positions are stored to
    correspond to the location in the reference sequence in Python (numbering
    is transformed to start at 0).

    Parameters
    ----------
    vcf_file : string
        Path to the vcf or vcf.gz file to be read in
    ref_file : string
        Path to the fasta reference file to be read in

    Returns
    -------
    compress_seq : nested dict
        In the format: ::

            {
            'reference':'AGCTCGA..A',
            'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },
            'insertions': { 'seq1':{4:'ATT'}, 'seq3':{1:'TT', 10:'CAG'} },
            'positions': [1,4,7,10,100...]
            }

        reference : string
            String of the reference sequence read from the Fasta, to which
            the variable sites are mapped
        sequences : nested dict
            Dict containing sequence names as keys which map to dicts that
            have position as key and the single-base mutation (or deletion)
            as values
        insertions : nested dict
            Dict in the same format as the above, which stores insertions and
            their locations. The first base of the insertion is the same as
            whatever is currently in that position (Ref if no mutation,
            mutation in 'sequences' otherwise), so the current base can be
            directly replaced by the bases held here.
        positions : list
            Python list of all positions with a mutation, insertion, or deletion.
path: treetime/vcf_utils.py
code:
    def read_vcf(vcf_file, ref_file):
        """
        Reads in a vcf/vcf.gz file and associated reference sequence fasta (to
        which the VCF file is mapped). Parses mutations, insertions, and
        deletions and stores them in a nested dict, see 'returns' for the dict
        structure. Calls with heterozygous values 0/1, 0/2, etc and no-calls
        (./.) are replaced with Ns at the associated sites. Positions are
        stored to correspond to the location in the reference sequence in
        Python (numbering is transformed to start at 0).

        Parameters
        ----------
        vcf_file : string
            Path to the vcf or vcf.gz file to be read in
        ref_file : string
            Path to the fasta reference file to be read in

        Returns
        -------
        compress_seq : nested dict
            In the format: ::

                {
                'reference':'AGCTCGA..A',
                'sequences': { 'seq1':{4:'A', 7:'-'}, 'seq2':{100:'C'} },
                'insertions': { 'seq1':{4:'ATT'}, 'seq3':{1:'TT', 10:'CAG'} },
                'positions': [1,4,7,10,100...]
                }

            reference : string
                String of the reference sequence read from the Fasta, to which
                the variable sites are mapped
            sequences : nested dict
                Dict containing sequence names as keys which map to dicts that
                have position as key and the single-base mutation (or deletion)
                as values
            insertions : nested dict
                Dict in the same format as the above, which stores insertions
                and their locations. The first base of the insertion is the
                same as whatever is currently in that position (Ref if no
                mutation, mutation in 'sequences' otherwise), so the current
                base can be directly replaced by the bases held here.
            positions : list
                Python list of all positions with a mutation, insertion, or deletion.
        """
        #Programming Note:
        # Note on VCF Format
        # -------------------
        # 'Insertion where there are also deletions' (special handling)
        #     Ex:
        #       REF   ALT    Seq1   Seq2
        #       GC    GCC,G  1/1    2/2
        #     Insertions formatted differently - don't know how many bp match
        #     the Ref (unlike simple insert below). Could be mutations, also.
        # 'Deletion'
        #     Ex:
        #       REF   ALT
        #       GC    G
        #     Alt does not have to be 1 bp - any length shorter than Ref.
        # 'Insertion'
        #     Ex:
        #       REF   ALT
        #       A     ATT
        #     First base always matches Ref.
        # 'No indel'
        #     Ex:
        #       REF   ALT
        #       A     G

        from collections import defaultdict  #module-level import in the original file; included so the snippet is self-contained

        #define here, so that all sub-functions can access them
        sequences = defaultdict(dict)
        insertions = defaultdict(dict)

        #Currently not used, but kept in case of future use.
        #TreeTime handles 2-3 base ambig codes, this will allow that.
        def getAmbigCode(bp1, bp2, bp3=""):
            bps = [bp1, bp2, bp3]
            bps.sort()
            key = "".join(bps)
            return {
                'CT': 'Y',
                'AG': 'R',
                'AT': 'W',
                'CG': 'S',
                'GT': 'K',
                'AC': 'M',
                'AGT': 'D',
                'ACG': 'V',
                'ACT': 'H',
                'CGT': 'B'
            }[key]

        #Parses a 'normal' (not hetero or no-call) call depending if
        #insertion+deletion, insertion, deletion, or single bp substitution
        def parseCall(snps, ins, pos, ref, alt):
            #Insertion where there are also deletions (special handling)
            if len(ref) > 1 and len(alt) > len(ref):
                for i in range(len(ref)):
                    #if the pos doesn't match, store in sequences
                    if ref[i] != alt[i]:
                        snps[pos+i] = alt[i] if alt[i] != '.' else 'N'  #'.' = no-call
                    #if about to run out of ref, store rest:
                    if (i+1) >= len(ref):
                        ins[pos+i] = alt[i:]
            #Deletion
            elif len(ref) > 1:
                for i in range(len(ref)):
                    #if ref is longer than alt, these are deletion positions
                    if i+1 > len(alt):
                        snps[pos+i] = '-'
                    #if not, there may be mutations
                    else:
                        if ref[i] != alt[i]:
                            snps[pos+i] = alt[i] if alt[i] != '.' else 'N'  #'.' = no-call
            #Insertion
            elif len(alt) > 1:
                ins[pos] = alt
            #No indel
            else:
                snps[pos] = alt

        #Parses a 'bad' (hetero or no-call) call depending on what it is
        #(reads `gen` from the enclosing per-sample loop below)
        def parseBadCall(snps, ins, pos, ref, ALT):
            #Deletion
            #    REF    ALT    Seq1    Seq2    Seq3
            #    GCC    G      1/1     0/1     ./.
            # Seq1 (processed by parseCall, above) will become 'G--'
            # Seq2 will become 'GNN'
            # Seq3 will become 'GNN'
            if len(ref) > 1:
                #Deleted part becomes Ns
                if gen[0] == '0' or gen[0] == '.':
                    if gen[0] == '0':  #if het, get first bp
                        alt = str(ALT[int(gen[2])-1])
                    else:  #if no-call, there is no alt, so just put Ns after 1st ref base
                        alt = ref[0]
                    for i in range(len(ref)):
                        #if ref is longer than alt, these are deletion positions
                        if i+1 > len(alt):
                            snps[pos+i] = 'N'
                        #if not, there may be mutations
                        else:
                            if ref[i] != alt[i]:
                                snps[pos+i] = alt[i] if alt[i] != '.' else 'N'  #'.' = no-call
            #If not deletion, need to know call type
            #if het, see if proposed alt is 1bp mutation
            elif gen[0] == '0':
                alt = str(ALT[int(gen[2])-1])
                if len(alt) == 1:
                    #alt = getAmbigCode(ref,alt)  #if want to allow ambig
                    alt = 'N'  #if you want to disregard ambig
                    snps[pos] = alt
                #else a het-call insertion, so ignore.
            #else it's a no-call; see if all alts have a length of 1
            #(meaning a simple 1bp mutation)
            elif len(ALT) == len("".join(ALT)):
                alt = 'N'
                snps[pos] = alt
            #else a no-call insertion, so ignore.

        #House code is *much* faster than pyvcf because we don't care about all info
        #about coverage, quality, counts, etc, which pyvcf goes to effort to parse
        #(and it's not easy as there's no standard ordering). Custom code can completely
        #ignore all of this.
        import gzip
        from Bio import SeqIO
        import numpy as np

        nsamp = 0
        posLoc = 0
        refLoc = 0
        altLoc = 0
        sampLoc = 9

        #Use different openers depending on whether compressed
        opn = gzip.open if vcf_file.endswith(('.gz', '.GZ')) else open

        with opn(vcf_file, mode='rt') as f:
            for line in f:
                if line[0] != '#':
                    #actual data - most common so first in 'if-list'!
                    line = line.strip()
                    dat = line.split('\t')
                    POS = int(dat[posLoc])
                    REF = dat[refLoc]
                    ALT = dat[altLoc].split(',')
                    calls = np.array(dat[sampLoc:])

                    #get samples that differ from Ref at this site
                    recCalls = {}
                    for sname, sa in zip(samps, calls):
                        if ':' in sa:  #if proper VCF file (followed by quality/coverage info)
                            gt = sa.split(':')[0]
                        else:  #if 'pseudo' VCF file (nextstrain output, or otherwise stripped)
                            gt = sa
                        if gt == '0' or gt == '1':  #for haploid calls in VCF
                            gt = '0/0' if gt == '0' else '1/1'

                        #ignore if ref call: '.' or '0/0', depending on VCF
                        if ('/' in gt and gt != '0/0') or ('|' in gt and gt != '0|0'):
                            recCalls[sname] = gt

                    #store the position and the alt
                    for seq, gen in recCalls.items():
                        ref = REF
                        pos = POS-1  #VCF numbering starts from 1, but Reference seq
                                     #numbering will be from 0 because it's python!

                        #Accepts only calls that are 1/1, 2/2 etc. Rejects hets and no-calls
                        if gen[0] != '0' and gen[2] != '0' and gen[0] != '.' and gen[2] != '.':
                            alt = str(ALT[int(gen[0])-1])  #get the index of the alternate
                            if seq not in sequences.keys():
                                sequences[seq] = {}
                            parseCall(sequences[seq], insertions[seq], pos, ref, alt)
                        #If is heterozygote call (0/1) or no call (./.)
                        else:
                            #alt will differ here depending on het or no-call, must pass original
                            parseBadCall(sequences[seq], insertions[seq], pos, ref, ALT)

                elif line[0] == '#' and line[1] == 'C':
                    #header line, get all the information
                    header = line.strip().split('\t')
                    posLoc = header.index("POS")
                    refLoc = header.index('REF')
                    altLoc = header.index('ALT')
                    sampLoc = header.index('FORMAT')+1
                    samps = header[sampLoc:]
                    samps = [x.strip() for x in samps]  #ensure no leading/trailing spaces
                    nsamp = len(samps)

                #else you are a comment line, ignore.

        #Gather all variable positions
        positions = set()
        for seq, muts in sequences.items():
            positions.update(muts.keys())

        #One or more seqs are same as ref! (No non-ref calls) So haven't been 'seen' yet
        if nsamp > len(sequences):
            missings = set(samps).difference(sequences.keys())
            for s in missings:
                sequences[s] = {}

        refSeq = SeqIO.read(ref_file, format='fasta')
        refSeq = refSeq.upper()  #convert to uppercase to avoid unknown chars later
        refSeqStr = str(refSeq.seq)

        compress_seq = {'reference': refSeqStr,
                        'sequences': sequences,
                        'insertions': insertions,
                        'positions': sorted(positions)}

        return compress_seq
[ "Reads", "in", "a", "vcf", "/", "vcf", ".", "gz", "file", "and", "associated", "reference", "sequence", "fasta", "(", "to", "which", "the", "VCF", "file", "is", "mapped", ")", ".", "Parses", "mutations", "insertions", "and", "deletions", "and", "stores", "them", "in", "a", "nested", "dict", "see", "returns", "for", "the", "dict", "structure", ".", "Calls", "with", "heterozygous", "values", "0", "/", "1", "0", "/", "2", "etc", "and", "no", "-", "calls", "(", ".", "/", ".", ")", "are", "replaced", "with", "Ns", "at", "the", "associated", "sites", ".", "Positions", "are", "stored", "to", "correspond", "the", "location", "in", "the", "reference", "sequence", "in", "Python", "(", "numbering", "is", "transformed", "to", "start", "at", "0", ")", "Parameters", "----------", "vcf_file", ":", "string", "Path", "to", "the", "vcf", "or", "vcf", ".", "gz", "file", "to", "be", "read", "in", "ref_file", ":", "string", "Path", "to", "the", "fasta", "reference", "file", "to", "be", "read", "in", "Returns", "--------", "compress_seq", ":", "nested", "dict", "In", "the", "format", ":", "::", "{", "reference", ":", "AGCTCGA", "..", "A", "sequences", ":", "{", "seq1", ":", "{", "4", ":", "A", "7", ":", "-", "}", "seq2", ":", "{", "100", ":", "C", "}", "}", "insertions", ":", "{", "seq1", ":", "{", "4", ":", "ATT", "}", "seq3", ":", "{", "1", ":", "TT", "10", ":", "CAG", "}", "}", "positions", ":", "[", "1", "4", "7", "10", "100", "...", "]", "}", "references", ":", "string", "String", "of", "the", "reference", "sequence", "read", "from", "the", "Fasta", "to", "which", "the", "variable", "sites", "are", "mapped", "sequences", ":", "nested", "dict", "Dict", "containing", "sequence", "names", "as", "keys", "which", "map", "to", "dicts", "that", "have", "position", "as", "key", "and", "the", "single", "-", "base", "mutation", "(", "or", "deletion", ")", "as", "values", "insertions", ":", "nested", "dict", "Dict", "in", "the", "same", "format", "as", "the", "above", "which", "stores", "insertions", "and", "their", "locations", ".", "The", "first", "base", "of", "the", "insertion", "is", "the", "same", "as", "whatever", "is", "currently", "in", "that", "position", "(", "Ref", "if", "no", "mutation", "mutation", "in", "sequences", "otherwise", ")", "so", "the", "current", "base", "can", "be", "directly", "replaced", "by", "the", "bases", "held", "here", ".", "positions", ":", "list", "Python", "list", "of", "all", "positions", "with", "a", "mutation", "insertion", "or", "deletion", "." ]
repo: neherlab/treetime
language: python
url: https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/vcf_utils.py#L6-L269
[ "def", "read_vcf", "(", "vcf_file", ",", "ref_file", ")", ":", "#Programming Note:\r", "# Note on VCF Format\r", "# -------------------\r", "# 'Insertion where there are also deletions' (special handling)\r", "# Ex:\r", "# REF ALT Seq1 Seq2\r", "# GC GCC,G 1/1 2/2\r", "# Insertions formatted differently - don't know how many bp match\r", "# the Ref (unlike simple insert below). Could be mutations, also.\r", "# 'Deletion'\r", "# Ex:\r", "# REF ALT\r", "# GC G\r", "# Alt does not have to be 1 bp - any length shorter than Ref.\r", "# 'Insertion'\r", "# Ex:\r", "# REF ALT\r", "# A ATT\r", "# First base always matches Ref.\r", "# 'No indel'\r", "# Ex:\r", "# REF ALT\r", "# A G\r", "#define here, so that all sub-functions can access them\r", "sequences", "=", "defaultdict", "(", "dict", ")", "insertions", "=", "defaultdict", "(", "dict", ")", "#Currently not used, but kept in case of future use.\r", "#TreeTime handles 2-3 base ambig codes, this will allow that.\r", "def", "getAmbigCode", "(", "bp1", ",", "bp2", ",", "bp3", "=", "\"\"", ")", ":", "bps", "=", "[", "bp1", ",", "bp2", ",", "bp3", "]", "bps", ".", "sort", "(", ")", "key", "=", "\"\"", ".", "join", "(", "bps", ")", "return", "{", "'CT'", ":", "'Y'", ",", "'AG'", ":", "'R'", ",", "'AT'", ":", "'W'", ",", "'CG'", ":", "'S'", ",", "'GT'", ":", "'K'", ",", "'AC'", ":", "'M'", ",", "'AGT'", ":", "'D'", ",", "'ACG'", ":", "'V'", ",", "'ACT'", ":", "'H'", ",", "'CGT'", ":", "'B'", "}", "[", "key", "]", "#Parses a 'normal' (not hetero or no-call) call depending if insertion+deletion, insertion,\r", "#deletion, or single bp subsitution\r", "def", "parseCall", "(", "snps", ",", "ins", ",", "pos", ",", "ref", ",", "alt", ")", ":", "#Insertion where there are also deletions (special handling)\r", "if", "len", "(", "ref", ")", ">", "1", "and", "len", "(", "alt", ")", ">", "len", "(", "ref", ")", ":", "for", "i", "in", "range", "(", "len", "(", "ref", ")", ")", ":", "#if the pos doesn't match, store in sequences\r", "if", "ref", "[", "i", "]", "!=", "alt", "[", "i", "]", ":", "snps", "[", "pos", "+", "i", "]", "=", "alt", "[", "i", "]", "if", "alt", "[", "i", "]", "!=", "'.'", "else", "'N'", "#'.' = no-call\r", "#if about to run out of ref, store rest:\r", "if", "(", "i", "+", "1", ")", ">=", "len", "(", "ref", ")", ":", "ins", "[", "pos", "+", "i", "]", "=", "alt", "[", "i", ":", "]", "#Deletion\r", "elif", "len", "(", "ref", ")", ">", "1", ":", "for", "i", "in", "range", "(", "len", "(", "ref", ")", ")", ":", "#if ref is longer than alt, these are deletion positions\r", "if", "i", "+", "1", ">", "len", "(", "alt", ")", ":", "snps", "[", "pos", "+", "i", "]", "=", "'-'", "#if not, there may be mutations\r", "else", ":", "if", "ref", "[", "i", "]", "!=", "alt", "[", "i", "]", ":", "snps", "[", "pos", "+", "i", "]", "=", "alt", "[", "i", "]", "if", "alt", "[", "i", "]", "!=", "'.'", "else", "'N'", "#'.' 
= no-call\r", "#Insertion\r", "elif", "len", "(", "alt", ")", ">", "1", ":", "ins", "[", "pos", "]", "=", "alt", "#No indel\r", "else", ":", "snps", "[", "pos", "]", "=", "alt", "#Parses a 'bad' (hetero or no-call) call depending on what it is\r", "def", "parseBadCall", "(", "snps", ",", "ins", ",", "pos", ",", "ref", ",", "ALT", ")", ":", "#Deletion\r", "# REF ALT Seq1 Seq2 Seq3\r", "# GCC G 1/1 0/1 ./.\r", "# Seq1 (processed by parseCall, above) will become 'G--'\r", "# Seq2 will become 'GNN'\r", "# Seq3 will become 'GNN'\r", "if", "len", "(", "ref", ")", ">", "1", ":", "#Deleted part becomes Ns\r", "if", "gen", "[", "0", "]", "==", "'0'", "or", "gen", "[", "0", "]", "==", "'.'", ":", "if", "gen", "[", "0", "]", "==", "'0'", ":", "#if het, get first bp\r", "alt", "=", "str", "(", "ALT", "[", "int", "(", "gen", "[", "2", "]", ")", "-", "1", "]", ")", "else", ":", "#if no-call, there is no alt, so just put Ns after 1st ref base\r", "alt", "=", "ref", "[", "0", "]", "for", "i", "in", "range", "(", "len", "(", "ref", ")", ")", ":", "#if ref is longer than alt, these are deletion positions\r", "if", "i", "+", "1", ">", "len", "(", "alt", ")", ":", "snps", "[", "pos", "+", "i", "]", "=", "'N'", "#if not, there may be mutations\r", "else", ":", "if", "ref", "[", "i", "]", "!=", "alt", "[", "i", "]", ":", "snps", "[", "pos", "+", "i", "]", "=", "alt", "[", "i", "]", "if", "alt", "[", "i", "]", "!=", "'.'", "else", "'N'", "#'.' = no-call\r", "#If not deletion, need to know call type\r", "#if het, see if proposed alt is 1bp mutation\r", "elif", "gen", "[", "0", "]", "==", "'0'", ":", "alt", "=", "str", "(", "ALT", "[", "int", "(", "gen", "[", "2", "]", ")", "-", "1", "]", ")", "if", "len", "(", "alt", ")", "==", "1", ":", "#alt = getAmbigCode(ref,alt) #if want to allow ambig\r", "alt", "=", "'N'", "#if you want to disregard ambig\r", "snps", "[", "pos", "]", "=", "alt", "#else a het-call insertion, so ignore.\r", "#else it's a no-call; see if all alts have a length of 1\r", "#(meaning a simple 1bp mutation)\r", "elif", "len", "(", "ALT", ")", "==", "len", "(", "\"\"", ".", "join", "(", "ALT", ")", ")", ":", "alt", "=", "'N'", "snps", "[", "pos", "]", "=", "alt", "#else a no-call insertion, so ignore.\r", "#House code is *much* faster than pyvcf because we don't care about all info\r", "#about coverage, quality, counts, etc, which pyvcf goes to effort to parse\r", "#(and it's not easy as there's no standard ordering). 
Custom code can completely\r", "#ignore all of this.\r", "import", "gzip", "from", "Bio", "import", "SeqIO", "import", "numpy", "as", "np", "nsamp", "=", "0", "posLoc", "=", "0", "refLoc", "=", "0", "altLoc", "=", "0", "sampLoc", "=", "9", "#Use different openers depending on whether compressed\r", "opn", "=", "gzip", ".", "open", "if", "vcf_file", ".", "endswith", "(", "(", "'.gz'", ",", "'.GZ'", ")", ")", "else", "open", "with", "opn", "(", "vcf_file", ",", "mode", "=", "'rt'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "line", "[", "0", "]", "!=", "'#'", ":", "#actual data - most common so first in 'if-list'!\r", "line", "=", "line", ".", "strip", "(", ")", "dat", "=", "line", ".", "split", "(", "'\\t'", ")", "POS", "=", "int", "(", "dat", "[", "posLoc", "]", ")", "REF", "=", "dat", "[", "refLoc", "]", "ALT", "=", "dat", "[", "altLoc", "]", ".", "split", "(", "','", ")", "calls", "=", "np", ".", "array", "(", "dat", "[", "sampLoc", ":", "]", ")", "#get samples that differ from Ref at this site\r", "recCalls", "=", "{", "}", "for", "sname", ",", "sa", "in", "zip", "(", "samps", ",", "calls", ")", ":", "if", "':'", "in", "sa", ":", "#if proper VCF file (followed by quality/coverage info)\r", "gt", "=", "sa", ".", "split", "(", "':'", ")", "[", "0", "]", "else", ":", "#if 'pseudo' VCF file (nextstrain output, or otherwise stripped)\r", "gt", "=", "sa", "if", "gt", "==", "'0'", "or", "gt", "==", "'1'", ":", "#for haploid calls in VCF\r", "gt", "=", "'0/0'", "if", "gt", "==", "'0'", "else", "'1/1'", "#ignore if ref call: '.' or '0/0', depending on VCF\r", "if", "(", "'/'", "in", "gt", "and", "gt", "!=", "'0/0'", ")", "or", "(", "'|'", "in", "gt", "and", "gt", "!=", "'0|0'", ")", ":", "recCalls", "[", "sname", "]", "=", "gt", "#store the position and the alt\r", "for", "seq", ",", "gen", "in", "recCalls", ".", "items", "(", ")", ":", "ref", "=", "REF", "pos", "=", "POS", "-", "1", "#VCF numbering starts from 1, but Reference seq numbering\r", "#will be from 0 because it's python!\r", "#Accepts only calls that are 1/1, 2/2 etc. 
Rejects hets and no-calls\r", "if", "gen", "[", "0", "]", "!=", "'0'", "and", "gen", "[", "2", "]", "!=", "'0'", "and", "gen", "[", "0", "]", "!=", "'.'", "and", "gen", "[", "2", "]", "!=", "'.'", ":", "alt", "=", "str", "(", "ALT", "[", "int", "(", "gen", "[", "0", "]", ")", "-", "1", "]", ")", "#get the index of the alternate\r", "if", "seq", "not", "in", "sequences", ".", "keys", "(", ")", ":", "sequences", "[", "seq", "]", "=", "{", "}", "parseCall", "(", "sequences", "[", "seq", "]", ",", "insertions", "[", "seq", "]", ",", "pos", ",", "ref", ",", "alt", ")", "#If is heterozygote call (0/1) or no call (./.)\r", "else", ":", "#alt will differ here depending on het or no-call, must pass original\r", "parseBadCall", "(", "sequences", "[", "seq", "]", ",", "insertions", "[", "seq", "]", ",", "pos", ",", "ref", ",", "ALT", ")", "elif", "line", "[", "0", "]", "==", "'#'", "and", "line", "[", "1", "]", "==", "'C'", ":", "#header line, get all the information\r", "header", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "posLoc", "=", "header", ".", "index", "(", "\"POS\"", ")", "refLoc", "=", "header", ".", "index", "(", "'REF'", ")", "altLoc", "=", "header", ".", "index", "(", "'ALT'", ")", "sampLoc", "=", "header", ".", "index", "(", "'FORMAT'", ")", "+", "1", "samps", "=", "header", "[", "sampLoc", ":", "]", "samps", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "samps", "]", "#ensure no leading/trailing spaces\r", "nsamp", "=", "len", "(", "samps", ")", "#else you are a comment line, ignore.\r", "#Gather all variable positions\r", "positions", "=", "set", "(", ")", "for", "seq", ",", "muts", "in", "sequences", ".", "items", "(", ")", ":", "positions", ".", "update", "(", "muts", ".", "keys", "(", ")", ")", "#One or more seqs are same as ref! (No non-ref calls) So haven't been 'seen' yet\r", "if", "nsamp", ">", "len", "(", "sequences", ")", ":", "missings", "=", "set", "(", "samps", ")", ".", "difference", "(", "sequences", ".", "keys", "(", ")", ")", "for", "s", "in", "missings", ":", "sequences", "[", "s", "]", "=", "{", "}", "refSeq", "=", "SeqIO", ".", "read", "(", "ref_file", ",", "format", "=", "'fasta'", ")", "refSeq", "=", "refSeq", ".", "upper", "(", ")", "#convert to uppercase to avoid unknown chars later\r", "refSeqStr", "=", "str", "(", "refSeq", ".", "seq", ")", "compress_seq", "=", "{", "'reference'", ":", "refSeqStr", ",", "'sequences'", ":", "sequences", ",", "'insertions'", ":", "insertions", ",", "'positions'", ":", "sorted", "(", "positions", ")", "}", "return", "compress_seq" ]
sha: f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
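A short usage sketch for read_vcf (the file names are hypothetical; per the docstring, positions in the returned dict are 0-based):

    from treetime.vcf_utils import read_vcf

    # Hypothetical inputs: a (gzipped) VCF and the fasta reference it maps to.
    data = read_vcf("variants.vcf.gz", "reference.fasta")
    ref = data["reference"]
    for name, muts in sorted(data["sequences"].items()):
        # values are single-base substitutions, 'N' for bad calls, '-' for deletions
        changes = {pos: (ref[pos], alt) for pos, alt in sorted(muts.items())}
        print(name, changes)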
partition: test
func_name: write_vcf
docstring:
    Writes out a VCF-style file (which seems to be minimally handleable by
    vcftools and pyvcf) of the alignment. This is created from a dict in a
    similar format to what's created by :py:meth:`treetime.vcf_utils.read_vcf`

    Positions of variable sites are transformed to start at 1 to match VCF
    convention.

    Parameters
    ----------
    tree_dict: nested dict
        A nested dict with keys 'sequence', 'reference' and 'positions', as is
        created by :py:meth:`treetime.TreeAnc.get_tree_dict`
    file_name: str
        File to which the new VCF should be written out. File names ending
        with '.gz' will result in the VCF automatically being gzipped.
path: treetime/vcf_utils.py
code:
    def write_vcf(tree_dict, file_name):  #, compress=False):
        """
        Writes out a VCF-style file (which seems to be minimally handleable by
        vcftools and pyvcf) of the alignment. This is created from a dict in a
        similar format to what's created by :py:meth:`treetime.vcf_utils.read_vcf`

        Positions of variable sites are transformed to start at 1 to match VCF
        convention.

        Parameters
        ----------
        tree_dict: nested dict
            A nested dict with keys 'sequence', 'reference' and 'positions',
            as is created by :py:meth:`treetime.TreeAnc.get_tree_dict`
        file_name: str
            File to which the new VCF should be written out. File names ending
            with '.gz' will result in the VCF automatically being gzipped.
        """
        # Programming Logic Note:
        #
        # For a sequence like:
        # Pos     1 2 3 4 5 6
        # Ref     A C T T A C
        # Seq1    A C - - - G
        #
        # In a dict it is stored:
        # Seq1:{3:'-', 4:'-', 5:'-', 6:'G'}  (Numbering from 1 for simplicity)
        #
        # In a VCF it needs to be:
        # POS REF   ALT   Seq1
        # 2   CTTA  C     1/1
        # 6   C     G     1/1
        #
        # If a position is deleted (pos 3), need to get invariable position preceding it
        #
        # However, in alternative case, the base before a deletion is mutant, so need
        # to check that next position isn't a deletion (as otherwise won't be found
        # until after the current single bp mutation is written out)
        #
        # When deleted position found, need to gather up all adjacent mutant positions
        # with deletions, but not include adjacent mutant positions that aren't
        # deletions (pos 6)
        #
        # Don't run off the 'end' of the position list if deletion is the last thing
        # to be included in the VCF file

        import numpy as np  #module-level import in the original file; included so the snippet is self-contained

        sequences = tree_dict['sequences']
        ref = tree_dict['reference']
        positions = tree_dict['positions']

        def handleDeletions(i, pi, pos, ref, delete, pattern):
            refb = ref[pi]
            if delete:  #Need to get the position before
                i -= 1  #As we'll next go to this position again
                pi -= 1
                pos = pi+1
                refb = ref[pi]
                #re-get pattern
                pattern = []
                for k, v in sequences.items():
                    try:
                        pattern.append(sequences[k][pi])
                    except KeyError:
                        pattern.append(ref[pi])
                pattern = np.array(pattern)

            sites = []
            sites.append(pattern)

            #Gather all positions affected by deletion - but don't run off end of position list
            while (i+1) < len(positions) and positions[i+1] == pi+1:
                i += 1
                pi = positions[i]
                pattern = []
                for k, v in sequences.items():
                    try:
                        pattern.append(sequences[k][pi])
                    except KeyError:
                        pattern.append(ref[pi])
                pattern = np.array(pattern)

                #Stops 'greedy' behaviour from adding mutations adjacent to deletions
                if any(pattern == '-'):  #if part of deletion, append
                    sites.append(pattern)
                    refb = refb + ref[pi]
                else:  #this is another mutation next to the deletion!
                    i -= 1  #don't append, break this loop

            #Rotate them into 'calls'
            sites = np.asarray(sites)
            align = np.rot90(sites)
            align = np.flipud(align)

            #Get rid of '-', and put '.' for calls that match ref
            #Only removes trailing '-'. This breaks VCF convention, but the standard
            #VCF way of handling this* is really complicated, and the situation is rare.
            #(*deletions and mutations at the same locations)
            fullpat = []
            for pt in align:
                gp = len(pt)-1
                while pt[gp] == '-':
                    pt[gp] = ''
                    gp -= 1
                pat = "".join(pt)
                if pat == refb:
                    fullpat.append('.')
                else:
                    fullpat.append(pat)

            pattern = np.array(fullpat)

            return i, pi, pos, refb, pattern

        #prepare the header of the VCF & write out
        header = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL",
                  "FILTER", "INFO", "FORMAT"] + list(sequences.keys())
        with open(file_name, 'w') as the_file:
            the_file.write("##fileformat=VCFv4.2\n" +
                           "##source=NextStrain\n" +
                           "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
            the_file.write("\t".join(header)+"\n")

        vcfWrite = []
        errorPositions = []
        explainedErrors = 0

        #Why so basic? Because we sometimes have to back up a position!
        i = 0
        while i < len(positions):
            #Get the 'pattern' of all calls at this position.
            #Look out specifically for current (this pos) or upcoming (next pos) deletions
            #But also distinguish these two, as handled differently.

            pi = positions[i]
            pos = pi+1  #change numbering to match VCF, not python, for output
            refb = ref[pi]  #reference base at this position

            delete = False       #deletion at this position - need to grab previous base (invariable)
            deleteGroup = False  #deletion at next position (mutation at this pos) - do not need to get prev base

            #try/except is much more efficient than 'if' statements for constructing
            #patterns, as on average a 'variable' location will not be variable for
            #any given sequence
            pattern = []
            #pattern2 gets the pattern at next position to check for upcoming deletions
            #it's more efficient to get both here rather than loop through sequences twice!
            pattern2 = []
            for k, v in sequences.items():
                try:
                    pattern.append(sequences[k][pi])
                except KeyError:
                    pattern.append(ref[pi])
                try:
                    pattern2.append(sequences[k][pi+1])
                except KeyError:
                    pattern2.append(ref[pi+1])
            pattern = np.array(pattern)
            pattern2 = np.array(pattern2)

            #If a deletion here, need to gather up all bases, and position before
            if any(pattern == '-'):
                if pos != 1:
                    deleteGroup = True
                    delete = True
                else:
                    #If there's a deletion in 1st pos, VCF files do not handle this well.
                    #Proceed keeping it as '-' for alt (violates VCF), but warn user
                    #to check output. (This is rare)
                    print("WARNING: You have a deletion in the first position of your"
                          " alignment. VCF format does not handle this well. Please check"
                          " the output to ensure it is correct.")
            else:
                #If a deletion in next pos, need to gather up all bases
                if any(pattern2 == '-'):
                    deleteGroup = True

            #If deletion, treat affected bases as 1 'call':
            if delete or deleteGroup:
                i, pi, pos, refb, pattern = handleDeletions(i, pi, pos, ref, delete, pattern)
            #If no deletion, replace ref with '.', as in VCF format
            else:
                pattern[pattern == refb] = '.'

            #Get the list of ALTs - minus any '.'!
            uniques = np.unique(pattern)
            uniques = uniques[np.where(uniques != '.')]

            #Convert bases to the number that matches the ALT
            j = 1
            for u in uniques:
                pattern[np.where(pattern == u)[0]] = str(j)
                j += 1
            #Now convert these calls to #/# (VCF format)
            calls = [j+"/"+j if j != '.' else '.' for j in pattern]

            #What if there's no variation at a variable site??
            #This can happen when sites are modified by TreeTime - see below.
            printPos = True
            if len(uniques) == 0:
                #If we expect it (it was made constant by TreeTime), it's fine.
                if 'inferred_const_sites' in tree_dict and pi in tree_dict['inferred_const_sites']:
                    explainedErrors += 1
                    printPos = False  #and don't output position to the VCF
                else:
                    #If we don't expect, raise an error
                    errorPositions.append(str(pi))

            #Write it out - Increment positions by 1 so it's in VCF numbering
            #If no longer variable, and explained, don't write it out
            if printPos:
                output = ["MTB_anc", str(pos), ".", refb, ",".join(uniques),
                          ".", "PASS", ".", "GT"] + calls
                vcfWrite.append("\t".join(output))

            i += 1

        #Note: The number of 'inferred_const_sites' passed back by TreeTime will often
        #be longer than the number of 'sites that were made constant' that prints below.
        #This is because given the site:
        # Ref   Alt   Seq
        # G     A     AANAA
        #This will be converted to 'AAAAA' and listed as an 'inferred_const_sites'.
        #However, for VCF purposes, because the site is 'variant' against the ref, it
        #is variant, as expected, and so won't be counted in the below list, which is
        #only sites removed from the VCF.
        if 'inferred_const_sites' in tree_dict and explainedErrors != 0:
            print("Sites that were constant except for ambiguous bases were made"
                  " constant by TreeTime. This happened {} times. These sites are"
                  " now excluded from the VCF.".format(explainedErrors))

        if len(errorPositions) != 0:
            print("\n***WARNING: vcf_utils.py"
                  "\n{} sites were found that had no alternative bases."
                  " If this data has been run through TreeTime and contains ambiguous bases,"
                  " try calling get_tree_dict with var_ambigs=True to see if this clears the error."
                  "\n\nAlternative causes:"
                  "\n- Not all sequences in your alignment are in the tree"
                  " (if you are running TreeTime via commandline this is most likely)"
                  "\n- In TreeTime, can be caused by overwriting variants in tips with"
                  " small branch lengths (debug)"
                  "\n\nThese are the positions affected (numbering starts at 0):".format(str(len(errorPositions))))
            print(",".join(errorPositions))

        with open(file_name, 'a') as the_file:
            the_file.write("\n".join(vcfWrite))

        if file_name.endswith(('.gz', '.GZ')):
            import os
            #must temporarily remove .gz ending, or gzip won't zip it!
            os.rename(file_name, file_name[:-3])
            call = ["gzip", file_name[:-3]]
            os.system(" ".join(call))
def write_vcf(tree_dict, file_name):#, compress=False): """ Writes out a VCF-style file (which seems to be minimally handleable by vcftools and pyvcf) of the alignment. This is created from a dict in a similar format to what's created by :py:meth:`treetime.vcf_utils.read_vcf` Positions of variable sites are transformed to start at 1 to match VCF convention. Parameters ---------- tree_dict: nested dict A nested dict with keys 'sequences', 'reference' and 'positions', as is created by :py:meth:`treetime.TreeAnc.get_tree_dict` file_name: str File to which the new VCF should be written out. File names ending with '.gz' will result in the VCF automatically being gzipped. """ # Programming Logic Note: # # For a sequence like: # Pos 1 2 3 4 5 6 # Ref A C T T A C # Seq1 A C - - - G # # In a dict it is stored: # Seq1:{3:'-', 4:'-', 5:'-', 6:'G'} (Numbering from 1 for simplicity) # # In a VCF it needs to be: # POS REF ALT Seq1 # 2 CTTA C 1/1 # 6 C G 1/1 # # If a position is deleted (pos 3), need to get invariable position preceding it # # However, in the alternative case, the base before a deletion is mutant, so need to check # that next position isn't a deletion (as otherwise won't be found until after the # current single bp mutation is written out) # # When deleted position found, need to gather up all adjacent mutant positions with deletions, # but not include adjacent mutant positions that aren't deletions (pos 6) # # Don't run off the 'end' of the position list if deletion is the last thing to be included # in the VCF file sequences = tree_dict['sequences'] ref = tree_dict['reference'] positions = tree_dict['positions'] def handleDeletions(i, pi, pos, ref, delete, pattern): refb = ref[pi] if delete: #Need to get the position before i-=1 #As we'll next go to this position again pi-=1 pos = pi+1 refb = ref[pi] #re-get pattern pattern = [] for k,v in sequences.items(): try: pattern.append(sequences[k][pi]) except KeyError: pattern.append(ref[pi]) pattern = np.array(pattern) sites = [] sites.append(pattern) #Gather all positions affected by deletion - but don't run off end of position list while (i+1) < len(positions) and positions[i+1] == pi+1: i+=1 pi = positions[i] pattern = [] for k,v in sequences.items(): try: pattern.append(sequences[k][pi]) except KeyError: pattern.append(ref[pi]) pattern = np.array(pattern) #Stops 'greedy' behaviour from adding mutations adjacent to deletions if any(pattern == '-'): #if part of deletion, append sites.append(pattern) refb = refb+ref[pi] else: #this is another mutation next to the deletion! i-=1 #don't append, break this loop #Rotate them into 'calls' sites = np.asarray(sites) align = np.rot90(sites) align = np.flipud(align) #Get rid of '-', and put '.' for calls that match ref #Only removes trailing '-'. This breaks VCF convention, but the standard #VCF way of handling this* is really complicated, and the situation is rare.
#(*deletions and mutations at the same locations) fullpat = [] for pt in align: gp = len(pt)-1 while pt[gp] == '-': pt[gp] = '' gp-=1 pat = "".join(pt) if pat == refb: fullpat.append('.') else: fullpat.append(pat) pattern = np.array(fullpat) return i, pi, pos, refb, pattern #prepare the header of the VCF & write out header=["#CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT"]+list(sequences.keys()) with open(file_name, 'w') as the_file: the_file.write( "##fileformat=VCFv4.2\n"+ "##source=NextStrain\n"+ "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n") the_file.write("\t".join(header)+"\n") vcfWrite = [] errorPositions = [] explainedErrors = 0 #Why so basic? Because we sometimes have to back up a position! i=0 while i < len(positions): #Get the 'pattern' of all calls at this position. #Look out specifically for current (this pos) or upcoming (next pos) deletions #But also distinguish these two, as handled differently. pi = positions[i] pos = pi+1 #change numbering to match VCF, not python, for output refb = ref[pi] #reference base at this position delete = False #deletion at this position - need to grab previous base (invariable) deleteGroup = False #deletion at next position (mutation at this pos) - do not need to get prev base #try/except is much more efficient than 'if' statements for constructing patterns, #as on average a 'variable' location will not be variable for any given sequence pattern = [] #pattern2 gets the pattern at next position to check for upcoming deletions #it's more efficient to get both here rather than loop through sequences twice! pattern2 = [] for k,v in sequences.items(): try: pattern.append(sequences[k][pi]) except KeyError: pattern.append(ref[pi]) try: pattern2.append(sequences[k][pi+1]) except KeyError: pattern2.append(ref[pi+1]) pattern = np.array(pattern) pattern2 = np.array(pattern2) #If a deletion here, need to gather up all bases, and position before if any(pattern == '-'): if pos != 1: deleteGroup = True delete = True else: #If there's a deletion in 1st pos, VCF files do not handle this well. #Proceed keeping it as '-' for alt (violates VCF), but warn user to check output. #(This is rare) print ("WARNING: You have a deletion in the first position of your alignment. VCF format does not handle this well. Please check the output to ensure it is correct.") else: #If a deletion in next pos, need to gather up all bases if any(pattern2 == '-'): deleteGroup = True #If deletion, treat affected bases as 1 'call': if delete or deleteGroup: i, pi, pos, refb, pattern = handleDeletions(i, pi, pos, ref, delete, pattern) #If no deletion, replace ref with '.', as in VCF format else: pattern[pattern==refb] = '.' #Get the list of ALTs - minus any '.'! uniques = np.unique(pattern) uniques = uniques[np.where(uniques!='.')] #Convert bases to the number that matches the ALT j=1 for u in uniques: pattern[np.where(pattern==u)[0]] = str(j) j+=1 #Now convert these calls to #/# (VCF format) calls = [ j+"/"+j if j!='.' else '.' for j in pattern ] #What if there's no variation at a variable site?? #This can happen when sites are modified by TreeTime - see below. printPos = True if len(uniques)==0: #If we expect it (it was made constant by TreeTime), it's fine.
if 'inferred_const_sites' in tree_dict and pi in tree_dict['inferred_const_sites']: explainedErrors += 1 printPos = False #and don't output position to the VCF else: #If we don't expect it, record the position to report later errorPositions.append(str(pi)) #Write it out - Increment positions by 1 so it's in VCF numbering #If no longer variable, and explained, don't write it out if printPos: output = ["MTB_anc", str(pos), ".", refb, ",".join(uniques), ".", "PASS", ".", "GT"] + calls vcfWrite.append("\t".join(output)) i+=1 #Note: The number of 'inferred_const_sites' passed back by TreeTime will often be longer #than the number of 'sites that were made constant' that prints below. This is because given the site: # Ref Alt Seq # G A AANAA #This will be converted to 'AAAAA' and listed as an 'inferred_const_sites'. However, for VCF #purposes, because the site is 'variant' against the ref, it is variant, as expected, and so #won't be counted in the below list, which is only sites removed from the VCF. if 'inferred_const_sites' in tree_dict and explainedErrors != 0: print ( "Sites that were constant except for ambiguous bases were made constant by TreeTime. This happened {} times. These sites are now excluded from the VCF.".format(explainedErrors)) if len(errorPositions) != 0: print ("\n***WARNING: vcf_utils.py" "\n{} sites were found that had no alternative bases. If this data has been " "run through TreeTime and contains ambiguous bases, try calling get_tree_dict with " "var_ambigs=True to see if this clears the error." "\n\nAlternative causes:" "\n- Not all sequences in your alignment are in the tree (if you are running TreeTime via commandline " "this is most likely)" "\n- In TreeTime, can be caused by overwriting variants in tips with small branch lengths (debug)" "\n\nThese are the positions affected (numbering starts at 0):".format(str(len(errorPositions)))) print (",".join(errorPositions)) with open(file_name, 'a') as the_file: the_file.write("\n".join(vcfWrite)) if file_name.endswith(('.gz', '.GZ')): import os #must temporarily remove .gz ending, or gzip won't zip it! os.rename(file_name, file_name[:-3]) call = ["gzip", file_name[:-3]] os.system(" ".join(call))
[ "Writes", "out", "a", "VCF", "-", "style", "file", "(", "which", "seems", "to", "be", "minimally", "handleable", "by", "vcftools", "and", "pyvcf", ")", "of", "the", "alignment", ".", "This", "is", "created", "from", "a", "dict", "in", "a", "similar", "format", "to", "what", "s", "created", "by", ":", "py", ":", "meth", ":", "treetime", ".", "vcf_utils", ".", "read_vcf", "Positions", "of", "variable", "sites", "are", "transformed", "to", "start", "at", "1", "to", "match", "VCF", "convention", ".", "Parameters", "----------", "tree_dict", ":", "nested", "dict", "A", "nested", "dict", "with", "keys", "sequence", "reference", "and", "positions", "as", "is", "created", "by", ":", "py", ":", "meth", ":", "treetime", ".", "TreeAnc", ".", "get_tree_dict", "file_name", ":", "str", "File", "to", "which", "the", "new", "VCF", "should", "be", "written", "out", ".", "File", "names", "ending", "with", ".", "gz", "will", "result", "in", "the", "VCF", "automatically", "being", "gzipped", "." ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/vcf_utils.py#L272-L518
[ "def", "write_vcf", "(", "tree_dict", ",", "file_name", ")", ":", "#, compress=False):\r", "# Programming Logic Note:\r", "#\r", "# For a sequence like:\r", "# Pos 1 2 3 4 5 6\r", "# Ref A C T T A C\r", "# Seq1 A C - - - G\r", "#\r", "# In a dict it is stored:\r", "# Seq1:{3:'-', 4:'-', 5:'-', 6:'G'} (Numbering from 1 for simplicity)\r", "#\r", "# In a VCF it needs to be:\r", "# POS REF ALT Seq1\r", "# 2 CTTA C 1/1\r", "# 6 C G 1/1\r", "#\r", "# If a position is deleted (pos 3), need to get invariable position preceeding it\r", "#\r", "# However, in alternative case, the base before a deletion is mutant, so need to check\r", "# that next position isn't a deletion (as otherwise won't be found until after the\r", "# current single bp mutation is written out)\r", "#\r", "# When deleted position found, need to gather up all adjacent mutant positions with deletions,\r", "# but not include adjacent mutant positions that aren't deletions (pos 6)\r", "#\r", "# Don't run off the 'end' of the position list if deletion is the last thing to be included\r", "# in the VCF file\r", "sequences", "=", "tree_dict", "[", "'sequences'", "]", "ref", "=", "tree_dict", "[", "'reference'", "]", "positions", "=", "tree_dict", "[", "'positions'", "]", "def", "handleDeletions", "(", "i", ",", "pi", ",", "pos", ",", "ref", ",", "delete", ",", "pattern", ")", ":", "refb", "=", "ref", "[", "pi", "]", "if", "delete", ":", "#Need to get the position before\r", "i", "-=", "1", "#As we'll next go to this position again\r", "pi", "-=", "1", "pos", "=", "pi", "+", "1", "refb", "=", "ref", "[", "pi", "]", "#re-get pattern\r", "pattern", "=", "[", "]", "for", "k", ",", "v", "in", "sequences", ".", "items", "(", ")", ":", "try", ":", "pattern", ".", "append", "(", "sequences", "[", "k", "]", "[", "pi", "]", ")", "except", "KeyError", ":", "pattern", ".", "append", "(", "ref", "[", "pi", "]", ")", "pattern", "=", "np", ".", "array", "(", "pattern", ")", "sites", "=", "[", "]", "sites", ".", "append", "(", "pattern", ")", "#Gather all positions affected by deletion - but don't run off end of position list\r", "while", "(", "i", "+", "1", ")", "<", "len", "(", "positions", ")", "and", "positions", "[", "i", "+", "1", "]", "==", "pi", "+", "1", ":", "i", "+=", "1", "pi", "=", "positions", "[", "i", "]", "pattern", "=", "[", "]", "for", "k", ",", "v", "in", "sequences", ".", "items", "(", ")", ":", "try", ":", "pattern", ".", "append", "(", "sequences", "[", "k", "]", "[", "pi", "]", ")", "except", "KeyError", ":", "pattern", ".", "append", "(", "ref", "[", "pi", "]", ")", "pattern", "=", "np", ".", "array", "(", "pattern", ")", "#Stops 'greedy' behaviour from adding mutations adjacent to deletions\r", "if", "any", "(", "pattern", "==", "'-'", ")", ":", "#if part of deletion, append\r", "sites", ".", "append", "(", "pattern", ")", "refb", "=", "refb", "+", "ref", "[", "pi", "]", "else", ":", "#this is another mutation next to the deletion!\r", "i", "-=", "1", "#don't append, break this loop\r", "#Rotate them into 'calls'\r", "sites", "=", "np", ".", "asarray", "(", "sites", ")", "align", "=", "np", ".", "rot90", "(", "sites", ")", "align", "=", "np", ".", "flipud", "(", "align", ")", "#Get rid of '-', and put '.' for calls that match ref\r", "#Only removes trailing '-'. 
This breaks VCF convension, but the standard\r", "#VCF way of handling this* is really complicated, and the situation is rare.\r", "#(*deletions and mutations at the same locations)\r", "fullpat", "=", "[", "]", "for", "pt", "in", "align", ":", "gp", "=", "len", "(", "pt", ")", "-", "1", "while", "pt", "[", "gp", "]", "==", "'-'", ":", "pt", "[", "gp", "]", "=", "''", "gp", "-=", "1", "pat", "=", "\"\"", ".", "join", "(", "pt", ")", "if", "pat", "==", "refb", ":", "fullpat", ".", "append", "(", "'.'", ")", "else", ":", "fullpat", ".", "append", "(", "pat", ")", "pattern", "=", "np", ".", "array", "(", "fullpat", ")", "return", "i", ",", "pi", ",", "pos", ",", "refb", ",", "pattern", "#prepare the header of the VCF & write out\r", "header", "=", "[", "\"#CHROM\"", ",", "\"POS\"", ",", "\"ID\"", ",", "\"REF\"", ",", "\"ALT\"", ",", "\"QUAL\"", ",", "\"FILTER\"", ",", "\"INFO\"", ",", "\"FORMAT\"", "]", "+", "list", "(", "sequences", ".", "keys", "(", ")", ")", "with", "open", "(", "file_name", ",", "'w'", ")", "as", "the_file", ":", "the_file", ".", "write", "(", "\"##fileformat=VCFv4.2\\n\"", "+", "\"##source=NextStrain\\n\"", "+", "\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\"", ")", "the_file", ".", "write", "(", "\"\\t\"", ".", "join", "(", "header", ")", "+", "\"\\n\"", ")", "vcfWrite", "=", "[", "]", "errorPositions", "=", "[", "]", "explainedErrors", "=", "0", "#Why so basic? Because we sometimes have to back up a position!\r", "i", "=", "0", "while", "i", "<", "len", "(", "positions", ")", ":", "#Get the 'pattern' of all calls at this position.\r", "#Look out specifically for current (this pos) or upcoming (next pos) deletions\r", "#But also distinguish these two, as handled differently.\r", "pi", "=", "positions", "[", "i", "]", "pos", "=", "pi", "+", "1", "#change numbering to match VCF, not python, for output\r", "refb", "=", "ref", "[", "pi", "]", "#reference base at this position\r", "delete", "=", "False", "#deletion at this position - need to grab previous base (invariable)\r", "deleteGroup", "=", "False", "#deletion at next position (mutation at this pos) - do not need to get prev base\r", "#try/except is much more efficient than 'if' statements for constructing patterns,\r", "#as on average a 'variable' location will not be variable for any given sequence\r", "pattern", "=", "[", "]", "#pattern2 gets the pattern at next position to check for upcoming deletions\r", "#it's more efficient to get both here rather than loop through sequences twice!\r", "pattern2", "=", "[", "]", "for", "k", ",", "v", "in", "sequences", ".", "items", "(", ")", ":", "try", ":", "pattern", ".", "append", "(", "sequences", "[", "k", "]", "[", "pi", "]", ")", "except", "KeyError", ":", "pattern", ".", "append", "(", "ref", "[", "pi", "]", ")", "try", ":", "pattern2", ".", "append", "(", "sequences", "[", "k", "]", "[", "pi", "+", "1", "]", ")", "except", "KeyError", ":", "pattern2", ".", "append", "(", "ref", "[", "pi", "+", "1", "]", ")", "pattern", "=", "np", ".", "array", "(", "pattern", ")", "pattern2", "=", "np", ".", "array", "(", "pattern2", ")", "#If a deletion here, need to gather up all bases, and position before\r", "if", "any", "(", "pattern", "==", "'-'", ")", ":", "if", "pos", "!=", "1", ":", "deleteGroup", "=", "True", "delete", "=", "True", "else", ":", "#If theres a deletion in 1st pos, VCF files do not handle this well.\r", "#Proceed keeping it as '-' for alt (violates VCF), but warn user to check output.\r", "#(This is rare)\r", "print", "(", "\"WARNING: 
You have a deletion in the first position of your alignment. VCF format does not handle this well. Please check the output to ensure it is correct.\"", ")", "else", ":", "#If a deletion in next pos, need to gather up all bases\r", "if", "any", "(", "pattern2", "==", "'-'", ")", ":", "deleteGroup", "=", "True", "#If deletion, treat affected bases as 1 'call':\r", "if", "delete", "or", "deleteGroup", ":", "i", ",", "pi", ",", "pos", ",", "refb", ",", "pattern", "=", "handleDeletions", "(", "i", ",", "pi", ",", "pos", ",", "ref", ",", "delete", ",", "pattern", ")", "#If no deletion, replace ref with '.', as in VCF format\r", "else", ":", "pattern", "[", "pattern", "==", "refb", "]", "=", "'.'", "#Get the list of ALTs - minus any '.'!\r", "uniques", "=", "np", ".", "unique", "(", "pattern", ")", "uniques", "=", "uniques", "[", "np", ".", "where", "(", "uniques", "!=", "'.'", ")", "]", "#Convert bases to the number that matches the ALT\r", "j", "=", "1", "for", "u", "in", "uniques", ":", "pattern", "[", "np", ".", "where", "(", "pattern", "==", "u", ")", "[", "0", "]", "]", "=", "str", "(", "j", ")", "j", "+=", "1", "#Now convert these calls to #/# (VCF format)\r", "calls", "=", "[", "j", "+", "\"/\"", "+", "j", "if", "j", "!=", "'.'", "else", "'.'", "for", "j", "in", "pattern", "]", "#What if there's no variation at a variable site??\r", "#This can happen when sites are modified by TreeTime - see below.\r", "printPos", "=", "True", "if", "len", "(", "uniques", ")", "==", "0", ":", "#If we expect it (it was made constant by TreeTime), it's fine.\r", "if", "'inferred_const_sites'", "in", "tree_dict", "and", "pi", "in", "tree_dict", "[", "'inferred_const_sites'", "]", ":", "explainedErrors", "+=", "1", "printPos", "=", "False", "#and don't output position to the VCF\r", "else", ":", "#If we don't expect, raise an error\r", "errorPositions", ".", "append", "(", "str", "(", "pi", ")", ")", "#Write it out - Increment positions by 1 so it's in VCF numbering\r", "#If no longer variable, and explained, don't write it out\r", "if", "printPos", ":", "output", "=", "[", "\"MTB_anc\"", ",", "str", "(", "pos", ")", ",", "\".\"", ",", "refb", ",", "\",\"", ".", "join", "(", "uniques", ")", ",", "\".\"", ",", "\"PASS\"", ",", "\".\"", ",", "\"GT\"", "]", "+", "calls", "vcfWrite", ".", "append", "(", "\"\\t\"", ".", "join", "(", "output", ")", ")", "i", "+=", "1", "#Note: The number of 'inferred_const_sites' passed back by TreeTime will often be longer\r", "#than the number of 'site that were made constant' that prints below. This is because given the site:\r", "# Ref Alt Seq\r", "# G A AANAA\r", "#This will be converted to 'AAAAA' and listed as an 'inferred_const_sites'. However, for VCF\r", "#purposes, because the site is 'variant' against the ref, it is variant, as expected, and so\r", "#won't be counted in the below list, which is only sites removed from the VCF.\r", "if", "'inferred_const_sites'", "in", "tree_dict", "and", "explainedErrors", "!=", "0", ":", "print", "(", "\"Sites that were constant except for ambiguous bases were made constant by TreeTime. This happened {} times. These sites are now excluded from the VCF.\"", ".", "format", "(", "explainedErrors", ")", ")", "if", "len", "(", "errorPositions", ")", "!=", "0", ":", "print", "(", "\"\\n***WARNING: vcf_utils.py\"", "\"\\n{} sites were found that had no alternative bases. 
If this data has been \"", "\"run through TreeTime and contains ambiguous bases, try calling get_tree_dict with \"", "\"var_ambigs=True to see if this clears the error.\"", "\"\\n\\nAlternative causes:\"", "\"\\n- Not all sequences in your alignment are in the tree (if you are running TreeTime via commandline \"", "\"this is most likely)\"", "\"\\n- In TreeTime, can be caused by overwriting variants in tips with small branch lengths (debug)\"", "\"\\n\\nThese are the positions affected (numbering starts at 0):\"", ".", "format", "(", "str", "(", "len", "(", "errorPositions", ")", ")", ")", ")", "print", "(", "\",\"", ".", "join", "(", "errorPositions", ")", ")", "with", "open", "(", "file_name", ",", "'a'", ")", "as", "the_file", ":", "the_file", ".", "write", "(", "\"\\n\"", ".", "join", "(", "vcfWrite", ")", ")", "if", "file_name", ".", "endswith", "(", "(", "'.gz'", ",", "'.GZ'", ")", ")", ":", "import", "os", "#must temporarily remove .gz ending, or gzip won't zip it!\r", "os", ".", "rename", "(", "file_name", ",", "file_name", "[", ":", "-", "3", "]", ")", "call", "=", "[", "\"gzip\"", ",", "file_name", "[", ":", "-", "3", "]", "]", "os", ".", "system", "(", "\" \"", ".", "join", "(", "call", ")", ")" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
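A minimal usage sketch for write_vcf (hypothetical data; the dict layout mirrors the 'sequences'/'reference'/'positions' fields read at the top of the function: 0-based variable-site indices plus sparse per-sample differences; the function also assumes numpy is available as np in its module):

```python
# Two samples differing from a 7-base reference at 0-based positions 1 and 5.
# The last base is avoided because the function peeks at ref[pi+1] when it
# checks for an upcoming deletion.
tree_dict = {
    'reference': 'ACTTACG',
    'positions': [1, 5],              # 0-based indices of variable sites
    'sequences': {
        'Seq1': {1: 'G'},             # sparse differences vs. the reference
        'Seq2': {5: 'T'},
    },
}
write_vcf(tree_dict, 'variants.vcf')  # rows appear at 1-based positions 2 and 6
```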
test
_convolution_integrand
Evaluates int_tau f(t+tau)*g(tau) or int_tau f(t-tau)g(tau) if inverse time is TRUE Parameters ----------- t_val : double Time point f : Interpolation object First multiplier in convolution g : Interpolation object Second multiplier in convolution inverse_time : bool, None time direction. If True, then the f(t-tau)*g(tau) is calculated, otherwise, f(t+tau)*g(tau) return_log : bool If True, the logarithm will be returned Returns ------- FG : Distribution The function to be integrated as Distribution object (interpolator)
treetime/node_interpolator.py
def _convolution_integrand(t_val, f, g, inverse_time=None, return_log=False): ''' Evaluates int_tau f(t+tau)*g(tau) or int_tau f(t-tau)g(tau) if inverse time is TRUE Parameters ----------- t_val : double Time point f : Interpolation object First multiplier in convolution g : Interpolation object Second multiplier in convolution inverse_time : bool, None time direction. If True, then the f(t-tau)*g(tau) is calculated, otherwise, f(t+tau)*g(tau) return_log : bool If True, the logarithm will be returned Returns ------- FG : Distribution The function to be integrated as Distribution object (interpolator) ''' if inverse_time is None: raise Exception("Inverse time argument must be set!") # determine integration boundaries: if inverse_time: ## tau>g.xmin and t-tau<f.xmax tau_min = max(t_val - f.xmax, g.xmin) ## tau<g.xmax and t-tau>f.xmin tau_max = min(t_val - f.xmin, g.xmax) else: ## tau>g.xmin and t+tau>f.xmin tau_min = max(f.xmin-t_val, g.xmin) ## tau<g.xmax and t+tau<f.xmax tau_max = min(f.xmax-t_val, g.xmax) #print(tau_min, tau_max) if tau_max <= tau_min: if return_log: return ttconf.BIG_NUMBER else: return 0.0 # functions do not overlap else: # create the tau-grid for the interpolation object in the overlap region if inverse_time: tau = np.unique(np.concatenate((g.x, t_val-f.x,[tau_min,tau_max]))) else: tau = np.unique(np.concatenate((g.x, f.x-t_val,[tau_min,tau_max]))) tau = tau[(tau>tau_min-ttconf.TINY_NUMBER)&(tau<tau_max+ttconf.TINY_NUMBER)] if len(tau)<10: tau = np.linspace(tau_min, tau_max, 10) if inverse_time: # add negative logarithms tnode = t_val - tau fg = f(tnode) + g(tau, tnode=tnode) else: fg = f(t_val + tau) + g(tau, tnode=t_val) # create the interpolation object on this grid FG = Distribution(tau, fg, is_log=True, min_width = np.max([f.min_width, g.min_width]), kind='linear', assume_sorted=True) return FG
def _convolution_integrand(t_val, f, g, inverse_time=None, return_log=False): ''' Evaluates int_tau f(t+tau)*g(tau) or int_tau f(t-tau)g(tau) if inverse time is TRUE Parameters ----------- t_val : double Time point f : Interpolation object First multiplier in convolution g : Interpolation object Second multiplier in convolution inverse_time : bool, None time direction. If True, then the f(t-tau)*g(tau) is calculated, otherwise, f(t+tau)*g(tau) return_log : bool If True, the logarithm will be returned Returns ------- FG : Distribution The function to be integrated as Distribution object (interpolator) ''' if inverse_time is None: raise Exception("Inverse time argument must be set!") # determine integration boundaries: if inverse_time: ## tau>g.xmin and t-tau<f.xmax tau_min = max(t_val - f.xmax, g.xmin) ## tau<g.xmax and t-tau>f.xmin tau_max = min(t_val - f.xmin, g.xmax) else: ## tau>g.xmin and t+tau>f.xmin tau_min = max(f.xmin-t_val, g.xmin) ## tau<g.xmax and t+tau<f.xmax tau_max = min(f.xmax-t_val, g.xmax) #print(tau_min, tau_max) if tau_max <= tau_min: if return_log: return ttconf.BIG_NUMBER else: return 0.0 # functions do not overlap else: # create the tau-grid for the interpolation object in the overlap region if inverse_time: tau = np.unique(np.concatenate((g.x, t_val-f.x,[tau_min,tau_max]))) else: tau = np.unique(np.concatenate((g.x, f.x-t_val,[tau_min,tau_max]))) tau = tau[(tau>tau_min-ttconf.TINY_NUMBER)&(tau<tau_max+ttconf.TINY_NUMBER)] if len(tau)<10: tau = np.linspace(tau_min, tau_max, 10) if inverse_time: # add negative logarithms tnode = t_val - tau fg = f(tnode) + g(tau, tnode=tnode) else: fg = f(t_val + tau) + g(tau, tnode=t_val) # create the interpolation object on this grid FG = Distribution(tau, fg, is_log=True, min_width = np.max([f.min_width, g.min_width]), kind='linear', assume_sorted=True) return FG
[ "Evaluates", "int_tau", "f", "(", "t", "+", "tau", ")", "*", "g", "(", "tau", ")", "or", "int_tau", "f", "(", "t", "-", "tau", ")", "g", "(", "tau", ")", "if", "inverse", "time", "is", "TRUE" ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/node_interpolator.py#L9-L84
[ "def", "_convolution_integrand", "(", "t_val", ",", "f", ",", "g", ",", "inverse_time", "=", "None", ",", "return_log", "=", "False", ")", ":", "if", "inverse_time", "is", "None", ":", "raise", "Exception", "(", "\"Inverse time argument must be set!\"", ")", "# determine integration boundaries:", "if", "inverse_time", ":", "## tau>g.xmin and t-tau<f.xmax", "tau_min", "=", "max", "(", "t_val", "-", "f", ".", "xmax", ",", "g", ".", "xmin", ")", "## tau<g.xmax and t-tau>f.xmin", "tau_max", "=", "min", "(", "t_val", "-", "f", ".", "xmin", ",", "g", ".", "xmax", ")", "else", ":", "## tau>g.xmin and t+tau>f.xmin", "tau_min", "=", "max", "(", "f", ".", "xmin", "-", "t_val", ",", "g", ".", "xmin", ")", "## tau<g.xmax and t+tau<f.xmax", "tau_max", "=", "min", "(", "f", ".", "xmax", "-", "t_val", ",", "g", ".", "xmax", ")", "#print(tau_min, tau_max)", "if", "tau_max", "<=", "tau_min", ":", "if", "return_log", ":", "return", "ttconf", ".", "BIG_NUMBER", "else", ":", "return", "0.0", "# functions do not overlap", "else", ":", "# create the tau-grid for the interpolation object in the overlap region", "if", "inverse_time", ":", "tau", "=", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "(", "g", ".", "x", ",", "t_val", "-", "f", ".", "x", ",", "[", "tau_min", ",", "tau_max", "]", ")", ")", ")", "else", ":", "tau", "=", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "(", "g", ".", "x", ",", "f", ".", "x", "-", "t_val", ",", "[", "tau_min", ",", "tau_max", "]", ")", ")", ")", "tau", "=", "tau", "[", "(", "tau", ">", "tau_min", "-", "ttconf", ".", "TINY_NUMBER", ")", "&", "(", "tau", "<", "tau_max", "+", "ttconf", ".", "TINY_NUMBER", ")", "]", "if", "len", "(", "tau", ")", "<", "10", ":", "tau", "=", "np", ".", "linspace", "(", "tau_min", ",", "tau_max", ",", "10", ")", "if", "inverse_time", ":", "# add negative logarithms", "tnode", "=", "t_val", "-", "tau", "fg", "=", "f", "(", "tnode", ")", "+", "g", "(", "tau", ",", "tnode", "=", "tnode", ")", "else", ":", "fg", "=", "f", "(", "t_val", "+", "tau", ")", "+", "g", "(", "tau", ",", "tnode", "=", "t_val", ")", "# create the interpolation object on this grid", "FG", "=", "Distribution", "(", "tau", ",", "fg", ",", "is_log", "=", "True", ",", "min_width", "=", "np", ".", "max", "(", "[", "f", ".", "min_width", ",", "g", ".", "min_width", "]", ")", ",", "kind", "=", "'linear'", ",", "assume_sorted", "=", "True", ")", "return", "FG" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
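The integration boundaries derived above can be checked by hand. A standalone sketch of the inverse_time=True support logic with made-up bounds (no treetime objects required):

```python
# f is supported on [0, 2] and g on [0, 1]; at t = 2.5 the constraints
# tau > g.xmin, tau < g.xmax and f.xmin < t - tau < f.xmax yield:
f_xmin, f_xmax = 0.0, 2.0
g_xmin, g_xmax = 0.0, 1.0
t = 2.5
tau_min = max(t - f_xmax, g_xmin)  # 0.5
tau_max = min(t - f_xmin, g_xmax)  # 1.0
assert tau_min < tau_max  # supports overlap; otherwise the function returns
                          # BIG_NUMBER (log scale) or 0.0 (linear scale)
```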
test
_max_of_integrand
Evaluates max_tau f(t+tau)*g(tau) or max_tau f(t-tau)g(tau) if inverse time is TRUE Parameters ----------- t_val : double Time point f : Interpolation object First multiplier in convolution g : Interpolation object Second multiplier in convolution inverse_time : bool, None time direction. If True, then the f(t-tau)*g(tau) is calculated, otherwise, f(t+tau)*g(tau) return_log : bool If True, the logarithm will be returned Returns ------- res : list The (negative log) maximum of the integrand and the position tau at which it is attained
treetime/node_interpolator.py
def _max_of_integrand(t_val, f, g, inverse_time=None, return_log=False): ''' Evaluates max_tau f(t+tau)*g(tau) or max_tau f(t-tau)g(tau) if inverse time is TRUE Parameters ----------- t_val : double Time point f : Interpolation object First multiplier in convolution g : Interpolation object Second multiplier in convolution inverse_time : bool, None time direction. If True, then the f(t-tau)*g(tau) is calculated, otherwise, f(t+tau)*g(tau) return_log : bool If True, the logarithm will be returned Returns ------- res : list The (negative log) maximum of the integrand and the position tau at which it is attained ''' # the integrand is always evaluated in negative-log space (return_log=True) FG = _convolution_integrand(t_val, f, g, inverse_time, return_log=True) if FG == ttconf.BIG_NUMBER: res = [ttconf.BIG_NUMBER, 0] # the distributions do not overlap else: X = FG.x[FG.y.argmin()] Y = FG.y.min() res = [Y, X] if not return_log: res[0] = np.exp(-res[0]) # FG stores negative logs, so convert back to linear scale return res
def _max_of_integrand(t_val, f, g, inverse_time=None, return_log=False): ''' Evaluates max_tau f(t+tau)*g(tau) or max_tau f(t-tau)g(tau) if inverse time is TRUE Parameters ----------- t_val : double Time point f : Interpolation object First multiplier in convolution g : Interpolation object Second multiplier in convolution inverse_time : bool, None time direction. If True, then the f(t-tau)*g(tau) is calculated, otherwise, f(t+tau)*g(tau) return_log : bool If True, the logarithm will be returned Returns ------- res : list The (negative log) maximum of the integrand and the position tau at which it is attained ''' # the integrand is always evaluated in negative-log space (return_log=True) FG = _convolution_integrand(t_val, f, g, inverse_time, return_log=True) if FG == ttconf.BIG_NUMBER: res = [ttconf.BIG_NUMBER, 0] # the distributions do not overlap else: X = FG.x[FG.y.argmin()] Y = FG.y.min() res = [Y, X] if not return_log: res[0] = np.exp(-res[0]) # FG stores negative logs, so convert back to linear scale return res
[ "Evaluates", "max_tau", "f", "(", "t", "+", "tau", ")", "*", "g", "(", "tau", ")", "or", "max_tau", "f", "(", "t", "-", "tau", ")", "g", "(", "tau", ")", "if", "inverse", "time", "is", "TRUE" ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/node_interpolator.py#L88-L135
[ "def", "_max_of_integrand", "(", "t_val", ",", "f", ",", "g", ",", "inverse_time", "=", "None", ",", "return_log", "=", "False", ")", ":", "# return log is always True", "FG", "=", "_convolution_integrand", "(", "t_val", ",", "f", ",", "g", ",", "inverse_time", ",", "return_log", "=", "True", ")", "if", "FG", "==", "ttconf", ".", "BIG_NUMBER", ":", "res", "=", "ttconf", ".", "BIG_NUMBER", ",", "0", "else", ":", "X", "=", "FG", ".", "x", "[", "FG", ".", "y", ".", "argmin", "(", ")", "]", "Y", "=", "FG", ".", "y", ".", "min", "(", ")", "res", "=", "Y", ",", "X", "if", "not", "return_log", ":", "res", "[", "0", "]", "=", "np", ".", "log", "(", "res", "[", "0", "]", ")", "return", "res" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
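Because the integrand FG is stored as a negative logarithm, "taking the maximum" reduces to an argmin on the neg-log grid. A toy numpy illustration of that step, detached from any treetime objects:

```python
import numpy as np

# Toy neg-log integrand peaked at tau = 0.3; the maximum of the integrand
# is the minimum of its negative logarithm.
tau = np.linspace(0.0, 1.0, 101)
neg_log = 0.5 * ((tau - 0.3) / 0.1) ** 2
X = tau[neg_log.argmin()]  # position of the peak, ~0.3
Y = neg_log.min()          # neg-log value at the peak, ~0.0
```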
test
_evaluate_convolution
Calculate convolution F(t) = int { f(tau)g(t-tau) } dtau
treetime/node_interpolator.py
def _evaluate_convolution(t_val, f, g, n_integral = 100, inverse_time=None, return_log=False): """ Calculate convolution F(t) = int { f(tau)g(t-tau) } dtau """ FG = _convolution_integrand(t_val, f, g, inverse_time, return_log) #integrate the interpolation object, return log, make neg_log #print('FG:',FG.xmin, FG.xmax, FG(FG.xmin), FG(FG.xmax)) if (return_log and FG == ttconf.BIG_NUMBER) or \ (not return_log and FG == 0.0): # distributions do not overlap res = ttconf.BIG_NUMBER # we integrate log functions else: res = -FG.integrate(a=FG.xmin, b=FG.xmax, n=n_integral, return_log=True) if return_log: return res, -1 else: return np.exp(-res), -1
def _evaluate_convolution(t_val, f, g, n_integral = 100, inverse_time=None, return_log=False): """ Calculate convolution F(t) = int { f(tau)g(t-tau) } dtau """ FG = _convolution_integrand(t_val, f, g, inverse_time, return_log) #integrate the interpolation object, return log, make neg_log #print('FG:',FG.xmin, FG.xmax, FG(FG.xmin), FG(FG.xmax)) if (return_log and FG == ttconf.BIG_NUMBER) or \ (not return_log and FG == 0.0): # distributions do not overlap res = ttconf.BIG_NUMBER # we integrate log functions else: res = -FG.integrate(a=FG.xmin, b=FG.xmax, n=n_integral, return_log=True) if return_log: return res, -1 else: return np.exp(-res), -1
[ "Calculate", "convolution", "F", "(", "t", ")", "=", "int", "{", "f", "(", "tau", ")", "g", "(", "t", "-", "tau", ")", "}", "dtau" ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/node_interpolator.py#L137-L155
[ "def", "_evaluate_convolution", "(", "t_val", ",", "f", ",", "g", ",", "n_integral", "=", "100", ",", "inverse_time", "=", "None", ",", "return_log", "=", "False", ")", ":", "FG", "=", "_convolution_integrand", "(", "t_val", ",", "f", ",", "g", ",", "inverse_time", ",", "return_log", ")", "#integrate the interpolation object, return log, make neg_log", "#print('FG:',FG.xmin, FG.xmax, FG(FG.xmin), FG(FG.xmax))", "if", "(", "return_log", "and", "FG", "==", "ttconf", ".", "BIG_NUMBER", ")", "or", "(", "not", "return_log", "and", "FG", "==", "0.0", ")", ":", "# distributions do not overlap", "res", "=", "ttconf", ".", "BIG_NUMBER", "# we integrate log funcitons", "else", ":", "res", "=", "-", "FG", ".", "integrate", "(", "a", "=", "FG", ".", "xmin", ",", "b", "=", "FG", ".", "xmax", ",", "n", "=", "n_integral", ",", "return_log", "=", "True", ")", "if", "return_log", ":", "return", "res", ",", "-", "1", "else", ":", "return", "np", ".", "exp", "(", "-", "res", ")", ",", "-", "1" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
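A quick standalone check of the sign conventions: the integrand is carried as -log(f*g), and with return_log=True the function reports the negative log of the integral. The same arithmetic with a plain trapezoid rule (toy integrand, no treetime objects):

```python
import numpy as np

tau = np.linspace(0.5, 1.0, 200)                     # overlap interval
neg_log_fg = 0.5 * ((tau - 0.7) / 0.05) ** 2 + 3.0   # toy -log of the integrand
integral = np.trapz(np.exp(-neg_log_fg), tau)        # linear-scale integral
res = -np.log(integral)                              # what return_log=True reports
```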
test
NodeInterpolator.convolve
calculate H(t) = \int_tau f(t-tau)g(tau) if inverse_time=True H(t) = \int_tau f(t+tau)g(tau) if inverse_time=False This function determines the time points of the grid of the result to ensure an accurate approximation.
treetime/node_interpolator.py
def convolve(cls, node_interp, branch_interp, max_or_integral='integral', n_grid_points = ttconf.NODE_GRID_SIZE, n_integral=ttconf.N_INTEGRAL, inverse_time=True, rel_tol=0.05, yc=10): ''' calculate H(t) = \int_tau f(t-tau)g(tau) if inverse_time=True H(t) = \int_tau f(t+tau)g(tau) if inverse_time=False This function determines the time points of the grid of the result to ensure an accurate approximation. ''' if max_or_integral not in ['max', 'integral']: raise Exception("Max_or_integral expected to be 'max' or 'integral', got " + str(max_or_integral) + " instead.") def conv_in_point(time_point): if max_or_integral == 'integral': # compute integral of the convolution return _evaluate_convolution(time_point, node_interp, branch_interp, n_integral=n_integral, return_log=True, inverse_time = inverse_time) else: # compute max of the convolution return _max_of_integrand(time_point, node_interp, branch_interp, return_log=True, inverse_time = inverse_time) # estimate peak and width joint_fwhm = (node_interp.fwhm + branch_interp.fwhm) min_fwhm = min(node_interp.fwhm, branch_interp.fwhm) # determine support of the resulting convolution # in order to be positive, the flipped support of f, shifted by t and g need to overlap if inverse_time: new_peak_pos = node_interp.peak_pos + branch_interp.peak_pos tmin = node_interp.xmin+branch_interp.xmin tmax = node_interp.xmax+branch_interp.xmax else: new_peak_pos = node_interp.peak_pos - branch_interp.peak_pos tmin = node_interp.xmin - branch_interp.xmax tmax = node_interp.xmax - branch_interp.xmin # make initial node grid consisting of linearly spaced points around # the center and quadratically spaced points at either end n = n_grid_points//3 # integer number of points, as required by np.linspace center_width = 3*joint_fwhm grid_center = new_peak_pos + np.linspace(-1, 1, n)*center_width # add the right and left grid if it is needed right_range = (tmax - grid_center[-1]) if right_range>4*center_width: grid_right = grid_center[-1] + right_range*(np.linspace(0, 1, n)**2.0) elif right_range>0: # use a linear grid if the right_range is comparable to center_width grid_right = grid_center[-1] + right_range*np.linspace(0,1, int(min(n,1+0.5*n*right_range/center_width))) else: grid_right = [] left_range = grid_center[0]-tmin if left_range>4*center_width: grid_left = tmin + left_range*(np.linspace(0, 1, n)**2.0) elif left_range>0: grid_left = tmin + left_range*np.linspace(0,1, int(min(n,1+0.5*n*left_range/center_width))) else: grid_left = [] if tmin>-1: grid_zero_left = tmin + (tmax-tmin)*np.linspace(0,0.01,11)**2 else: grid_zero_left = [tmin] if tmax<1: grid_zero_right = tmax - (tmax-tmin)*np.linspace(0,0.01,11)**2 else: grid_zero_right = [tmax] # make grid and calculate convolution t_grid_0 = np.unique(np.concatenate([grid_zero_left, grid_left[:-1], grid_center, grid_right[1:], grid_zero_right])) t_grid_0 = t_grid_0[(t_grid_0 > tmin-ttconf.TINY_NUMBER) & (t_grid_0 < tmax+ttconf.TINY_NUMBER)] # res0 - the values of the convolution (integral or max) # t_0 - the value, at which the res0 achieves maximum # (when determining the maximum of the integrand, otherwise meaningless) res_0, t_0 = np.array([conv_in_point(t_val) for t_val in t_grid_0]).T # refine grid as necessary and add new points # calculate interpolation error at all internal points [2:-2] because end points are sometimes off scale interp_error = np.abs(res_0[3:-1]+res_0[1:-3]-2*res_0[2:-2]) # determine the number of extra points needed, criterion depends on distance from peak dy dy = (res_0[2:-2]-res_0.min()) dx = np.diff(t_grid_0) refine_factor =
np.minimum(np.minimum(np.array(np.floor(np.sqrt(interp_error/(rel_tol*(1+(dy/yc)**4)))), dtype=int), np.array(100*(dx[1:-2]+dx[2:-1])/min_fwhm, dtype=int)), 10) insert_point_idx = np.zeros(interp_error.shape[0]+1, dtype=int) insert_point_idx[1:] = refine_factor insert_point_idx[:-1] += refine_factor # add additional points if there are any to add if np.sum(insert_point_idx): add_x = np.concatenate([np.linspace(t1,t2,n+2)[1:-1] for t1,t2,n in zip(t_grid_0[1:-2], t_grid_0[2:-1], insert_point_idx) if n>0]) # calculate convolution at these points add_y, add_t = np.array([conv_in_point(t_val) for t_val in add_x]).T t_grid_0 = np.concatenate((t_grid_0, add_x)) res_0 = np.concatenate ((res_0, add_y)) t_0 = np.concatenate ((t_0, add_t)) # instantiate the new interpolation object and return res_y = cls(t_grid_0, res_0, is_log=True, kind='linear') # the interpolation object, which is used to store the value of the # grid, which maximizes the convolution (for 'max' option), # or flat -1 distribution (for 'integral' option) # this grid is the optimal branch length res_t = Distribution(t_grid_0, t_0, is_log=True, min_width=node_interp.min_width, kind='linear') return res_y, res_t
def convolve(cls, node_interp, branch_interp, max_or_integral='integral', n_grid_points = ttconf.NODE_GRID_SIZE, n_integral=ttconf.N_INTEGRAL, inverse_time=True, rel_tol=0.05, yc=10): ''' calculate H(t) = \int_tau f(t-tau)g(tau) if inverse_time=True H(t) = \int_tau f(t+tau)g(tau) if inverse_time=False This function determines the time points of the grid of the result to ensure an accurate approximation. ''' if max_or_integral not in ['max', 'integral']: raise Exception("Max_or_integral expected to be 'max' or 'integral', got " + str(max_or_integral) + " instead.") def conv_in_point(time_point): if max_or_integral == 'integral': # compute integral of the convolution return _evaluate_convolution(time_point, node_interp, branch_interp, n_integral=n_integral, return_log=True, inverse_time = inverse_time) else: # compute max of the convolution return _max_of_integrand(time_point, node_interp, branch_interp, return_log=True, inverse_time = inverse_time) # estimate peak and width joint_fwhm = (node_interp.fwhm + branch_interp.fwhm) min_fwhm = min(node_interp.fwhm, branch_interp.fwhm) # determine support of the resulting convolution # in order to be positive, the flipped support of f, shifted by t and g need to overlap if inverse_time: new_peak_pos = node_interp.peak_pos + branch_interp.peak_pos tmin = node_interp.xmin+branch_interp.xmin tmax = node_interp.xmax+branch_interp.xmax else: new_peak_pos = node_interp.peak_pos - branch_interp.peak_pos tmin = node_interp.xmin - branch_interp.xmax tmax = node_interp.xmax - branch_interp.xmin # make initial node grid consisting of linearly spaced points around # the center and quadratically spaced points at either end n = n_grid_points//3 # integer number of points, as required by np.linspace center_width = 3*joint_fwhm grid_center = new_peak_pos + np.linspace(-1, 1, n)*center_width # add the right and left grid if it is needed right_range = (tmax - grid_center[-1]) if right_range>4*center_width: grid_right = grid_center[-1] + right_range*(np.linspace(0, 1, n)**2.0) elif right_range>0: # use a linear grid if the right_range is comparable to center_width grid_right = grid_center[-1] + right_range*np.linspace(0,1, int(min(n,1+0.5*n*right_range/center_width))) else: grid_right = [] left_range = grid_center[0]-tmin if left_range>4*center_width: grid_left = tmin + left_range*(np.linspace(0, 1, n)**2.0) elif left_range>0: grid_left = tmin + left_range*np.linspace(0,1, int(min(n,1+0.5*n*left_range/center_width))) else: grid_left = [] if tmin>-1: grid_zero_left = tmin + (tmax-tmin)*np.linspace(0,0.01,11)**2 else: grid_zero_left = [tmin] if tmax<1: grid_zero_right = tmax - (tmax-tmin)*np.linspace(0,0.01,11)**2 else: grid_zero_right = [tmax] # make grid and calculate convolution t_grid_0 = np.unique(np.concatenate([grid_zero_left, grid_left[:-1], grid_center, grid_right[1:], grid_zero_right])) t_grid_0 = t_grid_0[(t_grid_0 > tmin-ttconf.TINY_NUMBER) & (t_grid_0 < tmax+ttconf.TINY_NUMBER)] # res0 - the values of the convolution (integral or max) # t_0 - the value, at which the res0 achieves maximum # (when determining the maximum of the integrand, otherwise meaningless) res_0, t_0 = np.array([conv_in_point(t_val) for t_val in t_grid_0]).T # refine grid as necessary and add new points # calculate interpolation error at all internal points [2:-2] because end points are sometimes off scale interp_error = np.abs(res_0[3:-1]+res_0[1:-3]-2*res_0[2:-2]) # determine the number of extra points needed, criterion depends on distance from peak dy dy = (res_0[2:-2]-res_0.min()) dx = np.diff(t_grid_0) refine_factor =
np.minimum(np.minimum(np.array(np.floor(np.sqrt(interp_error/(rel_tol*(1+(dy/yc)**4)))), dtype=int), np.array(100*(dx[1:-2]+dx[2:-1])/min_fwhm, dtype=int)), 10) insert_point_idx = np.zeros(interp_error.shape[0]+1, dtype=int) insert_point_idx[1:] = refine_factor insert_point_idx[:-1] += refine_factor # add additional points if there are any to add if np.sum(insert_point_idx): add_x = np.concatenate([np.linspace(t1,t2,n+2)[1:-1] for t1,t2,n in zip(t_grid_0[1:-2], t_grid_0[2:-1], insert_point_idx) if n>0]) # calculate convolution at these points add_y, add_t = np.array([conv_in_point(t_val) for t_val in add_x]).T t_grid_0 = np.concatenate((t_grid_0, add_x)) res_0 = np.concatenate ((res_0, add_y)) t_0 = np.concatenate ((t_0, add_t)) # instantiate the new interpolation object and return res_y = cls(t_grid_0, res_0, is_log=True, kind='linear') # the interpolation object, which is used to store the value of the # grid, which maximizes the convolution (for 'max' option), # or flat -1 distribution (for 'integral' option) # this grid is the optimal branch length res_t = Distribution(t_grid_0, t_0, is_log=True, min_width=node_interp.min_width, kind='linear') return res_y, res_t
[ "calculate", "H", "(", "t", ")", "=", "\\", "int_tau", "f", "(", "t", "-", "tau", ")", "g", "(", "tau", ")", "if", "inverse_time", "=", "True", "H", "(", "t", ")", "=", "\\", "int_tau", "f", "(", "t", "+", "tau", ")", "g", "(", "tau", ")", "if", "inverse_time", "=", "False" ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/node_interpolator.py#L165-L281
[ "def", "convolve", "(", "cls", ",", "node_interp", ",", "branch_interp", ",", "max_or_integral", "=", "'integral'", ",", "n_grid_points", "=", "ttconf", ".", "NODE_GRID_SIZE", ",", "n_integral", "=", "ttconf", ".", "N_INTEGRAL", ",", "inverse_time", "=", "True", ",", "rel_tol", "=", "0.05", ",", "yc", "=", "10", ")", ":", "if", "max_or_integral", "not", "in", "[", "'max'", ",", "'integral'", "]", ":", "raise", "Exception", "(", "\"Max_or_integral expected to be 'max' or 'integral', got \"", "+", "str", "(", "max_or_integral", ")", "+", "\" instead.\"", ")", "def", "conv_in_point", "(", "time_point", ")", ":", "if", "max_or_integral", "==", "'integral'", ":", "# compute integral of the convolution", "return", "_evaluate_convolution", "(", "time_point", ",", "node_interp", ",", "branch_interp", ",", "n_integral", "=", "n_integral", ",", "return_log", "=", "True", ",", "inverse_time", "=", "inverse_time", ")", "else", ":", "# compute max of the convolution", "return", "_max_of_integrand", "(", "time_point", ",", "node_interp", ",", "branch_interp", ",", "return_log", "=", "True", ",", "inverse_time", "=", "inverse_time", ")", "# estimate peak and width", "joint_fwhm", "=", "(", "node_interp", ".", "fwhm", "+", "branch_interp", ".", "fwhm", ")", "min_fwhm", "=", "min", "(", "node_interp", ".", "fwhm", ",", "branch_interp", ".", "fwhm", ")", "# determine support of the resulting convolution", "# in order to be positive, the flipped support of f, shifted by t and g need to overlap", "if", "inverse_time", ":", "new_peak_pos", "=", "node_interp", ".", "peak_pos", "+", "branch_interp", ".", "peak_pos", "tmin", "=", "node_interp", ".", "xmin", "+", "branch_interp", ".", "xmin", "tmax", "=", "node_interp", ".", "xmax", "+", "branch_interp", ".", "xmax", "else", ":", "new_peak_pos", "=", "node_interp", ".", "peak_pos", "-", "branch_interp", ".", "peak_pos", "tmin", "=", "node_interp", ".", "xmin", "-", "branch_interp", ".", "xmax", "tmax", "=", "node_interp", ".", "xmax", "-", "branch_interp", ".", "xmin", "# make initial node grid consisting of linearly spaced points around", "# the center and quadratically spaced points at either end", "n", "=", "n_grid_points", "/", "3", "center_width", "=", "3", "*", "joint_fwhm", "grid_center", "=", "new_peak_pos", "+", "np", ".", "linspace", "(", "-", "1", ",", "1", ",", "n", ")", "*", "center_width", "# add the right and left grid if it is needed", "right_range", "=", "(", "tmax", "-", "grid_center", "[", "-", "1", "]", ")", "if", "right_range", ">", "4", "*", "center_width", ":", "grid_right", "=", "grid_center", "[", "-", "1", "]", "+", "right_range", "*", "(", "np", ".", "linspace", "(", "0", ",", "1", ",", "n", ")", "**", "2.0", ")", "elif", "right_range", ">", "0", ":", "# use linear grid the right_range is comparable to center_width", "grid_right", "=", "grid_center", "[", "-", "1", "]", "+", "right_range", "*", "np", ".", "linspace", "(", "0", ",", "1", ",", "int", "(", "min", "(", "n", ",", "1", "+", "0.5", "*", "n", "*", "right_range", "/", "center_width", ")", ")", ")", "else", ":", "grid_right", "=", "[", "]", "left_range", "=", "grid_center", "[", "0", "]", "-", "tmin", "if", "left_range", ">", "4", "*", "center_width", ":", "grid_left", "=", "tmin", "+", "left_range", "*", "(", "np", ".", "linspace", "(", "0", ",", "1", ",", "n", ")", "**", "2.0", ")", "elif", "left_range", ">", "0", ":", "grid_left", "=", "tmin", "+", "left_range", "*", "np", ".", "linspace", "(", "0", ",", "1", ",", "int", "(", "min", "(", "n", ",", "1", "+", "0.5", "*", "n", 
"*", "left_range", "/", "center_width", ")", ")", ")", "else", ":", "grid_left", "=", "[", "]", "if", "tmin", ">", "-", "1", ":", "grid_zero_left", "=", "tmin", "+", "(", "tmax", "-", "tmin", ")", "*", "np", ".", "linspace", "(", "0", ",", "0.01", ",", "11", ")", "**", "2", "else", ":", "grid_zero_left", "=", "[", "tmin", "]", "if", "tmax", "<", "1", ":", "grid_zero_right", "=", "tmax", "-", "(", "tmax", "-", "tmin", ")", "*", "np", ".", "linspace", "(", "0", ",", "0.01", ",", "11", ")", "**", "2", "else", ":", "grid_zero_right", "=", "[", "tmax", "]", "# make grid and calculate convolution", "t_grid_0", "=", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "[", "grid_zero_left", ",", "grid_left", "[", ":", "-", "1", "]", ",", "grid_center", ",", "grid_right", "[", "1", ":", "]", ",", "grid_zero_right", "]", ")", ")", "t_grid_0", "=", "t_grid_0", "[", "(", "t_grid_0", ">", "tmin", "-", "ttconf", ".", "TINY_NUMBER", ")", "&", "(", "t_grid_0", "<", "tmax", "+", "ttconf", ".", "TINY_NUMBER", ")", "]", "# res0 - the values of the convolution (integral or max)", "# t_0 - the value, at which the res0 achieves maximum", "# (when determining the maximum of the integrand, otherwise meaningless)", "res_0", ",", "t_0", "=", "np", ".", "array", "(", "[", "conv_in_point", "(", "t_val", ")", "for", "t_val", "in", "t_grid_0", "]", ")", ".", "T", "# refine grid as necessary and add new points", "# calculate interpolation error at all internal points [2:-2] bc end points are sometime off scale", "interp_error", "=", "np", ".", "abs", "(", "res_0", "[", "3", ":", "-", "1", "]", "+", "res_0", "[", "1", ":", "-", "3", "]", "-", "2", "*", "res_0", "[", "2", ":", "-", "2", "]", ")", "# determine the number of extra points needed, criterion depends on distance from peak dy", "dy", "=", "(", "res_0", "[", "2", ":", "-", "2", "]", "-", "res_0", ".", "min", "(", ")", ")", "dx", "=", "np", ".", "diff", "(", "t_grid_0", ")", "refine_factor", "=", "np", ".", "minimum", "(", "np", ".", "minimum", "(", "np", ".", "array", "(", "np", ".", "floor", "(", "np", ".", "sqrt", "(", "interp_error", "/", "(", "rel_tol", "*", "(", "1", "+", "(", "dy", "/", "yc", ")", "**", "4", ")", ")", ")", ")", ",", "dtype", "=", "int", ")", ",", "np", ".", "array", "(", "100", "*", "(", "dx", "[", "1", ":", "-", "2", "]", "+", "dx", "[", "2", ":", "-", "1", "]", ")", "/", "min_fwhm", ",", "dtype", "=", "int", ")", ")", ",", "10", ")", "insert_point_idx", "=", "np", ".", "zeros", "(", "interp_error", ".", "shape", "[", "0", "]", "+", "1", ",", "dtype", "=", "int", ")", "insert_point_idx", "[", "1", ":", "]", "=", "refine_factor", "insert_point_idx", "[", ":", "-", "1", "]", "+=", "refine_factor", "# add additional points if there are any to add", "if", "np", ".", "sum", "(", "insert_point_idx", ")", ":", "add_x", "=", "np", ".", "concatenate", "(", "[", "np", ".", "linspace", "(", "t1", ",", "t2", ",", "n", "+", "2", ")", "[", "1", ":", "-", "1", "]", "for", "t1", ",", "t2", ",", "n", "in", "zip", "(", "t_grid_0", "[", "1", ":", "-", "2", "]", ",", "t_grid_0", "[", "2", ":", "-", "1", "]", ",", "insert_point_idx", ")", "if", "n", ">", "0", "]", ")", "# calculate convolution at these points", "add_y", ",", "add_t", "=", "np", ".", "array", "(", "[", "conv_in_point", "(", "t_val", ")", "for", "t_val", "in", "add_x", "]", ")", ".", "T", "t_grid_0", "=", "np", ".", "concatenate", "(", "(", "t_grid_0", ",", "add_x", ")", ")", "res_0", "=", "np", ".", "concatenate", "(", "(", "res_0", ",", "add_y", ")", ")", "t_0", "=", "np", ".", 
"concatenate", "(", "(", "t_0", ",", "add_t", ")", ")", "# instantiate the new interpolation object and return", "res_y", "=", "cls", "(", "t_grid_0", ",", "res_0", ",", "is_log", "=", "True", ",", "kind", "=", "'linear'", ")", "# the interpolation object, which is used to store the value of the", "# grid, which maximizes the convolution (for 'max' option),", "# or flat -1 distribution (for 'integral' option)", "# this grid is the optimal branch length", "res_t", "=", "Distribution", "(", "t_grid_0", ",", "t_0", ",", "is_log", "=", "True", ",", "min_width", "=", "node_interp", ".", "min_width", ",", "kind", "=", "'linear'", ")", "return", "res_y", ",", "res_t" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
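The grid layout used above is worth seeing in isolation: linear spacing across roughly three FWHM around the expected peak, then quadratic spacing out to the support boundary so points thin out in the tails. A sketch with made-up peak, width and bounds:

```python
import numpy as np

n = 60 // 3                    # one third of a hypothetical n_grid_points = 60
peak, joint_fwhm = 0.0, 0.2
center_width = 3 * joint_fwhm
grid_center = peak + np.linspace(-1, 1, n) * center_width
tmax = 5.0                     # assumed right edge of the support
right_range = tmax - grid_center[-1]
# quadratic spacing: dense near the center, sparse towards tmax
grid_right = grid_center[-1] + right_range * np.linspace(0, 1, n) ** 2.0
grid = np.unique(np.concatenate([grid_center, grid_right]))
```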
test
min_interp
Find the global minimum of a function represented as an interpolation object.
treetime/utils.py
def min_interp(interp_object): """ Find the global minimum of a function represented as an interpolation object. """ try: return interp_object.x[interp_object(interp_object.x).argmin()] except Exception as e: s = "Cannot find minimum of the interpolation object " + str(interp_object.x) + \ " Minimal x: " + str(interp_object.x.min()) + " Maximal x: " + str(interp_object.x.max()) raise ValueError(s) from e
def min_interp(interp_object): """ Find the global minimum of a function represented as an interpolation object. """ try: return interp_object.x[interp_object(interp_object.x).argmin()] except Exception as e: s = "Cannot find minimum of the interpolation object " + str(interp_object.x) + \ " Minimal x: " + str(interp_object.x.min()) + " Maximal x: " + str(interp_object.x.max()) raise ValueError(s) from e
[ "Find", "the", "global", "minimum", "of", "a", "function", "represented", "as", "an", "interpolation", "object", "." ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/utils.py#L117-L126
[ "def", "min_interp", "(", "interp_object", ")", ":", "try", ":", "return", "interp_object", ".", "x", "[", "interp_object", "(", "interp_object", ".", "x", ")", ".", "argmin", "(", ")", "]", "except", "Exception", "as", "e", ":", "s", "=", "\"Cannot find minimum of the interpolation object\"", "+", "str", "(", "interp_object", ".", "x", ")", "+", "\"Minimal x: \"", "+", "str", "(", "interp_object", ".", "x", ".", "min", "(", ")", ")", "+", "\"Maximal x: \"", "+", "str", "(", "interp_object", ".", "x", ".", "max", "(", ")", ")", "raise", "e" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
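min_interp only needs a callable that also exposes its grid as an .x attribute; scipy's interp1d happens to fit that shape, so a quick sketch (hypothetical inputs):

```python
import numpy as np
from scipy.interpolate import interp1d

x = np.linspace(-2.0, 2.0, 101)
f = interp1d(x, (x - 0.5) ** 2, kind='linear')  # interp1d stores the grid in f.x
x_min = min_interp(f)  # returns the grid point nearest the true minimum at 0.5
```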
test
median_interp
Find the median of the function represented as an interpolation object.
treetime/utils.py
def median_interp(interp_object): """ Find the median of the function represented as an interpolation object. """ new_grid = np.sort(np.concatenate([interp_object.x[:-1] + 0.1*ii*np.diff(interp_object.x) for ii in range(10)]).flatten()) tmp_prop = np.exp(-(interp_object(new_grid)-interp_object.y.min())) tmp_cumsum = np.cumsum(0.5*(tmp_prop[1:]+tmp_prop[:-1])*np.diff(new_grid)) median_index = min(len(tmp_cumsum)-3, max(2,np.searchsorted(tmp_cumsum, tmp_cumsum[-1]*0.5)+1)) return new_grid[median_index]
def median_interp(interp_object): """ Find the median of the function represented as an interpolation object. """ new_grid = np.sort(np.concatenate([interp_object.x[:-1] + 0.1*ii*np.diff(interp_object.x) for ii in range(10)]).flatten()) tmp_prop = np.exp(-(interp_object(new_grid)-interp_object.y.min())) tmp_cumsum = np.cumsum(0.5*(tmp_prop[1:]+tmp_prop[:-1])*np.diff(new_grid)) median_index = min(len(tmp_cumsum)-3, max(2,np.searchsorted(tmp_cumsum, tmp_cumsum[-1]*0.5)+1)) return new_grid[median_index]
[ "Find", "the", "median", "of", "the", "function", "represented", "as", "an", "interpolation", "object", "." ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/utils.py#L129-L139
[ "def", "median_interp", "(", "interp_object", ")", ":", "new_grid", "=", "np", ".", "sort", "(", "np", ".", "concatenate", "(", "[", "interp_object", ".", "x", "[", ":", "-", "1", "]", "+", "0.1", "*", "ii", "*", "np", ".", "diff", "(", "interp_object", ".", "x", ")", "for", "ii", "in", "range", "(", "10", ")", "]", ")", ".", "flatten", "(", ")", ")", "tmp_prop", "=", "np", ".", "exp", "(", "-", "(", "interp_object", "(", "new_grid", ")", "-", "interp_object", ".", "y", ".", "min", "(", ")", ")", ")", "tmp_cumsum", "=", "np", ".", "cumsum", "(", "0.5", "*", "(", "tmp_prop", "[", "1", ":", "]", "+", "tmp_prop", "[", ":", "-", "1", "]", ")", "*", "np", ".", "diff", "(", "new_grid", ")", ")", "median_index", "=", "min", "(", "len", "(", "tmp_cumsum", ")", "-", "3", ",", "max", "(", "2", ",", "np", ".", "searchsorted", "(", "tmp_cumsum", ",", "tmp_cumsum", "[", "-", "1", "]", "*", "0.5", ")", "+", "1", ")", ")", "return", "new_grid", "[", "median_index", "]" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
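The same median-by-cumulative-mass idea, reduced to plain numpy on a toy neg-log density (the function above additionally refines the grid tenfold before accumulating, and clips the index away from the grid edges):

```python
import numpy as np

x = np.linspace(-4.0, 4.0, 401)
neg_log = 0.5 * x ** 2                          # standard normal, up to a constant
prob = np.exp(-(neg_log - neg_log.min()))
mass = np.cumsum(0.5 * (prob[1:] + prob[:-1]) * np.diff(x))
median = x[np.searchsorted(mass, 0.5 * mass[-1]) + 1]  # ~0 for this symmetric density
```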
test
numeric_date
Convert datetime object to the numeric date. The numeric date format is YYYY.F, where F is the fraction of the year passed Parameters ---------- dt: datetime.datetime, None date to be converted. If None, assume today
treetime/utils.py
def numeric_date(dt=None): """ Convert datetime object to the numeric date. The numeric date format is YYYY.F, where F is the fraction of the year passed Parameters ---------- dt: datetime.datetime, None date to be converted. If None, assume today """ if dt is None: dt = datetime.datetime.now() try: res = dt.year + dt.timetuple().tm_yday / 365.25 except: res = None return res
def numeric_date(dt=None): """ Convert datetime object to the numeric date. The numeric date format is YYYY.F, where F is the fraction of the year passed Parameters ---------- dt: datetime.datetime, None date to be converted. If None, assume today """ if dt is None: dt = datetime.datetime.now() try: res = dt.year + dt.timetuple().tm_yday / 365.25 except: res = None return res
[ "Convert", "datetime", "object", "to", "the", "numeric", "date", ".", "The", "numeric", "date", "format", "is", "YYYY", ".", "F", "where", "F", "is", "the", "fraction", "of", "the", "year", "passed" ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/utils.py#L142-L161
[ "def", "numeric_date", "(", "dt", "=", "None", ")", ":", "if", "dt", "is", "None", ":", "dt", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "try", ":", "res", "=", "dt", ".", "year", "+", "dt", ".", "timetuple", "(", ")", ".", "tm_yday", "/", "365.25", "except", ":", "res", "=", "None", "return", "res" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
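For example (July 2nd is day 183 of the non-leap year 2018):

    import datetime
    numeric_date(datetime.datetime(2018, 7, 2))   # 2018 + 183/365.25, i.e. ~2018.501
    numeric_date()                                 # numeric date for today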
test
parse_dates
parse dates from the arguments and return a dictionary mapping taxon names to numerical dates. Parameters ---------- date_file : str name of file to parse metadata from Returns ------- dict dictionary linking fields in a column interpreted as taxon name (first column that contains 'name', 'strain', 'accession') to a numerical date inferred from a column that contains 'date'. It will first try to parse the column as float, then via pandas.to_datetime and finally as an ambiguous date such as 2018-05-XX
treetime/utils.py
def parse_dates(date_file): """ parse dates from the arguments and return a dictionary mapping taxon names to numerical dates. Parameters ---------- date_file : str name of file to parse metadata from Returns ------- dict dictionary linking fields in a column interpreted as taxon name (first column that contains 'name', 'strain', 'accession') to a numerical date inferred from a column that contains 'date'. It will first try to parse the column as float, then via pandas.to_datetime and finally as an ambiguous date such as 2018-05-XX """ print("\nAttempting to parse dates...") dates = {} if not os.path.isfile(date_file): print("\n\tERROR: file %s does not exist, exiting..."%date_file) return dates # separator for the csv/tsv file. If csv, we'll strip extra whitespace around ',' full_sep = '\t' if date_file.endswith('.tsv') else r'\s*,\s*' try: # read the metadata file into pandas dataframe. df = pd.read_csv(date_file, sep=full_sep, engine='python') # check the metadata has strain names in the first column # look for the column containing sampling dates # We assume that the dates might be given either in human-readable format # (e.g. ISO dates), or be already converted to the numeric format. potential_date_columns = [] potential_numdate_columns = [] potential_index_columns = [] # Scan the dataframe columns and find ones which are likely to store the # dates for ci,col in enumerate(df.columns): d = df.iloc[0,ci] # strip quotation marks if type(d)==str and d[0] in ['"', "'"] and d[-1] in ['"', "'"]: for i,tmp_d in enumerate(df.iloc[:,ci]): df.iloc[i,ci] = tmp_d.strip(d[0]) if 'date' in col.lower(): potential_date_columns.append((ci, col)) if any([x==col.lower() for x in ['name', 'strain', 'accession']]): potential_index_columns.append((ci, col)) dates = {} # if a potential numeric date column was found, use it # (use the first, if there are more than one) if not len(potential_index_columns): print("ERROR: Cannot read metadata: need at least one column that contains the taxon labels." " Looking for the first column that contains 'name', 'strain', or 'accession' in the header.", file=sys.stderr) return dates else: # use the first column that is either 'name', 'strain', 'accession' index_col = sorted(potential_index_columns)[0][1] print("\tUsing column '%s' as name. This needs to match the taxon names in the tree!!"%index_col) if len(potential_date_columns)>=1: #try to parse the csv file with dates in the idx column: idx = potential_date_columns[0][0] col_name = potential_date_columns[0][1] print("\tUsing column '%s' as date."%col_name) for ri, row in df.iterrows(): date_str = row.loc[col_name] k = row.loc[index_col] # try parsing as a float first try: dates[k] = float(date_str) continue except ValueError: # try whether the date string can be parsed as [2002.2:2004.3] # to indicate general ambiguous ranges if date_str[0]=='[' and date_str[-1]==']' and len(date_str[1:-1].split(':'))==2: try: dates[k] = [float(x) for x in date_str[1:-1].split(':')] continue except ValueError: pass # try date format parsing 2017-08-12 try: tmp_date = pd.to_datetime(date_str) dates[k] = numeric_date(tmp_date) except ValueError: # try ambiguous date format parsing 2017-XX-XX lower, upper = ambiguous_date_to_date_range(date_str, '%Y-%m-%d') if lower is not None: dates[k] = [numeric_date(x) for x in [lower, upper]] else: print("ERROR: Metadata file has no column which looks like a sampling date!", file=sys.stderr) if all(v is None for v in dates.values()): print("ERROR: Cannot parse dates correctly! Check date format.", file=sys.stderr) return {} return dates except: print("ERROR: Cannot read the metadata file!", file=sys.stderr) return {}
def parse_dates(date_file): """ parse dates from the arguments and return a dictionary mapping taxon names to numerical dates. Parameters ---------- date_file : str name of file to parse metadata from Returns ------- dict dictionary linking fields in a column interpreted as taxon name (first column that contains 'name', 'strain', 'accession') to a numerical date inferred from a column that contains 'date'. It will first try to parse the column as float, then via pandas.to_datetime and finally as an ambiguous date such as 2018-05-XX """ print("\nAttempting to parse dates...") dates = {} if not os.path.isfile(date_file): print("\n\tERROR: file %s does not exist, exiting..."%date_file) return dates # separator for the csv/tsv file. If csv, we'll strip extra whitespace around ',' full_sep = '\t' if date_file.endswith('.tsv') else r'\s*,\s*' try: # read the metadata file into pandas dataframe. df = pd.read_csv(date_file, sep=full_sep, engine='python') # check the metadata has strain names in the first column # look for the column containing sampling dates # We assume that the dates might be given either in human-readable format # (e.g. ISO dates), or be already converted to the numeric format. potential_date_columns = [] potential_numdate_columns = [] potential_index_columns = [] # Scan the dataframe columns and find ones which are likely to store the # dates for ci,col in enumerate(df.columns): d = df.iloc[0,ci] # strip quotation marks if type(d)==str and d[0] in ['"', "'"] and d[-1] in ['"', "'"]: for i,tmp_d in enumerate(df.iloc[:,ci]): df.iloc[i,ci] = tmp_d.strip(d[0]) if 'date' in col.lower(): potential_date_columns.append((ci, col)) if any([x==col.lower() for x in ['name', 'strain', 'accession']]): potential_index_columns.append((ci, col)) dates = {} # if a potential numeric date column was found, use it # (use the first, if there are more than one) if not len(potential_index_columns): print("ERROR: Cannot read metadata: need at least one column that contains the taxon labels." " Looking for the first column that contains 'name', 'strain', or 'accession' in the header.", file=sys.stderr) return dates else: # use the first column that is either 'name', 'strain', 'accession' index_col = sorted(potential_index_columns)[0][1] print("\tUsing column '%s' as name. This needs to match the taxon names in the tree!!"%index_col) if len(potential_date_columns)>=1: #try to parse the csv file with dates in the idx column: idx = potential_date_columns[0][0] col_name = potential_date_columns[0][1] print("\tUsing column '%s' as date."%col_name) for ri, row in df.iterrows(): date_str = row.loc[col_name] k = row.loc[index_col] # try parsing as a float first try: dates[k] = float(date_str) continue except ValueError: # try whether the date string can be parsed as [2002.2:2004.3] # to indicate general ambiguous ranges if date_str[0]=='[' and date_str[-1]==']' and len(date_str[1:-1].split(':'))==2: try: dates[k] = [float(x) for x in date_str[1:-1].split(':')] continue except ValueError: pass # try date format parsing 2017-08-12 try: tmp_date = pd.to_datetime(date_str) dates[k] = numeric_date(tmp_date) except ValueError: # try ambiguous date format parsing 2017-XX-XX lower, upper = ambiguous_date_to_date_range(date_str, '%Y-%m-%d') if lower is not None: dates[k] = [numeric_date(x) for x in [lower, upper]] else: print("ERROR: Metadata file has no column which looks like a sampling date!", file=sys.stderr) if all(v is None for v in dates.values()): print("ERROR: Cannot parse dates correctly! Check date format.", file=sys.stderr) return {} return dates except: print("ERROR: Cannot read the metadata file!", file=sys.stderr) return {}
[ "parse", "dates", "from", "the", "arguments", "and", "return", "a", "dictionary", "mapping", "taxon", "names", "to", "numerical", "dates", "." ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/utils.py#L165-L266
[ "def", "parse_dates", "(", "date_file", ")", ":", "print", "(", "\"\\nAttempting to parse dates...\"", ")", "dates", "=", "{", "}", "if", "not", "os", ".", "path", ".", "isfile", "(", "date_file", ")", ":", "print", "(", "\"\\n\\tERROR: file %s does not exist, exiting...\"", "%", "date_file", ")", "return", "dates", "# separator for the csv/tsv file. If csv, we'll strip extra whitespace around ','", "full_sep", "=", "'\\t'", "if", "date_file", ".", "endswith", "(", "'.tsv'", ")", "else", "r'\\s*,\\s*'", "try", ":", "# read the metadata file into pandas dataframe.", "df", "=", "pd", ".", "read_csv", "(", "date_file", ",", "sep", "=", "full_sep", ",", "engine", "=", "'python'", ")", "# check the metadata has strain names in the first column", "# look for the column containing sampling dates", "# We assume that the dates might be given either in human-readable format", "# (e.g. ISO dates), or be already converted to the numeric format.", "potential_date_columns", "=", "[", "]", "potential_numdate_columns", "=", "[", "]", "potential_index_columns", "=", "[", "]", "# Scan the dataframe columns and find ones which likely to store the", "# dates", "for", "ci", ",", "col", "in", "enumerate", "(", "df", ".", "columns", ")", ":", "d", "=", "df", ".", "iloc", "[", "0", ",", "ci", "]", "# strip quotation marks", "if", "type", "(", "d", ")", "==", "str", "and", "d", "[", "0", "]", "in", "[", "'\"'", ",", "\"'\"", "]", "and", "d", "[", "-", "1", "]", "in", "[", "'\"'", ",", "\"'\"", "]", ":", "for", "i", ",", "tmp_d", "in", "enumerate", "(", "df", ".", "iloc", "[", ":", ",", "ci", "]", ")", ":", "df", ".", "iloc", "[", "i", ",", "ci", "]", "=", "tmp_d", ".", "strip", "(", "d", "[", "0", "]", ")", "if", "'date'", "in", "col", ".", "lower", "(", ")", ":", "potential_date_columns", ".", "append", "(", "(", "ci", ",", "col", ")", ")", "if", "any", "(", "[", "x", "==", "col", ".", "lower", "(", ")", "for", "x", "in", "[", "'name'", ",", "'strain'", ",", "'accession'", "]", "]", ")", ":", "potential_index_columns", ".", "append", "(", "(", "ci", ",", "col", ")", ")", "dates", "=", "{", "}", "# if a potential numeric date column was found, use it", "# (use the first, if there are more than one)", "if", "not", "len", "(", "potential_index_columns", ")", ":", "print", "(", "\"ERROR: Cannot read metadata: need at least one column that contains the taxon labels.\"", "\" Looking for the first column that contains 'name', 'strain', or 'accession' in the header.\"", ",", "file", "=", "sys", ".", "stderr", ")", "return", "dates", "else", ":", "# use the first column that is either 'name', 'strain', 'accession'", "index_col", "=", "sorted", "(", "potential_index_columns", ")", "[", "0", "]", "[", "1", "]", "print", "(", "\"\\tUsing column '%s' as name. 
This needs match the taxon names in the tree!!\"", "%", "index_col", ")", "if", "len", "(", "potential_date_columns", ")", ">=", "1", ":", "#try to parse the csv file with dates in the idx column:", "idx", "=", "potential_date_columns", "[", "0", "]", "[", "0", "]", "col_name", "=", "potential_date_columns", "[", "0", "]", "[", "1", "]", "print", "(", "\"\\tUsing column '%s' as date.\"", "%", "col_name", ")", "for", "ri", ",", "row", "in", "df", ".", "iterrows", "(", ")", ":", "date_str", "=", "row", ".", "loc", "[", "col_name", "]", "k", "=", "row", ".", "loc", "[", "index_col", "]", "# try parsing as a float first", "try", ":", "dates", "[", "k", "]", "=", "float", "(", "date_str", ")", "continue", "except", "ValueError", ":", "# try whether the date string can be parsed as [2002.2:2004.3]", "# to indicate general ambiguous ranges", "if", "date_str", "[", "0", "]", "==", "'['", "and", "date_str", "[", "-", "1", "]", "==", "']'", "and", "len", "(", "date_str", "[", "1", ":", "-", "1", "]", ".", "split", "(", "':'", ")", ")", "==", "2", ":", "try", ":", "dates", "[", "k", "]", "=", "[", "float", "(", "x", ")", "for", "x", "in", "date_str", "[", "1", ":", "-", "1", "]", ".", "split", "(", "':'", ")", "]", "continue", "except", "ValueError", ":", "pass", "# try date format parsing 2017-08-12", "try", ":", "tmp_date", "=", "pd", ".", "to_datetime", "(", "date_str", ")", "dates", "[", "k", "]", "=", "numeric_date", "(", "tmp_date", ")", "except", "ValueError", ":", "# try ambiguous date format parsing 2017-XX-XX", "lower", ",", "upper", "=", "ambiguous_date_to_date_range", "(", "date_str", ",", "'%Y-%m-%d'", ")", "if", "lower", "is", "not", "None", ":", "dates", "[", "k", "]", "=", "[", "numeric_date", "(", "x", ")", "for", "x", "in", "[", "lower", ",", "upper", "]", "]", "else", ":", "print", "(", "\"ERROR: Metadata file has no column which looks like a sampling date!\"", ",", "file", "=", "sys", ".", "stderr", ")", "if", "all", "(", "v", "is", "None", "for", "v", "in", "dates", ".", "values", "(", ")", ")", ":", "print", "(", "\"ERROR: Cannot parse dates correctly! Check date format.\"", ",", "file", "=", "sys", ".", "stderr", ")", "return", "{", "}", "return", "dates", "except", ":", "print", "(", "\"ERROR: Cannot read the metadata file!\"", ",", "file", "=", "sys", ".", "stderr", ")", "return", "{", "}" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
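A hypothetical metadata file illustrating the three accepted date formats (file and strain names are invented; values are approximate):

    # metadata.tsv (tab-separated):
    # strain    date
    # sampleA   2017-06-12
    # sampleB   2016.75
    # sampleC   2015-XX-XX
    dates = parse_dates('metadata.tsv')
    # -> {'sampleA': ~2017.446, 'sampleB': 2016.75, 'sampleC': [~2015.003, ~2015.999]}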
test
ambiguous_date_to_date_range
parse an ambiguous date such as 2017-XX-XX to [2017,2017.999] Parameters ---------- mydate : str date string to be parsed fmt : str format descriptor. default is %Y-%m-%d min_max_year : None, optional if date is completely unknown, use this as bounds. Returns ------- tuple lower and upper bounds on the date. Returns (None, None) on error
treetime/utils.py
def ambiguous_date_to_date_range(mydate, fmt="%Y-%m-%d", min_max_year=None): """parse an ambiguous date such as 2017-XX-XX to [2017,2017.999] Parameters ---------- mydate : str date string to be parsed fmt : str format descriptor. default is %Y-%m-%d min_max_year : None, optional if date is completely unknown, use this as bounds. Returns ------- tuple lower and upper bounds on the date. Returns (None, None) on error """ from datetime import datetime sep = fmt.split('%')[1][-1] min_date, max_date = {}, {} today = datetime.today().date() for val, field in zip(mydate.split(sep), fmt.split(sep+'%')): f = 'year' if 'y' in field.lower() else ('day' if 'd' in field.lower() else 'month') if 'XX' in val: if f=='year': if min_max_year: min_date[f]=min_max_year[0] if len(min_max_year)>1: max_date[f]=min_max_year[1] elif len(min_max_year)==1: max_date[f]=4000 #will be replaced by 'today' below. else: return None, None elif f=='month': min_date[f]=1 max_date[f]=12 elif f=='day': min_date[f]=1 max_date[f]=31 else: try: min_date[f]=int(val) max_date[f]=int(val) except ValueError: print("Can't parse date string: "+mydate, file=sys.stderr) return None, None max_date['day'] = min(max_date['day'], 31 if max_date['month'] in [1,3,5,7,8,10,12] else 28 if max_date['month']==2 else 30) lower_bound = datetime(year=min_date['year'], month=min_date['month'], day=min_date['day']).date() upper_bound = datetime(year=max_date['year'], month=max_date['month'], day=max_date['day']).date() return (lower_bound, upper_bound if upper_bound<today else today)
def ambiguous_date_to_date_range(mydate, fmt="%Y-%m-%d", min_max_year=None): """parse an ambiguous date such as 2017-XX-XX to [2017,2017.999] Parameters ---------- mydate : str date string to be parsed fmt : str format descriptor. default is %Y-%m-%d min_max_year : None, optional if date is completely unknown, use this as bounds. Returns ------- tuple lower and upper bounds on the date. Returns (None, None) on error """ from datetime import datetime sep = fmt.split('%')[1][-1] min_date, max_date = {}, {} today = datetime.today().date() for val, field in zip(mydate.split(sep), fmt.split(sep+'%')): f = 'year' if 'y' in field.lower() else ('day' if 'd' in field.lower() else 'month') if 'XX' in val: if f=='year': if min_max_year: min_date[f]=min_max_year[0] if len(min_max_year)>1: max_date[f]=min_max_year[1] elif len(min_max_year)==1: max_date[f]=4000 #will be replaced by 'today' below. else: return None, None elif f=='month': min_date[f]=1 max_date[f]=12 elif f=='day': min_date[f]=1 max_date[f]=31 else: try: min_date[f]=int(val) max_date[f]=int(val) except ValueError: print("Can't parse date string: "+mydate, file=sys.stderr) return None, None max_date['day'] = min(max_date['day'], 31 if max_date['month'] in [1,3,5,7,8,10,12] else 28 if max_date['month']==2 else 30) lower_bound = datetime(year=min_date['year'], month=min_date['month'], day=min_date['day']).date() upper_bound = datetime(year=max_date['year'], month=max_date['month'], day=max_date['day']).date() return (lower_bound, upper_bound if upper_bound<today else today)
[ "parse", "an", "abiguous", "date", "such", "as", "2017", "-", "XX", "-", "XX", "to", "[", "2017", "2017", ".", "999", "]" ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/utils.py#L269-L320
[ "def", "ambiguous_date_to_date_range", "(", "mydate", ",", "fmt", "=", "\"%Y-%m-%d\"", ",", "min_max_year", "=", "None", ")", ":", "from", "datetime", "import", "datetime", "sep", "=", "fmt", ".", "split", "(", "'%'", ")", "[", "1", "]", "[", "-", "1", "]", "min_date", ",", "max_date", "=", "{", "}", ",", "{", "}", "today", "=", "datetime", ".", "today", "(", ")", ".", "date", "(", ")", "for", "val", ",", "field", "in", "zip", "(", "mydate", ".", "split", "(", "sep", ")", ",", "fmt", ".", "split", "(", "sep", "+", "'%'", ")", ")", ":", "f", "=", "'year'", "if", "'y'", "in", "field", ".", "lower", "(", ")", "else", "(", "'day'", "if", "'d'", "in", "field", ".", "lower", "(", ")", "else", "'month'", ")", "if", "'XX'", "in", "val", ":", "if", "f", "==", "'year'", ":", "if", "min_max_year", ":", "min_date", "[", "f", "]", "=", "min_max_year", "[", "0", "]", "if", "len", "(", "min_max_year", ")", ">", "1", ":", "max_date", "[", "f", "]", "=", "min_max_year", "[", "1", "]", "elif", "len", "(", "min_max_year", ")", "==", "1", ":", "max_date", "[", "f", "]", "=", "4000", "#will be replaced by 'today' below.", "else", ":", "return", "None", ",", "None", "elif", "f", "==", "'month'", ":", "min_date", "[", "f", "]", "=", "1", "max_date", "[", "f", "]", "=", "12", "elif", "f", "==", "'day'", ":", "min_date", "[", "f", "]", "=", "1", "max_date", "[", "f", "]", "=", "31", "else", ":", "try", ":", "min_date", "[", "f", "]", "=", "int", "(", "val", ")", "max_date", "[", "f", "]", "=", "int", "(", "val", ")", "except", "ValueError", ":", "print", "(", "\"Can't parse date string: \"", "+", "mydate", ",", "file", "=", "sys", ".", "stderr", ")", "return", "None", ",", "None", "max_date", "[", "'day'", "]", "=", "min", "(", "max_date", "[", "'day'", "]", ",", "31", "if", "max_date", "[", "'month'", "]", "in", "[", "1", ",", "3", ",", "5", ",", "7", ",", "8", ",", "10", ",", "12", "]", "else", "28", "if", "max_date", "[", "'month'", "]", "==", "2", "else", "30", ")", "lower_bound", "=", "datetime", "(", "year", "=", "min_date", "[", "'year'", "]", ",", "month", "=", "min_date", "[", "'month'", "]", ",", "day", "=", "min_date", "[", "'day'", "]", ")", ".", "date", "(", ")", "upper_bound", "=", "datetime", "(", "year", "=", "max_date", "[", "'year'", "]", ",", "month", "=", "max_date", "[", "'month'", "]", ",", "day", "=", "max_date", "[", "'day'", "]", ")", ".", "date", "(", ")", "return", "(", "lower_bound", ",", "upper_bound", "if", "upper_bound", "<", "today", "else", "today", ")" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
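For instance:

    ambiguous_date_to_date_range('2017-XX-XX')
    # -> (datetime.date(2017, 1, 1), datetime.date(2017, 12, 31))
    ambiguous_date_to_date_range('2017-06-XX')
    # -> (datetime.date(2017, 6, 1), datetime.date(2017, 6, 30))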
test
DateConversion.from_regression
Create the conversion object automatically from the tree Parameters ---------- clock_model : dict dictionary as returned from TreeRegression with fields intercept and slope
treetime/utils.py
def from_regression(cls, clock_model): """ Create the conversion object automatically from the tree Parameters ---------- clock_model : dict dictionary as returned from TreeRegression with fields intercept and slope """ dc = cls() dc.clock_rate = clock_model['slope'] dc.intercept = clock_model['intercept'] dc.chisq = clock_model['chisq'] if 'chisq' in clock_model else None dc.valid_confidence = clock_model['valid_confidence'] if 'valid_confidence' in clock_model else False if 'cov' in clock_model and dc.valid_confidence: dc.cov = clock_model['cov'] dc.r_val = clock_model['r_val'] return dc
def from_regression(cls, clock_model): """ Create the conversion object automatically from the tree Parameters ---------- clock_model : dict dictionary as returned from TreeRegression with fields intercept and slope """ dc = cls() dc.clock_rate = clock_model['slope'] dc.intercept = clock_model['intercept'] dc.chisq = clock_model['chisq'] if 'chisq' in clock_model else None dc.valid_confidence = clock_model['valid_confidence'] if 'valid_confidence' in clock_model else False if 'cov' in clock_model and dc.valid_confidence: dc.cov = clock_model['cov'] dc.r_val = clock_model['r_val'] return dc
[ "Create", "the", "conversion", "object", "automatically", "from", "the", "tree" ]
neherlab/treetime
python
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/utils.py#L41-L60
[ "def", "from_regression", "(", "cls", ",", "clock_model", ")", ":", "dc", "=", "cls", "(", ")", "dc", ".", "clock_rate", "=", "clock_model", "[", "'slope'", "]", "dc", ".", "intercept", "=", "clock_model", "[", "'intercept'", "]", "dc", ".", "chisq", "=", "clock_model", "[", "'chisq'", "]", "if", "'chisq'", "in", "clock_model", "else", "None", "dc", ".", "valid_confidence", "=", "clock_model", "[", "'valid_confidence'", "]", "if", "'valid_confidence'", "in", "clock_model", "else", "False", "if", "'cov'", "in", "clock_model", "and", "dc", ".", "valid_confidence", ":", "dc", ".", "cov", "=", "clock_model", "[", "'cov'", "]", "dc", ".", "r_val", "=", "clock_model", "[", "'r_val'", "]", "return", "dc" ]
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
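A minimal sketch of constructing the converter (the `cls` first parameter suggests this is a classmethod; the slope and intercept values below are invented):

    clock_model = {'slope': 1.1e-3, 'intercept': -2.2, 'valid_confidence': False}
    dc = DateConversion.from_regression(clock_model)
    dc.clock_rate   # 0.0011
    dc.chisq        # None, since 'chisq' was not in the dict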
test
GuacamoleClient.client
Socket connection.
guacamole/client.py
def client(self): """ Socket connection. """ if not self._client: self._client = socket.create_connection( (self.host, self.port), self.timeout) self.logger.debug('Client connected with guacd server (%s, %s, %s)' % (self.host, self.port, self.timeout)) return self._client
def client(self): """ Socket connection. """ if not self._client: self._client = socket.create_connection( (self.host, self.port), self.timeout) self.logger.debug('Client connected with guacd server (%s, %s, %s)' % (self.host, self.port, self.timeout)) return self._client
[ "Socket", "connection", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/client.py#L65-L75
[ "def", "client", "(", "self", ")", ":", "if", "not", "self", ".", "_client", ":", "self", ".", "_client", "=", "socket", ".", "create_connection", "(", "(", "self", ".", "host", ",", "self", ".", "port", ")", ",", "self", ".", "timeout", ")", "self", ".", "logger", ".", "debug", "(", "'Client connected with guacd server (%s, %s, %s)'", "%", "(", "self", ".", "host", ",", "self", ".", "port", ",", "self", ".", "timeout", ")", ")", "return", "self", ".", "_client" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
test
GuacamoleClient.close
Terminate connection with Guacamole guacd server.
guacamole/client.py
def close(self): """ Terminate connection with Guacamole guacd server. """ self.client.close() self._client = None self.connected = False self.logger.debug('Connection closed.')
def close(self): """ Terminate connection with Guacamole guacd server. """ self.client.close() self._client = None self.connected = False self.logger.debug('Connection closed.')
[ "Terminate", "connection", "with", "Guacamole", "guacd", "server", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/client.py#L82-L89
[ "def", "close", "(", "self", ")", ":", "self", ".", "client", ".", "close", "(", ")", "self", ".", "_client", "=", "None", "self", ".", "connected", "=", "False", "self", ".", "logger", ".", "debug", "(", "'Connection closed.'", ")" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
test
GuacamoleClient.receive
Receive instructions from Guacamole guacd server.
guacamole/client.py
def receive(self): """ Receive instructions from Guacamole guacd server. """ start = 0 while True: idx = self._buffer.find(INST_TERM.encode(), start) if idx != -1: # instruction was fully received! line = self._buffer[:idx + 1].decode() self._buffer = self._buffer[idx + 1:] self.logger.debug('Received instruction: %s' % line) return line else: start = len(self._buffer) # we are still waiting for instruction termination buf = self.client.recv(BUF_LEN) if not buf: # No data received, connection lost?! self.close() self.logger.debug( 'Failed to receive instruction. Closing.') return None self._buffer.extend(buf)
def receive(self): """ Receive instructions from Guacamole guacd server. """ start = 0 while True: idx = self._buffer.find(INST_TERM.encode(), start) if idx != -1: # instruction was fully received! line = self._buffer[:idx + 1].decode() self._buffer = self._buffer[idx + 1:] self.logger.debug('Received instruction: %s' % line) return line else: start = len(self._buffer) # we are still waiting for instruction termination buf = self.client.recv(BUF_LEN) if not buf: # No data received, connection lost?! self.close() self.logger.debug( 'Failed to receive instruction. Closing.') return None self._buffer.extend(buf)
[ "Receive", "instructions", "from", "Guacamole", "guacd", "server", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/client.py#L91-L115
[ "def", "receive", "(", "self", ")", ":", "start", "=", "0", "while", "True", ":", "idx", "=", "self", ".", "_buffer", ".", "find", "(", "INST_TERM", ".", "encode", "(", ")", ",", "start", ")", "if", "idx", "!=", "-", "1", ":", "# instruction was fully received!", "line", "=", "self", ".", "_buffer", "[", ":", "idx", "+", "1", "]", ".", "decode", "(", ")", "self", ".", "_buffer", "=", "self", ".", "_buffer", "[", "idx", "+", "1", ":", "]", "self", ".", "logger", ".", "debug", "(", "'Received instruction: %s'", "%", "line", ")", "return", "line", "else", ":", "start", "=", "len", "(", "self", ".", "_buffer", ")", "# we are still waiting for instruction termination", "buf", "=", "self", ".", "client", ".", "recv", "(", "BUF_LEN", ")", "if", "not", "buf", ":", "# No data recieved, connection lost?!", "self", ".", "close", "(", ")", "self", ".", "logger", ".", "debug", "(", "'Failed to receive instruction. Closing.'", ")", "return", "None", "self", ".", "_buffer", ".", "extend", "(", "buf", ")" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
test
GuacamoleClient.send
Send encoded instructions to Guacamole guacd server.
guacamole/client.py
def send(self, data): """ Send encoded instructions to Guacamole guacd server. """ self.logger.debug('Sending data: %s' % data) self.client.sendall(data.encode())
def send(self, data): """ Send encoded instructions to Guacamole guacd server. """ self.logger.debug('Sending data: %s' % data) self.client.sendall(data.encode())
[ "Send", "encoded", "instructions", "to", "Guacamole", "guacd", "server", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/client.py#L117-L122
[ "def", "send", "(", "self", ",", "data", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Sending data: %s'", "%", "data", ")", "self", ".", "client", ".", "sendall", "(", "data", ".", "encode", "(", ")", ")" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
test
GuacamoleClient.send_instruction
Send instruction after encoding.
guacamole/client.py
def send_instruction(self, instruction): """ Send instruction after encoding. """ self.logger.debug('Sending instruction: %s' % str(instruction)) return self.send(instruction.encode())
def send_instruction(self, instruction): """ Send instruction after encoding. """ self.logger.debug('Sending instruction: %s' % str(instruction)) return self.send(instruction.encode())
[ "Send", "instruction", "after", "encoding", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/client.py#L131-L136
[ "def", "send_instruction", "(", "self", ",", "instruction", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Sending instruction: %s'", "%", "str", "(", "instruction", ")", ")", "return", "self", ".", "send", "(", "instruction", ".", "encode", "(", ")", ")" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
test
GuacamoleClient.handshake
Establish connection with Guacamole guacd server via handshake.
guacamole/client.py
def handshake(self, protocol='vnc', width=1024, height=768, dpi=96, audio=None, video=None, image=None, **kwargs): """ Establish connection with Guacamole guacd server via handshake. """ if protocol not in PROTOCOLS: self.logger.debug('Invalid protocol: %s' % protocol) raise GuacamoleError('Cannot start Handshake. Missing protocol.') if audio is None: audio = list() if video is None: video = list() if image is None: image = list() # 1. Send 'select' instruction self.logger.debug('Send `select` instruction.') self.send_instruction(Instruction('select', protocol)) # 2. Receive `args` instruction instruction = self.read_instruction() self.logger.debug('Expecting `args` instruction, received: %s' % str(instruction)) if not instruction: self.close() raise GuacamoleError( 'Cannot establish Handshake. Connection Lost!') if instruction.opcode != 'args': self.close() raise GuacamoleError( 'Cannot establish Handshake. Expected opcode `args`, ' 'received `%s` instead.' % instruction.opcode) # 3. Respond with size, audio & video support self.logger.debug('Send `size` instruction (%s, %s, %s)' % (width, height, dpi)) self.send_instruction(Instruction('size', width, height, dpi)) self.logger.debug('Send `audio` instruction (%s)' % audio) self.send_instruction(Instruction('audio', *audio)) self.logger.debug('Send `video` instruction (%s)' % video) self.send_instruction(Instruction('video', *video)) self.logger.debug('Send `image` instruction (%s)' % image) self.send_instruction(Instruction('image', *image)) # 4. Send `connect` instruction with proper values connection_args = [ kwargs.get(arg.replace('-', '_'), '') for arg in instruction.args ] self.logger.debug('Send `connect` instruction (%s)' % connection_args) self.send_instruction(Instruction('connect', *connection_args)) # 5. Receive ``ready`` instruction, with client ID. instruction = self.read_instruction() self.logger.debug('Expecting `ready` instruction, received: %s' % str(instruction)) if instruction.opcode != 'ready': self.logger.warning( 'Expected `ready` instruction, received: %s instead') if instruction.args: self._id = instruction.args[0] self.logger.debug( 'Established connection with client id: %s' % self.id) self.logger.debug('Handshake completed.') self.connected = True
def handshake(self, protocol='vnc', width=1024, height=768, dpi=96, audio=None, video=None, image=None, **kwargs): """ Establish connection with Guacamole guacd server via handshake. """ if protocol not in PROTOCOLS: self.logger.debug('Invalid protocol: %s' % protocol) raise GuacamoleError('Cannot start Handshake. Missing protocol.') if audio is None: audio = list() if video is None: video = list() if image is None: image = list() # 1. Send 'select' instruction self.logger.debug('Send `select` instruction.') self.send_instruction(Instruction('select', protocol)) # 2. Receive `args` instruction instruction = self.read_instruction() self.logger.debug('Expecting `args` instruction, received: %s' % str(instruction)) if not instruction: self.close() raise GuacamoleError( 'Cannot establish Handshake. Connection Lost!') if instruction.opcode != 'args': self.close() raise GuacamoleError( 'Cannot establish Handshake. Expected opcode `args`, ' 'received `%s` instead.' % instruction.opcode) # 3. Respond with size, audio & video support self.logger.debug('Send `size` instruction (%s, %s, %s)' % (width, height, dpi)) self.send_instruction(Instruction('size', width, height, dpi)) self.logger.debug('Send `audio` instruction (%s)' % audio) self.send_instruction(Instruction('audio', *audio)) self.logger.debug('Send `video` instruction (%s)' % video) self.send_instruction(Instruction('video', *video)) self.logger.debug('Send `image` instruction (%s)' % image) self.send_instruction(Instruction('image', *image)) # 4. Send `connect` instruction with proper values connection_args = [ kwargs.get(arg.replace('-', '_'), '') for arg in instruction.args ] self.logger.debug('Send `connect` instruction (%s)' % connection_args) self.send_instruction(Instruction('connect', *connection_args)) # 5. Receive ``ready`` instruction, with client ID. instruction = self.read_instruction() self.logger.debug('Expecting `ready` instruction, received: %s' % str(instruction)) if instruction.opcode != 'ready': self.logger.warning( 'Expected `ready` instruction, received: %s instead') if instruction.args: self._id = instruction.args[0] self.logger.debug( 'Established connection with client id: %s' % self.id) self.logger.debug('Handshake completed.') self.connected = True
[ "Establish", "connection", "with", "Guacamole", "guacd", "server", "via", "handshake", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/client.py#L138-L213
[ "def", "handshake", "(", "self", ",", "protocol", "=", "'vnc'", ",", "width", "=", "1024", ",", "height", "=", "768", ",", "dpi", "=", "96", ",", "audio", "=", "None", ",", "video", "=", "None", ",", "image", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "protocol", "not", "in", "PROTOCOLS", ":", "self", ".", "logger", ".", "debug", "(", "'Invalid protocol: %s'", "%", "protocol", ")", "raise", "GuacamoleError", "(", "'Cannot start Handshake. Missing protocol.'", ")", "if", "audio", "is", "None", ":", "audio", "=", "list", "(", ")", "if", "video", "is", "None", ":", "video", "=", "list", "(", ")", "if", "image", "is", "None", ":", "image", "=", "list", "(", ")", "# 1. Send 'select' instruction", "self", ".", "logger", ".", "debug", "(", "'Send `select` instruction.'", ")", "self", ".", "send_instruction", "(", "Instruction", "(", "'select'", ",", "protocol", ")", ")", "# 2. Receive `args` instruction", "instruction", "=", "self", ".", "read_instruction", "(", ")", "self", ".", "logger", ".", "debug", "(", "'Expecting `args` instruction, received: %s'", "%", "str", "(", "instruction", ")", ")", "if", "not", "instruction", ":", "self", ".", "close", "(", ")", "raise", "GuacamoleError", "(", "'Cannot establish Handshake. Connection Lost!'", ")", "if", "instruction", ".", "opcode", "!=", "'args'", ":", "self", ".", "close", "(", ")", "raise", "GuacamoleError", "(", "'Cannot establish Handshake. Expected opcode `args`, '", "'received `%s` instead.'", "%", "instruction", ".", "opcode", ")", "# 3. Respond with size, audio & video support", "self", ".", "logger", ".", "debug", "(", "'Send `size` instruction (%s, %s, %s)'", "%", "(", "width", ",", "height", ",", "dpi", ")", ")", "self", ".", "send_instruction", "(", "Instruction", "(", "'size'", ",", "width", ",", "height", ",", "dpi", ")", ")", "self", ".", "logger", ".", "debug", "(", "'Send `audio` instruction (%s)'", "%", "audio", ")", "self", ".", "send_instruction", "(", "Instruction", "(", "'audio'", ",", "*", "audio", ")", ")", "self", ".", "logger", ".", "debug", "(", "'Send `video` instruction (%s)'", "%", "video", ")", "self", ".", "send_instruction", "(", "Instruction", "(", "'video'", ",", "*", "video", ")", ")", "self", ".", "logger", ".", "debug", "(", "'Send `image` instruction (%s)'", "%", "image", ")", "self", ".", "send_instruction", "(", "Instruction", "(", "'image'", ",", "*", "image", ")", ")", "# 4. Send `connect` instruction with proper values", "connection_args", "=", "[", "kwargs", ".", "get", "(", "arg", ".", "replace", "(", "'-'", ",", "'_'", ")", ",", "''", ")", "for", "arg", "in", "instruction", ".", "args", "]", "self", ".", "logger", ".", "debug", "(", "'Send `connect` instruction (%s)'", "%", "connection_args", ")", "self", ".", "send_instruction", "(", "Instruction", "(", "'connect'", ",", "*", "connection_args", ")", ")", "# 5. 
Receive ``ready`` instruction, with client ID.", "instruction", "=", "self", ".", "read_instruction", "(", ")", "self", ".", "logger", ".", "debug", "(", "'Expecting `ready` instruction, received: %s'", "%", "str", "(", "instruction", ")", ")", "if", "instruction", ".", "opcode", "!=", "'ready'", ":", "self", ".", "logger", ".", "warning", "(", "'Expected `ready` instruction, received: %s instead'", ")", "if", "instruction", ".", "args", ":", "self", ".", "_id", "=", "instruction", ".", "args", "[", "0", "]", "self", ".", "logger", ".", "debug", "(", "'Established connection with client id: %s'", "%", "self", ".", "id", ")", "self", ".", "logger", ".", "debug", "(", "'Handshake completed.'", ")", "self", ".", "connected", "=", "True" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
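An end-to-end sketch (guacd's default port is 4822; the connect kwargs are illustrative, since the real parameter names come from the `args` instruction guacd returns for the chosen protocol, and the constructor signature is assumed to take host and port):

    client = GuacamoleClient('127.0.0.1', 4822)
    client.handshake(protocol='vnc', width=1280, height=720,
                     hostname='10.0.0.5', port='5901', password='secret')
    first_instruction = client.receive()   # server output after 'ready'
    client.close()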
test
utf8
Return a utf-8 encoded string from a valid unicode string. :param unicode_str: Unicode string. :return: str
guacamole/instruction.py
def utf8(unicode_str): """ Return a utf-8 encoded string from a valid unicode string. :param unicode_str: Unicode string. :return: str """ if six.PY2 and isinstance(unicode_str, __unicode__): return unicode_str.encode('utf-8') return unicode_str
def utf8(unicode_str): """ Return a utf-8 encoded string from a valid unicode string. :param unicode_str: Unicode string. :return: str """ if six.PY2 and isinstance(unicode_str, __unicode__): return unicode_str.encode('utf-8') return unicode_str
[ "Return", "a", "utf", "-", "8", "encoded", "string", "from", "a", "valid", "unicode", "string", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/instruction.py#L40-L51
[ "def", "utf8", "(", "unicode_str", ")", ":", "if", "six", ".", "PY2", "and", "isinstance", "(", "unicode_str", ",", "__unicode__", ")", ":", "return", "unicode_str", ".", "encode", "(", "'utf-8'", ")", "return", "unicode_str" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
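For example:

    utf8(u'café')   # 'caf\xc3\xa9' (UTF-8 bytes) on Python 2; returned unchanged on Python 3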
test
GuacamoleInstruction.load
Loads a new GuacamoleInstruction from encoded instruction string. :param instruction: Instruction string. :return: GuacamoleInstruction()
guacamole/instruction.py
def load(cls, instruction): """ Loads a new GuacamoleInstruction from encoded instruction string. :param instruction: Instruction string. :return: GuacamoleInstruction() """ if not instruction.endswith(INST_TERM): raise InvalidInstruction('Instruction termination not found.') args = cls.decode_instruction(instruction) return cls(args[0], *args[1:])
def load(cls, instruction): """ Loads a new GuacamoleInstruction from encoded instruction string. :param instruction: Instruction string. :return: GuacamoleInstruction() """ if not instruction.endswith(INST_TERM): raise InvalidInstruction('Instruction termination not found.') args = cls.decode_instruction(instruction) return cls(args[0], *args[1:])
[ "Loads", "a", "new", "GuacamoleInstruction", "from", "encoded", "instruction", "string", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/instruction.py#L61-L74
[ "def", "load", "(", "cls", ",", "instruction", ")", ":", "if", "not", "instruction", ".", "endswith", "(", "INST_TERM", ")", ":", "raise", "InvalidInstruction", "(", "'Instruction termination not found.'", ")", "args", "=", "cls", ".", "decode_instruction", "(", "instruction", ")", "return", "cls", "(", "args", "[", "0", "]", ",", "*", "args", "[", "1", ":", "]", ")" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
test
GuacamoleInstruction.decode_instruction
Decode whole instruction and return list of args. Usually, returned arg[0] is the instruction opcode. example: >> args = decode_instruction('4.size,4.1024;') >> args == ['size', '1024'] >> True :param instruction: Instruction string. :return: list
guacamole/instruction.py
def decode_instruction(instruction): """ Decode whole instruction and return list of args. Usually, returned arg[0] is the instruction opcode. example: >> args = decode_instruction('4.size,4.1024;') >> args == ['size', '1024'] >> True :param instruction: Instruction string. :return: list """ if not instruction.endswith(INST_TERM): raise InvalidInstruction('Instruction termination not found.') # Use proper encoding instruction = utf8(instruction) # Get arg size elems = instruction.split(ELEM_SEP, 1) try: arg_size = int(elems[0]) except Exception: # Expected ValueError raise InvalidInstruction( 'Invalid arg length.' + ' Possibly due to missing element separator!') arg_str = elems[1][:arg_size] remaining = elems[1][arg_size:] args = [arg_str] if remaining.startswith(ARG_SEP): # Ignore the ARG_SEP to parse next arg. remaining = remaining[1:] elif remaining == INST_TERM: # This was the last arg! return args else: # The remaining is neither starting with ARG_SEP nor INST_TERM. raise InvalidInstruction( 'Instruction arg (%s) has invalid length.' % arg_str) next_args = GuacamoleInstruction.decode_instruction(remaining) if next_args: args = args + next_args return args
def decode_instruction(instruction): """ Decode whole instruction and return list of args. Usually, returned arg[0] is the instruction opcode. example: >> args = decode_instruction('4.size,4.1024;') >> args == ['size', '1024'] >> True :param instruction: Instruction string. :return: list """ if not instruction.endswith(INST_TERM): raise InvalidInstruction('Instruction termination not found.') # Use proper encoding instruction = utf8(instruction) # Get arg size elems = instruction.split(ELEM_SEP, 1) try: arg_size = int(elems[0]) except Exception: # Expected ValueError raise InvalidInstruction( 'Invalid arg length.' + ' Possibly due to missing element separator!') arg_str = elems[1][:arg_size] remaining = elems[1][arg_size:] args = [arg_str] if remaining.startswith(ARG_SEP): # Ignore the ARG_SEP to parse next arg. remaining = remaining[1:] elif remaining == INST_TERM: # This was the last arg! return args else: # The remaining is neither starting with ARG_SEP nor INST_TERM. raise InvalidInstruction( 'Instruction arg (%s) has invalid length.' % arg_str) next_args = GuacamoleInstruction.decode_instruction(remaining) if next_args: args = args + next_args return args
[ "Decode", "whole", "instruction", "and", "return", "list", "of", "args", ".", "Usually", "returned", "arg", "[", "0", "]", "is", "the", "instruction", "opcode", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/instruction.py#L77-L130
[ "def", "decode_instruction", "(", "instruction", ")", ":", "if", "not", "instruction", ".", "endswith", "(", "INST_TERM", ")", ":", "raise", "InvalidInstruction", "(", "'Instruction termination not found.'", ")", "# Use proper encoding", "instruction", "=", "utf8", "(", "instruction", ")", "# Get arg size", "elems", "=", "instruction", ".", "split", "(", "ELEM_SEP", ",", "1", ")", "try", ":", "arg_size", "=", "int", "(", "elems", "[", "0", "]", ")", "except", "Exception", ":", "# Expected ValueError", "raise", "InvalidInstruction", "(", "'Invalid arg length.'", "+", "' Possibly due to missing element separator!'", ")", "arg_str", "=", "elems", "[", "1", "]", "[", ":", "arg_size", "]", "remaining", "=", "elems", "[", "1", "]", "[", "arg_size", ":", "]", "args", "=", "[", "arg_str", "]", "if", "remaining", ".", "startswith", "(", "ARG_SEP", ")", ":", "# Ignore the ARG_SEP to parse next arg.", "remaining", "=", "remaining", "[", "1", ":", "]", "elif", "remaining", "==", "INST_TERM", ":", "# This was the last arg!", "return", "args", "else", ":", "# The remaining is neither starting with ARG_SEP nor INST_TERM.", "raise", "InvalidInstruction", "(", "'Instruction arg (%s) has invalid length.'", "%", "arg_str", ")", "next_args", "=", "GuacamoleInstruction", ".", "decode_instruction", "(", "remaining", ")", "if", "next_args", ":", "args", "=", "args", "+", "next_args", "return", "args" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
test
GuacamoleInstruction.encode_arg
Encode argument to be sent in a valid GuacamoleInstruction. example: >> arg = encode_arg('size') >> arg == '4.size' >> True :param arg: arg string. :return: str
guacamole/instruction.py
def encode_arg(arg): """ Encode argument to be sent in a valid GuacamoleInstruction. example: >> arg = encode_arg('size') >> arg == '4.size' >> True :param arg: arg string. :return: str """ arg_utf8 = utf8(arg) return ELEM_SEP.join([str(len(str(arg_utf8))), str(arg_utf8)])
def encode_arg(arg): """ Encode argument to be sent in a valid GuacamoleInstruction. example: >> arg = encode_arg('size') >> arg == '4.size' >> True :param arg: arg string. :return: str """ arg_utf8 = utf8(arg) return ELEM_SEP.join([str(len(str(arg_utf8))), str(arg_utf8)])
[ "Encode", "argument", "to", "be", "sent", "in", "a", "valid", "GuacamoleInstruction", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/instruction.py#L133-L148
[ "def", "encode_arg", "(", "arg", ")", ":", "arg_utf8", "=", "utf8", "(", "arg", ")", "return", "ELEM_SEP", ".", "join", "(", "[", "str", "(", "len", "(", "str", "(", "arg_utf8", ")", ")", ")", ",", "str", "(", "arg_utf8", ")", "]", ")" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
test
GuacamoleInstruction.encode
Prepare the instruction to be sent over the wire. :return: str
guacamole/instruction.py
def encode(self): """ Prepare the instruction to be sent over the wire. :return: str """ instruction_iter = itertools.chain([self.opcode], self.args) elems = ARG_SEP.join(self.encode_arg(arg) for arg in instruction_iter) return elems + INST_TERM
def encode(self): """ Prepare the instruction to be sent over the wire. :return: str """ instruction_iter = itertools.chain([self.opcode], self.args) elems = ARG_SEP.join(self.encode_arg(arg) for arg in instruction_iter) return elems + INST_TERM
[ "Prepare", "the", "instruction", "to", "be", "sent", "over", "the", "wire", "." ]
mohabusama/pyguacamole
python
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/instruction.py#L150-L160
[ "def", "encode", "(", "self", ")", ":", "instruction_iter", "=", "itertools", ".", "chain", "(", "[", "self", ".", "opcode", "]", ",", "self", ".", "args", ")", "elems", "=", "ARG_SEP", ".", "join", "(", "self", ".", "encode_arg", "(", "arg", ")", "for", "arg", "in", "instruction_iter", ")", "return", "elems", "+", "INST_TERM" ]
344dccc6cb3a9a045afeaf337677e5d0001aa83a
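Putting encode_arg, encode, and decode_instruction together, a round trip through the wire format (each element is length-prefixed with '.', args are joined with ',' and the instruction ends with ';'):

    inst = GuacamoleInstruction('size', 1024, 768)
    inst.encode()
    # -> '4.size,4.1024,3.768;'
    GuacamoleInstruction.decode_instruction('4.size,4.1024,3.768;')
    # -> ['size', '1024', '768']
    GuacamoleInstruction.load('4.size,4.1024,3.768;').opcode
    # -> 'size'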
test
APIResource.class_url
Returns a versioned URI string for this class
solvebio/resource/apiresource.py
def class_url(cls): """Returns a versioned URI string for this class""" base = 'v{0}'.format(getattr(cls, 'RESOURCE_VERSION', '1')) return "/{0}/{1}".format(base, class_to_api_name(cls.class_name()))
def class_url(cls): """Returns a versioned URI string for this class""" base = 'v{0}'.format(getattr(cls, 'RESOURCE_VERSION', '1')) return "/{0}/{1}".format(base, class_to_api_name(cls.class_name()))
[ "Returns", "a", "versioned", "URI", "string", "for", "this", "class" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/apiresource.py#L49-L52
[ "def", "class_url", "(", "cls", ")", ":", "base", "=", "'v{0}'", ".", "format", "(", "getattr", "(", "cls", ",", "'RESOURCE_VERSION'", ",", "'1'", ")", ")", "return", "\"/{0}/{1}\"", ".", "format", "(", "base", ",", "class_to_api_name", "(", "cls", ".", "class_name", "(", ")", ")", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
APIResource.instance_url
Get instance URL by ID
solvebio/resource/apiresource.py
def instance_url(self): """Get instance URL by ID""" id_ = self.get(self.ID_ATTR) base = self.class_url() if id_: return '/'.join([base, six.text_type(id_)]) else: raise Exception( 'Could not determine which URL to request: %s instance ' 'has invalid ID: %r' % (type(self).__name__, id_), self.ID_ATTR)
def instance_url(self): """Get instance URL by ID""" id_ = self.get(self.ID_ATTR) base = self.class_url() if id_: return '/'.join([base, six.text_type(id_)]) else: raise Exception( 'Could not determine which URL to request: %s instance ' 'has invalid ID: %r' % (type(self).__name__, id_), self.ID_ATTR)
[ "Get", "instance", "URL", "by", "ID" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/apiresource.py#L54-L65
[ "def", "instance_url", "(", "self", ")", ":", "id_", "=", "self", ".", "get", "(", "self", ".", "ID_ATTR", ")", "base", "=", "self", ".", "class_url", "(", ")", "if", "id_", ":", "return", "'/'", ".", "join", "(", "[", "base", ",", "six", ".", "text_type", "(", "id_", ")", "]", ")", "else", ":", "raise", "Exception", "(", "'Could not determine which URL to request: %s instance '", "'has invalid ID: %r'", "%", "(", "type", "(", "self", ")", ".", "__name__", ",", "id_", ")", ",", "self", ".", "ID_ATTR", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
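To illustrate the URL plumbing (hypothetical subclass; this assumes class_to_api_name lowercases and pluralizes the class name, as the pluralize=False variant below suggests):

    class Dataset(APIResource):
        pass

    Dataset.class_url()   # '/v1/datasets' under that assumption
    # instance_url() then appends the value of ID_ATTR, e.g. '/v1/datasets/42'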
test
SingletonAPIResource.class_url
Returns a versioned URI string for this class, without pluralizing the class name.
solvebio/resource/apiresource.py
def class_url(cls): """ Returns a versioned URI string for this class, without pluralizing the class name. """ base = 'v{0}'.format(getattr(cls, 'RESOURCE_VERSION', '1')) return "/{0}/{1}".format(base, class_to_api_name( cls.class_name(), pluralize=False))
def class_url(cls): """ Returns a versioned URI string for this class, without pluralizing the class name. """ base = 'v{0}'.format(getattr(cls, 'RESOURCE_VERSION', '1')) return "/{0}/{1}".format(base, class_to_api_name( cls.class_name(), pluralize=False))
[ "Returns", "a", "versioned", "URI", "string", "for", "this", "class", "and", "don", "t", "pluralize", "the", "class", "name", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/apiresource.py#L146-L153
[ "def", "class_url", "(", "cls", ")", ":", "base", "=", "'v{0}'", ".", "format", "(", "getattr", "(", "cls", ",", "'RESOURCE_VERSION'", ",", "'1'", ")", ")", "return", "\"/{0}/{1}\"", ".", "format", "(", "base", ",", "class_to_api_name", "(", "cls", ".", "class_name", "(", ")", ",", "pluralize", "=", "False", ")", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
DownloadableAPIResource.download
Download the file to the specified directory or file path. Downloads to a temporary directory if no path is specified. Returns the absolute path to the file.
solvebio/resource/apiresource.py
def download(self, path=None, **kwargs): """ Download the file to the specified directory or file path. Downloads to a temporary directory if no path is specified. Returns the absolute path to the file. """ download_url = self.download_url(**kwargs) try: # For vault objects, use the object's filename # as the fallback if none is specified. filename = self.filename except AttributeError: # If the object has no filename attribute, # extract one from the download URL. filename = download_url.split('%3B%20filename%3D')[1] # Remove additional URL params from the name and "unquote" it. filename = unquote(filename.split('&')[0]) if path: path = os.path.expanduser(path) # If the path is a dir, use the extracted filename if os.path.isdir(path): path = os.path.join(path, filename) else: # Create a temporary directory for the file path = os.path.join(tempfile.gettempdir(), filename) try: response = requests.request(method='get', url=download_url) except Exception as e: _handle_request_error(e) if not (200 <= response.status_code < 400): _handle_api_error(response) with open(path, 'wb') as fileobj: fileobj.write(response._content) return path
def download(self, path=None, **kwargs): """ Download the file to the specified directory or file path. Downloads to a temporary directory if no path is specified. Returns the absolute path to the file. """ download_url = self.download_url(**kwargs) try: # For vault objects, use the object's filename # as the fallback if none is specified. filename = self.filename except AttributeError: # If the object has no filename attribute, # extract one from the download URL. filename = download_url.split('%3B%20filename%3D')[1] # Remove additional URL params from the name and "unquote" it. filename = unquote(filename.split('&')[0]) if path: path = os.path.expanduser(path) # If the path is a dir, use the extracted filename if os.path.isdir(path): path = os.path.join(path, filename) else: # Create a temporary directory for the file path = os.path.join(tempfile.gettempdir(), filename) try: response = requests.request(method='get', url=download_url) except Exception as e: _handle_request_error(e) if not (200 <= response.status_code < 400): _handle_api_error(response) with open(path, 'wb') as fileobj: fileobj.write(response._content) return path
[ "Download", "the", "file", "to", "the", "specified", "directory", "or", "file", "path", ".", "Downloads", "to", "a", "temporary", "directory", "if", "no", "path", "is", "specified", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/apiresource.py#L187-L226
[ "def", "download", "(", "self", ",", "path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "download_url", "=", "self", ".", "download_url", "(", "*", "*", "kwargs", ")", "try", ":", "# For vault objects, use the object's filename", "# as the fallback if none is specified.", "filename", "=", "self", ".", "filename", "except", "AttributeError", ":", "# If the object has no filename attribute,", "# extract one from the download URL.", "filename", "=", "download_url", ".", "split", "(", "'%3B%20filename%3D'", ")", "[", "1", "]", "# Remove additional URL params from the name and \"unquote\" it.", "filename", "=", "unquote", "(", "filename", ".", "split", "(", "'&'", ")", "[", "0", "]", ")", "if", "path", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "# If the path is a dir, use the extracted filename", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "else", ":", "# Create a temporary directory for the file", "path", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "filename", ")", "try", ":", "response", "=", "requests", ".", "request", "(", "method", "=", "'get'", ",", "url", "=", "download_url", ")", "except", "Exception", "as", "e", ":", "_handle_request_error", "(", "e", ")", "if", "not", "(", "200", "<=", "response", ".", "status_code", "<", "400", ")", ":", "_handle_api_error", "(", "response", ")", "with", "open", "(", "path", ",", "'wb'", ")", "as", "fileobj", ":", "fileobj", ".", "write", "(", "response", ".", "_content", ")", "return", "path" ]
b29614643043afd19c1d8074e8f25c6700d51a73
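A hedged usage sketch (class and ID are invented; any subclass exposing download_url should behave the same):

    obj = SomeDownloadableResource.retrieve(12345)   # hypothetical subclass and ID
    obj.download('~/data')    # existing directory: filename inferred from the object or URL
    obj.download()            # no path: file lands in tempfile.gettempdir()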
test
DatasetCommit.parent_object
Get the commit object's parent Import or Migration
solvebio/resource/datasetcommit.py
def parent_object(self): """ Get the commit object's parent Import or Migration """ from . import types parent_klass = types.get(self.parent_job_model.split('.')[1]) return parent_klass.retrieve(self.parent_job_id, client=self._client)
def parent_object(self): """ Get the commit object's parent Import or Migration """ from . import types parent_klass = types.get(self.parent_job_model.split('.')[1]) return parent_klass.retrieve(self.parent_job_id, client=self._client)
[ "Get", "the", "commit", "objects", "parent", "Import", "or", "Migration" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/datasetcommit.py#L29-L33
[ "def", "parent_object", "(", "self", ")", ":", "from", ".", "import", "types", "parent_klass", "=", "types", ".", "get", "(", "self", ".", "parent_job_model", ".", "split", "(", "'.'", ")", "[", "1", "]", ")", "return", "parent_klass", ".", "retrieve", "(", "self", ".", "parent_job_id", ",", "client", "=", "self", ".", "_client", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
_ask_for_credentials
Asks the user for their email and password.
solvebio/cli/auth.py
def _ask_for_credentials(): """ Asks the user for their email and password. """ _print_msg('Please enter your SolveBio credentials') domain = raw_input('Domain (e.g. <domain>.solvebio.com): ') # Check to see if this domain supports password authentication try: account = client.request('get', '/p/accounts/{}'.format(domain)) auth = account['authentication'] except: raise SolveError('Invalid domain: {}'.format(domain)) # Account must support password-based login if auth.get('login') or auth.get('SAML', {}).get('simple_login'): email = raw_input('Email: ') password = getpass.getpass('Password (typing will be hidden): ') return (domain, email, password) else: _print_msg( 'Your domain uses Single Sign-On (SSO). ' 'Please visit https://{}.solvebio.com/settings/security ' 'for instructions on how to log in.'.format(domain)) sys.exit(1)
def _ask_for_credentials(): """ Asks the user for their email and password. """ _print_msg('Please enter your SolveBio credentials') domain = raw_input('Domain (e.g. <domain>.solvebio.com): ') # Check to see if this domain supports password authentication try: account = client.request('get', '/p/accounts/{}'.format(domain)) auth = account['authentication'] except: raise SolveError('Invalid domain: {}'.format(domain)) # Account must support password-based login if auth.get('login') or auth.get('SAML', {}).get('simple_login'): email = raw_input('Email: ') password = getpass.getpass('Password (typing will be hidden): ') return (domain, email, password) else: _print_msg( 'Your domain uses Single Sign-On (SSO). ' 'Please visit https://{}.solvebio.com/settings/security ' 'for instructions on how to log in.'.format(domain)) sys.exit(1)
[ "Asks", "the", "user", "for", "their", "email", "and", "password", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/auth.py#L22-L45
[ "def", "_ask_for_credentials", "(", ")", ":", "_print_msg", "(", "'Please enter your SolveBio credentials'", ")", "domain", "=", "raw_input", "(", "'Domain (e.g. <domain>.solvebio.com): '", ")", "# Check to see if this domain supports password authentication", "try", ":", "account", "=", "client", ".", "request", "(", "'get'", ",", "'/p/accounts/{}'", ".", "format", "(", "domain", ")", ")", "auth", "=", "account", "[", "'authentication'", "]", "except", ":", "raise", "SolveError", "(", "'Invalid domain: {}'", ".", "format", "(", "domain", ")", ")", "# Account must support password-based login", "if", "auth", ".", "get", "(", "'login'", ")", "or", "auth", ".", "get", "(", "'SAML'", ",", "{", "}", ")", ".", "get", "(", "'simple_login'", ")", ":", "email", "=", "raw_input", "(", "'Email: '", ")", "password", "=", "getpass", ".", "getpass", "(", "'Password (typing will be hidden): '", ")", "return", "(", "domain", ",", "email", ",", "password", ")", "else", ":", "_print_msg", "(", "'Your domain uses Single Sign-On (SSO). '", "'Please visit https://{}.solvebio.com/settings/security '", "'for instructions on how to log in.'", ".", "format", "(", "domain", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
login
Prompt user for login information (domain/email/password). Domain, email and password are used to get the user's API key. Always updates the stored credentials file.
solvebio/cli/auth.py
def login(*args, **kwargs): """ Prompt user for login information (domain/email/password). Domain, email and password are used to get the user's API key. Always updates the stored credentials file. """ if args and args[0].api_key: # Handle command-line arguments if provided. solvebio.login(api_key=args[0].api_key) elif kwargs: # Run the global login() if kwargs are provided # or local credentials are found. solvebio.login(**kwargs) else: interactive_login() # Print information about the current user user = client.whoami() if user: print_user(user) save_credentials(user['email'].lower(), solvebio.api_key) _print_msg('Updated local credentials.') return True else: _print_msg('Invalid credentials. You may not be logged-in.') return False
def login(*args, **kwargs): """ Prompt user for login information (domain/email/password). Domain, email and password are used to get the user's API key. Always updates the stored credentials file. """ if args and args[0].api_key: # Handle command-line arguments if provided. solvebio.login(api_key=args[0].api_key) elif kwargs: # Run the global login() if kwargs are provided # or local credentials are found. solvebio.login(**kwargs) else: interactive_login() # Print information about the current user user = client.whoami() if user: print_user(user) save_credentials(user['email'].lower(), solvebio.api_key) _print_msg('Updated local credentials.') return True else: _print_msg('Invalid credentials. You may not be logged-in.') return False
[ "Prompt", "user", "for", "login", "information", "(", "domain", "/", "email", "/", "password", ")", ".", "Domain", "email", "and", "password", "are", "used", "to", "get", "the", "user", "s", "API", "key", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/auth.py#L48-L75
[ "def", "login", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", "and", "args", "[", "0", "]", ".", "api_key", ":", "# Handle command-line arguments if provided.", "solvebio", ".", "login", "(", "api_key", "=", "args", "[", "0", "]", ".", "api_key", ")", "elif", "kwargs", ":", "# Run the global login() if kwargs are provided", "# or local credentials are found.", "solvebio", ".", "login", "(", "*", "*", "kwargs", ")", "else", ":", "interactive_login", "(", ")", "# Print information about the current user", "user", "=", "client", ".", "whoami", "(", ")", "if", "user", ":", "print_user", "(", "user", ")", "save_credentials", "(", "user", "[", "'email'", "]", ".", "lower", "(", ")", ",", "solvebio", ".", "api_key", ")", "_print_msg", "(", "'Updated local credentials.'", ")", "return", "True", "else", ":", "_print_msg", "(", "'Invalid credentials. You may not be logged-in.'", ")", "return", "False" ]
b29614643043afd19c1d8074e8f25c6700d51a73
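A non-interactive sketch of the CLI login flow above; the key is a placeholder. Passing kwargs routes through solvebio.login() instead of the interactive prompt, and credentials are saved only if whoami() succeeds.

```python
from solvebio.cli.auth import login

# Placeholder API key; kwargs skip the interactive prompt entirely.
if login(api_key='0123456789abcdef'):
    print('Credentials verified and saved.')
```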
test
interactive_login
Force an interactive login via the command line. Sets the global API key and updates the client auth.
solvebio/cli/auth.py
def interactive_login(): """ Force an interactive login via the command line. Sets the global API key and updates the client auth. """ solvebio.access_token = None solvebio.api_key = None client.set_token() domain, email, password = _ask_for_credentials() if not all([domain, email, password]): print("Domain, email, and password are all required.") return try: response = client.post('/v1/auth/token', { 'domain': domain.replace('.solvebio.com', ''), 'email': email, 'password': password }) except SolveError as e: print('Login failed: {0}'.format(e)) else: solvebio.api_key = response['token'] client.set_token()
def interactive_login(): """ Force an interactive login via the command line. Sets the global API key and updates the client auth. """ solvebio.access_token = None solvebio.api_key = None client.set_token() domain, email, password = _ask_for_credentials() if not all([domain, email, password]): print("Domain, email, and password are all required.") return try: response = client.post('/v1/auth/token', { 'domain': domain.replace('.solvebio.com', ''), 'email': email, 'password': password }) except SolveError as e: print('Login failed: {0}'.format(e)) else: solvebio.api_key = response['token'] client.set_token()
[ "Force", "an", "interactive", "login", "via", "the", "command", "line", ".", "Sets", "the", "global", "API", "key", "and", "updates", "the", "client", "auth", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/auth.py#L78-L102
[ "def", "interactive_login", "(", ")", ":", "solvebio", ".", "access_token", "=", "None", "solvebio", ".", "api_key", "=", "None", "client", ".", "set_token", "(", ")", "domain", ",", "email", ",", "password", "=", "_ask_for_credentials", "(", ")", "if", "not", "all", "(", "[", "domain", ",", "email", ",", "password", "]", ")", ":", "print", "(", "\"Domain, email, and password are all required.\"", ")", "return", "try", ":", "response", "=", "client", ".", "post", "(", "'/v1/auth/token'", ",", "{", "'domain'", ":", "domain", ".", "replace", "(", "'.solvebio.com'", ",", "''", ")", ",", "'email'", ":", "email", ",", "'password'", ":", "password", "}", ")", "except", "SolveError", "as", "e", ":", "print", "(", "'Login failed: {0}'", ".", "format", "(", "e", ")", ")", "else", ":", "solvebio", ".", "api_key", "=", "response", "[", "'token'", "]", "client", ".", "set_token", "(", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
whoami
Prints information about the current user. Assumes the user is already logged-in.
solvebio/cli/auth.py
def whoami(*args, **kwargs): """ Prints information about the current user. Assumes the user is already logged-in. """ user = client.whoami() if user: print_user(user) else: print('You are not logged-in.')
def whoami(*args, **kwargs): """ Prints information about the current user. Assumes the user is already logged-in. """ user = client.whoami() if user: print_user(user) else: print('You are not logged-in.')
[ "Prints", "information", "about", "the", "current", "user", ".", "Assumes", "the", "user", "is", "already", "logged", "-", "in", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/auth.py#L116-L126
[ "def", "whoami", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "user", "=", "client", ".", "whoami", "(", ")", "if", "user", ":", "print_user", "(", "user", ")", "else", ":", "print", "(", "'You are not logged-in.'", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
print_user
Prints information about the current user.
solvebio/cli/auth.py
def print_user(user): """ Prints information about the current user. """ email = user['email'] domain = user['account']['domain'] role = user['role'] print('You are logged-in to the "{0}" domain ' 'as {1} with role {2}.' .format(domain, email, role))
def print_user(user): """ Prints information about the current user. """ email = user['email'] domain = user['account']['domain'] role = user['role'] print('You are logged-in to the "{0}" domain ' 'as {1} with role {2}.' .format(domain, email, role))
[ "Prints", "information", "about", "the", "current", "user", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/auth.py#L129-L138
[ "def", "print_user", "(", "user", ")", ":", "email", "=", "user", "[", "'email'", "]", "domain", "=", "user", "[", "'account'", "]", "[", "'domain'", "]", "role", "=", "user", "[", "'role'", "]", "print", "(", "'You are logged-in to the \"{0}\" domain '", "'as {1} with role {2}.'", ".", "format", "(", "domain", ",", "email", ",", "role", ")", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
GenomicFilter.from_string
Handles UCSC-style range queries (chr1:100-200)
solvebio/query.py
def from_string(cls, string, exact=False): """ Handles UCSC-style range queries (chr1:100-200) """ try: chromosome, pos = string.split(':') except ValueError: raise ValueError('Please use UCSC-style format: "chr2:1000-2000"') if '-' in pos: start, stop = pos.replace(',', '').split('-') else: start = stop = pos.replace(',', '') return cls(chromosome, start, stop, exact=exact)
def from_string(cls, string, exact=False): """ Handles UCSC-style range queries (chr1:100-200) """ try: chromosome, pos = string.split(':') except ValueError: raise ValueError('Please use UCSC-style format: "chr2:1000-2000"') if '-' in pos: start, stop = pos.replace(',', '').split('-') else: start = stop = pos.replace(',', '') return cls(chromosome, start, stop, exact=exact)
[ "Handles", "UCSC", "-", "style", "range", "queries", "(", "chr1", ":", "100", "-", "200", ")" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/query.py#L160-L174
[ "def", "from_string", "(", "cls", ",", "string", ",", "exact", "=", "False", ")", ":", "try", ":", "chromosome", ",", "pos", "=", "string", ".", "split", "(", "':'", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Please use UCSC-style format: \"chr2:1000-2000\"'", ")", "if", "'-'", "in", "pos", ":", "start", ",", "stop", "=", "pos", ".", "replace", "(", "','", ",", "''", ")", ".", "split", "(", "'-'", ")", "else", ":", "start", "=", "stop", "=", "pos", ".", "replace", "(", "','", ",", "''", ")", "return", "cls", "(", "chromosome", ",", "start", ",", "stop", ",", "exact", "=", "exact", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
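Worked examples of the UCSC-style parsing above, covering the range, single-position, and error branches.

```python
from solvebio.query import GenomicFilter

# Range query; commas in positions are stripped before splitting.
f1 = GenomicFilter.from_string('chr2:1,000-2,000')

# Single position: start and stop are both set to the same value.
f2 = GenomicFilter.from_string('chr2:1000', exact=True)

# A string without a colon raises ValueError with a format hint.
try:
    GenomicFilter.from_string('chr2')
except ValueError as err:
    print(err)
```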
test
Query.filter
Returns this Query instance with the given query args combined with the existing filter set using AND. kwargs are simply passed to a new Filter object and combined with any other filters using AND. By default, everything is combined using AND. If you provide multiple filters in a single filter call, those are ANDed together. If you provide multiple filters in multiple filter calls, those are ANDed together. If you want something different, use the F class, which supports ``&`` (and), ``|`` (or) and ``~`` (not) operators. Then call filter once with the resulting Filter instance.
solvebio/query.py
def filter(self, *filters, **kwargs): """ Returns this Query instance with the query args combined with existing set with AND. kwargs are simply passed to a new Filter object and combined to any other filters with AND. By default, everything is combined using AND. If you provide multiple filters in a single filter call, those are ANDed together. If you provide multiple filters in multiple filter calls, those are ANDed together. If you want something different, use the F class which supports ``&`` (and), ``|`` (or) and ``~`` (not) operators. Then call filter once with the resulting Filter instance. """ f = list(filters) if kwargs: f += [Filter(**kwargs)] return self._clone(filters=f)
def filter(self, *filters, **kwargs): """ Returns this Query instance with the query args combined with existing set with AND. kwargs are simply passed to a new Filter object and combined to any other filters with AND. By default, everything is combined using AND. If you provide multiple filters in a single filter call, those are ANDed together. If you provide multiple filters in multiple filter calls, those are ANDed together. If you want something different, use the F class which supports ``&`` (and), ``|`` (or) and ``~`` (not) operators. Then call filter once with the resulting Filter instance. """ f = list(filters) if kwargs: f += [Filter(**kwargs)] return self._clone(filters=f)
[ "Returns", "this", "Query", "instance", "with", "the", "query", "args", "combined", "with", "existing", "set", "with", "AND", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/query.py#L340-L362
[ "def", "filter", "(", "self", ",", "*", "filters", ",", "*", "*", "kwargs", ")", ":", "f", "=", "list", "(", "filters", ")", "if", "kwargs", ":", "f", "+=", "[", "Filter", "(", "*", "*", "kwargs", ")", "]", "return", "self", ".", "_clone", "(", "filters", "=", "f", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
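A sketch of the AND semantics described in the docstring. The dataset path and field names are hypothetical, Dataset.get_by_full_path is assumed to mirror the accessor seen on Vault and Object in this corpus, and the operator composition assumes solvebio's Filter class supports the &, | and ~ operators the docstring attributes to F.

```python
from solvebio import Dataset
from solvebio.query import Filter

ds = Dataset.get_by_full_path('solvebio:public:/ClinVar/variants')  # hypothetical

# kwargs in a single call are ANDed together.
q1 = ds.query().filter(gene='BRCA2', clinical_significance='Pathogenic')

# For OR / NOT, compose Filter objects first, then call filter() once.
f = Filter(gene='BRCA1') | Filter(gene='BRCA2')
q2 = ds.query().filter(~f)  # everything outside BRCA1/BRCA2
```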
test
Query.range
Shortcut to do range filters on genomic datasets.
solvebio/query.py
def range(self, chromosome, start, stop, exact=False): """ Shortcut to do range filters on genomic datasets. """ return self._clone( filters=[GenomicFilter(chromosome, start, stop, exact)])
def range(self, chromosome, start, stop, exact=False): """ Shortcut to do range filters on genomic datasets. """ return self._clone( filters=[GenomicFilter(chromosome, start, stop, exact)])
[ "Shortcut", "to", "do", "range", "filters", "on", "genomic", "datasets", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/query.py#L364-L369
[ "def", "range", "(", "self", ",", "chromosome", ",", "start", ",", "stop", ",", "exact", "=", "False", ")", ":", "return", "self", ".", "_clone", "(", "filters", "=", "[", "GenomicFilter", "(", "chromosome", ",", "start", ",", "stop", ",", "exact", ")", "]", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
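A minimal sketch of the range shortcut against the same hypothetical dataset.

```python
from solvebio import Dataset

ds = Dataset.get_by_full_path('solvebio:public:/ClinVar/variants')  # hypothetical

# Records overlapping chr1:100,000-200,000.
q = ds.query().range('chr1', 100000, 200000)

# exact=True requires coordinates to match the range exactly.
q_exact = ds.query().range('chr1', 100000, 200000, exact=True)
```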
test
Query.position
Shortcut to do a single position filter on genomic datasets.
solvebio/query.py
def position(self, chromosome, position, exact=False): """ Shortcut to do a single position filter on genomic datasets. """ return self._clone( filters=[GenomicFilter(chromosome, position, exact=exact)])
def position(self, chromosome, position, exact=False): """ Shortcut to do a single position filter on genomic datasets. """ return self._clone( filters=[GenomicFilter(chromosome, position, exact=exact)])
[ "Shortcut", "to", "do", "a", "single", "position", "filter", "on", "genomic", "datasets", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/query.py#L371-L376
[ "def", "position", "(", "self", ",", "chromosome", ",", "position", ",", "exact", "=", "False", ")", ":", "return", "self", ".", "_clone", "(", "filters", "=", "[", "GenomicFilter", "(", "chromosome", ",", "position", ",", "exact", "=", "exact", ")", "]", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
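The single-position counterpart, again with a hypothetical dataset and coordinate.

```python
from solvebio import Dataset

ds = Dataset.get_by_full_path('solvebio:public:/ClinVar/variants')  # hypothetical

# Records at one coordinate; exact=True excludes merely overlapping records.
q = ds.query().position('chr7', 140453136, exact=True)
```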
test
Query.facets
Returns a dictionary with the requested facets. The facets function supports string args, and keyword args. q.facets('field_1', 'field_2') will return facets for field_1 and field_2. q.facets(field_1={'limit': 0}, field_2={'limit': 10}) will return all facets for field_1 and 10 facets for field_2.
solvebio/query.py
def facets(self, *args, **kwargs): """ Returns a dictionary with the requested facets. The facets function supports string args, and keyword args. q.facets('field_1', 'field_2') will return facets for field_1 and field_2. q.facets(field_1={'limit': 0}, field_2={'limit': 10}) will return all facets for field_1 and 10 facets for field_2. """ # Combine args and kwargs into facet format. facets = dict((a, {}) for a in args) facets.update(kwargs) if not facets: raise AttributeError('Faceting requires at least one field') for f in facets.keys(): if not isinstance(f, six.string_types): raise AttributeError('Facet field arguments must be strings') q = self._clone() q._limit = 0 q.execute(offset=0, facets=facets) return q._response.get('facets')
def facets(self, *args, **kwargs): """ Returns a dictionary with the requested facets. The facets function supports string args, and keyword args. q.facets('field_1', 'field_2') will return facets for field_1 and field_2. q.facets(field_1={'limit': 0}, field_2={'limit': 10}) will return all facets for field_1 and 10 facets for field_2. """ # Combine args and kwargs into facet format. facets = dict((a, {}) for a in args) facets.update(kwargs) if not facets: raise AttributeError('Faceting requires at least one field') for f in facets.keys(): if not isinstance(f, six.string_types): raise AttributeError('Facet field arguments must be strings') q = self._clone() q._limit = 0 q.execute(offset=0, facets=facets) return q._response.get('facets')
[ "Returns", "a", "dictionary", "with", "the", "requested", "facets", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/query.py#L389-L415
[ "def", "facets", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Combine args and kwargs into facet format.", "facets", "=", "dict", "(", "(", "a", ",", "{", "}", ")", "for", "a", "in", "args", ")", "facets", ".", "update", "(", "kwargs", ")", "if", "not", "facets", ":", "raise", "AttributeError", "(", "'Faceting requires at least one field'", ")", "for", "f", "in", "facets", ".", "keys", "(", ")", ":", "if", "not", "isinstance", "(", "f", ",", "six", ".", "string_types", ")", ":", "raise", "AttributeError", "(", "'Facet field arguments must be strings'", ")", "q", "=", "self", ".", "_clone", "(", ")", "q", ".", "_limit", "=", "0", "q", ".", "execute", "(", "offset", "=", "0", ",", "facets", "=", "facets", ")", "return", "q", ".", "_response", ".", "get", "(", "'facets'", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
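Both calling styles from the docstring, against the same hypothetical dataset. Note that facets() executes immediately (the clone's limit is forced to 0) and returns the facets dict rather than a Query.

```python
from solvebio import Dataset

ds = Dataset.get_by_full_path('solvebio:public:/ClinVar/variants')  # hypothetical
q = ds.query()

# String args request default facets per field.
print(q.facets('gene', 'clinical_significance'))

# Keyword args forward per-field options to the API.
print(q.facets(gene={'limit': 10}))
```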
test
Query._process_filters
Takes a list of filters and returns JSON :Parameters: - `filters`: List of Filters, (key, val) tuples, or dicts Returns: List of JSON API filters
solvebio/query.py
def _process_filters(cls, filters): """Takes a list of filters and returns JSON :Parameters: - `filters`: List of Filters, (key, val) tuples, or dicts Returns: List of JSON API filters """ data = [] # Filters should always be a list for f in filters: if isinstance(f, Filter): if f.filters: data.extend(cls._process_filters(f.filters)) elif isinstance(f, dict): key = list(f.keys())[0] val = f[key] if isinstance(val, dict): # pass val (a dict) as list # so that it gets processed properly filter_filters = cls._process_filters([val]) if len(filter_filters) == 1: filter_filters = filter_filters[0] data.append({key: filter_filters}) else: data.append({key: cls._process_filters(val)}) else: data.extend((f,)) return data
def _process_filters(cls, filters): """Takes a list of filters and returns JSON :Parameters: - `filters`: List of Filters, (key, val) tuples, or dicts Returns: List of JSON API filters """ data = [] # Filters should always be a list for f in filters: if isinstance(f, Filter): if f.filters: data.extend(cls._process_filters(f.filters)) elif isinstance(f, dict): key = list(f.keys())[0] val = f[key] if isinstance(val, dict): # pass val (a dict) as list # so that it gets processed properly filter_filters = cls._process_filters([val]) if len(filter_filters) == 1: filter_filters = filter_filters[0] data.append({key: filter_filters}) else: data.append({key: cls._process_filters(val)}) else: data.extend((f,)) return data
[ "Takes", "a", "list", "of", "filters", "and", "returns", "JSON" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/query.py#L442-L473
[ "def", "_process_filters", "(", "cls", ",", "filters", ")", ":", "data", "=", "[", "]", "# Filters should always be a list", "for", "f", "in", "filters", ":", "if", "isinstance", "(", "f", ",", "Filter", ")", ":", "if", "f", ".", "filters", ":", "data", ".", "extend", "(", "cls", ".", "_process_filters", "(", "f", ".", "filters", ")", ")", "elif", "isinstance", "(", "f", ",", "dict", ")", ":", "key", "=", "list", "(", "f", ".", "keys", "(", ")", ")", "[", "0", "]", "val", "=", "f", "[", "key", "]", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "# pass val (a dict) as list", "# so that it gets processed properly", "filter_filters", "=", "cls", ".", "_process_filters", "(", "[", "val", "]", ")", "if", "len", "(", "filter_filters", ")", "==", "1", ":", "filter_filters", "=", "filter_filters", "[", "0", "]", "data", ".", "append", "(", "{", "key", ":", "filter_filters", "}", ")", "else", ":", "data", ".", "append", "(", "{", "key", ":", "cls", ".", "_process_filters", "(", "val", ")", "}", ")", "else", ":", "data", ".", "extend", "(", "(", "f", ",", ")", ")", "return", "data" ]
b29614643043afd19c1d8074e8f25c6700d51a73
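A rough illustration of the recursive flattening above, assuming _process_filters is exposed as the classmethod its cls parameter implies. The printed structure is indicative only; the exact connective keys come from how Filter composes its internal list.

```python
from solvebio.query import Filter, Query

f = Filter(gene='BRCA2') & ~Filter(status='benign')
print(Query._process_filters([f]))
# Indicative output:
# [{'and': [{'gene': 'BRCA2'}, {'not': {'status': 'benign'}}]}]
```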
test
Query.next
Allows the Query object to be an iterable. This method will iterate through a cached result set and fetch successive pages as required. A `StopIteration` exception will be raised when there aren't any more results available or when the requested result slice range or limit has been fetched. Returns: The next result.
solvebio/query.py
def next(self): """ Allows the Query object to be an iterable. This method will iterate through a cached result set and fetch successive pages as required. A `StopIteration` exception will be raised when there aren't any more results available or when the requested result slice range or limit has been fetched. Returns: The next result. """ if not hasattr(self, '_cursor'): # Iterator not initialized yet self.__iter__() # len(self) returns `min(limit, total)` results if self._cursor == len(self): raise StopIteration() if self._buffer_idx == len(self._buffer): self.execute(self._page_offset + self._buffer_idx) self._buffer_idx = 0 self._cursor += 1 self._buffer_idx += 1 return self._buffer[self._buffer_idx - 1]
def next(self): """ Allows the Query object to be an iterable. This method will iterate through a cached result set and fetch successive pages as required. A `StopIteration` exception will be raised when there aren't any more results available or when the requested result slice range or limit has been fetched. Returns: The next result. """ if not hasattr(self, '_cursor'): # Iterator not initialized yet self.__iter__() # len(self) returns `min(limit, total)` results if self._cursor == len(self): raise StopIteration() if self._buffer_idx == len(self._buffer): self.execute(self._page_offset + self._buffer_idx) self._buffer_idx = 0 self._cursor += 1 self._buffer_idx += 1 return self._buffer[self._buffer_idx - 1]
[ "Allows", "the", "Query", "object", "to", "be", "an", "iterable", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/query.py#L590-L617
[ "def", "next", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_cursor'", ")", ":", "# Iterator not initialized yet", "self", ".", "__iter__", "(", ")", "# len(self) returns `min(limit, total)` results", "if", "self", ".", "_cursor", "==", "len", "(", "self", ")", ":", "raise", "StopIteration", "(", ")", "if", "self", ".", "_buffer_idx", "==", "len", "(", "self", ".", "_buffer", ")", ":", "self", ".", "execute", "(", "self", ".", "_page_offset", "+", "self", ".", "_buffer_idx", ")", "self", ".", "_buffer_idx", "=", "0", "self", ".", "_cursor", "+=", "1", "self", ".", "_buffer_idx", "+=", "1", "return", "self", ".", "_buffer", "[", "self", ".", "_buffer_idx", "-", "1", "]" ]
b29614643043afd19c1d8074e8f25c6700d51a73
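How the iterator protocol above plays out in practice: iteration stops at min(limit, total), and a new page is fetched whenever the buffer is exhausted. The dataset and field are hypothetical.

```python
from solvebio import Dataset

ds = Dataset.get_by_full_path('solvebio:public:/ClinVar/variants')  # hypothetical

# At most 25 records are yielded, paging behind the scenes.
for record in ds.query(limit=25):
    print(record['gene'])  # hypothetical field
```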
test
Query.execute
Executes a query. Additional query parameters can be passed as keyword arguments. Returns: The request parameters and the raw query response.
solvebio/query.py
def execute(self, offset=0, **query): """ Executes a query. Additional query parameters can be passed as keyword arguments. Returns: The request parameters and the raw query response. """ _params = self._build_query(**query) self._page_offset = offset _params.update( offset=self._page_offset, limit=min(self._page_size, self._limit) ) logger.debug('executing query. from/limit: %6d/%d' % (_params['offset'], _params['limit'])) # If the request results in a SolveError (ie bad filter) set the error. try: self._response = self._client.post(self._data_url, _params) except SolveError as e: self._error = e raise logger.debug('query response took: %(took)d ms, total: %(total)d' % self._response) return _params, self._response
def execute(self, offset=0, **query): """ Executes a query. Additional query parameters can be passed as keyword arguments. Returns: The request parameters and the raw query response. """ _params = self._build_query(**query) self._page_offset = offset _params.update( offset=self._page_offset, limit=min(self._page_size, self._limit) ) logger.debug('executing query. from/limit: %6d/%d' % (_params['offset'], _params['limit'])) # If the request results in a SolveError (ie bad filter) set the error. try: self._response = self._client.post(self._data_url, _params) except SolveError as e: self._error = e raise logger.debug('query response took: %(took)d ms, total: %(total)d' % self._response) return _params, self._response
[ "Executes", "a", "query", ".", "Additional", "query", "parameters", "can", "be", "passed", "as", "keyword", "arguments", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/query.py#L656-L683
[ "def", "execute", "(", "self", ",", "offset", "=", "0", ",", "*", "*", "query", ")", ":", "_params", "=", "self", ".", "_build_query", "(", "*", "*", "query", ")", "self", ".", "_page_offset", "=", "offset", "_params", ".", "update", "(", "offset", "=", "self", ".", "_page_offset", ",", "limit", "=", "min", "(", "self", ".", "_page_size", ",", "self", ".", "_limit", ")", ")", "logger", ".", "debug", "(", "'executing query. from/limit: %6d/%d'", "%", "(", "_params", "[", "'offset'", "]", ",", "_params", "[", "'limit'", "]", ")", ")", "# If the request results in a SolveError (ie bad filter) set the error.", "try", ":", "self", ".", "_response", "=", "self", ".", "_client", ".", "post", "(", "self", ".", "_data_url", ",", "_params", ")", "except", "SolveError", "as", "e", ":", "self", ".", "_error", "=", "e", "raise", "logger", ".", "debug", "(", "'query response took: %(took)d ms, total: %(total)d'", "%", "self", ".", "_response", ")", "return", "_params", ",", "self", ".", "_response" ]
b29614643043afd19c1d8074e8f25c6700d51a73
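Calling execute() directly, as a sketch. The response keys total and took are grounded in the debug logging above; the dataset path is hypothetical.

```python
from solvebio import Dataset

ds = Dataset.get_by_full_path('solvebio:public:/ClinVar/variants')  # hypothetical
q = ds.query()

params, response = q.execute(offset=100)
print(params['offset'], params['limit'])    # the page actually requested
print(response['total'], response['took'])  # totals and server-side timing
```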
test
Query.migrate
Migrate the data from the Query to a target dataset. Valid optional kwargs include: * target_fields * include_errors * validation_params * metadata * commit_mode
solvebio/query.py
def migrate(self, target, follow=True, **kwargs): """ Migrate the data from the Query to a target dataset. Valid optional kwargs include: * target_fields * include_errors * validation_params * metadata * commit_mode """ from solvebio import Dataset from solvebio import DatasetMigration # Target can be provided as a Dataset, or as an ID. if isinstance(target, Dataset): target_id = target.id else: target_id = target # If a limit is set in the Query and not overridden here, use it. limit = kwargs.pop('limit', None) if not limit and self._limit < float('inf'): limit = self._limit # Build the source_params params = self._build_query(limit=limit) params.pop('offset', None) params.pop('ordering', None) migration = DatasetMigration.create( source_id=self._dataset_id, target_id=target_id, source_params=params, client=self._client, **kwargs) if follow: migration.follow() return migration
def migrate(self, target, follow=True, **kwargs): """ Migrate the data from the Query to a target dataset. Valid optional kwargs include: * target_fields * include_errors * validation_params * metadata * commit_mode """ from solvebio import Dataset from solvebio import DatasetMigration # Target can be provided as a Dataset, or as an ID. if isinstance(target, Dataset): target_id = target.id else: target_id = target # If a limit is set in the Query and not overridden here, use it. limit = kwargs.pop('limit', None) if not limit and self._limit < float('inf'): limit = self._limit # Build the source_params params = self._build_query(limit=limit) params.pop('offset', None) params.pop('ordering', None) migration = DatasetMigration.create( source_id=self._dataset_id, target_id=target_id, source_params=params, client=self._client, **kwargs) if follow: migration.follow() return migration
[ "Migrate", "the", "data", "from", "the", "Query", "to", "a", "target", "dataset", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/query.py#L711-L753
[ "def", "migrate", "(", "self", ",", "target", ",", "follow", "=", "True", ",", "*", "*", "kwargs", ")", ":", "from", "solvebio", "import", "Dataset", "from", "solvebio", "import", "DatasetMigration", "# Target can be provided as a Dataset, or as an ID.", "if", "isinstance", "(", "target", ",", "Dataset", ")", ":", "target_id", "=", "target", ".", "id", "else", ":", "target_id", "=", "target", "# If a limit is set in the Query and not overridden here, use it.", "limit", "=", "kwargs", ".", "pop", "(", "'limit'", ",", "None", ")", "if", "not", "limit", "and", "self", ".", "_limit", "<", "float", "(", "'inf'", ")", ":", "limit", "=", "self", ".", "_limit", "# Build the source_params", "params", "=", "self", ".", "_build_query", "(", "limit", "=", "limit", ")", "params", ".", "pop", "(", "'offset'", ",", "None", ")", "params", ".", "pop", "(", "'ordering'", ",", "None", ")", "migration", "=", "DatasetMigration", ".", "create", "(", "source_id", "=", "self", ".", "_dataset_id", ",", "target_id", "=", "target_id", ",", "source_params", "=", "params", ",", "client", "=", "self", ".", "_client", ",", "*", "*", "kwargs", ")", "if", "follow", ":", "migration", ".", "follow", "(", ")", "return", "migration" ]
b29614643043afd19c1d8074e8f25c6700d51a73
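A sketch of moving filtered records between datasets. Both paths and the field are hypothetical; commit_mode is one of the optional kwargs the docstring lists, and follow=True blocks until the migration finishes.

```python
from solvebio import Dataset

source = Dataset.get_by_full_path('acme:research:/variants-v1')  # hypothetical
target = Dataset.get_by_full_path('acme:research:/variants-v2')  # hypothetical

migration = (source.query()
             .filter(clinical_significance='Pathogenic')
             .migrate(target, follow=True, commit_mode='append'))
```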
test
login
Sets up the auth credentials using the provided key/token, or checks the credentials file (if no token provided). Lookup order: 1. access_token 2. api_key 3. local credentials No errors are raised if no key is found.
solvebio/__init__.py
def login(**kwargs): """ Sets up the auth credentials using the provided key/token, or checks the credentials file (if no token provided). Lookup order: 1. access_token 2. api_key 3. local credentials No errors are raised if no key is found. """ from .cli.auth import get_credentials global access_token, api_key, api_host # Clear any existing auth keys access_token, api_key = None, None # Update the host api_host = kwargs.get('api_host') or api_host if kwargs.get('access_token'): access_token = kwargs.get('access_token') elif kwargs.get('api_key'): api_key = kwargs.get('api_key') else: api_key = get_credentials() if not (api_key or access_token): print('No credentials found. Requests to SolveBio may fail.') else: from solvebio.client import client # Update the client host and token client.set_host() client.set_token()
def login(**kwargs): """ Sets up the auth credentials using the provided key/token, or checks the credentials file (if no token provided). Lookup order: 1. access_token 2. api_key 3. local credentials No errors are raised if no key is found. """ from .cli.auth import get_credentials global access_token, api_key, api_host # Clear any existing auth keys access_token, api_key = None, None # Update the host api_host = kwargs.get('api_host') or api_host if kwargs.get('access_token'): access_token = kwargs.get('access_token') elif kwargs.get('api_key'): api_key = kwargs.get('api_key') else: api_key = get_credentials() if not (api_key or access_token): print('No credentials found. Requests to SolveBio may fail.') else: from solvebio.client import client # Update the client host and token client.set_host() client.set_token()
[ "Sets", "up", "the", "auth", "credentials", "using", "the", "provided", "key", "/", "token", "or", "checks", "the", "credentials", "file", "(", "if", "no", "token", "provided", ")", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/__init__.py#L120-L153
[ "def", "login", "(", "*", "*", "kwargs", ")", ":", "from", ".", "cli", ".", "auth", "import", "get_credentials", "global", "access_token", ",", "api_key", ",", "api_host", "# Clear any existing auth keys", "access_token", ",", "api_key", "=", "None", ",", "None", "# Update the host", "api_host", "=", "kwargs", ".", "get", "(", "'api_host'", ")", "or", "api_host", "if", "kwargs", ".", "get", "(", "'access_token'", ")", ":", "access_token", "=", "kwargs", ".", "get", "(", "'access_token'", ")", "elif", "kwargs", ".", "get", "(", "'api_key'", ")", ":", "api_key", "=", "kwargs", ".", "get", "(", "'api_key'", ")", "else", ":", "api_key", "=", "get_credentials", "(", ")", "if", "not", "(", "api_key", "or", "access_token", ")", ":", "print", "(", "'No credentials found. Requests to SolveBio may fail.'", ")", "else", ":", "from", "solvebio", ".", "client", "import", "client", "# Update the client host and token", "client", ".", "set_host", "(", ")", "client", ".", "set_token", "(", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
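The three lookup tiers from the docstring, spelled out; the token values are placeholders.

```python
import solvebio

solvebio.login(access_token='tok_abc')  # 1) an OAuth2 access token wins
solvebio.login(api_key='key_abc')       # 2) then an explicit API key
solvebio.login()                        # 3) else the local credentials file

# api_host may be overridden in the same call.
solvebio.login(api_key='key_abc', api_host='https://api.solvebio.com')
```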
test
main
Main entry point for SolveBio CLI
solvebio/cli/main.py
def main(argv=sys.argv[1:]): """ Main entry point for SolveBio CLI """ parser = SolveArgumentParser() args = parser.parse_solvebio_args(argv) if args.api_host: solvebio.api_host = args.api_host if args.api_key: solvebio.api_key = args.api_key if not solvebio.api_key: # If nothing is set (via command line or environment) # look in local credentials try: from .credentials import get_credentials solvebio.api_key = get_credentials() except: pass # Update the client host and token client.set_host() client.set_token() return args.func(args)
def main(argv=sys.argv[1:]): """ Main entry point for SolveBio CLI """ parser = SolveArgumentParser() args = parser.parse_solvebio_args(argv) if args.api_host: solvebio.api_host = args.api_host if args.api_key: solvebio.api_key = args.api_key if not solvebio.api_key: # If nothing is set (via command line or environment) # look in local credentials try: from .credentials import get_credentials solvebio.api_key = get_credentials() except: pass # Update the client host and token client.set_host() client.set_token() return args.func(args)
[ "Main", "entry", "point", "for", "SolveBio", "CLI" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/main.py#L285-L309
[ "def", "main", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", ":", "parser", "=", "SolveArgumentParser", "(", ")", "args", "=", "parser", ".", "parse_solvebio_args", "(", "argv", ")", "if", "args", ".", "api_host", ":", "solvebio", ".", "api_host", "=", "args", ".", "api_host", "if", "args", ".", "api_key", ":", "solvebio", ".", "api_key", "=", "args", ".", "api_key", "if", "not", "solvebio", ".", "api_key", ":", "# If nothing is set (via command line or environment)", "# look in local credentials", "try", ":", "from", ".", "credentials", "import", "get_credentials", "solvebio", ".", "api_key", "=", "get_credentials", "(", ")", "except", ":", "pass", "# Update the client host and token", "client", ".", "set_host", "(", ")", "client", ".", "set_token", "(", ")", "return", "args", ".", "func", "(", "args", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
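Programmatic use of the CLI entry point, assuming 'whoami' is registered as a subcommand (matching the whoami handler earlier in this corpus).

```python
from solvebio.cli.main import main

# Equivalent to running `solvebio whoami` in a shell.
main(['whoami'])
```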
test
SolveArgumentParser._add_subcommands
The _add_subcommands method must be separate from the __init__ method, as infinite recursion will occur otherwise, due to the fact that the __init__ method itself will be called when instantiating a subparser, as we do below
solvebio/cli/main.py
def _add_subcommands(self): """ The _add_subcommands method must be separate from the __init__ method, as infinite recursion will occur otherwise, due to the fact that the __init__ method itself will be called when instantiating a subparser, as we do below """ subcmd_params = { 'title': 'SolveBio Commands', 'dest': 'subcommands' } subcmd = self.add_subparsers( **subcmd_params) # pylint: disable=star-args subcommands = copy.deepcopy(self.subcommands) for name, params in subcommands.items(): p = subcmd.add_parser(name, help=params['help']) p.set_defaults(func=params['func']) for arg in params.get('arguments', []): name_or_flags = arg.pop('name', None) or arg.pop('flags', None) p.add_argument(name_or_flags, **arg)
def _add_subcommands(self): """ The _add_subcommands method must be separate from the __init__ method, as infinite recursion will occur otherwise, due to the fact that the __init__ method itself will be called when instantiating a subparser, as we do below """ subcmd_params = { 'title': 'SolveBio Commands', 'dest': 'subcommands' } subcmd = self.add_subparsers( **subcmd_params) # pylint: disable=star-args subcommands = copy.deepcopy(self.subcommands) for name, params in subcommands.items(): p = subcmd.add_parser(name, help=params['help']) p.set_defaults(func=params['func']) for arg in params.get('arguments', []): name_or_flags = arg.pop('name', None) or arg.pop('flags', None) p.add_argument(name_or_flags, **arg)
[ "The", "_add_subcommands", "method", "must", "be", "separate", "from", "the", "__init__", "method", "as", "infinite", "recursion", "will", "occur", "otherwise", "due", "to", "the", "fact", "that", "the", "__init__", "method", "itself", "will", "be", "called", "when", "instantiating", "a", "subparser", "as", "we", "do", "below" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/main.py#L232-L252
[ "def", "_add_subcommands", "(", "self", ")", ":", "subcmd_params", "=", "{", "'title'", ":", "'SolveBio Commands'", ",", "'dest'", ":", "'subcommands'", "}", "subcmd", "=", "self", ".", "add_subparsers", "(", "*", "*", "subcmd_params", ")", "# pylint: disable=star-args", "subcommands", "=", "copy", ".", "deepcopy", "(", "self", ".", "subcommands", ")", "for", "name", ",", "params", "in", "subcommands", ".", "items", "(", ")", ":", "p", "=", "subcmd", ".", "add_parser", "(", "name", ",", "help", "=", "params", "[", "'help'", "]", ")", "p", ".", "set_defaults", "(", "func", "=", "params", "[", "'func'", "]", ")", "for", "arg", "in", "params", ".", "get", "(", "'arguments'", ",", "[", "]", ")", ":", "name_or_flags", "=", "arg", ".", "pop", "(", "'name'", ",", "None", ")", "or", "arg", ".", "pop", "(", "'flags'", ",", "None", ")", "p", ".", "add_argument", "(", "name_or_flags", ",", "*", "*", "arg", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
SolveArgumentParser.parse_solvebio_args
Try to parse the args first, and then add the subparsers. We want to do this so that we can check whether there are any unknown args. If, by this point, there are no unknown args, we can prepend 'shell' to the args as the default subcommand. However, to do this, we have to suppress stdout/stderr during the initial parsing, in case the user calls the help method (in which case we want to add the additional arguments and *then* call the help method). This is a hack to get around the fact that argparse doesn't allow default subcommands.
solvebio/cli/main.py
def parse_solvebio_args(self, args=None, namespace=None): """ Try to parse the args first, and then add the subparsers. We want to do this so that we can check to see if there are any unknown args. We can assume that if, by this point, there are no unknown args, we can append shell to the unknown args as a default. However, to do this, we have to suppress stdout/stderr during the initial parsing, in case the user calls the help method (in which case we want to add the additional arguments and *then* call the help method. This is a hack to get around the fact that argparse doesn't allow default subcommands. """ try: sys.stdout = sys.stderr = open(os.devnull, 'w') _, unknown_args = self.parse_known_args(args, namespace) if not unknown_args: args.insert(0, 'shell') except SystemExit: pass finally: sys.stdout.flush() sys.stderr.flush() sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__ self._add_subcommands() return super(SolveArgumentParser, self).parse_args(args, namespace)
def parse_solvebio_args(self, args=None, namespace=None): """ Try to parse the args first, and then add the subparsers. We want to do this so that we can check to see if there are any unknown args. We can assume that if, by this point, there are no unknown args, we can append shell to the unknown args as a default. However, to do this, we have to suppress stdout/stderr during the initial parsing, in case the user calls the help method (in which case we want to add the additional arguments and *then* call the help method. This is a hack to get around the fact that argparse doesn't allow default subcommands. """ try: sys.stdout = sys.stderr = open(os.devnull, 'w') _, unknown_args = self.parse_known_args(args, namespace) if not unknown_args: args.insert(0, 'shell') except SystemExit: pass finally: sys.stdout.flush() sys.stderr.flush() sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__ self._add_subcommands() return super(SolveArgumentParser, self).parse_args(args, namespace)
[ "Try", "to", "parse", "the", "args", "first", "and", "then", "add", "the", "subparsers", ".", "We", "want", "to", "do", "this", "so", "that", "we", "can", "check", "to", "see", "if", "there", "are", "any", "unknown", "args", ".", "We", "can", "assume", "that", "if", "by", "this", "point", "there", "are", "no", "unknown", "args", "we", "can", "append", "shell", "to", "the", "unknown", "args", "as", "a", "default", ".", "However", "to", "do", "this", "we", "have", "to", "suppress", "stdout", "/", "stderr", "during", "the", "initial", "parsing", "in", "case", "the", "user", "calls", "the", "help", "method", "(", "in", "which", "case", "we", "want", "to", "add", "the", "additional", "arguments", "and", "*", "then", "*", "call", "the", "help", "method", ".", "This", "is", "a", "hack", "to", "get", "around", "the", "fact", "that", "argparse", "doesn", "t", "allow", "default", "subcommands", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/main.py#L254-L278
[ "def", "parse_solvebio_args", "(", "self", ",", "args", "=", "None", ",", "namespace", "=", "None", ")", ":", "try", ":", "sys", ".", "stdout", "=", "sys", ".", "stderr", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "_", ",", "unknown_args", "=", "self", ".", "parse_known_args", "(", "args", ",", "namespace", ")", "if", "not", "unknown_args", ":", "args", ".", "insert", "(", "0", ",", "'shell'", ")", "except", "SystemExit", ":", "pass", "finally", ":", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "sys", ".", "stdout", ",", "sys", ".", "stderr", "=", "sys", ".", "__stdout__", ",", "sys", ".", "__stderr__", "self", ".", "_add_subcommands", "(", ")", "return", "super", "(", "SolveArgumentParser", ",", "self", ")", ".", "parse_args", "(", "args", ",", "namespace", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
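Demonstrating the default-subcommand hack described above: with no recognizable arguments, 'shell' is prepended and becomes the parsed subcommand.

```python
from solvebio.cli.main import SolveArgumentParser

parser = SolveArgumentParser()
args = parser.parse_solvebio_args([])
print(args.subcommands)  # -> 'shell'
```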
test
download_vault_folder
Recursively downloads a folder in a vault to a local directory. Only downloads files, not datasets.
examples/download_vault_folder.py
def download_vault_folder(remote_path, local_path, dry_run=False, force=False): """Recursively downloads a folder in a vault to a local directory. Only downloads files, not datasets.""" local_path = os.path.normpath(os.path.expanduser(local_path)) if not os.access(local_path, os.W_OK): raise Exception( 'Write access to local path ({}) is required' .format(local_path)) full_path, path_dict = solvebio.Object.validate_full_path(remote_path) vault = solvebio.Vault.get_by_full_path(path_dict['vault']) print('Downloading all files from {} to {}'.format(full_path, local_path)) if path_dict['path'] == '/': parent_object_id = None else: parent_object = solvebio.Object.get_by_full_path( remote_path, assert_type='folder') parent_object_id = parent_object.id # Scan the folder for all sub-folders and create them locally print('Creating local directory structure at: {}'.format(local_path)) if not os.path.exists(local_path): if not dry_run: os.makedirs(local_path) folders = vault.folders(parent_object_id=parent_object_id) for f in folders: path = os.path.normpath(local_path + f.path) if not os.path.exists(path): print('Creating folder: {}'.format(path)) if not dry_run: os.makedirs(path) files = vault.files(parent_object_id=parent_object_id) for f in files: path = os.path.normpath(local_path + f.path) if os.path.exists(path): if force: # Delete the local copy print('Deleting local file (force download): {}'.format(path)) if not dry_run: os.remove(path) else: print('Skipping file (already exists): {}'.format(path)) continue print('Downloading file: {}'.format(path)) if not dry_run: f.download(path)
def download_vault_folder(remote_path, local_path, dry_run=False, force=False): """Recursively downloads a folder in a vault to a local directory. Only downloads files, not datasets.""" local_path = os.path.normpath(os.path.expanduser(local_path)) if not os.access(local_path, os.W_OK): raise Exception( 'Write access to local path ({}) is required' .format(local_path)) full_path, path_dict = solvebio.Object.validate_full_path(remote_path) vault = solvebio.Vault.get_by_full_path(path_dict['vault']) print('Downloading all files from {} to {}'.format(full_path, local_path)) if path_dict['path'] == '/': parent_object_id = None else: parent_object = solvebio.Object.get_by_full_path( remote_path, assert_type='folder') parent_object_id = parent_object.id # Scan the folder for all sub-folders and create them locally print('Creating local directory structure at: {}'.format(local_path)) if not os.path.exists(local_path): if not dry_run: os.makedirs(local_path) folders = vault.folders(parent_object_id=parent_object_id) for f in folders: path = os.path.normpath(local_path + f.path) if not os.path.exists(path): print('Creating folder: {}'.format(path)) if not dry_run: os.makedirs(path) files = vault.files(parent_object_id=parent_object_id) for f in files: path = os.path.normpath(local_path + f.path) if os.path.exists(path): if force: # Delete the local copy print('Deleting local file (force download): {}'.format(path)) if not dry_run: os.remove(path) else: print('Skipping file (already exists): {}'.format(path)) continue print('Downloading file: {}'.format(path)) if not dry_run: f.download(path)
[ "Recursively", "downloads", "a", "folder", "in", "a", "vault", "to", "a", "local", "directory", ".", "Only", "downloads", "files", "not", "datasets", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/examples/download_vault_folder.py#L6-L56
[ "def", "download_vault_folder", "(", "remote_path", ",", "local_path", ",", "dry_run", "=", "False", ",", "force", "=", "False", ")", ":", "local_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "expanduser", "(", "local_path", ")", ")", "if", "not", "os", ".", "access", "(", "local_path", ",", "os", ".", "W_OK", ")", ":", "raise", "Exception", "(", "'Write access to local path ({}) is required'", ".", "format", "(", "local_path", ")", ")", "full_path", ",", "path_dict", "=", "solvebio", ".", "Object", ".", "validate_full_path", "(", "remote_path", ")", "vault", "=", "solvebio", ".", "Vault", ".", "get_by_full_path", "(", "path_dict", "[", "'vault'", "]", ")", "print", "(", "'Downloading all files from {} to {}'", ".", "format", "(", "full_path", ",", "local_path", ")", ")", "if", "path_dict", "[", "'path'", "]", "==", "'/'", ":", "parent_object_id", "=", "None", "else", ":", "parent_object", "=", "solvebio", ".", "Object", ".", "get_by_full_path", "(", "remote_path", ",", "assert_type", "=", "'folder'", ")", "parent_object_id", "=", "parent_object", ".", "id", "# Scan the folder for all sub-folders and create them locally", "print", "(", "'Creating local directory structure at: {}'", ".", "format", "(", "local_path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "local_path", ")", ":", "if", "not", "dry_run", ":", "os", ".", "makedirs", "(", "local_path", ")", "folders", "=", "vault", ".", "folders", "(", "parent_object_id", "=", "parent_object_id", ")", "for", "f", "in", "folders", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "local_path", "+", "f", ".", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "print", "(", "'Creating folder: {}'", ".", "format", "(", "path", ")", ")", "if", "not", "dry_run", ":", "os", ".", "makedirs", "(", "path", ")", "files", "=", "vault", ".", "files", "(", "parent_object_id", "=", "parent_object_id", ")", "for", "f", "in", "files", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "local_path", "+", "f", ".", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "if", "force", ":", "# Delete the local copy", "print", "(", "'Deleting local file (force download): {}'", ".", "format", "(", "path", ")", ")", "if", "not", "dry_run", ":", "os", ".", "remove", "(", "path", ")", "else", ":", "print", "(", "'Skipping file (already exists): {}'", ".", "format", "(", "path", ")", ")", "continue", "print", "(", "'Downloading file: {}'", ".", "format", "(", "path", ")", ")", "if", "not", "dry_run", ":", "f", ".", "download", "(", "path", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
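Invocation sketch for the example script above; the vault path is hypothetical, and the local directory must already exist and be writable or the function raises.

```python
# Assumes download_vault_folder from the example above is in scope.
# dry_run=True prints the planned actions without touching disk.
download_vault_folder('acme:research:/reports', '~/reports', dry_run=True)

# force=True deletes and re-downloads files that already exist locally.
download_vault_folder('acme:research:/reports', '~/reports', force=True)
```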
test
SolveObject.construct_from
Used to create a new object from an HTTP response
solvebio/resource/solveobject.py
def construct_from(cls, values, **kwargs): """Used to create a new object from an HTTP response""" instance = cls(values.get(cls.ID_ATTR), **kwargs) instance.refresh_from(values) return instance
def construct_from(cls, values, **kwargs): """Used to create a new object from an HTTP response""" instance = cls(values.get(cls.ID_ATTR), **kwargs) instance.refresh_from(values) return instance
[ "Used", "to", "create", "a", "new", "object", "from", "an", "HTTP", "response" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/solveobject.py#L68-L72
[ "def", "construct_from", "(", "cls", ",", "values", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "cls", "(", "values", ".", "get", "(", "cls", ".", "ID_ATTR", ")", ",", "*", "*", "kwargs", ")", "instance", ".", "refresh_from", "(", "values", ")", "return", "instance" ]
b29614643043afd19c1d8074e8f25c6700d51a73
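A sketch of building a resource from a raw response payload. The payload is hypothetical, and ID_ATTR is assumed to be 'id' for this class.

```python
from solvebio import Object

payload = {'id': 42, 'filename': 'summary.csv'}  # hypothetical response body
obj = Object.construct_from(payload)
print(obj.id, obj.filename)
```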
test
SolveBioAuth.logout
Revoke the token and remove the cookie.
solvebio/contrib/dash/solvebio_auth.py
def logout(self): """Revoke the token and remove the cookie.""" if self._oauth_client_secret: try: oauth_token = flask.request.cookies[self.TOKEN_COOKIE_NAME] # Revoke the token requests.post( urljoin(self._api_host, self.OAUTH2_REVOKE_TOKEN_PATH), data={ 'client_id': self._oauth_client_id, 'client_secret': self._oauth_client_secret, 'token': oauth_token }) except: pass response = flask.redirect('/') self.clear_cookies(response) return response
def logout(self): """Revoke the token and remove the cookie.""" if self._oauth_client_secret: try: oauth_token = flask.request.cookies[self.TOKEN_COOKIE_NAME] # Revoke the token requests.post( urljoin(self._api_host, self.OAUTH2_REVOKE_TOKEN_PATH), data={ 'client_id': self._oauth_client_id, 'client_secret': self._oauth_client_secret, 'token': oauth_token }) except: pass response = flask.redirect('/') self.clear_cookies(response) return response
[ "Revoke", "the", "token", "and", "remove", "the", "cookie", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/contrib/dash/solvebio_auth.py#L185-L203
[ "def", "logout", "(", "self", ")", ":", "if", "self", ".", "_oauth_client_secret", ":", "try", ":", "oauth_token", "=", "flask", ".", "request", ".", "cookies", "[", "self", ".", "TOKEN_COOKIE_NAME", "]", "# Revoke the token", "requests", ".", "post", "(", "urljoin", "(", "self", ".", "_api_host", ",", "self", ".", "OAUTH2_REVOKE_TOKEN_PATH", ")", ",", "data", "=", "{", "'client_id'", ":", "self", ".", "_oauth_client_id", ",", "'client_secret'", ":", "self", ".", "_oauth_client_secret", ",", "'token'", ":", "oauth_token", "}", ")", "except", ":", "pass", "response", "=", "flask", ".", "redirect", "(", "'/'", ")", "self", ".", "clear_cookies", "(", "response", ")", "return", "response" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
launch_ipython_shell
Open the SolveBio shell (IPython wrapper)
solvebio/cli/ipython.py
def launch_ipython_shell(args): # pylint: disable=unused-argument """Open the SolveBio shell (IPython wrapper)""" try: import IPython # noqa except ImportError: _print("The SolveBio Python shell requires IPython.\n" "To install, type: 'pip install ipython'") return False if hasattr(IPython, "version_info"): if IPython.version_info > (5, 0, 0, ''): return launch_ipython_5_shell(args) _print("WARNING: Please upgrade IPython (you are running version: {})" .format(IPython.__version__)) return launch_ipython_legacy_shell(args)
def launch_ipython_shell(args): # pylint: disable=unused-argument """Open the SolveBio shell (IPython wrapper)""" try: import IPython # noqa except ImportError: _print("The SolveBio Python shell requires IPython.\n" "To install, type: 'pip install ipython'") return False if hasattr(IPython, "version_info"): if IPython.version_info > (5, 0, 0, ''): return launch_ipython_5_shell(args) _print("WARNING: Please upgrade IPython (you are running version: {})" .format(IPython.__version__)) return launch_ipython_legacy_shell(args)
[ "Open", "the", "SolveBio", "shell", "(", "IPython", "wrapper", ")" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/ipython.py#L14-L29
[ "def", "launch_ipython_shell", "(", "args", ")", ":", "# pylint: disable=unused-argument", "try", ":", "import", "IPython", "# noqa", "except", "ImportError", ":", "_print", "(", "\"The SolveBio Python shell requires IPython.\\n\"", "\"To install, type: 'pip install ipython'\"", ")", "return", "False", "if", "hasattr", "(", "IPython", ",", "\"version_info\"", ")", ":", "if", "IPython", ".", "version_info", ">", "(", "5", ",", "0", ",", "0", ",", "''", ")", ":", "return", "launch_ipython_5_shell", "(", "args", ")", "_print", "(", "\"WARNING: Please upgrade IPython (you are running version: {})\"", ".", "format", "(", "IPython", ".", "__version__", ")", ")", "return", "launch_ipython_legacy_shell", "(", "args", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
launch_ipython_5_shell
Open the SolveBio shell (IPython wrapper) with IPython 5+
solvebio/cli/ipython.py
def launch_ipython_5_shell(args): """Open the SolveBio shell (IPython wrapper) with IPython 5+""" import IPython # noqa from traitlets.config import Config c = Config() path = os.path.dirname(os.path.abspath(__file__)) try: # see if we're already inside IPython get_ipython # pylint: disable=undefined-variable _print("WARNING: Running IPython within IPython.") except NameError: c.InteractiveShell.banner1 = 'SolveBio Python shell started.\n' c.InteractiveShellApp.exec_files = ['{}/ipython_init.py'.format(path)] IPython.start_ipython(argv=[], config=c)
def launch_ipython_5_shell(args): """Open the SolveBio shell (IPython wrapper) with IPython 5+""" import IPython # noqa from traitlets.config import Config c = Config() path = os.path.dirname(os.path.abspath(__file__)) try: # see if we're already inside IPython get_ipython # pylint: disable=undefined-variable _print("WARNING: Running IPython within IPython.") except NameError: c.InteractiveShell.banner1 = 'SolveBio Python shell started.\n' c.InteractiveShellApp.exec_files = ['{}/ipython_init.py'.format(path)] IPython.start_ipython(argv=[], config=c)
[ "Open", "the", "SolveBio", "shell", "(", "IPython", "wrapper", ")", "with", "IPython", "5", "+" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/ipython.py#L32-L48
[ "def", "launch_ipython_5_shell", "(", "args", ")", ":", "import", "IPython", "# noqa", "from", "traitlets", ".", "config", "import", "Config", "c", "=", "Config", "(", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "try", ":", "# see if we're already inside IPython", "get_ipython", "# pylint: disable=undefined-variable", "_print", "(", "\"WARNING: Running IPython within IPython.\"", ")", "except", "NameError", ":", "c", ".", "InteractiveShell", ".", "banner1", "=", "'SolveBio Python shell started.\\n'", "c", ".", "InteractiveShellApp", ".", "exec_files", "=", "[", "'{}/ipython_init.py'", ".", "format", "(", "path", ")", "]", "IPython", ".", "start_ipython", "(", "argv", "=", "[", "]", ",", "config", "=", "c", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
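For comparison, the bare minimum needed to start an embedded IPython 5+ shell with a traitlets Config, as the function above does; the banner text and the (empty) exec_files list are illustrative stand-ins:

import IPython
from traitlets.config import Config

c = Config()
c.InteractiveShell.banner1 = 'Custom shell started.\n'  # assumed banner text
c.InteractiveShellApp.exec_files = []  # scripts to exec in the new namespace

IPython.start_ipython(argv=[], config=c)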
test
launch_ipython_legacy_shell
Open the SolveBio shell (IPython wrapper) for older IPython versions
solvebio/cli/ipython.py
def launch_ipython_legacy_shell(args): # pylint: disable=unused-argument """Open the SolveBio shell (IPython wrapper) for older IPython versions""" try: from IPython.config.loader import Config except ImportError: _print("The SolveBio Python shell requires IPython.\n" "To install, type: 'pip install ipython'") return False try: # see if we're already inside IPython get_ipython # pylint: disable=undefined-variable except NameError: cfg = Config() prompt_config = cfg.PromptManager prompt_config.in_template = '[SolveBio] In <\\#>: ' prompt_config.in2_template = ' .\\D.: ' prompt_config.out_template = 'Out<\\#>: ' banner1 = '\nSolveBio Python shell started.' exit_msg = 'Quitting SolveBio shell.' else: _print("Running nested copies of IPython.") cfg = Config() banner1 = exit_msg = '' # First import the embeddable shell class try: from IPython.terminal.embed import InteractiveShellEmbed except ImportError: # pylint: disable=import-error,no-name-in-module from IPython.frontend.terminal.embed import InteractiveShellEmbed path = os.path.dirname(os.path.abspath(__file__)) init_file = '{}/ipython_init.py'.format(path) exec(compile(open(init_file).read(), init_file, 'exec'), globals(), locals()) InteractiveShellEmbed(config=cfg, banner1=banner1, exit_msg=exit_msg)()
def launch_ipython_legacy_shell(args): # pylint: disable=unused-argument """Open the SolveBio shell (IPython wrapper) for older IPython versions""" try: from IPython.config.loader import Config except ImportError: _print("The SolveBio Python shell requires IPython.\n" "To install, type: 'pip install ipython'") return False try: # see if we're already inside IPython get_ipython # pylint: disable=undefined-variable except NameError: cfg = Config() prompt_config = cfg.PromptManager prompt_config.in_template = '[SolveBio] In <\\#>: ' prompt_config.in2_template = ' .\\D.: ' prompt_config.out_template = 'Out<\\#>: ' banner1 = '\nSolveBio Python shell started.' exit_msg = 'Quitting SolveBio shell.' else: _print("Running nested copies of IPython.") cfg = Config() banner1 = exit_msg = '' # First import the embeddable shell class try: from IPython.terminal.embed import InteractiveShellEmbed except ImportError: # pylint: disable=import-error,no-name-in-module from IPython.frontend.terminal.embed import InteractiveShellEmbed path = os.path.dirname(os.path.abspath(__file__)) init_file = '{}/ipython_init.py'.format(path) exec(compile(open(init_file).read(), init_file, 'exec'), globals(), locals()) InteractiveShellEmbed(config=cfg, banner1=banner1, exit_msg=exit_msg)()
[ "Open", "the", "SolveBio", "shell", "(", "IPython", "wrapper", ")", "for", "older", "IPython", "versions" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/ipython.py#L51-L89
[ "def", "launch_ipython_legacy_shell", "(", "args", ")", ":", "# pylint: disable=unused-argument", "try", ":", "from", "IPython", ".", "config", ".", "loader", "import", "Config", "except", "ImportError", ":", "_print", "(", "\"The SolveBio Python shell requires IPython.\\n\"", "\"To install, type: 'pip install ipython'\"", ")", "return", "False", "try", ":", "# see if we're already inside IPython", "get_ipython", "# pylint: disable=undefined-variable", "except", "NameError", ":", "cfg", "=", "Config", "(", ")", "prompt_config", "=", "cfg", ".", "PromptManager", "prompt_config", ".", "in_template", "=", "'[SolveBio] In <\\\\#>: '", "prompt_config", ".", "in2_template", "=", "' .\\\\D.: '", "prompt_config", ".", "out_template", "=", "'Out<\\\\#>: '", "banner1", "=", "'\\nSolveBio Python shell started.'", "exit_msg", "=", "'Quitting SolveBio shell.'", "else", ":", "_print", "(", "\"Running nested copies of IPython.\"", ")", "cfg", "=", "Config", "(", ")", "banner1", "=", "exit_msg", "=", "''", "# First import the embeddable shell class", "try", ":", "from", "IPython", ".", "terminal", ".", "embed", "import", "InteractiveShellEmbed", "except", "ImportError", ":", "# pylint: disable=import-error,no-name-in-module", "from", "IPython", ".", "frontend", ".", "terminal", ".", "embed", "import", "InteractiveShellEmbed", "path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "init_file", "=", "'{}/ipython_init.py'", ".", "format", "(", "path", ")", "exec", "(", "compile", "(", "open", "(", "init_file", ")", ".", "read", "(", ")", ",", "init_file", ",", "'exec'", ")", ",", "globals", "(", ")", ",", "locals", "(", ")", ")", "InteractiveShellEmbed", "(", "config", "=", "cfg", ",", "banner1", "=", "banner1", ",", "exit_msg", "=", "exit_msg", ")", "(", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
SolveClient.get
Issues an HTTP GET across the wire via the Python requests library. See *request()* for information on keyword args.
solvebio/client.py
def get(self, url, params, **kwargs): """Issues an HTTP GET across the wire via the Python requests library. See *request()* for information on keyword args.""" kwargs['params'] = params return self.request('GET', url, **kwargs)
def get(self, url, params, **kwargs): """Issues an HTTP GET across the wire via the Python requests library. See *request()* for information on keyword args.""" kwargs['params'] = params return self.request('GET', url, **kwargs)
[ "Issues", "an", "HTTP", "GET", "across", "the", "wire", "via", "the", "Python", "requests", "library", ".", "See", "*", "request", "()", "*", "for", "information", "on", "keyword", "args", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/client.py#L146-L150
[ "def", "get", "(", "self", ",", "url", ",", "params", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'params'", "]", "=", "params", "return", "self", ".", "request", "(", "'GET'", ",", "url", ",", "*", "*", "kwargs", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
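get() (and delete() below) are thin wrappers that stuff one argument into kwargs and delegate to request(). A self-contained toy sketch of that forwarding pattern; the Client class and endpoint here are illustrative, not the real SolveClient:

class Client:
    def request(self, method, url, **kwargs):
        # Stand-in for the real HTTP call; just echoes its inputs.
        return (method, url, kwargs)

    def get(self, url, params, **kwargs):
        kwargs['params'] = params
        return self.request('GET', url, **kwargs)

assert Client().get('/v2/datasets', {'limit': 10}) == \
    ('GET', '/v2/datasets', {'params': {'limit': 10}})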
test
SolveClient.delete
Issues an HTTP DELETE across the wire via the Python requests library. See *request* for information on keyword args.
solvebio/client.py
def delete(self, url, data, **kwargs): """Issues an HTTP DELETE across the wire via the Python requests library. See *request* for information on keyword args.""" kwargs['data'] = data return self.request('DELETE', url, **kwargs)
def delete(self, url, data, **kwargs): """Issues an HTTP DELETE across the wire via the Python requests library. See *request* for information on keyword args.""" kwargs['data'] = data return self.request('DELETE', url, **kwargs)
[ "Issues", "an", "HTTP", "DELETE", "across", "the", "wire", "via", "the", "Python", "requests", "library", ".", "See", "*", "request", "*", "for", "information", "on", "keyword", "args", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/client.py#L158-L162
[ "def", "delete", "(", "self", ",", "url", ",", "data", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'data'", "]", "=", "data", "return", "self", ".", "request", "(", "'DELETE'", ",", "url", ",", "*", "*", "kwargs", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
SolveClient.request
Issues an HTTP Request across the wire via the Python requests library. Parameters ---------- method : str an HTTP method: GET, PUT, POST, DELETE, ... url : str the place to connect to. If the url doesn't start with a protocol (https:// or http://), we'll slap solvebio.api_host in the front. allow_redirects : bool, optional if set to *False*, we won't follow any redirects headers : dict, optional Custom headers can be provided here; generally though this will be set correctly by default dependent on the method type. If the content type is JSON, we'll JSON-encode params. params : dict, optional passed as *params* in the requests.request timeout : int, optional timeout value in seconds for the request raw : bool, optional unless *True*, the response is decoded from JSON files : file File content in the form of a file handle which is to be uploaded. Files are passed in POST requests Returns ------- response object. If *raw* is not *True* and the response is valid, the object will be JSON-decoded. Otherwise it will be the requests.Response object.
solvebio/client.py
def request(self, method, url, **kwargs): """ Issues an HTTP Request across the wire via the Python requests library. Parameters ---------- method : str an HTTP method: GET, PUT, POST, DELETE, ... url : str the place to connect to. If the url doesn't start with a protocol (https:// or http://), we'll slap solvebio.api_host in the front. allow_redirects : bool, optional if set to *False*, we won't follow any redirects headers : dict, optional Custom headers can be provided here; generally though this will be set correctly by default dependent on the method type. If the content type is JSON, we'll JSON-encode params. params : dict, optional passed as *params* in the requests.request timeout : int, optional timeout value in seconds for the request raw : bool, optional unless *True*, the response is decoded from JSON files : file File content in the form of a file handle which is to be uploaded. Files are passed in POST requests Returns ------- response object. If *raw* is not *True* and the response is valid, the object will be JSON-decoded. Otherwise it will be the requests.Response object. """ opts = { 'allow_redirects': True, 'auth': self._auth, 'data': {}, 'files': None, 'headers': dict(self._headers), 'params': {}, 'timeout': 80, 'verify': True } raw = kwargs.pop('raw', False) debug = kwargs.pop('debug', False) opts.update(kwargs) method = method.upper() if opts['files']: # Don't use application/json for file uploads or GET requests opts['headers'].pop('Content-Type', None) else: opts['data'] = json.dumps(opts['data']) if not url.startswith(self._host): url = urljoin(self._host, url) logger.debug('API %s Request: %s' % (method, url)) if debug: self._log_raw_request(method, url, **opts) try: response = self._session.request(method, url, **opts) except Exception as e: _handle_request_error(e) if 429 == response.status_code: delay = int(response.headers['retry-after']) + 1 logger.warn('Too many requests. Retrying in {0}s.'.format(delay)) time.sleep(delay) return self.request(method, url, **kwargs) if not (200 <= response.status_code < 400): _handle_api_error(response) # 204 is used on deletion. There is no JSON here. if raw or response.status_code in [204, 301, 302]: return response return response.json()
def request(self, method, url, **kwargs): """ Issues an HTTP Request across the wire via the Python requests library. Parameters ---------- method : str an HTTP method: GET, PUT, POST, DELETE, ... url : str the place to connect to. If the url doesn't start with a protocol (https:// or http://), we'll slap solvebio.api_host in the front. allow_redirects : bool, optional if set to *False*, we won't follow any redirects headers : dict, optional Custom headers can be provided here; generally though this will be set correctly by default dependent on the method type. If the content type is JSON, we'll JSON-encode params. params : dict, optional passed as *params* in the requests.request timeout : int, optional timeout value in seconds for the request raw : bool, optional unless *True*, the response is decoded from JSON files : file File content in the form of a file handle which is to be uploaded. Files are passed in POST requests Returns ------- response object. If *raw* is not *True* and the response is valid, the object will be JSON-decoded. Otherwise it will be the requests.Response object. """ opts = { 'allow_redirects': True, 'auth': self._auth, 'data': {}, 'files': None, 'headers': dict(self._headers), 'params': {}, 'timeout': 80, 'verify': True } raw = kwargs.pop('raw', False) debug = kwargs.pop('debug', False) opts.update(kwargs) method = method.upper() if opts['files']: # Don't use application/json for file uploads or GET requests opts['headers'].pop('Content-Type', None) else: opts['data'] = json.dumps(opts['data']) if not url.startswith(self._host): url = urljoin(self._host, url) logger.debug('API %s Request: %s' % (method, url)) if debug: self._log_raw_request(method, url, **opts) try: response = self._session.request(method, url, **opts) except Exception as e: _handle_request_error(e) if 429 == response.status_code: delay = int(response.headers['retry-after']) + 1 logger.warn('Too many requests. Retrying in {0}s.'.format(delay)) time.sleep(delay) return self.request(method, url, **kwargs) if not (200 <= response.status_code < 400): _handle_api_error(response) # 204 is used on deletion. There is no JSON here. if raw or response.status_code in [204, 301, 302]: return response return response.json()
[ "Issues", "an", "HTTP", "Request", "across", "the", "wire", "via", "the", "Python", "requests", "library", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/client.py#L164-L258
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "opts", "=", "{", "'allow_redirects'", ":", "True", ",", "'auth'", ":", "self", ".", "_auth", ",", "'data'", ":", "{", "}", ",", "'files'", ":", "None", ",", "'headers'", ":", "dict", "(", "self", ".", "_headers", ")", ",", "'params'", ":", "{", "}", ",", "'timeout'", ":", "80", ",", "'verify'", ":", "True", "}", "raw", "=", "kwargs", ".", "pop", "(", "'raw'", ",", "False", ")", "debug", "=", "kwargs", ".", "pop", "(", "'debug'", ",", "False", ")", "opts", ".", "update", "(", "kwargs", ")", "method", "=", "method", ".", "upper", "(", ")", "if", "opts", "[", "'files'", "]", ":", "# Don't use application/json for file uploads or GET requests", "opts", "[", "'headers'", "]", ".", "pop", "(", "'Content-Type'", ",", "None", ")", "else", ":", "opts", "[", "'data'", "]", "=", "json", ".", "dumps", "(", "opts", "[", "'data'", "]", ")", "if", "not", "url", ".", "startswith", "(", "self", ".", "_host", ")", ":", "url", "=", "urljoin", "(", "self", ".", "_host", ",", "url", ")", "logger", ".", "debug", "(", "'API %s Request: %s'", "%", "(", "method", ",", "url", ")", ")", "if", "debug", ":", "self", ".", "_log_raw_request", "(", "method", ",", "url", ",", "*", "*", "opts", ")", "try", ":", "response", "=", "self", ".", "_session", ".", "request", "(", "method", ",", "url", ",", "*", "*", "opts", ")", "except", "Exception", "as", "e", ":", "_handle_request_error", "(", "e", ")", "if", "429", "==", "response", ".", "status_code", ":", "delay", "=", "int", "(", "response", ".", "headers", "[", "'retry-after'", "]", ")", "+", "1", "logger", ".", "warn", "(", "'Too many requests. Retrying in {0}s.'", ".", "format", "(", "delay", ")", ")", "time", ".", "sleep", "(", "delay", ")", "return", "self", ".", "request", "(", "method", ",", "url", ",", "*", "*", "kwargs", ")", "if", "not", "(", "200", "<=", "response", ".", "status_code", "<", "400", ")", ":", "_handle_api_error", "(", "response", ")", "# 204 is used on deletion. There is no JSON here.", "if", "raw", "or", "response", ".", "status_code", "in", "[", "204", ",", "301", ",", "302", "]", ":", "return", "response", "return", "response", ".", "json", "(", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
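The most interesting branch above is the 429 handler: sleep for Retry-After seconds (plus one) and retry with the original kwargs. A standalone sketch of that backoff pattern against a plain requests.Session; the function name is illustrative, and unlike the client above it tolerates a missing Retry-After header:

import time
import requests

def request_with_retry(session, method, url, **kwargs):
    # Retry on HTTP 429, honoring the server's Retry-After header.
    response = session.request(method, url, **kwargs)
    if response.status_code == 429:
        delay = int(response.headers.get('retry-after', 0)) + 1
        time.sleep(delay)
        return request_with_retry(session, method, url, **kwargs)
    return response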
test
Task.child_object
Get Task child object class
solvebio/resource/task.py
def child_object(self): """ Get Task child object class """ from . import types child_klass = types.get(self.task_type.split('.')[1]) return child_klass.retrieve(self.task_id, client=self._client)
def child_object(self): """ Get Task child object class """ from . import types child_klass = types.get(self.task_type.split('.')[1]) return child_klass.retrieve(self.task_id, client=self._client)
[ "Get", "Task", "child", "object", "class" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/task.py#L22-L26
[ "def", "child_object", "(", "self", ")", ":", "from", ".", "import", "types", "child_klass", "=", "types", ".", "get", "(", "self", ".", "task_type", ".", "split", "(", "'.'", ")", "[", "1", "]", ")", "return", "child_klass", ".", "retrieve", "(", "self", ".", "task_id", ",", "client", "=", "self", ".", "_client", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
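child_object() resolves a class from the second segment of a dotted task_type string. A toy sketch of that registry dispatch; the class names and mapping are hypothetical stand-ins for solvebio.resource.types:

class DatasetImport(object): pass
class DatasetExport(object): pass

# Hypothetical stand-in for the `types` registry used above.
TYPES = {'dataset_import': DatasetImport, 'dataset_export': DatasetExport}

def resolve_child_class(task_type):
    # e.g. 'core.dataset_import' -> DatasetImport
    return TYPES[task_type.split('.')[1]]

assert resolve_child_class('core.dataset_import') is DatasetImport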
test
Task.cancel
Cancel a task
solvebio/resource/task.py
def cancel(self): """ Cancel a task """ _status = self.status self.status = "canceled" try: self.save() except: # Reset status to what it was before # status update failure self.status = _status raise
def cancel(self): """ Cancel a task """ _status = self.status self.status = "canceled" try: self.save() except: # Reset status to what it was before # status update failure self.status = _status raise
[ "Cancel", "a", "task" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/task.py#L32-L42
[ "def", "cancel", "(", "self", ")", ":", "_status", "=", "self", ".", "status", "self", ".", "status", "=", "\"canceled\"", "try", ":", "self", ".", "save", "(", ")", "except", ":", "# Reset status to what it was before", "# status update failure", "self", ".", "status", "=", "_status", "raise" ]
b29614643043afd19c1d8074e8f25c6700d51a73
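cancel() is a small rollback-on-failure pattern: mutate local state, try to persist, and restore the previous value if persisting raises. The same idea as a generic helper (names are illustrative):

def set_and_save(obj, attr, value):
    # Set obj.<attr>, persist via obj.save(), and roll back on failure
    # so local state keeps mirroring the server.
    previous = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        obj.save()
    except Exception:
        setattr(obj, attr, previous)
        raise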
test
ExpandingVCFParser._parse_info_snpeff
Specialized INFO field parser for SnpEff ANN fields. Requires self._snpeff_ann_fields to be set.
solvebio/contrib/vcf_parser/vcf_parser.py
def _parse_info_snpeff(self, info): """ Specialized INFO field parser for SnpEff ANN fields. Requires self._snpeff_ann_fields to be set. """ ann = info.pop('ANN', []) or [] # Overwrite the existing ANN with something parsed # Split on '|', merge with the ANN keys parsed above. # Ensure empty values are None rather than empty string. items = [] for a in ann: # For multi-allelic records, we may have already # processed ANN. If so, quit now. if isinstance(a, dict): info['ANN'] = ann return info values = [i or None for i in a.split('|')] item = dict(zip(self._snpeff_ann_fields, values)) # Further split the Annotation field by '&' if item.get('Annotation'): item['Annotation'] = item['Annotation'].split('&') items.append(item) info['ANN'] = items return info
def _parse_info_snpeff(self, info): """ Specialized INFO field parser for SnpEff ANN fields. Requires self._snpeff_ann_fields to be set. """ ann = info.pop('ANN', []) or [] # Overwrite the existing ANN with something parsed # Split on '|', merge with the ANN keys parsed above. # Ensure empty values are None rather than empty string. items = [] for a in ann: # For multi-allelic records, we may have already # processed ANN. If so, quit now. if isinstance(a, dict): info['ANN'] = ann return info values = [i or None for i in a.split('|')] item = dict(zip(self._snpeff_ann_fields, values)) # Further split the Annotation field by '&' if item.get('Annotation'): item['Annotation'] = item['Annotation'].split('&') items.append(item) info['ANN'] = items return info
[ "Specialized", "INFO", "field", "parser", "for", "SnpEff", "ANN", "fields", ".", "Requires", "self", ".", "_snpeff_ann_fields", "to", "be", "set", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/contrib/vcf_parser/vcf_parser.py#L140-L167
[ "def", "_parse_info_snpeff", "(", "self", ",", "info", ")", ":", "ann", "=", "info", ".", "pop", "(", "'ANN'", ",", "[", "]", ")", "or", "[", "]", "# Overwrite the existing ANN with something parsed", "# Split on '|', merge with the ANN keys parsed above.", "# Ensure empty values are None rather than empty string.", "items", "=", "[", "]", "for", "a", "in", "ann", ":", "# For multi-allelic records, we may have already", "# processed ANN. If so, quit now.", "if", "isinstance", "(", "a", ",", "dict", ")", ":", "info", "[", "'ANN'", "]", "=", "ann", "return", "info", "values", "=", "[", "i", "or", "None", "for", "i", "in", "a", ".", "split", "(", "'|'", ")", "]", "item", "=", "dict", "(", "zip", "(", "self", ".", "_snpeff_ann_fields", ",", "values", ")", ")", "# Further split the Annotation field by '&'", "if", "item", ".", "get", "(", "'Annotation'", ")", ":", "item", "[", "'Annotation'", "]", "=", "item", "[", "'Annotation'", "]", ".", "split", "(", "'&'", ")", "items", ".", "append", "(", "item", ")", "info", "[", "'ANN'", "]", "=", "items", "return", "info" ]
b29614643043afd19c1d8074e8f25c6700d51a73
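A worked example of the ANN parsing above. Real SnpEff ANN entries have around 16 pipe-delimited sub-fields; the field list here is trimmed to three to keep the example short:

snpeff_ann_fields = ['Allele', 'Annotation', 'Gene_Name']  # trimmed list

raw = 'A|missense_variant&splice_region_variant|BRCA1'
values = [v or None for v in raw.split('|')]
item = dict(zip(snpeff_ann_fields, values))
if item.get('Annotation'):
    item['Annotation'] = item['Annotation'].split('&')

assert item == {
    'Allele': 'A',
    'Annotation': ['missense_variant', 'splice_region_variant'],
    'Gene_Name': 'BRCA1',
}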
test
ExpandingVCFParser.next
Expands multiple alleles into one record each using an internal buffer (_next).
solvebio/contrib/vcf_parser/vcf_parser.py
def next(self): """ Expands multiple alleles into one record each using an internal buffer (_next). """ def _alt(alt): """Parses the VCF row ALT object.""" # If alt is '.' in VCF, PyVCF returns None, convert back to '.' if not alt: return '.' else: return str(alt) if not self._next: row = next(self.reader) alternate_alleles = list(map(_alt, row.ALT)) for allele in alternate_alleles: self._next.append( self.row_to_dict( row, allele=allele, alternate_alleles=alternate_alleles)) # Source line number, only increment if reading a new row. self._line_number += 1 return self._next.pop()
def next(self): """ Expands multiple alleles into one record each using an internal buffer (_next). """ def _alt(alt): """Parses the VCF row ALT object.""" # If alt is '.' in VCF, PyVCF returns None, convert back to '.' if not alt: return '.' else: return str(alt) if not self._next: row = next(self.reader) alternate_alleles = list(map(_alt, row.ALT)) for allele in alternate_alleles: self._next.append( self.row_to_dict( row, allele=allele, alternate_alleles=alternate_alleles)) # Source line number, only increment if reading a new row. self._line_number += 1 return self._next.pop()
[ "Expands", "multiple", "alleles", "into", "one", "record", "each", "using", "an", "internal", "buffer", "(", "_next", ")", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/contrib/vcf_parser/vcf_parser.py#L190-L218
[ "def", "next", "(", "self", ")", ":", "def", "_alt", "(", "alt", ")", ":", "\"\"\"Parses the VCF row ALT object.\"\"\"", "# If alt is '.' in VCF, PyVCF returns None, convert back to '.'", "if", "not", "alt", ":", "return", "'.'", "else", ":", "return", "str", "(", "alt", ")", "if", "not", "self", ".", "_next", ":", "row", "=", "next", "(", "self", ".", "reader", ")", "alternate_alleles", "=", "list", "(", "map", "(", "_alt", ",", "row", ".", "ALT", ")", ")", "for", "allele", "in", "alternate_alleles", ":", "self", ".", "_next", ".", "append", "(", "self", ".", "row_to_dict", "(", "row", ",", "allele", "=", "allele", ",", "alternate_alleles", "=", "alternate_alleles", ")", ")", "# Source line number, only increment if reading a new row.", "self", ".", "_line_number", "+=", "1", "return", "self", ".", "_next", ".", "pop", "(", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
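next() fills a buffer with one record per ALT allele and pops until the buffer is empty, so a multi-allelic row comes out as several records. A self-contained sketch of that expansion (written as a generator rather than a buffered iterator), with None standing in for PyVCF's representation of '.':

def expand_rows(rows):
    # rows: iterable of (ref, [alt, ...]); yields one dict per ALT allele.
    for ref, alts in rows:
        alts = [str(a) if a is not None else '.' for a in alts]
        for allele in alts:
            yield {'reference_allele': ref,
                   'allele': allele,
                   'alternate_alleles': alts}

records = list(expand_rows([('A', ['G', 'T'])]))
assert len(records) == 2                      # one record per ALT allele
assert records[0]['alternate_alleles'] == ['G', 'T']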
test
ExpandingVCFParser.row_to_dict
Return a parsed dictionary for JSON.
solvebio/contrib/vcf_parser/vcf_parser.py
def row_to_dict(self, row, allele, alternate_alleles): """Return a parsed dictionary for JSON.""" def _variant_sbid(**kwargs): """Generates a SolveBio variant ID (SBID).""" return '{build}-{chromosome}-{start}-{stop}-{allele}'\ .format(**kwargs).upper() if allele == '.': # Try to use the ref, if '.' is supplied for alt. allele = row.REF or allele genomic_coordinates = { 'build': self.genome_build, 'chromosome': row.CHROM, 'start': row.POS, 'stop': row.POS + len(row.REF) - 1 } # SolveBio standard variant format variant_sbid = _variant_sbid(allele=allele, **genomic_coordinates) return { 'genomic_coordinates': genomic_coordinates, 'variant': variant_sbid, 'allele': allele, 'row_id': row.ID, 'reference_allele': row.REF, 'alternate_alleles': alternate_alleles, 'info': self._parse_info(row.INFO), 'qual': row.QUAL, 'filter': row.FILTER }
def row_to_dict(self, row, allele, alternate_alleles): """Return a parsed dictionary for JSON.""" def _variant_sbid(**kwargs): """Generates a SolveBio variant ID (SBID).""" return '{build}-{chromosome}-{start}-{stop}-{allele}'\ .format(**kwargs).upper() if allele == '.': # Try to use the ref, if '.' is supplied for alt. allele = row.REF or allele genomic_coordinates = { 'build': self.genome_build, 'chromosome': row.CHROM, 'start': row.POS, 'stop': row.POS + len(row.REF) - 1 } # SolveBio standard variant format variant_sbid = _variant_sbid(allele=allele, **genomic_coordinates) return { 'genomic_coordinates': genomic_coordinates, 'variant': variant_sbid, 'allele': allele, 'row_id': row.ID, 'reference_allele': row.REF, 'alternate_alleles': alternate_alleles, 'info': self._parse_info(row.INFO), 'qual': row.QUAL, 'filter': row.FILTER }
[ "Return", "a", "parsed", "dictionary", "for", "JSON", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/contrib/vcf_parser/vcf_parser.py#L220-L253
[ "def", "row_to_dict", "(", "self", ",", "row", ",", "allele", ",", "alternate_alleles", ")", ":", "def", "_variant_sbid", "(", "*", "*", "kwargs", ")", ":", "\"\"\"Generates a SolveBio variant ID (SBID).\"\"\"", "return", "'{build}-{chromosome}-{start}-{stop}-{allele}'", ".", "format", "(", "*", "*", "kwargs", ")", ".", "upper", "(", ")", "if", "allele", "==", "'.'", ":", "# Try to use the ref, if '.' is supplied for alt.", "allele", "=", "row", ".", "REF", "or", "allele", "genomic_coordinates", "=", "{", "'build'", ":", "self", ".", "genome_build", ",", "'chromosome'", ":", "row", ".", "CHROM", ",", "'start'", ":", "row", ".", "POS", ",", "'stop'", ":", "row", ".", "POS", "+", "len", "(", "row", ".", "REF", ")", "-", "1", "}", "# SolveBio standard variant format", "variant_sbid", "=", "_variant_sbid", "(", "allele", "=", "allele", ",", "*", "*", "genomic_coordinates", ")", "return", "{", "'genomic_coordinates'", ":", "genomic_coordinates", ",", "'variant'", ":", "variant_sbid", ",", "'allele'", ":", "allele", ",", "'row_id'", ":", "row", ".", "ID", ",", "'reference_allele'", ":", "row", ".", "REF", ",", "'alternate_alleles'", ":", "alternate_alleles", ",", "'info'", ":", "self", ".", "_parse_info", "(", "row", ".", "INFO", ")", ",", "'qual'", ":", "row", ".", "QUAL", ",", "'filter'", ":", "row", ".", "FILTER", "}" ]
b29614643043afd19c1d8074e8f25c6700d51a73
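The SBID built above is just an upper-cased, dash-joined key. A worked example with illustrative GRCh37 coordinates:

coords = {'build': 'GRCh37', 'chromosome': '7',
          'start': 140453136, 'stop': 140453136}
sbid = '{build}-{chromosome}-{start}-{stop}-{allele}'.format(
    allele='T', **coords).upper()
assert sbid == 'GRCH37-7-140453136-140453136-T'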
test
get_credentials
Returns the user's stored API key if a valid credentials file is found. Raises CredentialsError if no valid credentials file is found.
solvebio/cli/credentials.py
def get_credentials(): """ Returns the user's stored API key if a valid credentials file is found. Raises CredentialsError if no valid credentials file is found. """ try: netrc_path = netrc.path() auths = netrc(netrc_path).authenticators( urlparse(solvebio.api_host).netloc) except (IOError, TypeError, NetrcParseError) as e: raise CredentialsError( 'Could not open credentials file: ' + str(e)) if auths: # auths = (login, account, password) return auths[2] else: return None
def get_credentials(): """ Returns the user's stored API key if a valid credentials file is found. Raises CredentialsError if no valid credentials file is found. """ try: netrc_path = netrc.path() auths = netrc(netrc_path).authenticators( urlparse(solvebio.api_host).netloc) except (IOError, TypeError, NetrcParseError) as e: raise CredentialsError( 'Could not open credentials file: ' + str(e)) if auths: # auths = (login, account, password) return auths[2] else: return None
[ "Returns", "the", "user", "s", "stored", "API", "key", "if", "a", "valid", "credentials", "file", "is", "found", ".", "Raises", "CredentialsError", "if", "no", "valid", "credentials", "file", "is", "found", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/credentials.py#L76-L93
[ "def", "get_credentials", "(", ")", ":", "try", ":", "netrc_path", "=", "netrc", ".", "path", "(", ")", "auths", "=", "netrc", "(", "netrc_path", ")", ".", "authenticators", "(", "urlparse", "(", "solvebio", ".", "api_host", ")", ".", "netloc", ")", "except", "(", "IOError", ",", "TypeError", ",", "NetrcParseError", ")", "as", "e", ":", "raise", "CredentialsError", "(", "'Could not open credentials file: '", "+", "str", "(", "e", ")", ")", "if", "auths", ":", "# auths = (login, account, password)", "return", "auths", "[", "2", "]", "else", ":", "return", "None" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
netrc.save
Dump the class data in the format of a .netrc file.
solvebio/cli/credentials.py
def save(self, path): """Dump the class data in the format of a .netrc file.""" rep = "" for host in self.hosts.keys(): attrs = self.hosts[host] rep = rep + "machine " + host + "\n\tlogin " \ + six.text_type(attrs[0]) + "\n" if attrs[1]: rep = rep + "account " + six.text_type(attrs[1]) rep = rep + "\tpassword " + six.text_type(attrs[2]) + "\n" for macro in self.macros.keys(): rep = rep + "macdef " + macro + "\n" for line in self.macros[macro]: rep = rep + line rep = rep + "\n" f = open(path, 'w') f.write(rep) f.close()
def save(self, path): """Dump the class data in the format of a .netrc file.""" rep = "" for host in self.hosts.keys(): attrs = self.hosts[host] rep = rep + "machine " + host + "\n\tlogin " \ + six.text_type(attrs[0]) + "\n" if attrs[1]: rep = rep + "account " + six.text_type(attrs[1]) rep = rep + "\tpassword " + six.text_type(attrs[2]) + "\n" for macro in self.macros.keys(): rep = rep + "macdef " + macro + "\n" for line in self.macros[macro]: rep = rep + line rep = rep + "\n" f = open(path, 'w') f.write(rep) f.close()
[ "Dump", "the", "class", "data", "in", "the", "format", "of", "a", ".", "netrc", "file", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/credentials.py#L48-L66
[ "def", "save", "(", "self", ",", "path", ")", ":", "rep", "=", "\"\"", "for", "host", "in", "self", ".", "hosts", ".", "keys", "(", ")", ":", "attrs", "=", "self", ".", "hosts", "[", "host", "]", "rep", "=", "rep", "+", "\"machine \"", "+", "host", "+", "\"\\n\\tlogin \"", "+", "six", ".", "text_type", "(", "attrs", "[", "0", "]", ")", "+", "\"\\n\"", "if", "attrs", "[", "1", "]", ":", "rep", "=", "rep", "+", "\"account \"", "+", "six", ".", "text_type", "(", "attrs", "[", "1", "]", ")", "rep", "=", "rep", "+", "\"\\tpassword \"", "+", "six", ".", "text_type", "(", "attrs", "[", "2", "]", ")", "+", "\"\\n\"", "for", "macro", "in", "self", ".", "macros", ".", "keys", "(", ")", ":", "rep", "=", "rep", "+", "\"macdef \"", "+", "macro", "+", "\"\\n\"", "for", "line", "in", "self", ".", "macros", "[", "macro", "]", ":", "rep", "=", "rep", "+", "line", "rep", "=", "rep", "+", "\"\\n\"", "f", "=", "open", "(", "path", ",", "'w'", ")", "f", ".", "write", "(", "rep", ")", "f", ".", "close", "(", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
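For reference, what save() above actually emits for one host entry with no account (values are placeholders). A quirk visible in the code: when an account is present it is appended without a trailing newline, so it lands on the same line as the tab-indented password:

hosts = {'api.solvebio.com': ('user@example.com', None, '<api-key>')}

rep = ''
for host, (login, account, password) in hosts.items():
    rep += 'machine ' + host + '\n\tlogin ' + login + '\n'
    if account:
        rep += 'account ' + account
    rep += '\tpassword ' + password + '\n'

print(rep)
# machine api.solvebio.com
# 	login user@example.com
# 	password <api-key>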
test
_isint
>>> _isint("123") True >>> _isint("123.45") False
solvebio/utils/tabulate.py
def _isint(string): """ >>> _isint("123") True >>> _isint("123.45") False """ return type(string) is int or \ (isinstance(string, _binary_type) or isinstance(string, string_types)) and \ _isconvertible(int, string)
def _isint(string): """ >>> _isint("123") True >>> _isint("123.45") False """ return type(string) is int or \ (isinstance(string, _binary_type) or isinstance(string, string_types)) and \ _isconvertible(int, string)
[ ">>>", "_isint", "(", "123", ")", "True", ">>>", "_isint", "(", "123", ".", "45", ")", "False" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L160-L170
[ "def", "_isint", "(", "string", ")", ":", "return", "type", "(", "string", ")", "is", "int", "or", "(", "isinstance", "(", "string", ",", "_binary_type", ")", "or", "isinstance", "(", "string", ",", "string_types", ")", ")", "and", "_isconvertible", "(", "int", ",", "string", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
_align_column
[string] -> [padded_string] >>> list(map(str,_align_column( \ ["12.345", "-1234.5", "1.23", "1234.5", \ "1e+234", "1.0e234"], "decimal"))) [' 12.345 ', '-1234.5 ', ' 1.23 ', \ ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
solvebio/utils/tabulate.py
def _align_column(strings, alignment, minwidth=0, has_invisible=True): """ [string] -> [padded_string] >>> list(map(str,_align_column( \ ["12.345", "-1234.5", "1.23", "1234.5", \ "1e+234", "1.0e234"], "decimal"))) [' 12.345 ', '-1234.5 ', ' 1.23 ', \ ' 1234.5 ', ' 1e+234 ', ' 1.0e234'] """ if alignment == "right": strings = [s.strip() for s in strings] padfn = _padleft elif alignment in "center": strings = [s.strip() for s in strings] padfn = _padboth elif alignment in "decimal": decimals = [_afterpoint(s) for s in strings] maxdecimals = max(decimals) strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)] padfn = _padleft else: strings = [s.strip() for s in strings] padfn = _padright if has_invisible: width_fn = _visible_width else: width_fn = len maxwidth = max(max(list(map(width_fn, strings))), minwidth) padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings] return padded_strings
def _align_column(strings, alignment, minwidth=0, has_invisible=True): """ [string] -> [padded_string] >>> list(map(str,_align_column( \ ["12.345", "-1234.5", "1.23", "1234.5", \ "1e+234", "1.0e234"], "decimal"))) [' 12.345 ', '-1234.5 ', ' 1.23 ', \ ' 1234.5 ', ' 1e+234 ', ' 1.0e234'] """ if alignment == "right": strings = [s.strip() for s in strings] padfn = _padleft elif alignment in "center": strings = [s.strip() for s in strings] padfn = _padboth elif alignment in "decimal": decimals = [_afterpoint(s) for s in strings] maxdecimals = max(decimals) strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)] padfn = _padleft else: strings = [s.strip() for s in strings] padfn = _padright if has_invisible: width_fn = _visible_width else: width_fn = len maxwidth = max(max(list(map(width_fn, strings))), minwidth) padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings] return padded_strings
[ "[", "string", "]", "-", ">", "[", "padded_string", "]" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L298-L332
[ "def", "_align_column", "(", "strings", ",", "alignment", ",", "minwidth", "=", "0", ",", "has_invisible", "=", "True", ")", ":", "if", "alignment", "==", "\"right\"", ":", "strings", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "strings", "]", "padfn", "=", "_padleft", "elif", "alignment", "in", "\"center\"", ":", "strings", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "strings", "]", "padfn", "=", "_padboth", "elif", "alignment", "in", "\"decimal\"", ":", "decimals", "=", "[", "_afterpoint", "(", "s", ")", "for", "s", "in", "strings", "]", "maxdecimals", "=", "max", "(", "decimals", ")", "strings", "=", "[", "s", "+", "(", "maxdecimals", "-", "decs", ")", "*", "\" \"", "for", "s", ",", "decs", "in", "zip", "(", "strings", ",", "decimals", ")", "]", "padfn", "=", "_padleft", "else", ":", "strings", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "strings", "]", "padfn", "=", "_padright", "if", "has_invisible", ":", "width_fn", "=", "_visible_width", "else", ":", "width_fn", "=", "len", "maxwidth", "=", "max", "(", "max", "(", "list", "(", "map", "(", "width_fn", ",", "strings", ")", ")", ")", ",", "minwidth", ")", "padded_strings", "=", "[", "padfn", "(", "maxwidth", ",", "s", ",", "has_invisible", ")", "for", "s", "in", "strings", "]", "return", "padded_strings" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
_format
Format a value according to its type. Unicode is supported: >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', \ '\u0446\u0438\u0444\u0440\u0430'] ; \ tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \ good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \ \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------\ -------\\n\\u0430\\u0437 \ 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ tabulate(tbl, headers=hrow) == good_result True
solvebio/utils/tabulate.py
def _format(val, valtype, floatfmt, missingval=""): """ Format a value according to its type. Unicode is supported: >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', \ '\u0446\u0438\u0444\u0440\u0430'] ; \ tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \ good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \ \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------\ -------\\n\\u0430\\u0437 \ 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ tabulate(tbl, headers=hrow) == good_result True """ if val is None: return missingval if valtype in [int, _binary_type, _text_type]: return "{0}".format(val) elif valtype is float: return format(float(val), floatfmt) else: return "{0}".format(val)
def _format(val, valtype, floatfmt, missingval=""): """ Format a value according to its type. Unicode is supported: >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', \ '\u0446\u0438\u0444\u0440\u0430'] ; \ tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \ good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \ \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------\ -------\\n\\u0430\\u0437 \ 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ tabulate(tbl, headers=hrow) == good_result True """ if val is None: return missingval if valtype in [int, _binary_type, _text_type]: return "{0}".format(val) elif valtype is float: return format(float(val), floatfmt) else: return "{0}".format(val)
[ "Format", "a", "value", "according", "to", "its", "type", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L364-L389
[ "def", "_format", "(", "val", ",", "valtype", ",", "floatfmt", ",", "missingval", "=", "\"\"", ")", ":", "if", "val", "is", "None", ":", "return", "missingval", "if", "valtype", "in", "[", "int", ",", "_binary_type", ",", "_text_type", "]", ":", "return", "\"{0}\"", ".", "format", "(", "val", ")", "elif", "valtype", "is", "float", ":", "return", "format", "(", "float", "(", "val", ")", ",", "floatfmt", ")", "else", ":", "return", "\"{0}\"", ".", "format", "(", "val", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
_normalize_tabular_data
Transform a supported data type to a list of lists, and a list of headers. Supported tabular data types: * list-of-lists or another iterable of iterables * 2D NumPy arrays * dict of iterables (usually used with headers="keys") * pandas.DataFrame (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys".
solvebio/utils/tabulate.py
def _normalize_tabular_data(tabular_data, headers, sort=True): """ Transform a supported data type to a list of lists, and a list of headers. Supported tabular data types: * list-of-lists or another iterable of iterables * 2D NumPy arrays * dict of iterables (usually used with headers="keys") * pandas.DataFrame (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys". """ if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): # dict-like and pandas.DataFrame? if hasattr(tabular_data.values, "__call__"): # likely a conventional dict keys = list(tabular_data.keys()) # columns have to be transposed rows = list(izip_longest(*list(tabular_data.values()))) elif hasattr(tabular_data, "index"): # values is a property, has .index then # it's likely a pandas.DataFrame (pandas 0.11.0) keys = list(tabular_data.keys()) # values matrix doesn't need to be transposed vals = tabular_data.values names = tabular_data.index rows = [[v] + list(row) for v, row in zip(names, vals)] else: raise ValueError("tabular data doesn't appear to be a dict " "or a DataFrame") if headers == "keys": headers = list(map(_text_type, keys)) # headers should be strings else: # it's, as usual, an iterable of iterables, or a NumPy array rows = list(tabular_data) if headers == "keys" and len(rows) > 0: # keys are column indices headers = list(map(_text_type, list(range(len(rows[0]))))) # take headers from the first row if necessary if headers == "firstrow" and len(rows) > 0: headers = list(map(_text_type, rows[0])) # headers should be strings rows = rows[1:] headers = list(headers) rows = list(map(list, rows)) if sort and len(rows) > 1: rows = sorted(rows, key=lambda x: x[0]) # pad with empty headers for initial columns if necessary if headers and len(rows) > 0: nhs = len(headers) ncols = len(rows[0]) if nhs < ncols: headers = [""] * (ncols - nhs) + headers return rows, headers
def _normalize_tabular_data(tabular_data, headers, sort=True): """ Transform a supported data type to a list of lists, and a list of headers. Supported tabular data types: * list-of-lists or another iterable of iterables * 2D NumPy arrays * dict of iterables (usually used with headers="keys") * pandas.DataFrame (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys". """ if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): # dict-like and pandas.DataFrame? if hasattr(tabular_data.values, "__call__"): # likely a conventional dict keys = list(tabular_data.keys()) # columns have to be transposed rows = list(izip_longest(*list(tabular_data.values()))) elif hasattr(tabular_data, "index"): # values is a property, has .index then # it's likely a pandas.DataFrame (pandas 0.11.0) keys = list(tabular_data.keys()) # values matrix doesn't need to be transposed vals = tabular_data.values names = tabular_data.index rows = [[v] + list(row) for v, row in zip(names, vals)] else: raise ValueError("tabular data doesn't appear to be a dict " "or a DataFrame") if headers == "keys": headers = list(map(_text_type, keys)) # headers should be strings else: # it's, as usual, an iterable of iterables, or a NumPy array rows = list(tabular_data) if headers == "keys" and len(rows) > 0: # keys are column indices headers = list(map(_text_type, list(range(len(rows[0]))))) # take headers from the first row if necessary if headers == "firstrow" and len(rows) > 0: headers = list(map(_text_type, rows[0])) # headers should be strings rows = rows[1:] headers = list(headers) rows = list(map(list, rows)) if sort and len(rows) > 1: rows = sorted(rows, key=lambda x: x[0]) # pad with empty headers for initial columns if necessary if headers and len(rows) > 0: nhs = len(headers) ncols = len(rows[0]) if nhs < ncols: headers = [""] * (ncols - nhs) + headers return rows, headers
[ "Transform", "a", "supported", "data", "type", "to", "a", "list", "of", "lists", "and", "a", "list", "of", "headers", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L401-L467
[ "def", "_normalize_tabular_data", "(", "tabular_data", ",", "headers", ",", "sort", "=", "True", ")", ":", "if", "hasattr", "(", "tabular_data", ",", "\"keys\"", ")", "and", "hasattr", "(", "tabular_data", ",", "\"values\"", ")", ":", "# dict-like and pandas.DataFrame?", "if", "hasattr", "(", "tabular_data", ".", "values", ",", "\"__call__\"", ")", ":", "# likely a conventional dict", "keys", "=", "list", "(", "tabular_data", ".", "keys", "(", ")", ")", "# columns have to be transposed", "rows", "=", "list", "(", "izip_longest", "(", "*", "list", "(", "tabular_data", ".", "values", "(", ")", ")", ")", ")", "elif", "hasattr", "(", "tabular_data", ",", "\"index\"", ")", ":", "# values is a property, has .index then", "# it's likely a pandas.DataFrame (pandas 0.11.0)", "keys", "=", "list", "(", "tabular_data", ".", "keys", "(", ")", ")", "# values matrix doesn't need to be transposed", "vals", "=", "tabular_data", ".", "values", "names", "=", "tabular_data", ".", "index", "rows", "=", "[", "[", "v", "]", "+", "list", "(", "row", ")", "for", "v", ",", "row", "in", "zip", "(", "names", ",", "vals", ")", "]", "else", ":", "raise", "ValueError", "(", "\"tabular data doesn't appear to be a dict \"", "\"or a DataFrame\"", ")", "if", "headers", "==", "\"keys\"", ":", "headers", "=", "list", "(", "map", "(", "_text_type", ",", "keys", ")", ")", "# headers should be strings", "else", ":", "# it's, as usual, an iterable of iterables, or a NumPy array", "rows", "=", "list", "(", "tabular_data", ")", "if", "headers", "==", "\"keys\"", "and", "len", "(", "rows", ")", ">", "0", ":", "# keys are column indices", "headers", "=", "list", "(", "map", "(", "_text_type", ",", "list", "(", "range", "(", "len", "(", "rows", "[", "0", "]", ")", ")", ")", ")", ")", "# take headers from the first row if necessary", "if", "headers", "==", "\"firstrow\"", "and", "len", "(", "rows", ")", ">", "0", ":", "headers", "=", "list", "(", "map", "(", "_text_type", ",", "rows", "[", "0", "]", ")", ")", "# headers should be strings", "rows", "=", "rows", "[", "1", ":", "]", "headers", "=", "list", "(", "headers", ")", "rows", "=", "list", "(", "map", "(", "list", ",", "rows", ")", ")", "if", "sort", "and", "len", "(", "rows", ")", ">", "1", ":", "rows", "=", "sorted", "(", "rows", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "# pad with empty headers for initial columns if necessary", "if", "headers", "and", "len", "(", "rows", ")", ">", "0", ":", "nhs", "=", "len", "(", "headers", ")", "ncols", "=", "len", "(", "rows", "[", "0", "]", ")", "if", "nhs", "<", "ncols", ":", "headers", "=", "[", "\"\"", "]", "*", "(", "ncols", "-", "nhs", ")", "+", "headers", "return", "rows", ",", "headers" ]
b29614643043afd19c1d8074e8f25c6700d51a73
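Illustrative input and output for the normalization above, using a plain dict with headers="keys". This assumes the solvebio package is importable; note the sort=True default orders rows by their first column:

from solvebio.utils.tabulate import _normalize_tabular_data

data = {'name': ['b-item', 'a-item'], 'count': [2, 1]}
rows, headers = _normalize_tabular_data(data, headers='keys')
# headers == ['name', 'count']   (dict insertion order on modern Python)
# rows    == [['a-item', 1], ['b-item', 2]]   (sorted by first column)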
test
_build_row
Return a string which represents a row of data cells.
solvebio/utils/tabulate.py
def _build_row(cells, padding, begin, sep, end): "Return a string which represents a row of data cells." pad = " " * padding padded_cells = [pad + cell + pad for cell in cells] # SolveBio: we're only displaying Key-Value tuples (dimension of 2). # enforce that we don't wrap lines by setting a max # limit on row width which is equal to TTY_COLS (see printing) rendered_cells = (begin + sep.join(padded_cells) + end).rstrip() if len(rendered_cells) > TTY_COLS: if not cells[-1].endswith(" ") and not cells[-1].endswith("-"): terminating_str = " ... " else: terminating_str = "" rendered_cells = "{0}{1}{2}".format( rendered_cells[:TTY_COLS - len(terminating_str) - 1], terminating_str, end) return rendered_cells
def _build_row(cells, padding, begin, sep, end): "Return a string which represents a row of data cells." pad = " " * padding padded_cells = [pad + cell + pad for cell in cells] # SolveBio: we're only displaying Key-Value tuples (dimension of 2). # enforce that we don't wrap lines by setting a max # limit on row width which is equal to TTY_COLS (see printing) rendered_cells = (begin + sep.join(padded_cells) + end).rstrip() if len(rendered_cells) > TTY_COLS: if not cells[-1].endswith(" ") and not cells[-1].endswith("-"): terminating_str = " ... " else: terminating_str = "" rendered_cells = "{0}{1}{2}".format( rendered_cells[:TTY_COLS - len(terminating_str) - 1], terminating_str, end) return rendered_cells
[ "Return", "a", "string", "which", "represents", "a", "row", "of", "data", "cells", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L470-L489
[ "def", "_build_row", "(", "cells", ",", "padding", ",", "begin", ",", "sep", ",", "end", ")", ":", "pad", "=", "\" \"", "*", "padding", "padded_cells", "=", "[", "pad", "+", "cell", "+", "pad", "for", "cell", "in", "cells", "]", "# SolveBio: we're only displaying Key-Value tuples (dimension of 2).", "# enforce that we don't wrap lines by setting a max", "# limit on row width which is equal to TTY_COLS (see printing)", "rendered_cells", "=", "(", "begin", "+", "sep", ".", "join", "(", "padded_cells", ")", "+", "end", ")", ".", "rstrip", "(", ")", "if", "len", "(", "rendered_cells", ")", ">", "TTY_COLS", ":", "if", "not", "cells", "[", "-", "1", "]", ".", "endswith", "(", "\" \"", ")", "and", "not", "cells", "[", "-", "1", "]", ".", "endswith", "(", "\"-\"", ")", ":", "terminating_str", "=", "\" ... \"", "else", ":", "terminating_str", "=", "\"\"", "rendered_cells", "=", "\"{0}{1}{2}\"", ".", "format", "(", "rendered_cells", "[", ":", "TTY_COLS", "-", "len", "(", "terminating_str", ")", "-", "1", "]", ",", "terminating_str", ",", "end", ")", "return", "rendered_cells" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
_build_line
Return a string which represents a horizontal line.
solvebio/utils/tabulate.py
def _build_line(colwidths, padding, begin, fill, sep, end): "Return a string which represents a horizontal line." cells = [fill * (w + 2 * padding) for w in colwidths] return _build_row(cells, 0, begin, sep, end)
def _build_line(colwidths, padding, begin, fill, sep, end): "Return a string which represents a horizontal line." cells = [fill * (w + 2 * padding) for w in colwidths] return _build_row(cells, 0, begin, sep, end)
[ "Return", "a", "string", "which", "represents", "a", "horizontal", "line", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L492-L495
[ "def", "_build_line", "(", "colwidths", ",", "padding", ",", "begin", ",", "fill", ",", "sep", ",", "end", ")", ":", "cells", "=", "[", "fill", "*", "(", "w", "+", "2", "*", "padding", ")", "for", "w", "in", "colwidths", "]", "return", "_build_row", "(", "cells", ",", "0", ",", "begin", ",", "sep", ",", "end", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
_mediawiki_cell_attrs
Prefix every cell in a row with an HTML alignment attribute.
solvebio/utils/tabulate.py
def _mediawiki_cell_attrs(row, colaligns): "Prefix every cell in a row with an HTML alignment attribute." alignment = {"left": '', "right": 'align="right"| ', "center": 'align="center"| ', "decimal": 'align="right"| '} row2 = [alignment[a] + c for c, a in zip(row, colaligns)] return row2
def _mediawiki_cell_attrs(row, colaligns): "Prefix every cell in a row with an HTML alignment attribute." alignment = {"left": '', "right": 'align="right"| ', "center": 'align="center"| ', "decimal": 'align="right"| '} row2 = [alignment[a] + c for c, a in zip(row, colaligns)] return row2
[ "Prefix", "every", "cell", "in", "a", "row", "with", "an", "HTML", "alignment", "attribute", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L498-L505
[ "def", "_mediawiki_cell_attrs", "(", "row", ",", "colaligns", ")", ":", "alignment", "=", "{", "\"left\"", ":", "''", ",", "\"right\"", ":", "'align=\"right\"| '", ",", "\"center\"", ":", "'align=\"center\"| '", ",", "\"decimal\"", ":", "'align=\"right\"| '", "}", "row2", "=", "[", "alignment", "[", "a", "]", "+", "c", "for", "c", ",", "a", "in", "zip", "(", "row", ",", "colaligns", ")", "]", "return", "row2" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
_line_segment_with_colons
Return a segment of a horizontal line with optional colons which indicate column's alignment (as in `pipe` output format).
solvebio/utils/tabulate.py
def _line_segment_with_colons(linefmt, align, colwidth): """Return a segment of a horizontal line with optional colons which indicate column's alignment (as in `pipe` output format).""" fill = linefmt.hline w = colwidth if align in ["right", "decimal"]: return (fill[0] * (w - 1)) + ":" elif align == "center": return ":" + (fill[0] * (w - 2)) + ":" elif align == "left": return ":" + (fill[0] * (w - 1)) else: return fill[0] * w
def _line_segment_with_colons(linefmt, align, colwidth): """Return a segment of a horizontal line with optional colons which indicate column's alignment (as in `pipe` output format).""" fill = linefmt.hline w = colwidth if align in ["right", "decimal"]: return (fill[0] * (w - 1)) + ":" elif align == "center": return ":" + (fill[0] * (w - 2)) + ":" elif align == "left": return ":" + (fill[0] * (w - 1)) else: return fill[0] * w
[ "Return", "a", "segment", "of", "a", "horizontal", "line", "with", "optional", "colons", "which", "indicate", "column", "s", "alignment", "(", "as", "in", "pipe", "output", "format", ")", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L508-L520
[ "def", "_line_segment_with_colons", "(", "linefmt", ",", "align", ",", "colwidth", ")", ":", "fill", "=", "linefmt", ".", "hline", "w", "=", "colwidth", "if", "align", "in", "[", "\"right\"", ",", "\"decimal\"", "]", ":", "return", "(", "fill", "[", "0", "]", "*", "(", "w", "-", "1", ")", ")", "+", "\":\"", "elif", "align", "==", "\"center\"", ":", "return", "\":\"", "+", "(", "fill", "[", "0", "]", "*", "(", "w", "-", "2", ")", ")", "+", "\":\"", "elif", "align", "==", "\"left\"", ":", "return", "\":\"", "+", "(", "fill", "[", "0", "]", "*", "(", "w", "-", "1", ")", ")", "else", ":", "return", "fill", "[", "0", "]", "*", "w" ]
b29614643043afd19c1d8074e8f25c6700d51a73
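A self-contained re-statement of the colon-placement logic above, with assertions showing the pipe-format segments for a width-5 column (the fill character is assumed to be '-'):

def line_segment_with_colons(align, w, fill='-'):
    # Mirrors _line_segment_with_colons() for a plain '-' horizontal line.
    if align in ('right', 'decimal'):
        return fill * (w - 1) + ':'
    if align == 'center':
        return ':' + fill * (w - 2) + ':'
    if align == 'left':
        return ':' + fill * (w - 1)
    return fill * w

assert line_segment_with_colons('right', 5) == '----:'
assert line_segment_with_colons('center', 5) == ':---:'
assert line_segment_with_colons('left', 5) == ':----'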
test
_format_table
Produce a plain-text representation of the table.
solvebio/utils/tabulate.py
def _format_table(fmt, headers, rows, colwidths, colaligns): """Produce a plain-text representation of the table.""" lines = [] hidden = fmt.with_header_hide if headers else fmt.without_header_hide pad = fmt.padding headerrow = fmt.headerrow if fmt.headerrow else fmt.datarow if fmt.lineabove and "lineabove" not in hidden: lines.append(_build_line(colwidths, pad, *fmt.lineabove)) if headers: lines.append(_build_row(headers, pad, *headerrow)) if fmt.linebelowheader and "linebelowheader" not in hidden: begin, fill, sep, end = fmt.linebelowheader if fmt.usecolons: segs = [ _line_segment_with_colons(fmt.linebelowheader, a, w + 2 * pad) for w, a in zip(colwidths, colaligns)] lines.append(_build_row(segs, 0, begin, sep, end)) else: lines.append(_build_line(colwidths, pad, *fmt.linebelowheader)) if rows and fmt.linebetweenrows and "linebetweenrows" not in hidden: # initial rows with a line below for row in rows[:-1]: lines.append(_build_row(row, pad, *fmt.datarow)) lines.append(_build_line(colwidths, pad, *fmt.linebetweenrows)) # the last row without a line below lines.append(_build_row(rows[-1], pad, *fmt.datarow)) else: for row in rows: lines.append(_build_row(row, pad, *fmt.datarow)) if fmt.linebelow and "linebelow" not in hidden: lines.append(_build_line(colwidths, pad, *fmt.linebelow)) return "\n".join(lines)
[ "Produce", "a", "plain", "-", "text", "representation", "of", "the", "table", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L523-L560
[ "def", "_format_table", "(", "fmt", ",", "headers", ",", "rows", ",", "colwidths", ",", "colaligns", ")", ":", "lines", "=", "[", "]", "hidden", "=", "fmt", ".", "with_header_hide", "if", "headers", "else", "fmt", ".", "without_header_hide", "pad", "=", "fmt", ".", "padding", "headerrow", "=", "fmt", ".", "headerrow", "if", "fmt", ".", "headerrow", "else", "fmt", ".", "datarow", "if", "fmt", ".", "lineabove", "and", "\"lineabove\"", "not", "in", "hidden", ":", "lines", ".", "append", "(", "_build_line", "(", "colwidths", ",", "pad", ",", "*", "fmt", ".", "lineabove", ")", ")", "if", "headers", ":", "lines", ".", "append", "(", "_build_row", "(", "headers", ",", "pad", ",", "*", "headerrow", ")", ")", "if", "fmt", ".", "linebelowheader", "and", "\"linebelowheader\"", "not", "in", "hidden", ":", "begin", ",", "fill", ",", "sep", ",", "end", "=", "fmt", ".", "linebelowheader", "if", "fmt", ".", "usecolons", ":", "segs", "=", "[", "_line_segment_with_colons", "(", "fmt", ".", "linebelowheader", ",", "a", ",", "w", "+", "2", "*", "pad", ")", "for", "w", ",", "a", "in", "zip", "(", "colwidths", ",", "colaligns", ")", "]", "lines", ".", "append", "(", "_build_row", "(", "segs", ",", "0", ",", "begin", ",", "sep", ",", "end", ")", ")", "else", ":", "lines", ".", "append", "(", "_build_line", "(", "colwidths", ",", "pad", ",", "*", "fmt", ".", "linebelowheader", ")", ")", "if", "rows", "and", "fmt", ".", "linebetweenrows", "and", "\"linebetweenrows\"", "not", "in", "hidden", ":", "# initial rows with a line below", "for", "row", "in", "rows", "[", ":", "-", "1", "]", ":", "lines", ".", "append", "(", "_build_row", "(", "row", ",", "pad", ",", "*", "fmt", ".", "datarow", ")", ")", "lines", ".", "append", "(", "_build_line", "(", "colwidths", ",", "pad", ",", "*", "fmt", ".", "linebetweenrows", ")", ")", "# the last row without a line below", "lines", ".", "append", "(", "_build_row", "(", "rows", "[", "-", "1", "]", ",", "pad", ",", "*", "fmt", ".", "datarow", ")", ")", "else", ":", "for", "row", "in", "rows", ":", "lines", ".", "append", "(", "_build_row", "(", "row", ",", "pad", ",", "*", "fmt", ".", "datarow", ")", ")", "if", "fmt", ".", "linebelow", "and", "\"linebelow\"", "not", "in", "hidden", ":", "lines", ".", "append", "(", "_build_line", "(", "colwidths", ",", "pad", ",", "*", "fmt", ".", "linebelow", ")", ")", "return", "\"\\n\"", ".", "join", "(", "lines", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
Dataset.import_file
This is a shortcut to creating a DatasetImport. Can't use "import()" because of Python.
solvebio/resource/dataset.py
def import_file(self, path, **kwargs):
    """
    This is a shortcut to creating a DatasetImport.
    Can't use "import()" because of Python.
    """
    from . import Manifest
    from . import DatasetImport

    if 'id' not in self or not self['id']:
        raise Exception(
            'No Dataset ID found. '
            'Please instantiate or retrieve a dataset '
            'with an ID.')

    manifest = Manifest()
    manifest.add(path)
    return DatasetImport.create(
        dataset_id=self['id'],
        manifest=manifest.manifest,
        **kwargs)
[ "This", "is", "a", "shortcut", "to", "creating", "a", "DatasetImport", ".", "Can", "t", "use", "import", "()", "because", "of", "Python", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/dataset.py#L264-L283
[ "def", "import_file", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "from", ".", "import", "Manifest", "from", ".", "import", "DatasetImport", "if", "'id'", "not", "in", "self", "or", "not", "self", "[", "'id'", "]", ":", "raise", "Exception", "(", "'No Dataset ID found. '", "'Please instantiate or retrieve a dataset '", "'with an ID.'", ")", "manifest", "=", "Manifest", "(", ")", "manifest", ".", "add", "(", "path", ")", "return", "DatasetImport", ".", "create", "(", "dataset_id", "=", "self", "[", "'id'", "]", ",", "manifest", "=", "manifest", ".", "manifest", ",", "*", "*", "kwargs", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
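A minimal usage sketch (the dataset ID, file path, and commit_mode value are hypothetical; extra keyword arguments are forwarded to DatasetImport.create):

import solvebio

dataset = solvebio.Dataset.retrieve(12345)       # hypothetical dataset ID
imp = dataset.import_file('~/variants.vcf.gz',   # hypothetical local file
                          commit_mode='append')  # forwarded via **kwargs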
test
Dataset.migrate
Migrate the data from this dataset to a target dataset.

Valid optional kwargs include:

* source_params
* target_fields
* include_errors
* commit_mode
solvebio/resource/dataset.py
def migrate(self, target, follow=True, **kwargs):
    """
    Migrate the data from this dataset to a target dataset.

    Valid optional kwargs include:

    * source_params
    * target_fields
    * include_errors
    * commit_mode
    """
    if 'id' not in self or not self['id']:
        raise Exception(
            'No source dataset ID found. '
            'Please instantiate the Dataset '
            'object with an ID.')

    # Target can be provided as a Dataset, or as an ID.
    if isinstance(target, Dataset):
        target_id = target.id
    else:
        target_id = target

    migration = DatasetMigration.create(
        source_id=self['id'],
        target_id=target_id,
        **kwargs)

    if follow:
        migration.follow()

    return migration
[ "Migrate", "the", "data", "from", "this", "dataset", "to", "a", "target", "dataset", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/dataset.py#L302-L334
[ "def", "migrate", "(", "self", ",", "target", ",", "follow", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "'id'", "not", "in", "self", "or", "not", "self", "[", "'id'", "]", ":", "raise", "Exception", "(", "'No source dataset ID found. '", "'Please instantiate the Dataset '", "'object with an ID.'", ")", "# Target can be provided as a Dataset, or as an ID.", "if", "isinstance", "(", "target", ",", "Dataset", ")", ":", "target_id", "=", "target", ".", "id", "else", ":", "target_id", "=", "target", "migration", "=", "DatasetMigration", ".", "create", "(", "source_id", "=", "self", "[", "'id'", "]", ",", "target_id", "=", "target_id", ",", "*", "*", "kwargs", ")", "if", "follow", ":", "migration", ".", "follow", "(", ")", "return", "migration" ]
b29614643043afd19c1d8074e8f25c6700d51a73
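For example (the IDs are hypothetical; the target may be a Dataset instance or a raw ID):

source = solvebio.Dataset.retrieve(111)    # hypothetical source dataset ID
migration = source.migrate(target=222,     # hypothetical target dataset ID
                           follow=False)
migration.follow()  # equivalent to passing follow=True above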
test
Object.validate_full_path
Helper method to parse a full or partial path and return
a full path as well as a dict containing path parts.

Uses the following rules when processing the path:

    * If no domain, uses the current user's account domain
    * If no vault, uses the current user's personal vault.
    * If no path, uses '/' (vault root)

Returns a tuple containing:

    * The validated full_path
    * A dictionary with the components:
        * domain: the domain of the vault
        * vault: the name of the vault, without domain
        * vault_full_path: domain:vault
        * path: the object path within the vault
        * parent_path: the parent path to the object
        * filename: the object's filename (if any)
        * full_path: the validated full path

The following components may be overridden using kwargs:

    * vault
    * path

Object paths (also known as "paths") must begin with a forward slash.
The following path formats are supported:

    domain:vault:/path -> object "path" in the root of "domain:vault"
    domain:vault/path -> object "path" in the root of "domain:vault"
    vault:/path -> object "path" in the root of "vault"
    vault/path -> object "path" in the root of "vault"
    ~/path -> object "path" in the root of personal vault
    vault/ -> root of "vault"
    ~/ -> root of your personal vault

The following formats are not supported:

    path -> invalid/ambiguous path (exception)
    vault:path -> invalid/ambiguous path (exception)
    vault:path/path -> unsupported, interpreted as domain:vault/path
solvebio/resource/object.py
def validate_full_path(cls, full_path, **kwargs):
    """Helper method to parse a full or partial path and return
    a full path as well as a dict containing path parts.

    Uses the following rules when processing the path:

        * If no domain, uses the current user's account domain
        * If no vault, uses the current user's personal vault.
        * If no path, uses '/' (vault root)

    Returns a tuple containing:

        * The validated full_path
        * A dictionary with the components:
            * domain: the domain of the vault
            * vault: the name of the vault, without domain
            * vault_full_path: domain:vault
            * path: the object path within the vault
            * parent_path: the parent path to the object
            * filename: the object's filename (if any)
            * full_path: the validated full path

    The following components may be overridden using kwargs:

        * vault
        * path

    Object paths (also known as "paths") must begin with a forward slash.
    The following path formats are supported:

        domain:vault:/path -> object "path" in the root of "domain:vault"
        domain:vault/path -> object "path" in the root of "domain:vault"
        vault:/path -> object "path" in the root of "vault"
        vault/path -> object "path" in the root of "vault"
        ~/path -> object "path" in the root of personal vault
        vault/ -> root of "vault"
        ~/ -> root of your personal vault

    The following formats are not supported:

        path -> invalid/ambiguous path (exception)
        vault:path -> invalid/ambiguous path (exception)
        vault:path/path -> unsupported, interpreted as domain:vault/path

    """
    from solvebio.resource.vault import Vault

    _client = kwargs.pop('client', None) or cls._client or client

    if not full_path:
        raise Exception(
            'Invalid path: ',
            'Full path must be in one of the following formats: '
            '"vault:/path", "domain:vault:/path", or "~/path"')

    # Parse the vault's full_path, using overrides if any
    input_vault = kwargs.get('vault') or full_path
    try:
        vault_full_path, path_dict = \
            Vault.validate_full_path(input_vault, client=_client)
    except Exception as err:
        raise Exception('Could not determine vault from "{0}": {1}'
                        .format(input_vault, err))

    if kwargs.get('path'):
        # Allow override of the object_path.
        full_path = '{0}:/{1}'.format(vault_full_path, kwargs['path'])

    match = cls.PATH_RE.match(full_path)
    if match:
        object_path = match.groupdict()['path']
    else:
        raise Exception(
            'Cannot find a valid object path in "{0}". '
            'Full path must be in one of the following formats: '
            '"vault:/path", "domain:vault:/path", or "~/path"'
            .format(full_path))

    # Remove double slashes
    object_path = re.sub('//+', '/', object_path)
    if object_path != '/':
        # Remove trailing slash
        object_path = object_path.rstrip('/')

    path_dict['path'] = object_path
    # TODO: parent_path and filename
    full_path = '{domain}:{vault}:{path}'.format(**path_dict)
    path_dict['full_path'] = full_path

    return full_path, path_dict
[ "Helper", "method", "to", "parse", "a", "full", "or", "partial", "path", "and", "return", "a", "full", "path", "as", "well", "as", "a", "dict", "containing", "path", "parts", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/object.py#L46-L135
[ "def", "validate_full_path", "(", "cls", ",", "full_path", ",", "*", "*", "kwargs", ")", ":", "from", "solvebio", ".", "resource", ".", "vault", "import", "Vault", "_client", "=", "kwargs", ".", "pop", "(", "'client'", ",", "None", ")", "or", "cls", ".", "_client", "or", "client", "if", "not", "full_path", ":", "raise", "Exception", "(", "'Invalid path: '", ",", "'Full path must be in one of the following formats: '", "'\"vault:/path\", \"domain:vault:/path\", or \"~/path\"'", ")", "# Parse the vault's full_path, using overrides if any", "input_vault", "=", "kwargs", ".", "get", "(", "'vault'", ")", "or", "full_path", "try", ":", "vault_full_path", ",", "path_dict", "=", "Vault", ".", "validate_full_path", "(", "input_vault", ",", "client", "=", "_client", ")", "except", "Exception", "as", "err", ":", "raise", "Exception", "(", "'Could not determine vault from \"{0}\": {1}'", ".", "format", "(", "input_vault", ",", "err", ")", ")", "if", "kwargs", ".", "get", "(", "'path'", ")", ":", "# Allow override of the object_path.", "full_path", "=", "'{0}:/{1}'", ".", "format", "(", "vault_full_path", ",", "kwargs", "[", "'path'", "]", ")", "match", "=", "cls", ".", "PATH_RE", ".", "match", "(", "full_path", ")", "if", "match", ":", "object_path", "=", "match", ".", "groupdict", "(", ")", "[", "'path'", "]", "else", ":", "raise", "Exception", "(", "'Cannot find a valid object path in \"{0}\". '", "'Full path must be in one of the following formats: '", "'\"vault:/path\", \"domain:vault:/path\", or \"~/path\"'", ".", "format", "(", "full_path", ")", ")", "# Remove double slashes", "object_path", "=", "re", ".", "sub", "(", "'//+'", ",", "'/'", ",", "object_path", ")", "if", "object_path", "!=", "'/'", ":", "# Remove trailing slash", "object_path", "=", "object_path", ".", "rstrip", "(", "'/'", ")", "path_dict", "[", "'path'", "]", "=", "object_path", "# TODO: parent_path and filename", "full_path", "=", "'{domain}:{vault}:{path}'", ".", "format", "(", "*", "*", "path_dict", ")", "path_dict", "[", "'full_path'", "]", "=", "full_path", "return", "full_path", ",", "path_dict" ]
b29614643043afd19c1d8074e8f25c6700d51a73
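For example, with a partial path (the resolved domain below is hypothetical, since it is filled in from the current user's account):

full_path, parts = Object.validate_full_path('myVault/folder/file.vcf')
# full_path      -> 'mydomain:myVault:/folder/file.vcf'
# parts['vault'] -> 'myVault'
# parts['path']  -> '/folder/file.vcf'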
test
create_dataset
Attempt to create a new dataset given the following params:

* template_id
* template_file
* capacity
* create_vault
* [argument] dataset name or full path

NOTE: genome_build has been deprecated and is no longer used.
solvebio/cli/data.py
def create_dataset(args):
    """
    Attempt to create a new dataset given the following params:

    * template_id
    * template_file
    * capacity
    * create_vault
    * [argument] dataset name or full path

    NOTE: genome_build has been deprecated and is no longer used.
    """
    # For backwards compatibility, the "full_path" argument
    # can be a dataset filename, but only if vault and path
    # are set. If vault/path are both provided and there
    # are no forward-slashes in the "full_path", assume
    # the user has provided a dataset filename.
    if '/' not in args.full_path and args.vault and args.path:
        full_path, path_dict = Object.validate_full_path(
            '{0}:/{1}/{2}'.format(args.vault, args.path, args.full_path))
    else:
        full_path, path_dict = Object.validate_full_path(
            args.full_path, vault=args.vault, path=args.path)

    # Accept a template_id or a template_file
    if args.template_id:
        # Validate the template ID
        try:
            tpl = solvebio.DatasetTemplate.retrieve(args.template_id)
        except solvebio.SolveError as e:
            if e.status_code != 404:
                raise e
            print("No template with ID {0} found!"
                  .format(args.template_id))
            sys.exit(1)
    elif args.template_file:
        mode = 'r'
        fopen = open
        if check_gzip_path(args.template_file):
            mode = 'rb'
            fopen = gzip.open

        # Validate the template file
        with fopen(args.template_file, mode) as fp:
            try:
                tpl_json = json.load(fp)
            except ValueError:
                # json.JSONDecodeError subclasses ValueError; the
                # original code used a bare "except:" here.
                print('Template file {0} could not be loaded. Please '
                      'pass valid JSON'.format(args.template_file))
                sys.exit(1)

        tpl = solvebio.DatasetTemplate.create(**tpl_json)
        print("A new dataset template was created with id: {0}".format(tpl.id))
    else:
        print("Creating a new dataset {0} without a template."
              .format(full_path))
        tpl = None

    fields = []
    entity_type = None
    description = None

    if tpl:
        print("Creating new dataset {0} using the template '{1}'."
              .format(full_path, tpl.name))
        fields = tpl.fields
        entity_type = tpl.entity_type
        # include template used to create
        description = 'Created with dataset template: {0}'.format(str(tpl.id))

    return solvebio.Dataset.get_or_create_by_full_path(
        full_path,
        capacity=args.capacity,
        entity_type=entity_type,
        fields=fields,
        description=description,
        create_vault=args.create_vault,
    )
[ "Attempt", "to", "create", "a", "new", "dataset", "given", "the", "following", "params", ":" ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/data.py#L112-L189
[ "def", "create_dataset", "(", "args", ")", ":", "# For backwards compatibility, the \"full_path\" argument", "# can be a dataset filename, but only if vault and path", "# are set. If vault/path are both provided and there", "# are no forward-slashes in the \"full_path\", assume", "# the user has provided a dataset filename.", "if", "'/'", "not", "in", "args", ".", "full_path", "and", "args", ".", "vault", "and", "args", ".", "path", ":", "full_path", ",", "path_dict", "=", "Object", ".", "validate_full_path", "(", "'{0}:/{1}/{2}'", ".", "format", "(", "args", ".", "vault", ",", "args", ".", "path", ",", "args", ".", "full_path", ")", ")", "else", ":", "full_path", ",", "path_dict", "=", "Object", ".", "validate_full_path", "(", "args", ".", "full_path", ",", "vault", "=", "args", ".", "vault", ",", "path", "=", "args", ".", "path", ")", "# Accept a template_id or a template_file", "if", "args", ".", "template_id", ":", "# Validate the template ID", "try", ":", "tpl", "=", "solvebio", ".", "DatasetTemplate", ".", "retrieve", "(", "args", ".", "template_id", ")", "except", "solvebio", ".", "SolveError", "as", "e", ":", "if", "e", ".", "status_code", "!=", "404", ":", "raise", "e", "print", "(", "\"No template with ID {0} found!\"", ".", "format", "(", "args", ".", "template_id", ")", ")", "sys", ".", "exit", "(", "1", ")", "elif", "args", ".", "template_file", ":", "mode", "=", "'r'", "fopen", "=", "open", "if", "check_gzip_path", "(", "args", ".", "template_file", ")", ":", "mode", "=", "'rb'", "fopen", "=", "gzip", ".", "open", "# Validate the template file", "with", "fopen", "(", "args", ".", "template_file", ",", "mode", ")", "as", "fp", ":", "try", ":", "tpl_json", "=", "json", ".", "load", "(", "fp", ")", "except", ":", "print", "(", "'Template file {0} could not be loaded. Please '", "'pass valid JSON'", ".", "format", "(", "args", ".", "template_file", ")", ")", "sys", ".", "exit", "(", "1", ")", "tpl", "=", "solvebio", ".", "DatasetTemplate", ".", "create", "(", "*", "*", "tpl_json", ")", "print", "(", "\"A new dataset template was created with id: {0}\"", ".", "format", "(", "tpl", ".", "id", ")", ")", "else", ":", "print", "(", "\"Creating a new dataset {0} without a template.\"", ".", "format", "(", "full_path", ")", ")", "tpl", "=", "None", "fields", "=", "[", "]", "entity_type", "=", "None", "description", "=", "None", "if", "tpl", ":", "print", "(", "\"Creating new dataset {0} using the template '{1}'.\"", ".", "format", "(", "full_path", ",", "tpl", ".", "name", ")", ")", "fields", "=", "tpl", ".", "fields", "entity_type", "=", "tpl", ".", "entity_type", "# include template used to create", "description", "=", "'Created with dataset template: {0}'", ".", "format", "(", "str", "(", "tpl", ".", "id", ")", ")", "return", "solvebio", ".", "Dataset", ".", "get_or_create_by_full_path", "(", "full_path", ",", "capacity", "=", "args", ".", "capacity", ",", "entity_type", "=", "entity_type", ",", "fields", "=", "fields", ",", "description", "=", "description", ",", "create_vault", "=", "args", ".", "create_vault", ",", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
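Because the handler reads everything from the argparse namespace, it can be exercised directly; a sketch with hypothetical values ('small' as a capacity value is an assumption):

from argparse import Namespace

args = Namespace(full_path='~/my-datasets/clinvar',  # hypothetical path
                 vault=None, path=None,
                 template_id=None, template_file=None,
                 capacity='small', create_vault=False)
dataset = create_dataset(args)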
test
upload
Given a folder or file, upload all the folders and files contained within it, skipping ones that already exist on the remote.
solvebio/cli/data.py
def upload(args):
    """
    Given a folder or file, upload all the folders and files contained
    within it, skipping ones that already exist on the remote.
    """
    base_remote_path, path_dict = Object.validate_full_path(
        args.full_path, vault=args.vault, path=args.path)

    # Assert the vault exists and is accessible
    vault = Vault.get_by_full_path(path_dict['vault_full_path'])

    # If not the vault root, validate remote path exists and is a folder
    if path_dict['path'] != '/':
        Object.get_by_full_path(base_remote_path, assert_type='folder')

    for local_path in args.local_path:
        local_path = local_path.rstrip('/')
        local_start = os.path.basename(local_path)

        if os.path.isdir(local_path):
            _upload_folder(path_dict['domain'], vault,
                           base_remote_path, local_path,
                           local_start)
        else:
            Object.upload_file(local_path, path_dict['path'],
                               vault.full_path)
[ "Given", "a", "folder", "or", "file", "upload", "all", "the", "folders", "and", "files", "contained", "within", "it", "skipping", "ones", "that", "already", "exist", "on", "the", "remote", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/data.py#L192-L216
[ "def", "upload", "(", "args", ")", ":", "base_remote_path", ",", "path_dict", "=", "Object", ".", "validate_full_path", "(", "args", ".", "full_path", ",", "vault", "=", "args", ".", "vault", ",", "path", "=", "args", ".", "path", ")", "# Assert the vault exists and is accessible", "vault", "=", "Vault", ".", "get_by_full_path", "(", "path_dict", "[", "'vault_full_path'", "]", ")", "# If not the vault root, validate remote path exists and is a folder", "if", "path_dict", "[", "'path'", "]", "!=", "'/'", ":", "Object", ".", "get_by_full_path", "(", "base_remote_path", ",", "assert_type", "=", "'folder'", ")", "for", "local_path", "in", "args", ".", "local_path", ":", "local_path", "=", "local_path", ".", "rstrip", "(", "'/'", ")", "local_start", "=", "os", ".", "path", ".", "basename", "(", "local_path", ")", "if", "os", ".", "path", ".", "isdir", "(", "local_path", ")", ":", "_upload_folder", "(", "path_dict", "[", "'domain'", "]", ",", "vault", ",", "base_remote_path", ",", "local_path", ",", "local_start", ")", "else", ":", "Object", ".", "upload_file", "(", "local_path", ",", "path_dict", "[", "'path'", "]", ",", "vault", ".", "full_path", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
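A sketch of invoking the handler directly (both paths are hypothetical):

from argparse import Namespace

args = Namespace(full_path='~/uploads',   # hypothetical remote folder
                 vault=None, path=None,
                 local_path=['./data'])   # hypothetical local folder(s)
upload(args)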
test
import_file
Given a dataset and a local path, upload and import the file(s).

Command arguments (args):

* create_dataset
* template_id
* full_path
* vault (optional, overrides the vault in full_path)
* path (optional, overrides the path in full_path)
* commit_mode
* capacity
* file (list)
* follow (default: False)
solvebio/cli/data.py
def import_file(args):
    """
    Given a dataset and a local path, upload and import the file(s).

    Command arguments (args):

        * create_dataset
        * template_id
        * full_path
        * vault (optional, overrides the vault in full_path)
        * path (optional, overrides the path in full_path)
        * commit_mode
        * capacity
        * file (list)
        * follow (default: False)

    """
    full_path, path_dict = Object.validate_full_path(
        args.full_path, vault=args.vault, path=args.path)

    # Ensure the dataset exists. Create if necessary.
    if args.create_dataset:
        dataset = create_dataset(args)
    else:
        try:
            dataset = solvebio.Dataset.get_by_full_path(full_path)
        except solvebio.SolveError as e:
            if e.status_code != 404:
                raise e
            print("Dataset not found: {0}".format(full_path))
            print("Tip: use the --create-dataset flag "
                  "to create one from a template")
            sys.exit(1)

    # Generate a manifest from the local files
    manifest = solvebio.Manifest()
    manifest.add(*args.file)

    # Create the manifest-based import
    imp = solvebio.DatasetImport.create(
        dataset_id=dataset.id,
        manifest=manifest.manifest,
        commit_mode=args.commit_mode
    )

    if args.follow:
        imp.follow()
    else:
        mesh_url = 'https://my.solvebio.com/activity/'
        print("Your import has been submitted, view details at: {0}"
              .format(mesh_url))
[ "Given", "a", "dataset", "and", "a", "local", "path", "upload", "and", "import", "the", "file", "(", "s", ")", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/data.py#L219-L270
[ "def", "import_file", "(", "args", ")", ":", "full_path", ",", "path_dict", "=", "Object", ".", "validate_full_path", "(", "args", ".", "full_path", ",", "vault", "=", "args", ".", "vault", ",", "path", "=", "args", ".", "path", ")", "# Ensure the dataset exists. Create if necessary.", "if", "args", ".", "create_dataset", ":", "dataset", "=", "create_dataset", "(", "args", ")", "else", ":", "try", ":", "dataset", "=", "solvebio", ".", "Dataset", ".", "get_by_full_path", "(", "full_path", ")", "except", "solvebio", ".", "SolveError", "as", "e", ":", "if", "e", ".", "status_code", "!=", "404", ":", "raise", "e", "print", "(", "\"Dataset not found: {0}\"", ".", "format", "(", "full_path", ")", ")", "print", "(", "\"Tip: use the --create-dataset flag \"", "\"to create one from a template\"", ")", "sys", ".", "exit", "(", "1", ")", "# Generate a manifest from the local files", "manifest", "=", "solvebio", ".", "Manifest", "(", ")", "manifest", ".", "add", "(", "*", "args", ".", "file", ")", "# Create the manifest-based import", "imp", "=", "solvebio", ".", "DatasetImport", ".", "create", "(", "dataset_id", "=", "dataset", ".", "id", ",", "manifest", "=", "manifest", ".", "manifest", ",", "commit_mode", "=", "args", ".", "commit_mode", ")", "if", "args", ".", "follow", ":", "imp", ".", "follow", "(", ")", "else", ":", "mesh_url", "=", "'https://my.solvebio.com/activity/'", "print", "(", "\"Your import has been submitted, view details at: {0}\"", ".", "format", "(", "mesh_url", ")", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
test
Vault.validate_full_path
Helper method to return a full path from a full or partial path.

If no domain, assumes user's account domain.
If the vault is "~", assumes personal vault.

Valid vault paths include:

    domain:vault
    domain:vault:/path
    domain:vault/path
    vault:/path
    vault
    ~/

Invalid vault paths include:

    /vault/
    /path
    /
    :/

Does not allow overrides for any vault path components.
solvebio/resource/vault.py
def validate_full_path(cls, full_path, **kwargs):
    """Helper method to return a full path from a full or partial path.

    If no domain, assumes user's account domain.
    If the vault is "~", assumes personal vault.

    Valid vault paths include:

        domain:vault
        domain:vault:/path
        domain:vault/path
        vault:/path
        vault
        ~/

    Invalid vault paths include:

        /vault/
        /path
        /
        :/

    Does not allow overrides for any vault path components.

    """
    _client = kwargs.pop('client', None) or cls._client or client

    full_path = full_path.strip()
    if not full_path:
        raise Exception(
            'Vault path "{0}" is invalid. Path must be in the format: '
            '"domain:vault:/path" or "vault:/path".'.format(full_path)
        )

    match = cls.VAULT_PATH_RE.match(full_path)
    if not match:
        raise Exception(
            'Vault path "{0}" is invalid. Path must be in the format: '
            '"domain:vault:/path" or "vault:/path".'.format(full_path)
        )

    path_parts = match.groupdict()

    # Handle the special case where "~" means personal vault
    if path_parts.get('vault') == '~':
        path_parts = dict(domain=None, vault=None)

    # If any values are None, set defaults from the user.
    if None in path_parts.values():
        user = _client.get('/v1/user', {})
        defaults = {
            'domain': user['account']['domain'],
            'vault': 'user-{0}'.format(user['id'])
        }
        path_parts = dict((k, v or defaults.get(k))
                          for k, v in path_parts.items())

    # Rebuild the full path
    full_path = '{domain}:{vault}'.format(**path_parts)
    path_parts['vault_full_path'] = full_path

    return full_path, path_parts
[ "Helper", "method", "to", "return", "a", "full", "path", "from", "a", "full", "or", "partial", "path", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/vault.py#L57-L115
[ "def", "validate_full_path", "(", "cls", ",", "full_path", ",", "*", "*", "kwargs", ")", ":", "_client", "=", "kwargs", ".", "pop", "(", "'client'", ",", "None", ")", "or", "cls", ".", "_client", "or", "client", "full_path", "=", "full_path", ".", "strip", "(", ")", "if", "not", "full_path", ":", "raise", "Exception", "(", "'Vault path \"{0}\" is invalid. Path must be in the format: '", "'\"domain:vault:/path\" or \"vault:/path\".'", ".", "format", "(", "full_path", ")", ")", "match", "=", "cls", ".", "VAULT_PATH_RE", ".", "match", "(", "full_path", ")", "if", "not", "match", ":", "raise", "Exception", "(", "'Vault path \"{0}\" is invalid. Path must be in the format: '", "'\"domain:vault:/path\" or \"vault:/path\".'", ".", "format", "(", "full_path", ")", ")", "path_parts", "=", "match", ".", "groupdict", "(", ")", "# Handle the special case where \"~\" means personal vault", "if", "path_parts", ".", "get", "(", "'vault'", ")", "==", "'~'", ":", "path_parts", "=", "dict", "(", "domain", "=", "None", ",", "vault", "=", "None", ")", "# If any values are None, set defaults from the user.", "if", "None", "in", "path_parts", ".", "values", "(", ")", ":", "user", "=", "_client", ".", "get", "(", "'/v1/user'", ",", "{", "}", ")", "defaults", "=", "{", "'domain'", ":", "user", "[", "'account'", "]", "[", "'domain'", "]", ",", "'vault'", ":", "'user-{0}'", ".", "format", "(", "user", "[", "'id'", "]", ")", "}", "path_parts", "=", "dict", "(", "(", "k", ",", "v", "or", "defaults", ".", "get", "(", "k", ")", ")", "for", "k", ",", "v", "in", "path_parts", ".", "items", "(", ")", ")", "# Rebuild the full path", "full_path", "=", "'{domain}:{vault}'", ".", "format", "(", "*", "*", "path_parts", ")", "path_parts", "[", "'vault_full_path'", "]", "=", "full_path", "return", "full_path", ",", "path_parts" ]
b29614643043afd19c1d8074e8f25c6700d51a73
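For example (the resolved values are hypothetical, since they come from the authenticated user's account):

full_path, parts = Vault.validate_full_path('~/')
# full_path                -> e.g. 'mydomain:user-123' (the personal vault)
# parts['vault_full_path'] -> same as full_path
# parts['domain']          -> 'mydomain'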
test
validate_api_host_url
Validate SolveBio API host url. Valid urls must not be empty and must contain either HTTP or HTTPS scheme.
solvebio/utils/validators.py
def validate_api_host_url(url):
    """
    Validate SolveBio API host url.
    Valid urls must not be empty and must contain
    either HTTP or HTTPS scheme.
    """
    if not url:
        raise SolveError('No SolveBio API host is set')

    parsed = urlparse(url)
    if parsed.scheme not in ['http', 'https']:
        raise SolveError(
            'Invalid API host: %s. '
            'Missing url scheme (HTTP or HTTPS).' % url
        )
    elif not parsed.netloc:
        raise SolveError('Invalid API host: %s.' % url)

    return True
[ "Validate", "SolveBio", "API", "host", "url", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/validators.py#L8-L28
[ "def", "validate_api_host_url", "(", "url", ")", ":", "if", "not", "url", ":", "raise", "SolveError", "(", "'No SolveBio API host is set'", ")", "parsed", "=", "urlparse", "(", "url", ")", "if", "parsed", ".", "scheme", "not", "in", "[", "'http'", ",", "'https'", "]", ":", "raise", "SolveError", "(", "'Invalid API host: %s. '", "'Missing url scheme (HTTP or HTTPS).'", "%", "url", ")", "elif", "not", "parsed", ".", "netloc", ":", "raise", "SolveError", "(", "'Invalid API host: %s.'", "%", "url", ")", "return", "True" ]
b29614643043afd19c1d8074e8f25c6700d51a73
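For example:

validate_api_host_url('https://api.solvebio.com')  # returns True
validate_api_host_url('api.solvebio.com')
# raises SolveError: urlparse() finds no http/https scheme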
test
Manifest.add
Add one or more files or URLs to the manifest. If files contains a glob, it is expanded. All files are uploaded to SolveBio. The Upload object is used to fill the manifest.
solvebio/resource/manifest.py
def add(self, *args):
    """
    Add one or more files or URLs to the manifest.
    If files contains a glob, it is expanded.

    All files are uploaded to SolveBio. The Upload
    object is used to fill the manifest.
    """
    def _is_url(path):
        p = urlparse(path)
        return bool(p.scheme)

    for path in args:
        path = os.path.expanduser(path)
        if _is_url(path):
            self.add_url(path)
        elif os.path.isfile(path):
            self.add_file(path)
        elif os.path.isdir(path):
            for f in os.listdir(path):
                # Join with the directory: os.listdir() returns bare
                # names, and the original code passed them through
                # unmodified, which only worked when run from inside
                # the directory itself.
                self.add_file(os.path.join(path, f))
        elif glob.glob(path):
            for f in glob.glob(path):
                self.add_file(f)
        else:
            raise ValueError(
                'Path: "{0}" is not a valid format or does not exist. '
                'Manifest paths must be files, directories, or URLs.'
                .format(path)
            )
[ "Add", "one", "or", "more", "files", "or", "URLs", "to", "the", "manifest", ".", "If", "files", "contains", "a", "glob", "it", "is", "expanded", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/resource/manifest.py#L47-L76
[ "def", "add", "(", "self", ",", "*", "args", ")", ":", "def", "_is_url", "(", "path", ")", ":", "p", "=", "urlparse", "(", "path", ")", "return", "bool", "(", "p", ".", "scheme", ")", "for", "path", "in", "args", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "if", "_is_url", "(", "path", ")", ":", "self", ".", "add_url", "(", "path", ")", "elif", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "self", ".", "add_file", "(", "path", ")", "elif", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "f", "in", "os", ".", "listdir", "(", "path", ")", ":", "self", ".", "add_file", "(", "f", ")", "elif", "glob", ".", "glob", "(", "path", ")", ":", "for", "f", "in", "glob", ".", "glob", "(", "path", ")", ":", "self", ".", "add_file", "(", "f", ")", "else", ":", "raise", "ValueError", "(", "'Path: \"{0}\" is not a valid format or does not exist. '", "'Manifest paths must be files, directories, or URLs.'", ".", "format", "(", "path", ")", ")" ]
b29614643043afd19c1d8074e8f25c6700d51a73
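A usage sketch (all paths and the URL are hypothetical):

manifest = Manifest()
manifest.add('~/data/sample.vcf.gz',                  # a single file
             '~/data/batch-*.vcf.gz',                 # a glob, expanded locally
             'https://example.com/remote/file.vcf')   # a URL, added as-is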
test
Annotator.annotate
Annotate a set of records with stored fields.

Args:
    records: A list or iterator (can be a Query object)
    chunk_size: The number of records to annotate at once (max 500).

Returns:
    A generator that yields one annotated record at a time.
solvebio/annotate.py
def annotate(self, records, **kwargs):
    """Annotate a set of records with stored fields.

    Args:
        records: A list or iterator (can be a Query object)
        chunk_size: The number of records to annotate at once (max 500).

    Returns:
        A generator that yields one annotated record at a time.
    """
    # Update annotator_params with any kwargs
    self.annotator_params.update(**kwargs)
    chunk_size = self.annotator_params.get('chunk_size', self.CHUNK_SIZE)

    chunk = []
    for i, record in enumerate(records):
        chunk.append(record)
        if (i + 1) % chunk_size == 0:
            for r in self._execute(chunk):
                yield r
            chunk = []

    if chunk:
        for r in self._execute(chunk):
            yield r
        chunk = []
[ "Annotate", "a", "set", "of", "records", "with", "stored", "fields", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/annotate.py#L39-L64
[ "def", "annotate", "(", "self", ",", "records", ",", "*", "*", "kwargs", ")", ":", "# Update annotator_params with any kwargs", "self", ".", "annotator_params", ".", "update", "(", "*", "*", "kwargs", ")", "chunk_size", "=", "self", ".", "annotator_params", ".", "get", "(", "'chunk_size'", ",", "self", ".", "CHUNK_SIZE", ")", "chunk", "=", "[", "]", "for", "i", ",", "record", "in", "enumerate", "(", "records", ")", ":", "chunk", ".", "append", "(", "record", ")", "if", "(", "i", "+", "1", ")", "%", "chunk_size", "==", "0", ":", "for", "r", "in", "self", ".", "_execute", "(", "chunk", ")", ":", "yield", "r", "chunk", "=", "[", "]", "if", "chunk", ":", "for", "r", "in", "self", ".", "_execute", "(", "chunk", ")", ":", "yield", "r", "chunk", "=", "[", "]" ]
b29614643043afd19c1d8074e8f25c6700d51a73
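The chunking pattern used above, shown in isolation (a stand-alone sketch, not part of the solvebio API):

records = range(7)
chunk, chunk_size = [], 3
for i, record in enumerate(records):
    chunk.append(record)
    if (i + 1) % chunk_size == 0:
        print(chunk)   # [0, 1, 2], then [3, 4, 5]
        chunk = []
if chunk:
    print(chunk)       # [6], the final partial chunk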
test
Expression.evaluate
Evaluates the expression with the provided context and format.
solvebio/annotate.py
def evaluate(self, data=None, data_type='string', is_list=False):
    """Evaluates the expression with the provided context and format."""
    payload = {
        'data': data,
        'expression': self.expr,
        'data_type': data_type,
        'is_list': is_list
    }
    res = self._client.post('/v1/evaluate', payload)
    return res['result']
[ "Evaluates", "the", "expression", "with", "the", "provided", "context", "and", "format", "." ]
solvebio/solvebio-python
python
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/annotate.py#L88-L97
[ "def", "evaluate", "(", "self", ",", "data", "=", "None", ",", "data_type", "=", "'string'", ",", "is_list", "=", "False", ")", ":", "payload", "=", "{", "'data'", ":", "data", ",", "'expression'", ":", "self", ".", "expr", ",", "'data_type'", ":", "data_type", ",", "'is_list'", ":", "is_list", "}", "res", "=", "self", ".", "_client", ".", "post", "(", "'/v1/evaluate'", ",", "payload", ")", "return", "res", "[", "'result'", "]" ]
b29614643043afd19c1d8074e8f25c6700d51a73
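A usage sketch (the Expression constructor signature and the expression syntax are assumed here; valid data_type values beyond the 'string' default are defined server-side):

expr = Expression('"Hello, " + name')           # hypothetical expression
result = expr.evaluate(data={'name': 'World'})
# result -> 'Hello, World', evaluated server-side via POST /v1/evaluate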
test
format_output
Format output using *format_name*.

This is a wrapper around the :class:`TabularOutputFormatter` class.

:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param str format_name: The display format to use.
:param \*\*kwargs: Optional arguments for the formatter.
:return: The formatted data.
:rtype: str
cli_helpers/tabular_output/output_formatter.py
def format_output(data, headers, format_name, **kwargs):
    """Format output using *format_name*.

    This is a wrapper around the :class:`TabularOutputFormatter` class.

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param str format_name: The display format to use.
    :param \*\*kwargs: Optional arguments for the formatter.
    :return: The formatted data.
    :rtype: str

    """
    formatter = TabularOutputFormatter(format_name=format_name)
    return formatter.format_output(data, headers, **kwargs)
[ "Format", "output", "using", "*", "format_name", "*", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/output_formatter.py#L181-L195
[ "def", "format_output", "(", "data", ",", "headers", ",", "format_name", ",", "*", "*", "kwargs", ")", ":", "formatter", "=", "TabularOutputFormatter", "(", "format_name", "=", "format_name", ")", "return", "formatter", ".", "format_output", "(", "data", ",", "headers", ",", "*", "*", "kwargs", ")" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
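For example ('ascii' is one of cli_helpers' built-in format names):

from cli_helpers.tabular_output import format_output

print(format_output([[1, 'apple'], [2, 'banana']],
                    ['id', 'fruit'], format_name='ascii'))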
test
TabularOutputFormatter.format_name
Set the default format name.

:param str format_name: The display format name.
:raises ValueError: if the format is not recognized.
cli_helpers/tabular_output/output_formatter.py
def format_name(self, format_name):
    """Set the default format name.

    :param str format_name: The display format name.
    :raises ValueError: if the format is not recognized.

    """
    if format_name in self.supported_formats:
        self._format_name = format_name
    else:
        raise ValueError('unrecognized format_name "{}"'.format(
            format_name))
[ "Set", "the", "default", "format", "name", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/output_formatter.py#L89-L100
[ "def", "format_name", "(", "self", ",", "format_name", ")", ":", "if", "format_name", "in", "self", ".", "supported_formats", ":", "self", ".", "_format_name", "=", "format_name", "else", ":", "raise", "ValueError", "(", "'unrecognized format_name \"{}\"'", ".", "format", "(", "format_name", ")", ")" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
test
TabularOutputFormatter.register_new_formatter
Register a new output formatter.

:param str format_name: The name of the format.
:param callable handler: The function that formats the data.
:param tuple preprocessors: The preprocessors to call before formatting.
:param dict kwargs: Keys/values for keyword argument defaults.
cli_helpers/tabular_output/output_formatter.py
def register_new_formatter(cls, format_name, handler, preprocessors=(),
                           kwargs=None):
    """Register a new output formatter.

    :param str format_name: The name of the format.
    :param callable handler: The function that formats the data.
    :param tuple preprocessors: The preprocessors to call before
                                formatting.
    :param dict kwargs: Keys/values for keyword argument defaults.

    """
    cls._output_formats[format_name] = OutputFormatHandler(
        format_name, preprocessors, handler, kwargs or {})
[ "Register", "a", "new", "output", "formatter", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/output_formatter.py#L108-L120
[ "def", "register_new_formatter", "(", "cls", ",", "format_name", ",", "handler", ",", "preprocessors", "=", "(", ")", ",", "kwargs", "=", "None", ")", ":", "cls", ".", "_output_formats", "[", "format_name", "]", "=", "OutputFormatHandler", "(", "format_name", ",", "preprocessors", ",", "handler", ",", "kwargs", "or", "{", "}", ")" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
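A minimal custom formatter (the 'upper-tsv' name is hypothetical; note the handler must accept the column_types keyword that format_output forwards):

def tsv_upper(data, headers, **_):
    # Uppercased headers, tab-separated rows; **_ swallows column_types etc.
    out = ['\t'.join(map(str, headers)).upper()]
    out.extend('\t'.join(map(str, row)) for row in data)
    return '\n'.join(out)

TabularOutputFormatter.register_new_formatter('upper-tsv', tsv_upper)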
test
TabularOutputFormatter.format_output
Format the headers and data using a specific formatter.

*format_name* must be a supported formatter (see :attr:`supported_formats`).

:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param str format_name: The display format to use (optional, if the
    :class:`TabularOutputFormatter` object has a default format set).
:param tuple preprocessors: Additional preprocessors to call before any
    formatter preprocessors.
:param \*\*kwargs: Optional arguments for the formatter.
:return: The formatted data.
:rtype: str
:raises ValueError: If the *format_name* is not recognized.
cli_helpers/tabular_output/output_formatter.py
def format_output(self, data, headers, format_name=None,
                  preprocessors=(), column_types=None, **kwargs):
    """Format the headers and data using a specific formatter.

    *format_name* must be a supported formatter (see
    :attr:`supported_formats`).

    :param iterable data: An :term:`iterable` (e.g. list) of rows.
    :param iterable headers: The column headers.
    :param str format_name: The display format to use (optional, if the
        :class:`TabularOutputFormatter` object has a default format set).
    :param tuple preprocessors: Additional preprocessors to call before
                                any formatter preprocessors.
    :param \*\*kwargs: Optional arguments for the formatter.
    :return: The formatted data.
    :rtype: str
    :raises ValueError: If the *format_name* is not recognized.

    """
    format_name = format_name or self._format_name
    if format_name not in self.supported_formats:
        raise ValueError('unrecognized format "{}"'.format(format_name))

    (_, _preprocessors, formatter,
     fkwargs) = self._output_formats[format_name]
    fkwargs.update(kwargs)
    if column_types is None:
        data = list(data)
        column_types = self._get_column_types(data)
    for f in unique_items(preprocessors + _preprocessors):
        data, headers = f(data, headers, column_types=column_types,
                          **fkwargs)
    return formatter(list(data), headers, column_types=column_types,
                     **fkwargs)
[ "Format", "the", "headers", "and", "data", "using", "a", "specific", "formatter", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/output_formatter.py#L122-L154
[ "def", "format_output", "(", "self", ",", "data", ",", "headers", ",", "format_name", "=", "None", ",", "preprocessors", "=", "(", ")", ",", "column_types", "=", "None", ",", "*", "*", "kwargs", ")", ":", "format_name", "=", "format_name", "or", "self", ".", "_format_name", "if", "format_name", "not", "in", "self", ".", "supported_formats", ":", "raise", "ValueError", "(", "'unrecognized format \"{}\"'", ".", "format", "(", "format_name", ")", ")", "(", "_", ",", "_preprocessors", ",", "formatter", ",", "fkwargs", ")", "=", "self", ".", "_output_formats", "[", "format_name", "]", "fkwargs", ".", "update", "(", "kwargs", ")", "if", "column_types", "is", "None", ":", "data", "=", "list", "(", "data", ")", "column_types", "=", "self", ".", "_get_column_types", "(", "data", ")", "for", "f", "in", "unique_items", "(", "preprocessors", "+", "_preprocessors", ")", ":", "data", ",", "headers", "=", "f", "(", "data", ",", "headers", ",", "column_types", "=", "column_types", ",", "*", "*", "fkwargs", ")", "return", "formatter", "(", "list", "(", "data", ")", ",", "headers", ",", "column_types", "=", "column_types", ",", "*", "*", "fkwargs", ")" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
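A usage sketch for format_output, assuming 'ascii' is among the formats the module registers by default. One subtlety visible in the source: fkwargs.update(kwargs) mutates the stored kwargs dict in place, so keyword options passed to one call can leak into later calls for the same format.

formatter = TabularOutputFormatter(format_name='ascii')  # 'ascii' assumed registered
data = [['abc', 1], ['defg', 11.1]]
headers = ['text', 'numeric']
# Handlers typically return an iterator of lines, so join them for display.
print('\n'.join(formatter.format_output(data, headers)))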
test
TabularOutputFormatter._get_column_types
Get a list of the data types for each column in *data*.
cli_helpers/tabular_output/output_formatter.py
def _get_column_types(self, data): """Get a list of the data types for each column in *data*.""" columns = list(zip_longest(*data)) return [self._get_column_type(column) for column in columns]
def _get_column_types(self, data): """Get a list of the data types for each column in *data*.""" columns = list(zip_longest(*data)) return [self._get_column_type(column) for column in columns]
[ "Get", "a", "list", "of", "the", "data", "types", "for", "each", "column", "in", "*", "data", "*", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/output_formatter.py#L156-L159
[ "def", "_get_column_types", "(", "self", ",", "data", ")", ":", "columns", "=", "list", "(", "zip_longest", "(", "*", "data", ")", ")", "return", "[", "self", ".", "_get_column_type", "(", "column", ")", "for", "column", "in", "columns", "]" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
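zip_longest transposes the rows into columns, padding ragged rows with None so every column tuple has the same length. A plain-itertools illustration of that transposition:

from itertools import zip_longest

rows = [['a', 1], ['b']]            # second row is short
columns = list(zip_longest(*rows))  # [('a', 'b'), (1, None)]
# Each tuple is then classified by _get_column_type; assuming NoneType has
# the lowest rank in TYPES, the None padding never widens a column's type.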
test
TabularOutputFormatter._get_column_type
Get the most generic data type for iterable *column*.
cli_helpers/tabular_output/output_formatter.py
def _get_column_type(self, column): """Get the most generic data type for iterable *column*.""" type_values = [TYPES[self._get_type(v)] for v in column] inverse_types = {v: k for k, v in TYPES.items()} return inverse_types[max(type_values)]
def _get_column_type(self, column): """Get the most generic data type for iterable *column*.""" type_values = [TYPES[self._get_type(v)] for v in column] inverse_types = {v: k for k, v in TYPES.items()} return inverse_types[max(type_values)]
[ "Get", "the", "most", "generic", "data", "type", "for", "iterable", "*", "column", "*", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/output_formatter.py#L161-L165
[ "def", "_get_column_type", "(", "self", ",", "column", ")", ":", "type_values", "=", "[", "TYPES", "[", "self", ".", "_get_type", "(", "v", ")", "]", "for", "v", "in", "column", "]", "inverse_types", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "TYPES", ".", "items", "(", ")", "}", "return", "inverse_types", "[", "max", "(", "type_values", ")", "]" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
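The module-level TYPES dict is not shown in this snippet; judging from the max() here, it ranks types from most specific to most generic so that the widest type seen in the column wins. A standalone sketch with a hypothetical ranking:

TYPES = {type(None): 0, int: 1, float: 2, str: 3}  # stand-in for the real dict

def most_generic(column):
    inverse = {v: k for k, v in TYPES.items()}
    return inverse[max(TYPES[type(v)] for v in column)]

most_generic([1, 2.5, None])  # float: the int widens to float, None is ignored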
test
TabularOutputFormatter._get_type
Get the data type for *value*.
cli_helpers/tabular_output/output_formatter.py
def _get_type(self, value): """Get the data type for *value*.""" if value is None: return type(None) elif type(value) in int_types: return int elif type(value) in float_types: return float elif isinstance(value, binary_type): return binary_type else: return text_type
def _get_type(self, value): """Get the data type for *value*.""" if value is None: return type(None) elif type(value) in int_types: return int elif type(value) in float_types: return float elif isinstance(value, binary_type): return binary_type else: return text_type
[ "Get", "the", "data", "type", "for", "*", "value", "*", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/output_formatter.py#L167-L178
[ "def", "_get_type", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "type", "(", "None", ")", "elif", "type", "(", "value", ")", "in", "int_types", ":", "return", "int", "elif", "type", "(", "value", ")", "in", "float_types", ":", "return", "float", "elif", "isinstance", "(", "value", ",", "binary_type", ")", ":", "return", "binary_type", "else", ":", "return", "text_type" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
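int_types, float_types, binary_type and text_type come from the library's compat layer, which is not shown here; on Python 3 they presumably reduce to (int,), (float,), bytes and str (the real tuples may include more types). A rough equivalent under that assumption:

def get_type(value):
    if value is None:
        return type(None)
    if type(value) is int:     # type(), not isinstance(): True/False fall through
        return int
    if type(value) is float:
        return float
    if isinstance(value, bytes):
        return bytes
    return str                 # anything else is treated as text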
test
adapter
Wrap tabulate inside a function for TabularOutputFormatter.
cli_helpers/tabular_output/tabulate_adapter.py
def adapter(data, headers, table_format=None, preserve_whitespace=False, **kwargs): """Wrap tabulate inside a function for TabularOutputFormatter.""" keys = ('floatfmt', 'numalign', 'stralign', 'showindex', 'disable_numparse') tkwargs = {'tablefmt': table_format} tkwargs.update(filter_dict_by_key(kwargs, keys)) if table_format in supported_markup_formats: tkwargs.update(numalign=None, stralign=None) tabulate.PRESERVE_WHITESPACE = preserve_whitespace return iter(tabulate.tabulate(data, headers, **tkwargs).split('\n'))
def adapter(data, headers, table_format=None, preserve_whitespace=False, **kwargs): """Wrap tabulate inside a function for TabularOutputFormatter.""" keys = ('floatfmt', 'numalign', 'stralign', 'showindex', 'disable_numparse') tkwargs = {'tablefmt': table_format} tkwargs.update(filter_dict_by_key(kwargs, keys)) if table_format in supported_markup_formats: tkwargs.update(numalign=None, stralign=None) tabulate.PRESERVE_WHITESPACE = preserve_whitespace return iter(tabulate.tabulate(data, headers, **tkwargs).split('\n'))
[ "Wrap", "tabulate", "inside", "a", "function", "for", "TabularOutputFormatter", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/tabular_output/tabulate_adapter.py#L86-L98
[ "def", "adapter", "(", "data", ",", "headers", ",", "table_format", "=", "None", ",", "preserve_whitespace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "keys", "=", "(", "'floatfmt'", ",", "'numalign'", ",", "'stralign'", ",", "'showindex'", ",", "'disable_numparse'", ")", "tkwargs", "=", "{", "'tablefmt'", ":", "table_format", "}", "tkwargs", ".", "update", "(", "filter_dict_by_key", "(", "kwargs", ",", "keys", ")", ")", "if", "table_format", "in", "supported_markup_formats", ":", "tkwargs", ".", "update", "(", "numalign", "=", "None", ",", "stralign", "=", "None", ")", "tabulate", ".", "PRESERVE_WHITESPACE", "=", "preserve_whitespace", "return", "iter", "(", "tabulate", ".", "tabulate", "(", "data", ",", "headers", ",", "*", "*", "tkwargs", ")", ".", "split", "(", "'\\n'", ")", ")" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
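Calling the adapter directly, assuming 'psql' is one of tabulate's table formats (it is not a markup format, so the alignment defaults are kept). Note that setting tabulate.PRESERVE_WHITESPACE mutates global tabulate state, which affects other callers in the same process:

from cli_helpers.tabular_output.tabulate_adapter import adapter

lines = adapter([['a', 1], ['b', 2]], ['letter', 'number'], table_format='psql')
print('\n'.join(lines))  # the adapter returns an iterator of lines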
test
get_user_config_dir
Returns the config folder for the application. The default behavior is to return whatever is most appropriate for the operating system. For an example application called ``"My App"`` by ``"Acme"``, something like the following folders could be returned: macOS (non-XDG): ``~/Library/Application Support/My App`` Mac OS X (XDG): ``~/.config/my-app`` Unix: ``~/.config/my-app`` Windows 7 (roaming): ``C:\\Users\<user>\AppData\Roaming\Acme\My App`` Windows 7 (not roaming): ``C:\\Users\<user>\AppData\Local\Acme\My App`` :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param app_author: The app author's name (or company). This should be properly capitalized and can contain whitespace. :param roaming: controls if the folder should be roaming or not on Windows. Has no effect on non-Windows systems. :param force_xdg: if this is set to `True`, then on macOS the XDG Base Directory Specification will be followed. Has no effect on non-macOS systems.
cli_helpers/config.py
def get_user_config_dir(app_name, app_author, roaming=True, force_xdg=True): """Returns the config folder for the application. The default behavior is to return whatever is most appropriate for the operating system. For an example application called ``"My App"`` by ``"Acme"``, something like the following folders could be returned: macOS (non-XDG): ``~/Library/Application Support/My App`` Mac OS X (XDG): ``~/.config/my-app`` Unix: ``~/.config/my-app`` Windows 7 (roaming): ``C:\\Users\<user>\AppData\Roaming\Acme\My App`` Windows 7 (not roaming): ``C:\\Users\<user>\AppData\Local\Acme\My App`` :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param app_author: The app author's name (or company). This should be properly capitalized and can contain whitespace. :param roaming: controls if the folder should be roaming or not on Windows. Has no effect on non-Windows systems. :param force_xdg: if this is set to `True`, then on macOS the XDG Base Directory Specification will be followed. Has no effect on non-macOS systems. """ if WIN: key = 'APPDATA' if roaming else 'LOCALAPPDATA' folder = os.path.expanduser(os.environ.get(key, '~')) return os.path.join(folder, app_author, app_name) if MAC and not force_xdg: return os.path.join(os.path.expanduser( '~/Library/Application Support'), app_name) return os.path.join( os.path.expanduser(os.environ.get('XDG_CONFIG_HOME', '~/.config')), _pathify(app_name))
def get_user_config_dir(app_name, app_author, roaming=True, force_xdg=True): """Returns the config folder for the application. The default behavior is to return whatever is most appropriate for the operating system. For an example application called ``"My App"`` by ``"Acme"``, something like the following folders could be returned: macOS (non-XDG): ``~/Library/Application Support/My App`` Mac OS X (XDG): ``~/.config/my-app`` Unix: ``~/.config/my-app`` Windows 7 (roaming): ``C:\\Users\<user>\AppData\Roaming\Acme\My App`` Windows 7 (not roaming): ``C:\\Users\<user>\AppData\Local\Acme\My App`` :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param app_author: The app author's name (or company). This should be properly capitalized and can contain whitespace. :param roaming: controls if the folder should be roaming or not on Windows. Has no effect on non-Windows systems. :param force_xdg: if this is set to `True`, then on macOS the XDG Base Directory Specification will be followed. Has no effect on non-macOS systems. """ if WIN: key = 'APPDATA' if roaming else 'LOCALAPPDATA' folder = os.path.expanduser(os.environ.get(key, '~')) return os.path.join(folder, app_author, app_name) if MAC and not force_xdg: return os.path.join(os.path.expanduser( '~/Library/Application Support'), app_name) return os.path.join( os.path.expanduser(os.environ.get('XDG_CONFIG_HOME', '~/.config')), _pathify(app_name))
[ "Returns", "the", "config", "folder", "for", "the", "application", ".", "The", "default", "behavior", "is", "to", "return", "whatever", "is", "most", "appropriate", "for", "the", "operating", "system", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L193-L231
[ "def", "get_user_config_dir", "(", "app_name", ",", "app_author", ",", "roaming", "=", "True", ",", "force_xdg", "=", "True", ")", ":", "if", "WIN", ":", "key", "=", "'APPDATA'", "if", "roaming", "else", "'LOCALAPPDATA'", "folder", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "environ", ".", "get", "(", "key", ",", "'~'", ")", ")", "return", "os", ".", "path", ".", "join", "(", "folder", ",", "app_author", ",", "app_name", ")", "if", "MAC", "and", "not", "force_xdg", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~/Library/Application Support'", ")", ",", "app_name", ")", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "os", ".", "environ", ".", "get", "(", "'XDG_CONFIG_HOME'", ",", "'~/.config'", ")", ")", ",", "_pathify", "(", "app_name", ")", ")" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
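A usage sketch with placeholder app and author names. With force_xdg left at its default of True, macOS gets the XDG path rather than ~/Library/Application Support; the my-app spelling below assumes _pathify lowercases and hyphenates the name, as the docstring's own examples suggest:

from cli_helpers.config import get_user_config_dir

get_user_config_dir('My App', 'Acme')
# Linux/macOS (XDG):  ~/.config/my-app
# Windows (roaming):  %APPDATA%\Acme\My App

get_user_config_dir('My App', 'Acme', force_xdg=False)
# macOS: ~/Library/Application Support/My App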
test
get_system_config_dirs
r"""Returns a list of system-wide config folders for the application. For an example application called ``"My App"`` by ``"Acme"``, something like the following folders could be returned: macOS (non-XDG): ``['/Library/Application Support/My App']`` Mac OS X (XDG): ``['/etc/xdg/my-app']`` Unix: ``['/etc/xdg/my-app']`` Windows 7: ``['C:\ProgramData\Acme\My App']`` :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param app_author: The app author's name (or company). This should be properly capitalized and can contain whitespace. :param force_xdg: if this is set to `True`, then on macOS the XDG Base Directory Specification will be followed. Has no effect on non-macOS systems.
cli_helpers/config.py
def get_system_config_dirs(app_name, app_author, force_xdg=True): r"""Returns a list of system-wide config folders for the application. For an example application called ``"My App"`` by ``"Acme"``, something like the following folders could be returned: macOS (non-XDG): ``['/Library/Application Support/My App']`` Mac OS X (XDG): ``['/etc/xdg/my-app']`` Unix: ``['/etc/xdg/my-app']`` Windows 7: ``['C:\ProgramData\Acme\My App']`` :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param app_author: The app author's name (or company). This should be properly capitalized and can contain whitespace. :param force_xdg: if this is set to `True`, then on macOS the XDG Base Directory Specification will be followed. Has no effect on non-macOS systems. """ if WIN: folder = os.environ.get('PROGRAMDATA') return [os.path.join(folder, app_author, app_name)] if MAC and not force_xdg: return [os.path.join('/Library/Application Support', app_name)] dirs = os.environ.get('XDG_CONFIG_DIRS', '/etc/xdg') paths = [os.path.expanduser(x) for x in dirs.split(os.pathsep)] return [os.path.join(d, _pathify(app_name)) for d in paths]
def get_system_config_dirs(app_name, app_author, force_xdg=True): r"""Returns a list of system-wide config folders for the application. For an example application called ``"My App"`` by ``"Acme"``, something like the following folders could be returned: macOS (non-XDG): ``['/Library/Application Support/My App']`` Mac OS X (XDG): ``['/etc/xdg/my-app']`` Unix: ``['/etc/xdg/my-app']`` Windows 7: ``['C:\ProgramData\Acme\My App']`` :param app_name: the application name. This should be properly capitalized and can contain whitespace. :param app_author: The app author's name (or company). This should be properly capitalized and can contain whitespace. :param force_xdg: if this is set to `True`, then on macOS the XDG Base Directory Specification will be followed. Has no effect on non-macOS systems. """ if WIN: folder = os.environ.get('PROGRAMDATA') return [os.path.join(folder, app_author, app_name)] if MAC and not force_xdg: return [os.path.join('/Library/Application Support', app_name)] dirs = os.environ.get('XDG_CONFIG_DIRS', '/etc/xdg') paths = [os.path.expanduser(x) for x in dirs.split(os.pathsep)] return [os.path.join(d, _pathify(app_name)) for d in paths]
[ "r", "Returns", "a", "list", "of", "system", "-", "wide", "config", "folders", "for", "the", "application", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L234-L265
[ "def", "get_system_config_dirs", "(", "app_name", ",", "app_author", ",", "force_xdg", "=", "True", ")", ":", "if", "WIN", ":", "folder", "=", "os", ".", "environ", ".", "get", "(", "'PROGRAMDATA'", ")", "return", "[", "os", ".", "path", ".", "join", "(", "folder", ",", "app_author", ",", "app_name", ")", "]", "if", "MAC", "and", "not", "force_xdg", ":", "return", "[", "os", ".", "path", ".", "join", "(", "'/Library/Application Support'", ",", "app_name", ")", "]", "dirs", "=", "os", ".", "environ", ".", "get", "(", "'XDG_CONFIG_DIRS'", ",", "'/etc/xdg'", ")", "paths", "=", "[", "os", ".", "path", ".", "expanduser", "(", "x", ")", "for", "x", "in", "dirs", ".", "split", "(", "os", ".", "pathsep", ")", "]", "return", "[", "os", ".", "path", ".", "join", "(", "d", ",", "_pathify", "(", "app_name", ")", ")", "for", "d", "in", "paths", "]" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
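The same pattern for the system-wide directories, again with placeholder names. One caveat worth flagging: on Windows, os.environ.get('PROGRAMDATA') can return None (there is no '~' fallback as in the user-dir helper), in which case os.path.join would raise a TypeError, so callers may want to guard for that:

from cli_helpers.config import get_system_config_dirs

get_system_config_dirs('My App', 'Acme')
# Unix/macOS (XDG): ['/etc/xdg/my-app']  (one entry per path in $XDG_CONFIG_DIRS)
# Windows:          ['C:\\ProgramData\\Acme\\My App']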
test
Config.read_default_config
Read the default config file. :raises DefaultConfigValidationError: There was a validation error with the *default* file.
cli_helpers/config.py
def read_default_config(self): """Read the default config file. :raises DefaultConfigValidationError: There was a validation error with the *default* file. """ if self.validate: self.default_config = ConfigObj(configspec=self.default_file, list_values=False, _inspec=True, encoding='utf8') valid = self.default_config.validate(Validator(), copy=True, preserve_errors=True) if valid is not True: for name, section in valid.items(): if section is True: continue for key, value in section.items(): if isinstance(value, ValidateError): raise DefaultConfigValidationError( 'section [{}], key "{}": {}'.format( name, key, value)) elif self.default_file: self.default_config, _ = self.read_config_file(self.default_file) self.update(self.default_config)
def read_default_config(self): """Read the default config file. :raises DefaultConfigValidationError: There was a validation error with the *default* file. """ if self.validate: self.default_config = ConfigObj(configspec=self.default_file, list_values=False, _inspec=True, encoding='utf8') valid = self.default_config.validate(Validator(), copy=True, preserve_errors=True) if valid is not True: for name, section in valid.items(): if section is True: continue for key, value in section.items(): if isinstance(value, ValidateError): raise DefaultConfigValidationError( 'section [{}], key "{}": {}'.format( name, key, value)) elif self.default_file: self.default_config, _ = self.read_config_file(self.default_file) self.update(self.default_config)
[ "Read", "the", "default", "config", "file", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L77-L101
[ "def", "read_default_config", "(", "self", ")", ":", "if", "self", ".", "validate", ":", "self", ".", "default_config", "=", "ConfigObj", "(", "configspec", "=", "self", ".", "default_file", ",", "list_values", "=", "False", ",", "_inspec", "=", "True", ",", "encoding", "=", "'utf8'", ")", "valid", "=", "self", ".", "default_config", ".", "validate", "(", "Validator", "(", ")", ",", "copy", "=", "True", ",", "preserve_errors", "=", "True", ")", "if", "valid", "is", "not", "True", ":", "for", "name", ",", "section", "in", "valid", ".", "items", "(", ")", ":", "if", "section", "is", "True", ":", "continue", "for", "key", ",", "value", "in", "section", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "ValidateError", ")", ":", "raise", "DefaultConfigValidationError", "(", "'section [{}], key \"{}\": {}'", ".", "format", "(", "name", ",", "key", ",", "value", ")", ")", "elif", "self", ".", "default_file", ":", "self", ".", "default_config", ",", "_", "=", "self", ".", "read_config_file", "(", "self", ".", "default_file", ")", "self", ".", "update", "(", "self", ".", "default_config", ")" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
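A sketch of the validation path. It assumes the Config constructor accepts app name, author, and filename plus default= and validate= keywords, as the attributes used in these methods imply, and that the default file doubles as a configobj configspec; all filenames are hypothetical:

from cli_helpers.config import Config, DefaultConfigValidationError

# default.ini is read as a configspec when validate=True, e.g.:
#   [main]
#   timeout = integer(default=30)
config = Config('My App', 'Acme', 'myapprc',
                default='default.ini', validate=True)
try:
    config.read_default_config()
except DefaultConfigValidationError as exc:
    print('bad default config:', exc)  # e.g. section [main], key "timeout": ...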
test
Config.read
Read the default, additional, system, and user config files. :raises DefaultConfigValidationError: There was a validation error with the *default* file.
cli_helpers/config.py
def read(self): """Read the default, additional, system, and user config files. :raises DefaultConfigValidationError: There was a validation error with the *default* file. """ if self.default_file: self.read_default_config() return self.read_config_files(self.all_config_files())
def read(self): """Read the default, additional, system, and user config files. :raises DefaultConfigValidationError: There was a validation error with the *default* file. """ if self.default_file: self.read_default_config() return self.read_config_files(self.all_config_files())
[ "Read", "the", "default", "additional", "system", "and", "user", "config", "files", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L103-L111
[ "def", "read", "(", "self", ")", ":", "if", "self", ".", "default_file", ":", "self", ".", "read_default_config", "(", ")", "return", "self", ".", "read_config_files", "(", "self", ".", "all_config_files", "(", ")", ")" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
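read() ties the pieces together: defaults first (validated if requested), then every file returned by all_config_files(), which presumably concatenates the system, additional, and user paths shown in the following entries. A sketch with the same hypothetical filenames as above:

config = Config('My App', 'Acme', 'myapprc', default='default.ini')
config.read()                  # defaults, then system/additional/user files
print(config.get('main', {}))  # Config is ConfigObj-based, so dict-like access works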
test
Config.user_config_file
Get the absolute path to the user config file.
cli_helpers/config.py
def user_config_file(self): """Get the absolute path to the user config file.""" return os.path.join( get_user_config_dir(self.app_name, self.app_author), self.filename)
def user_config_file(self): """Get the absolute path to the user config file.""" return os.path.join( get_user_config_dir(self.app_name, self.app_author), self.filename)
[ "Get", "the", "absolute", "path", "to", "the", "user", "config", "file", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L113-L117
[ "def", "user_config_file", "(", "self", ")", ":", "return", "os", ".", "path", ".", "join", "(", "get_user_config_dir", "(", "self", ".", "app_name", ",", "self", ".", "app_author", ")", ",", "self", ".", "filename", ")" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
test
Config.system_config_files
Get a list of absolute paths to the system config files.
cli_helpers/config.py
def system_config_files(self): """Get a list of absolute paths to the system config files.""" return [os.path.join(f, self.filename) for f in get_system_config_dirs( self.app_name, self.app_author)]
def system_config_files(self): """Get a list of absolute paths to the system config files.""" return [os.path.join(f, self.filename) for f in get_system_config_dirs( self.app_name, self.app_author)]
[ "Get", "a", "list", "of", "absolute", "paths", "to", "the", "system", "config", "files", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L119-L122
[ "def", "system_config_files", "(", "self", ")", ":", "return", "[", "os", ".", "path", ".", "join", "(", "f", ",", "self", ".", "filename", ")", "for", "f", "in", "get_system_config_dirs", "(", "self", ".", "app_name", ",", "self", ".", "app_author", ")", "]" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
test
Config.additional_files
Get a list of absolute paths to the additional config files.
cli_helpers/config.py
def additional_files(self): """Get a list of absolute paths to the additional config files.""" return [os.path.join(f, self.filename) for f in self.additional_dirs]
def additional_files(self): """Get a list of absolute paths to the additional config files.""" return [os.path.join(f, self.filename) for f in self.additional_dirs]
[ "Get", "a", "list", "of", "absolute", "paths", "to", "the", "additional", "config", "files", "." ]
dbcli/cli_helpers
python
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L124-L126
[ "def", "additional_files", "(", "self", ")", ":", "return", "[", "os", ".", "path", ".", "join", "(", "f", ",", "self", ".", "filename", ")", "for", "f", "in", "self", ".", "additional_dirs", "]" ]
3ebd891ac0c02bad061182dbcb54a47fb21980ae
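Taken together, the three path helpers above just join self.filename onto the user, system, and additional directories. A combined sketch with the same placeholder names as before:

config = Config('My App', 'Acme', 'myapprc')
config.user_config_file()     # e.g. ~/.config/my-app/myapprc
config.system_config_files()  # e.g. ['/etc/xdg/my-app/myapprc']
config.additional_files()     # one path per directory in additional_dirs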