Format instruction details for inclusion in disassembly output
def _disassemble(self, lineno_width=3, mark_as_current=False): """Format instruction details for inclusion in disassembly output *lineno_width* sets the width of the line number field (0 omits it) *mark_as_current* inserts a '-->' marker arrow as part of the line """ fields = [] # Column: Source code line number if lineno_width: if self.starts_line is not None: lineno_fmt = "%%%dd" % lineno_width fields.append(lineno_fmt % self.starts_line) else: fields.append(' ' * lineno_width) # Column: Current instruction indicator if mark_as_current: fields.append('-->') else: fields.append(' ') # Column: Jump target marker if self.is_jump_target: fields.append('>>') else: fields.append(' ') # Column: Instruction offset from start of code sequence fields.append(repr(self.offset).rjust(4)) # Column: Opcode name fields.append(self.opname.ljust(20)) # Column: Opcode argument if self.arg is not None: fields.append(repr(self.arg).rjust(5)) # Column: Opcode argument details if self.argrepr: fields.append('(' + self.argrepr + ')') return ' '.join(fields).rstrip()
Returns intersection of two lists. Assumes the lists are sorted by start positions
def intersection(l1, l2): '''Returns intersection of two lists. Assumes the lists are sorted by start positions''' if len(l1) == 0 or len(l2) == 0: return [] out = [] l2_pos = 0 for l in l1: while l2_pos < len(l2) and l2[l2_pos].end < l.start: l2_pos += 1 if l2_pos == len(l2): break while l2_pos < len(l2) and l.intersects(l2[l2_pos]): out.append(l.intersection(l2[l2_pos])) l2_pos += 1 l2_pos = max(0, l2_pos - 1) return out
Sorts list, merges any overlapping intervals, and also adjacent intervals. e.g. [0,1], [1,2] would be merged to [0,2].
def merge_overlapping_in_list(l): '''Sorts list, merges any overlapping intervals, and also adjacent intervals. e.g. [0,1], [1,2] would be merge to [0,.2].''' i = 0 l.sort() while i < len(l) - 1: u = l[i].union(l[i+1]) if u is not None: l[i] = u l.pop(i+1) else: i += 1
Sorts list in place, then removes any intervals that are completely contained inside another interval
def remove_contained_in_list(l): '''Sorts list in place, then removes any intervals that are completely contained inside another interval''' i = 0 l.sort() while i < len(l) - 1: if l[i+1].contains(l[i]): l.pop(i) elif l[i].contains(l[i+1]): l.pop(i+1) else: i += 1
Returns the distance from the point to the interval. Zero if the point lies inside the interval.
def distance_to_point(self, p): '''Returns the distance from the point to the interval. Zero if the point lies inside the interval.''' if self.start <= p <= self.end: return 0 else: return min(abs(self.start - p), abs(self.end - p))
Returns true iff this interval intersects the interval i
def intersects(self, i): '''Returns true iff this interval intersects the interval i''' return self.start <= i.end and i.start <= self.end
Returns true iff this interval contains the interval i
def contains(self, i): '''Returns true iff this interval contains the interval i''' return self.start <= i.start and i.end <= self.end
If intervals intersect, returns their union, otherwise returns None
def union(self, i): '''If intervals intersect, returns their union, otherwise returns None''' if self.intersects(i) or self.end + 1 == i.start or i.end + 1 == self.start: return Interval(min(self.start, i.start), max(self.end, i.end)) else: return None
Like union but ignores whether the two intervals intersect or not
def union_fill_gap(self, i): '''Like union, but ignores whether the two intervals intersect or not''' return Interval(min(self.start, i.start), max(self.end, i.end))
If intervals intersect, returns their intersection, otherwise returns None
def intersection(self, i): '''If intervals intersect, returns their intersection, otherwise returns None''' if self.intersects(i): return Interval(max(self.start, i.start), min(self.end, i.end)) else: return None
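A minimal, self-contained sketch of how these interval methods compose. The Interval class here is a hypothetical stand-in with only start/end and two of the methods shown above, not the full class from the source:

    class Interval:
        # Hypothetical minimal interval with inclusive integer coordinates
        def __init__(self, start, end):
            self.start, self.end = start, end

        def __repr__(self):
            return 'Interval(%d, %d)' % (self.start, self.end)

        def intersects(self, i):
            # True iff the two intervals share at least one position
            return self.start <= i.end and i.start <= self.end

        def intersection(self, i):
            # Overlapping part, or None if they do not intersect
            if self.intersects(i):
                return Interval(max(self.start, i.start), min(self.end, i.end))
            return None

    print(Interval(3, 7).intersection(Interval(5, 10)))  # Interval(5, 7)
    print(Interval(3, 7).intersection(Interval(8, 9)))   # None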
Iterates over a FASTA or FASTQ file yielding the next sequence in the file until there are no more sequences
def file_reader(fname, read_quals=False): '''Iterates over a FASTA or FASTQ file, yielding the next sequence in the file until there are no more sequences''' f = utils.open_file_read(fname) line = f.readline() phylip_regex = re.compile('^\s*[0-9]+\s+[0-9]+$') gbk_regex = re.compile('^LOCUS\s+\S') if line.startswith('>'): seq = Fasta() previous_lines[f] = line elif line.startswith('##gff-version 3'): seq = Fasta() # if a GFF file, need to skip past all the annotation # and get to the fasta sequences at the end of the file while not line.startswith('>'): line = f.readline() if not line: utils.close(f) raise Error('No sequences found in GFF file "' + fname + '"') seq = Fasta() previous_lines[f] = line elif line.startswith('ID ') and line[5] != ' ': seq = Embl() previous_lines[f] = line elif gbk_regex.search(line): seq = Embl() previous_lines[f] = line elif line.startswith('@'): seq = Fastq() previous_lines[f] = line elif phylip_regex.search(line): # phylip format could be interleaved or not, need to look at next # couple of lines to figure that out. Don't expect these files to # be too huge, so just store all the sequences in memory number_of_seqs, bases_per_seq = line.strip().split() number_of_seqs = int(number_of_seqs) bases_per_seq = int(bases_per_seq) got_blank_line = False first_line = line seq_lines = [] while 1: line = f.readline() if line == '': break elif line == '\n': got_blank_line = True else: seq_lines.append(line.rstrip()) utils.close(f) if len(seq_lines) == 1 or len(seq_lines) == number_of_seqs: sequential = True elif seq_lines[0][10] != ' ' and seq_lines[1][10] == ' ': sequential = True else: sequential = False # if the 11th char of second sequence line is a space, then the file is sequential, e.g.: # GAGCCCGGGC AATACAGGGT AT # as opposed to: # Salmo gairAAGCCTTGGC AGTGCAGGGT if sequential: current_id = None current_seq = '' for line in seq_lines: if len(current_seq) == bases_per_seq or len(current_seq) == 0: if current_id is not None: yield Fasta(current_id, current_seq.replace('-', '')) current_seq = '' current_id, new_bases = line[0:10].rstrip(), line.rstrip()[10:] else: new_bases = line.rstrip() current_seq += new_bases.replace(' ','') yield Fasta(current_id, current_seq.replace('-', '')) else: # seaview files start all seqs at pos >=12. Other files start # their sequence at the start of the line if seq_lines[number_of_seqs + 1][0] == ' ': first_gap_pos = seq_lines[0].find(' ') end_of_gap = first_gap_pos while seq_lines[0][end_of_gap] == ' ': end_of_gap += 1 first_seq_base = end_of_gap else: first_seq_base = 10 seqs = [] for i in range(number_of_seqs): name, bases = seq_lines[i][0:first_seq_base].rstrip(), seq_lines[i][first_seq_base:] seqs.append(Fasta(name, bases)) for i in range(number_of_seqs, len(seq_lines)): seqs[i%number_of_seqs].seq += seq_lines[i] for fa in seqs: fa.seq = fa.seq.replace(' ','').replace('-','') yield fa return elif line == '': utils.close(f) return else: utils.close(f) raise Error('Error determining file type from file "' + fname + '". First line is:\n' + line.rstrip()) try: while seq.get_next_from_file(f, read_quals): yield seq finally: utils.close(f)
Returns Fasta object with the same name, of the bases from start to end, but not including end
def subseq(self, start, end): '''Returns Fasta object with the same name, of the bases from start to end, but not including end''' return Fasta(self.id, self.seq[start:end])
Gets the prefix and suffix of the name of a capillary read, e.g. xxxxx.p1k or xxxx.q1k. Returns a tuple (prefix, suffix)
def split_capillary_id(self): '''Gets the prefix and suffix of an name of a capillary read, e.g. xxxxx.p1k or xxxx.q1k. Returns a tuple (prefix, suffx)''' try: a = self.id.rsplit('.', 1) if a[1].startswith('p'): dir = 'fwd' elif a[1].startswith('q'): dir = 'rev' else: dir = 'unk' return {'prefix': a[0], 'dir': dir, 'suffix':a[1]} except: raise Error('Error in split_capillary_id() on ID', self.id)
Assumes sequence is nucleotides. Returns list of all combinations of redundant nucleotides. e.g. R is A or G, so CRT would have combinations CAT and CGT
def expand_nucleotides(self): '''Assumes sequence is nucleotides. Returns list of all combinations of redundant nucleotides. e.g. R is A or G, so CRT would have combinations CAT and CGT''' s = list(self.seq) for i in range(len(s)): if s[i] in redundant_nts: s[i] = ''.join(redundant_nts[s[i]]) seqs = [] for x in itertools.product(*s): seqs.append(Fasta(self.id + '.' + str(len(seqs) + 1), ''.join(x))) return seqs
Removes any trailing /1 or /2 off the end of the name
def strip_illumina_suffix(self): '''Removes any trailing /1 or /2 off the end of the name''' if self.id.endswith('/1') or self.id.endswith('/2'): self.id = self.id[:-2]
Returns true if the sequence is all Ns (upper or lower case)
def is_all_Ns(self, start=0, end=None): '''Returns true if the sequence is all Ns (upper or lower case)''' if end is not None: if start > end: raise Error('Error in is_all_Ns. Start coord must be <= end coord') end += 1 else: end = len(self) if len(self) == 0: return False else: return re.search('[^Nn]', self.seq[start:end]) is None
Adds a random base within window bases around every skip bases. e.g. skip=10, window=1 means a random base added somewhere in the intervals [9,11], [19,21] ...
def add_insertions(self, skip=10, window=1, test=False): '''Adds a random base within window bases around every skip bases. e.g. skip=10, window=1 means a random base added somwhere in theintervals [9,11], [19,21] ... ''' assert 2 * window < skip new_seq = list(self.seq) for i in range(len(self) - skip, 0, -skip): pos = random.randrange(i - window, i + window + 1) base = random.choice(['A', 'C', 'G', 'T']) if test: base = 'N' new_seq.insert(pos, base) self.seq = ''.join(new_seq)
Replaces all occurrences of old with new
def replace_bases(self, old, new): '''Replaces all occurrences of 'old' with 'new' ''' self.seq = self.seq.replace(old, new)
Replaces the sequence from start to end with the sequence new
def replace_interval(self, start, end, new): '''Replaces the sequence from start to end with the sequence "new"''' if start > end or start > len(self) - 1 or end > len(self) - 1: raise Error('Error replacing bases ' + str(start) + '-' + str(end) + ' in sequence ' + self.id) self.seq = self.seq[0:start] + new + self.seq[end + 1:]
Finds the positions of all gaps in the sequence that are at least min_length long. Returns a list of Intervals. Coords are zero-based
def gaps(self, min_length = 1): '''Finds the positions of all gaps in the sequence that are at least min_length long. Returns a list of Intervals. Coords are zero-based''' gaps = [] regex = re.compile('N+', re.IGNORECASE) for m in regex.finditer(self.seq): if m.span()[1] - m.span()[0] + 1 >= min_length: gaps.append(intervals.Interval(m.span()[0], m.span()[1] - 1)) return gaps
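The gap search reduces to finding runs of N/n with a regular expression. A standalone sketch on a plain string (the helper name and the plain-run-length test are illustrative, not the exact method above):

    import re

    def gap_coords(seq, min_length=1):
        # Zero-based, inclusive coordinates of runs of N/n at least min_length long
        return [(m.start(), m.end() - 1)
                for m in re.finditer('[Nn]+', seq)
                if m.end() - m.start() >= min_length]

    print(gap_coords('ACGTNNNNACGTnnACGT', min_length=3))  # [(4, 7)]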
Finds coords of contigs, i.e. everything that's not a gap (N or n). Returns a list of Intervals. Coords are zero-based
def contig_coords(self): '''Finds coords of contigs, i.e. everything that's not a gap (N or n). Returns a list of Intervals. Coords are zero-based''' # contigs are the opposite of gaps, so work out the coords from the gap coords gaps = self.gaps() if len(gaps) == 0: return [intervals.Interval(0, len(self) - 1)] coords = [0] for g in gaps: if g.start == 0: coords = [g.end + 1] else: coords += [g.start - 1, g.end + 1] if coords[-1] < len(self): coords.append(len(self) - 1) return [intervals.Interval(coords[i], coords[i+1]) for i in range(0, len(coords)-1,2)]
Returns a list of ORFs that the sequence has, starting on the given frame. Each returned ORF is an intervals.Interval object. If revcomp=True, then finds the ORFs of the reverse complement of the sequence.
def orfs(self, frame=0, revcomp=False): '''Returns a list of ORFs that the sequence has, starting on the given frame. Each returned ORF is an interval.Interval object. If revomp=True, then finds the ORFs of the reverse complement of the sequence.''' assert frame in [0,1,2] if revcomp: self.revcomp() aa_seq = self.translate(frame=frame).seq.rstrip('X') if revcomp: self.revcomp() orfs = _orfs_from_aa_seq(aa_seq) for i in range(len(orfs)): if revcomp: start = len(self) - (orfs[i].end * 3 + 3) - frame end = len(self) - (orfs[i].start * 3) - 1 - frame else: start = orfs[i].start * 3 + frame end = orfs[i].end * 3 + 2 + frame orfs[i] = intervals.Interval(start, end) return orfs
Finds all open reading frames in the sequence that are at least as long as min_length. Includes ORFs on the reverse strand. Returns a list of ORFs, where each element is a tuple (intervals.Interval, bool), where bool=True means on the reverse strand
def all_orfs(self, min_length=300): '''Finds all open reading frames in the sequence, that are at least as long as min_length. Includes ORFs on the reverse strand. Returns a list of ORFs, where each element is a tuple: (interval.Interval, bool) where bool=True means on the reverse strand''' orfs = [] for frame in [0,1,2]: for revcomp in [False, True]: orfs.extend([(t, revcomp) for t in self.orfs(frame=frame, revcomp=revcomp) if len(t)>=min_length]) return sorted(orfs, key=lambda t:t[0])
Returns true iff length is >= 6, is a multiple of 3, and there is exactly one stop codon in the sequence and it is at the end
def is_complete_orf(self): '''Returns true iff length is >= 6, is a multiple of 3, and there is exactly one stop codon in the sequence and it is at the end''' if len(self) %3 != 0 or len(self) < 6: return False orfs = self.orfs() complete_orf = intervals.Interval(0, len(self) - 1) for orf in orfs: if orf == complete_orf: return True return False
Returns true iff: length >= 6, length is a multiple of 3, first codon is a start codon, last codon is a stop codon, and there are no other stop codons
def looks_like_gene(self): '''Returns true iff: length >=6, length is a multiple of 3, first codon is start, last codon is a stop and has no other stop codons''' return self.is_complete_orf() \ and len(self) >= 6 \ and len(self) %3 == 0 \ and self.seq[0:3].upper() in genetic_codes.starts[genetic_code]
Tries to make into a gene sequence. Tries all three reading frames and both strands. Returns a tuple (new sequence, strand, frame) if it was successful. Otherwise returns None.
def make_into_gene(self): '''Tries to make into a gene sequence. Tries all three reading frames and both strands. Returns a tuple (new sequence, strand, frame) if it was successful. Otherwise returns None.''' for reverse in [True, False]: for frame in range(3): new_seq = copy.copy(self) if reverse: new_seq.revcomp() new_seq.seq = new_seq[frame:] if len(new_seq) % 3: new_seq.seq = new_seq.seq[:-(len(new_seq) % 3)] new_aa_seq = new_seq.translate() if len(new_aa_seq) >= 2 and new_seq[0:3] in genetic_codes.starts[genetic_code] and new_aa_seq[-1] == '*' and '*' not in new_aa_seq[:-1]: strand = '-' if reverse else '+' return new_seq, strand, frame return None
Removes first start/end bases off the start/end of the sequence
def trim(self, start, end): '''Removes first 'start'/'end' bases off the start/end of the sequence''' self.seq = self.seq[start:len(self.seq) - end]
Returns a Fastq object. qual_scores expected to be a list of numbers, like you would get in a .qual file
def to_Fastq(self, qual_scores): '''Returns a Fastq object. qual_scores expected to be a list of numbers, like you would get in a .qual file''' if len(self) != len(qual_scores): raise Error('Error making Fastq from Fasta, lengths differ.', self.id) return Fastq(self.id, self.seq, ''.join([chr(max(0, min(x, 93)) + 33) for x in qual_scores]))
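The quality-string construction is standard Phred+33 encoding with scores clamped to [0, 93]. A small standalone illustration of just that step (the helper name is made up for the example):

    def quals_to_string(qual_scores):
        # Clamp each score to [0, 93] and offset by 33 to get printable ASCII (Phred+33)
        return ''.join(chr(max(0, min(q, 93)) + 33) for q in qual_scores)

    print(quals_to_string([0, 20, 40, 93]))  # '!5I~'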
Finds every occurrence (including overlapping ones) of the search_string, including on the reverse strand. Returns a list where each element is a tuple (position, strand), where strand is in ['-', '+']. Positions are zero-based
def search(self, search_string): '''Finds every occurrence (including overlapping ones) of the search_string, including on the reverse strand. Returns a list where each element is a tuple (position, strand) where strand is in ['-', '+']. Positions are zero-based''' seq = self.seq.upper() search_string = search_string.upper() pos = 0 found = seq.find(search_string, pos) hits = [] while found != -1: hits.append((found, '+')) pos = found + 1 found = seq.find(search_string, pos) pos = 0 search_string = Fasta('x', search_string) search_string.revcomp() search_string = search_string.seq found = seq.find(search_string, pos) while found != -1: hits.append((found, '-')) pos = found + 1 found = seq.find(search_string, pos) return hits
Returns a Fasta sequence, translated into amino acids. Starts translating from frame, where frame is expected to be 0, 1 or 2
def translate(self, frame=0): '''Returns a Fasta sequence, translated into amino acids. Starts translating from 'frame', where frame expected to be 0,1 or 2''' return Fasta(self.id, ''.join([genetic_codes.codes[genetic_code].get(self.seq[x:x+3].upper(), 'X') for x in range(frame, len(self)-1-frame, 3)]))
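Translation walks the sequence in steps of three from the chosen frame and looks each codon up in a table, falling back to 'X' for unknown codons. A minimal sketch with a deliberately tiny, hypothetical codon table rather than the full genetic code used above:

    # Tiny illustrative codon table (not a full genetic code)
    CODON_TABLE = {'ATG': 'M', 'TGG': 'W', 'TAA': '*', 'GGC': 'G'}

    def translate(seq, frame=0):
        return ''.join(CODON_TABLE.get(seq[i:i + 3].upper(), 'X')
                       for i in range(frame, len(seq) - 2, 3))

    print(translate('ATGGGCTGGTAA'))           # 'MGW*'
    print(translate('ATGGGCTGGTAA', frame=1))  # codons TGG, GCT, GGT -> 'WXX'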
Returns the GC content for the sequence. Notes: This method ignores N when calculating the length of the sequence. It does not, however, ignore other ambiguous bases. It also only includes the ambiguous base S (G or C). In this sense the method is conservative with its calculation.
def gc_content(self, as_decimal=True): """Returns the GC content for the sequence. Notes: This method ignores N when calculating the length of the sequence. It does not, however ignore other ambiguous bases. It also only includes the ambiguous base S (G or C). In this sense the method is conservative with its calculation. Args: as_decimal (bool): Return the result as a decimal. Setting to False will return as a percentage. i.e for the sequence GCAT it will return 0.5 by default and 50.00 if set to False. Returns: float: GC content calculated as the number of G, C, and S divided by the number of (non-N) bases (length). """ gc_total = 0.0 num_bases = 0.0 n_tuple = tuple('nN') accepted_bases = tuple('cCgGsS') # counter sums all unique characters in sequence. Case insensitive. for base, count in Counter(self.seq).items(): # dont count N in the number of bases if base not in n_tuple: num_bases += count if base in accepted_bases: # S is a G or C gc_total += count gc_content = gc_total / num_bases if not as_decimal: # return as percentage gc_content *= 100 return gc_content
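The same counting logic as a standalone function on a plain string, assuming the same conventions as above (N/n excluded from the length, S counted as G or C):

    from collections import Counter

    def gc_content(seq, as_decimal=True):
        counts = Counter(seq)
        num_bases = sum(n for base, n in counts.items() if base not in 'nN')
        gc_total = sum(n for base, n in counts.items() if base in 'cCgGsS')
        gc = gc_total / num_bases
        return gc if as_decimal else gc * 100

    print(gc_content('GCATNN'))                  # 0.5  (the Ns are excluded from the length)
    print(gc_content('GCAT', as_decimal=False))  # 50.0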
Returns Fastq object with the same name, of the bases from start to end, but not including end
def subseq(self, start, end): '''Returns Fastq object with the same name, of the bases from start to end, but not including end''' return Fastq(self.id, self.seq[start:end], self.qual[start:end])
Removes first start/end bases off the start/end of the sequence
def trim(self, start, end): '''Removes first 'start'/'end' bases off the start/end of the sequence''' super().trim(start, end) self.qual = self.qual[start:len(self.qual) - end]
Removes any leading or trailing N or n characters from the sequence
def trim_Ns(self): '''Removes any leading or trailing N or n characters from the sequence''' # get index of first base that is not an N i = 0 while i < len(self) and self.seq[i] in 'nN': i += 1 # strip off start of sequence and quality self.seq = self.seq[i:] self.qual = self.qual[i:] # strip the ends self.seq = self.seq.rstrip('Nn') self.qual = self.qual[:len(self.seq)]
Replaces the sequence from start to end with the sequence new
def replace_interval(self, start, end, new, qual_string): '''Replaces the sequence from start to end with the sequence "new"''' if len(new) != len(qual_string): raise Error('Length of new seq and qual string in replace_interval() must be equal. Cannot continue') super().replace_interval(start, end, new) self.qual = self.qual[0:start] + qual_string + self.qual[end + 1:]
Returns a Fasta sequence translated into amino acids. Starts translating from frame where frame expected to be 0 1 or 2
def translate(self): '''Returns a Fasta sequence, translated into amino acids. Starts translating from 'frame', where frame expected to be 0,1 or 2''' fa = super().translate() return Fastq(fa.id, fa.seq, 'I'*len(fa.seq))
Replace every non-acgtn (case insensitive) character with an N
def acgtn_only(infile, outfile): '''Replace every non-acgtn (case insensitve) character with an N''' f = utils.open_file_write(outfile) for seq in sequences.file_reader(infile): seq.replace_non_acgt() print(seq, file=f) utils.close(f)
Convert a CAF file to fastq. Reads shorter than min_length are not output. If clipping information is in the CAF file (with a line Clipping QUAL ...) and trim=True, then trim the reads
def caf_to_fastq(infile, outfile, min_length=0, trim=False): '''Convert a CAF file to fastq. Reads shorter than min_length are not output. If clipping information is in the CAF file (with a line Clipping QUAL ...) and trim=True, then trim the reads''' caf_reader = caf.file_reader(infile) fout = utils.open_file_write(outfile) for c in caf_reader: if trim: if c.clip_start is not None and c.clip_end is not None: c.seq.seq = c.seq.seq[c.clip_start:c.clip_end + 1] c.seq.qual = c.seq.qual[c.clip_start:c.clip_end + 1] else: print('Warning: no clipping info for sequence', c.id, file=sys.stderr) if len(c.seq) >= min_length: print(c.seq, file=fout) utils.close(fout)
Returns the number of sequences in a file
def count_sequences(infile): '''Returns the number of sequences in a file''' seq_reader = sequences.file_reader(infile) n = 0 for seq in seq_reader: n += 1 return n
Makes interleaved file from two sequence files. If used, will append suffix1 onto the end of every sequence name in infile_1, unless it already ends with suffix1. Similarly for suffix2.
def interleave(infile_1, infile_2, outfile, suffix1=None, suffix2=None): '''Makes interleaved file from two sequence files. If used, will append suffix1 onto end of every sequence name in infile_1, unless it already ends with suffix1. Similar for sufffix2.''' seq_reader_1 = sequences.file_reader(infile_1) seq_reader_2 = sequences.file_reader(infile_2) f_out = utils.open_file_write(outfile) for seq_1 in seq_reader_1: try: seq_2 = next(seq_reader_2) except: utils.close(f_out) raise Error('Error getting mate for sequence', seq_1.id, ' ... cannot continue') if suffix1 is not None and not seq_1.id.endswith(suffix1): seq_1.id += suffix1 if suffix2 is not None and not seq_2.id.endswith(suffix2): seq_2.id += suffix2 print(seq_1, file=f_out) print(seq_2, file=f_out) try: seq_2 = next(seq_reader_2) except: seq_2 = None if seq_2 is not None: utils.close(f_out) raise Error('Error getting mate for sequence', seq_2.id, ' ... cannot continue') utils.close(f_out)
Makes a multi fasta file of random sequences, all the same length
def make_random_contigs(contigs, length, outfile, name_by_letters=False, prefix='', seed=None, first_number=1): '''Makes a multi fasta file of random sequences, all the same length''' random.seed(a=seed) fout = utils.open_file_write(outfile) letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ') letters_index = 0 for i in range(contigs): if name_by_letters: name = letters[letters_index] letters_index += 1 if letters_index == len(letters): letters_index = 0 else: name = str(i + first_number) fa = sequences.Fasta(prefix + name, ''.join([random.choice('ACGT') for x in range(length)])) print(fa, file=fout) utils.close(fout)
Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N
def mean_length(infile, limit=None): '''Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N''' total = 0 count = 0 seq_reader = sequences.file_reader(infile) for seq in seq_reader: total += len(seq) count += 1 if limit is not None and count >= limit: break assert count > 0 return total / count
Takes a multi fasta or fastq file and writes a new file that contains just one sequence, with the original sequences catted together, preserving their order
def merge_to_one_seq(infile, outfile, seqname='union'): '''Takes a multi fasta or fastq file and writes a new file that contains just one sequence, with the original sequences catted together, preserving their order''' seq_reader = sequences.file_reader(infile) seqs = [] for seq in seq_reader: seqs.append(copy.copy(seq)) new_seq = ''.join([seq.seq for seq in seqs]) if type(seqs[0]) == sequences.Fastq: new_qual = ''.join([seq.qual for seq in seqs]) seqs[:] = [] merged = sequences.Fastq(seqname, new_seq, new_qual) else: merged = sequences.Fasta(seqname, new_seq) seqs[:] = [] f = utils.open_file_write(outfile) print(merged, file=f) utils.close(f)
Makes a file of contigs from scaffolds by splitting at every N. Use number_contigs=True to add .1, .2, etc onto the end of each contig, instead of the default of appending coordinates.
def scaffolds_to_contigs(infile, outfile, number_contigs=False): '''Makes a file of contigs from scaffolds by splitting at every N. Use number_contigs=True to add .1, .2, etc onto end of each contig, instead of default to append coordinates.''' seq_reader = sequences.file_reader(infile) fout = utils.open_file_write(outfile) for seq in seq_reader: contigs = seq.contig_coords() counter = 1 for contig in contigs: if number_contigs: name = seq.id + '.' + str(counter) counter += 1 else: name = '.'.join([seq.id, str(contig.start + 1), str(contig.end + 1)]) print(sequences.Fasta(name, seq[contig.start:contig.end+1]), file=fout) utils.close(fout)
Sorts input sequence file by biggest sequence first, writes sorted output file. Set smallest_first=True to have smallest first
def sort_by_size(infile, outfile, smallest_first=False): '''Sorts input sequence file by biggest sequence first, writes sorted output file. Set smallest_first=True to have smallest first''' seqs = {} file_to_dict(infile, seqs) seqs = list(seqs.values()) seqs.sort(key=lambda x: len(x), reverse=not smallest_first) fout = utils.open_file_write(outfile) for seq in seqs: print(seq, file=fout) utils.close(fout)
Sorts input sequence file by sort -d -k1,1, writes sorted output file.
def sort_by_name(infile, outfile): '''Sorts input sequence file by sort -d -k1,1, writes sorted output file.''' seqs = {} file_to_dict(infile, seqs) #seqs = list(seqs.values()) #seqs.sort() fout = utils.open_file_write(outfile) for name in sorted(seqs): print(seqs[name], file=fout) utils.close(fout)
Writes a FASTG file in SPAdes format from input file. Currently only whether or not a sequence is circular is supported. Put circular=set of ids, or circular=filename, to make those sequences circular in the output. Puts coverage=1 on all contigs
def to_fastg(infile, outfile, circular=None): '''Writes a FASTG file in SPAdes format from input file. Currently only whether or not a sequence is circular is supported. Put circular=set of ids, or circular=filename to make those sequences circular in the output. Puts coverage=1 on all contigs''' if circular is None: to_circularise = set() elif type(circular) is not set: f = utils.open_file_read(circular) to_circularise = set([x.rstrip() for x in f.readlines()]) utils.close(f) else: to_circularise = circular seq_reader = sequences.file_reader(infile) fout = utils.open_file_write(outfile) nodes = 1 for seq in seq_reader: new_id = '_'.join([ 'NODE', str(nodes), 'length', str(len(seq)), 'cov', '1', 'ID', seq.id ]) if seq.id in to_circularise: seq.id = new_id + ':' + new_id + ';' print(seq, file=fout) seq.revcomp() seq.id = new_id + "':" + new_id + "';" print(seq, file=fout) else: seq.id = new_id + ';' print(seq, file=fout) seq.revcomp() seq.id = new_id + "';" print(seq, file=fout) nodes += 1 utils.close(fout)
Returns a dictionary of positions of the start of each sequence, as if all the sequences were catted into one sequence. e.g. if the file has three sequences, seq1 10bp, seq2 30bp, seq3 20bp, then the output would be: {'seq1': 0, 'seq2': 10, 'seq3': 40}
def length_offsets_from_fai(fai_file): '''Returns a dictionary of positions of the start of each sequence, as if all the sequences were catted into one sequence. eg if file has three sequences, seq1 10bp, seq2 30bp, seq3 20bp, then the output would be: {'seq1': 0, 'seq2': 10, 'seq3': 40}''' positions = {} total_length = 0 f = utils.open_file_read(fai_file) for line in f: try: (name, length) = line.rstrip().split()[:2] length = int(length) except: raise Error('Error reading the following line of fai file ' + fai_file + '\n' + line) positions[name] = total_length total_length += length utils.close(f) return positions
Splits a fasta/q file into separate files, file size determined by number of bases.
def split_by_base_count(infile, outfiles_prefix, max_bases, max_seqs=None): '''Splits a fasta/q file into separate files, file size determined by number of bases. Puts <= max_bases in each split file The exception is a single sequence >=max_bases is put in its own file. This does not split sequences. ''' seq_reader = sequences.file_reader(infile) base_count = 0 file_count = 1 seq_count = 0 fout = None if max_seqs is None: max_seqs = float('inf') for seq in seq_reader: if base_count == 0: fout = utils.open_file_write(outfiles_prefix + '.' + str(file_count)) file_count += 1 if base_count + len(seq) > max_bases or seq_count >= max_seqs: if base_count == 0: print(seq, file=fout) utils.close(fout) else: utils.close(fout) fout = utils.open_file_write(outfiles_prefix + '.' + str(file_count)) print(seq, file=fout) base_count = len(seq) file_count += 1 seq_count = 1 else: base_count += len(seq) seq_count += 1 print(seq, file=fout) utils.close(fout)
Splits fasta/q file into separate files, with up to (chunk_size + tolerance) bases in each file
def split_by_fixed_size(infile, outfiles_prefix, chunk_size, tolerance, skip_if_all_Ns=False): '''Splits fasta/q file into separate files, with up to (chunk_size + tolerance) bases in each file''' file_count = 1 coords = [] small_sequences = [] # sequences shorter than chunk_size seq_reader = sequences.file_reader(infile) f_coords = utils.open_file_write(outfiles_prefix + '.coords') for seq in seq_reader: if skip_if_all_Ns and seq.is_all_Ns(): continue if len(seq) < chunk_size: small_sequences.append(copy.copy(seq)) elif len(seq) <= chunk_size + tolerance: f = utils.open_file_write(outfiles_prefix + '.' + str(file_count)) print(seq, file=f) utils.close(f) file_count += 1 else: # make list of chunk coords chunks = [(x,x+chunk_size) for x in range(0, len(seq), chunk_size)] if chunks[-1][1] - 1 > len(seq): chunks[-1] = (chunks[-1][0], len(seq)) if len(chunks) > 1 and (chunks[-1][1] - chunks[-1][0]) <= tolerance: chunks[-2] = (chunks[-2][0], chunks[-1][1]) chunks.pop() # write one output file per chunk offset = 0 for chunk in chunks: if not(skip_if_all_Ns and seq.is_all_Ns(start=chunk[0], end=chunk[1]-1)): f = utils.open_file_write(outfiles_prefix + '.' + str(file_count)) chunk_id = seq.id + ':' + str(chunk[0]+1) + '-' + str(chunk[1]) print(sequences.Fasta(chunk_id, seq[chunk[0]:chunk[1]]), file=f) print(chunk_id, seq.id, offset, sep='\t', file=f_coords) utils.close(f) file_count += 1 offset += chunk[1] - chunk[0] # write files of small sequences if len(small_sequences): f = utils.open_file_write(outfiles_prefix + '.' + str(file_count)) file_count += 1 base_count = 0 for seq in small_sequences: if base_count > 0 and base_count + len(seq) > chunk_size + tolerance: utils.close(f) f = utils.open_file_write(outfiles_prefix + '.' + str(file_count)) file_count += 1 base_count = 0 print(seq, file=f) base_count += len(seq) utils.close(f)
Splits each sequence in infile into chunks of fixed size, last chunk can be up to (chunk_size + tolerance) in length
def split_by_fixed_size_onefile(infile, outfile, chunk_size, tolerance, skip_if_all_Ns=False): '''Splits each sequence in infile into chunks of fixed size, last chunk can be up to (chunk_size + tolerance) in length''' seq_reader = sequences.file_reader(infile) f_out = utils.open_file_write(outfile) for seq in seq_reader: for i in range(0, len(seq), chunk_size): if i + chunk_size + tolerance >= len(seq): end = len(seq) else: end = i + chunk_size subseq = seq.subseq(i, end) if not (skip_if_all_Ns and subseq.is_all_Ns()): subseq.id += '.' + str(i+1) + '_' + str(end) print(subseq, file=f_out) if end == len(seq): break utils.close(f_out)
Returns dictionary of length stats from an fai file. Keys are: longest, shortest, mean, total_length, N50, number
def stats_from_fai(infile): '''Returns dictionary of length stats from an fai file. Keys are: longest, shortest, mean, total_length, N50, number''' f = utils.open_file_read(infile) try: lengths = sorted([int(line.split('\t')[1]) for line in f], reverse=True) except: raise Error('Error getting lengths from fai file ' + infile) utils.close(f) stats = {} if len(lengths) > 0: stats['longest'] = max(lengths) stats['shortest'] = min(lengths) stats['total_length'] = sum(lengths) stats['mean'] = stats['total_length'] / len(lengths) stats['number'] = len(lengths) cumulative_length = 0 for length in lengths: cumulative_length += length if cumulative_length >= 0.5 * stats['total_length']: stats['N50'] = length break else: stats = {x: 0 for x in ('longest', 'shortest', 'mean', 'N50', 'total_length', 'number')} return stats
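The N50 step above is the standard definition: sort lengths in descending order and take the length at which the running total first reaches half the grand total. A standalone sketch of just that calculation:

    def n50(lengths):
        lengths = sorted(lengths, reverse=True)
        total = sum(lengths)
        cumulative = 0
        for length in lengths:
            cumulative += length
            if cumulative >= 0.5 * total:
                return length

    print(n50([2, 2, 2, 3, 3, 4, 8, 8]))  # 8 (total is 32; the two 8s already reach 16)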
Converts input sequence file into a Boulder-IO format, as used by primer3
def to_boulderio(infile, outfile): '''Converts input sequence file into a "Boulder-IO format", as used by primer3''' seq_reader = sequences.file_reader(infile) f_out = utils.open_file_write(outfile) for sequence in seq_reader: print("SEQUENCE_ID=" + sequence.id, file=f_out) print("SEQUENCE_TEMPLATE=" + sequence.seq, file=f_out) print("=", file=f_out) utils.close(f_out)
Returns the HMAC-HASH of value, using a key generated from key_salt and a secret (which defaults to settings.SECRET_KEY).
def salted_hmac(key_salt, value, secret=None): """ Returns the HMAC-HASH of 'value', using a key generated from key_salt and a secret (which defaults to settings.SECRET_KEY). A different key_salt should be passed in for every application of HMAC. :type key_salt: any :type value: any :type secret: any :rtype: HMAC """ if secret is None: secret = settings.SECRET_KEY key_salt = force_bytes(key_salt) secret = force_bytes(secret) # We need to generate a derived key from our base key. We can do this by # passing the key_salt and our base key through a pseudo-random function and # SHA1 works nicely. digest = hashes.Hash( settings.CRYPTOGRAPHY_DIGEST, backend=settings.CRYPTOGRAPHY_BACKEND) digest.update(key_salt + secret) key = digest.finalize() # If len(key_salt + secret) > sha_constructor().block_size, the above # line is redundant and could be replaced by key = key_salt + secret, since # the hmac module does the same thing for keys longer than the block size. # However, we need to ensure that we *always* do this. h = HMAC( key, settings.CRYPTOGRAPHY_DIGEST, backend=settings.CRYPTOGRAPHY_BACKEND) h.update(force_bytes(value)) return h
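The derivation mirrors a simple pattern: hash key_salt plus secret to get an HMAC key, then MAC the value with it. A hedged sketch of that structure using only the Python standard library (SHA-256 and the function name are assumptions; the real function uses the digest and backend configured in Django settings and the cryptography package):

    import hashlib
    import hmac

    def salted_hmac_sketch(key_salt, value, secret):
        # Derive a key by hashing key_salt + secret, then HMAC the value with it
        key = hashlib.sha256(key_salt + secret).digest()
        return hmac.new(key, value, hashlib.sha256).hexdigest()

    print(salted_hmac_sketch(b'my-app.signer', b'hello', b'not-a-real-secret'))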
Implements PBKDF2 with the same API as Django's existing implementation, using cryptography.
def pbkdf2(password, salt, iterations, dklen=0, digest=None): """ Implements PBKDF2 with the same API as Django's existing implementation, using cryptography. :type password: any :type salt: any :type iterations: int :type dklen: int :type digest: cryptography.hazmat.primitives.hashes.HashAlgorithm """ if digest is None: digest = settings.CRYPTOGRAPHY_DIGEST if not dklen: dklen = digest.digest_size password = force_bytes(password) salt = force_bytes(salt) kdf = PBKDF2HMAC( algorithm=digest, length=dklen, salt=salt, iterations=iterations, backend=settings.CRYPTOGRAPHY_BACKEND) return kdf.derive(password)
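For comparison, the standard library exposes the same primitive directly; with matching digest, salt, iteration count and key length it produces the same derived key. A minimal sketch (the parameter values are illustrative only):

    import hashlib

    # 100,000 iterations of PBKDF2-HMAC-SHA256, 32-byte derived key
    key = hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 100000, dklen=32)
    print(key.hex())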
:type data: any :rtype: any
def encrypt(self, data): """ :type data: any :rtype: any """ data = force_bytes(data) iv = os.urandom(16) return self._encrypt_from_parts(data, iv)
:type data: bytes :type iv: bytes :rtype: any
def _encrypt_from_parts(self, data, iv): """ :type data: bytes :type iv: bytes :rtype: any """ padder = padding.PKCS7(algorithms.AES.block_size).padder() padded_data = padder.update(data) + padder.finalize() encryptor = Cipher( algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend).encryptor() ciphertext = encryptor.update(padded_data) + encryptor.finalize() return self._signer.sign(iv + ciphertext)
:type data: bytes :type ttl: int :rtype: bytes
def decrypt(self, data, ttl=None): """ :type data: bytes :type ttl: int :rtype: bytes """ data = self._signer.unsign(data, ttl) iv = data[:16] ciphertext = data[16:] decryptor = Cipher( algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend).decryptor() plaintext_padded = decryptor.update(ciphertext) try: plaintext_padded += decryptor.finalize() except ValueError: raise InvalidToken # Remove padding unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder() unpadded = unpadder.update(plaintext_padded) try: unpadded += unpadder.finalize() except ValueError: raise InvalidToken return unpadded
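The encrypt/decrypt pair above is PKCS7-padded AES-CBC with the IV prepended to the ciphertext. A self-contained roundtrip sketch of that core using the cryptography package; it omits the signer wrapping used above and assumes a recent cryptography release where the backend argument is optional:

    import os
    from cryptography.hazmat.primitives import padding
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

    key = os.urandom(32)   # AES-256 key
    iv = os.urandom(16)    # fresh IV per message
    data = b'attack at dawn'

    # Pad to the AES block size, then encrypt with AES-CBC
    padder = padding.PKCS7(algorithms.AES.block_size).padder()
    padded = padder.update(data) + padder.finalize()
    encryptor = Cipher(algorithms.AES(key), modes.CBC(iv)).encryptor()
    ciphertext = encryptor.update(padded) + encryptor.finalize()

    # Decrypt and strip the padding again
    decryptor = Cipher(algorithms.AES(key), modes.CBC(iv)).decryptor()
    unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
    plaintext = unpadder.update(decryptor.update(ciphertext) + decryptor.finalize()) + unpadder.finalize()
    assert plaintext == data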
A get or create method for encrypted fields; we cache the field in the module to avoid recreation. This also allows us to always return the same class reference for a field.
def get_encrypted_field(base_class): """ A get or create method for encrypted fields, we cache the field in the module to avoid recreation. This also allows us to always return the same class reference for a field. :type base_class: models.Field[T] :rtype: models.Field[EncryptedMixin, T] """ assert not isinstance(base_class, models.Field) field_name = 'Encrypted' + base_class.__name__ if base_class not in FIELD_CACHE: FIELD_CACHE[base_class] = type(field_name, (EncryptedMixin, base_class), { 'base_class': base_class, }) return FIELD_CACHE[base_class]
A decorator for creating encrypted model fields.
def encrypt(base_field, key=None, ttl=None): """ A decorator for creating encrypted model fields. :type base_field: models.Field[T] :param bytes key: This is an optional argument. Allows for specifying an instance specific encryption key. :param int ttl: This is an optional argument. The amount of time in seconds that a value can be stored for. If the time to live of the data has passed, it will become unreadable. The expired value will return an :class:`Expired` object. :rtype: models.Field[EncryptedMixin, T] """ if not isinstance(base_field, models.Field): assert key is None assert ttl is None return get_encrypted_field(base_field) name, path, args, kwargs = base_field.deconstruct() kwargs.update({'key': key, 'ttl': ttl}) return get_encrypted_field(base_field.__class__)(*args, **kwargs)
Pickled data is serialized as base64
def value_to_string(self, obj): """Pickled data is serialized as base64""" value = self.value_from_object(obj) return b64encode(self._dump(value)).decode('ascii')
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is None, settings.SECRET_KEY is used instead.
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False): """ Returns URL-safe, sha1 signed base64 compressed JSON string. If key is None, settings.SECRET_KEY is used instead. If compress is True (not the default) checks if compressing using zlib can save some space. Prepends a '.' to signify compression. This is included in the signature, to protect against zip bombs. Salt can be used to namespace the hash, so that a signed string is only valid for a given namespace. Leaving this at the default value or re-using a salt value across different parts of your application without good cause is a security risk. The serializer is expected to return a bytestring. """ data = serializer().dumps(obj) # Flag for if it's been compressed or not is_compressed = False if compress: # Avoid zlib dependency unless compress is being used compressed = zlib.compress(data) if len(compressed) < (len(data) - 1): data = compressed is_compressed = True base64d = b64_encode(data) if is_compressed: base64d = b'.' + base64d return TimestampSigner(key, salt=salt).sign(base64d)
:type value: any :rtype: HMAC
def signature(self, value): """ :type value: any :rtype: HMAC """ h = HMAC(self.key, self.digest, backend=settings.CRYPTOGRAPHY_BACKEND) h.update(force_bytes(value)) return h
:type value: any :rtype: bytes
def sign(self, value): """ :type value: any :rtype: bytes """ payload = struct.pack('>cQ', self.version, int(time.time())) payload += force_bytes(value) return payload + self.signature(payload).finalize()
Retrieve original value and check it wasn't signed more than max_age seconds ago.
def unsign(self, signed_value, ttl=None): """ Retrieve original value and check it wasn't signed more than max_age seconds ago. :type signed_value: bytes :type ttl: int | datetime.timedelta """ h_size, d_size = struct.calcsize('>cQ'), self.digest.digest_size fmt = '>cQ%ds%ds' % (len(signed_value) - h_size - d_size, d_size) try: version, timestamp, value, sig = struct.unpack(fmt, signed_value) except struct.error: raise BadSignature('Signature is not valid') if version != self.version: raise BadSignature('Signature version not supported') if ttl is not None: if isinstance(ttl, datetime.timedelta): ttl = ttl.total_seconds() # Check timestamp is not older than ttl age = abs(time.time() - timestamp) if age > ttl + _MAX_CLOCK_SKEW: raise SignatureExpired('Signature age %s > %s seconds' % (age, ttl)) try: self.signature(signed_value[:-d_size]).verify(sig) except InvalidSignature: raise BadSignature( 'Signature "%s" does not match' % binascii.b2a_base64(sig)) return value
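The payload layout being unpacked here is: a one-byte version, an 8-byte big-endian timestamp, the value, then the digest. A small struct illustration with the digest faked as 32 fixed bytes (the real digest size depends on the configured hash):

    import struct
    import time

    version = b'1'
    value = b'hello'
    fake_digest = b'\x00' * 32  # stands in for the real HMAC digest

    payload = struct.pack('>cQ', version, int(time.time())) + value + fake_digest

    # Header is 9 bytes (1-byte version + 8-byte timestamp); digest is the last 32
    h_size = struct.calcsize('>cQ')
    fmt = '>cQ%ds%ds' % (len(payload) - h_size - 32, 32)
    v, ts, val, sig = struct.unpack(fmt, payload)
    print(v, ts, val)  # b'1' <timestamp> b'hello'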
Returns a PEP 386-compliant version number from VERSION.
def get_version(version=None): """ Returns a PEP 386-compliant version number from VERSION. """ version = get_complete_version(version) # Now build the two parts of the version number: # main = X.Y[.Z] # sub = .devN - for pre-alpha releases # | {a|b|c}N - for alpha, beta and rc releases main = get_main_version(version) sub = '' if version[3] == 'alpha' and version[4] == 0: git_changeset = get_git_changeset() if git_changeset: sub = '.dev%s' % git_changeset elif version[3] != 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'} sub = mapping[version[3]] + str(version[4]) return str(main + sub)
Returns a tuple of the django_cryptography version. If version argument is non-empty, then checks for correctness of the tuple provided.
def get_complete_version(version=None): """ Returns a tuple of the django_cryptography version. If version argument is non-empty, then checks for correctness of the tuple provided. """ if version is None: from django_cryptography import VERSION as version else: assert len(version) == 5 assert version[3] in ('alpha', 'beta', 'rc', 'final') return version
Return a value check function which raises a value error if the value is not in a pre-defined enumeration of values.
def enumeration(*args): """ Return a value check function which raises a value error if the value is not in a pre-defined enumeration of values. If you pass in a list, tuple or set as the single argument, it is assumed that the list/tuple/set defines the membership of the enumeration. If you pass in more than on argument, it is assumed the arguments themselves define the enumeration. """ assert len(args) > 0, 'at least one argument is required' if len(args) == 1: # assume the first argument defines the membership members = args[0] else: # assume the arguments are the members members = args def checker(value): if value not in members: raise ValueError(value) return checker
Return a value check function which raises a ValueError if the value does not match the supplied regular expression, see also re.match.
def match_pattern(regex): """ Return a value check function which raises a ValueError if the value does not match the supplied regular expression, see also `re.match`. """ prog = re.compile(regex) def checker(v): result = prog.match(v) if result is None: raise ValueError(v) return checker
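All of these checker factories follow the same closure pattern: build a configured checker once, then call it on each value and let ValueError signal a problem. A quick usage sketch, with match_pattern repeated here so it runs on its own:

    import re

    def match_pattern(regex):
        prog = re.compile(regex)
        def checker(v):
            if prog.match(v) is None:
                raise ValueError(v)
        return checker

    check_date = match_pattern(r'\d{4}-\d{2}-\d{2}')
    check_date('2020-01-31')      # passes silently
    try:
        check_date('31/01/2020')
    except ValueError as e:
        print('bad value:', e)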
Return a value check function which raises a ValueError if the supplied regular expression does not match anywhere in the value, see also re.search.
def search_pattern(regex): """ Return a value check function which raises a ValueError if the supplied regular expression does not match anywhere in the value, see also `re.search`. """ prog = re.compile(regex) def checker(v): result = prog.search(v) if result is None: raise ValueError(v) return checker
Return a value check function which raises a ValueError if the supplied value when cast as type is less than min or greater than max.
def number_range_inclusive(min, max, type=float): """ Return a value check function which raises a ValueError if the supplied value when cast as `type` is less than `min` or greater than `max`. """ def checker(v): if type(v) < min or type(v) > max: raise ValueError(v) return checker
Return a value check function which raises a ValueError if the supplied value when cast as type is less than or equal to min or greater than or equal to max.
def number_range_exclusive(min, max, type=float): """ Return a value check function which raises a ValueError if the supplied value when cast as `type` is less than or equal to `min` or greater than or equal to `max`. """ def checker(v): if type(v) <= min or type(v) >= max: raise ValueError(v) return checker
Return a value check function which raises a ValueError if the supplied value when converted to a datetime using the supplied format string is less than min or greater than max.
def datetime_range_inclusive(min, max, format): """ Return a value check function which raises a ValueError if the supplied value when converted to a datetime using the supplied `format` string is less than `min` or greater than `max`. """ dmin = datetime.strptime(min, format) dmax = datetime.strptime(max, format) def checker(v): dv = datetime.strptime(v, format) if dv < dmin or dv > dmax: raise ValueError(v) return checker
Write problems as restructured text to a file (or stdout/stderr).
def write_problems(problems, file, summarize=False, limit=0): """ Write problems as restructured text to a file (or stdout/stderr). """ w = file.write # convenience variable w(""" ================= Validation Report ================= """) counts = dict() # store problem counts per problem code total = 0 for i, p in enumerate(problems): if limit and i >= limit: break # bail out if total == 0 and not summarize: w(""" Problems ======== """) total += 1 code = p['code'] if code in counts: counts[code] += 1 else: counts[code] = 1 if not summarize: ptitle = '\n%s - %s\n' % (p['code'], p['message']) w(ptitle) underline = '' for i in range(len(ptitle.strip())): underline += '-' underline += '\n' w(underline) for k in sorted(p.viewkeys() - set(['code', 'message', 'context'])): w(':%s: %s\n' % (k, p[k])) if 'context' in p: c = p['context'] for k in sorted(c.viewkeys()): w(':%s: %s\n' % (k, c[k])) w(""" Summary ======= Found %s%s problem%s in total. """ % ('at least ' if limit else '', total, 's' if total != 1 else '')) for code in sorted(counts.viewkeys()): w(':%s: %s\n' % (code, counts[code])) return total
Add a header check, i.e. check whether the header record is consistent with the expected field names.
def add_header_check(self, code=HEADER_CHECK_FAILED, message=MESSAGES[HEADER_CHECK_FAILED]): """ Add a header check, i.e., check whether the header record is consistent with the expected field names. Arguments --------- `code` - problem code to report if the header record is not valid, defaults to `HEADER_CHECK_FAILED` `message` - problem message to report if a value is not valid """ t = code, message self._header_checks.append(t)
Add a record length check, i.e. check whether the length of a record is consistent with the number of expected fields.
def add_record_length_check(self, code=RECORD_LENGTH_CHECK_FAILED, message=MESSAGES[RECORD_LENGTH_CHECK_FAILED], modulus=1): """ Add a record length check, i.e., check whether the length of a record is consistent with the number of expected fields. Arguments --------- `code` - problem code to report if a record is not valid, defaults to `RECORD_LENGTH_CHECK_FAILED` `message` - problem message to report if a record is not valid `modulus` - apply the check to every nth record, defaults to 1 (check every record) """ t = code, message, modulus self._record_length_checks.append(t)
Add a value check function for the specified field.
def add_value_check(self, field_name, value_check, code=VALUE_CHECK_FAILED, message=MESSAGES[VALUE_CHECK_FAILED], modulus=1): """ Add a value check function for the specified field. Arguments --------- `field_name` - the name of the field to attach the value check function to `value_check` - a function that accepts a single argument (a value) and raises a `ValueError` if the value is not valid `code` - problem code to report if a value is not valid, defaults to `VALUE_CHECK_FAILED` `message` - problem message to report if a value is not valid `modulus` - apply the check to every nth record, defaults to 1 (check every record) """ # guard conditions assert field_name in self._field_names, 'unexpected field name: %s' % field_name assert callable(value_check), 'value check must be a callable function' t = field_name, value_check, code, message, modulus self._value_checks.append(t)
Add a value predicate function for the specified field.
def add_value_predicate(self, field_name, value_predicate, code=VALUE_PREDICATE_FALSE, message=MESSAGES[VALUE_PREDICATE_FALSE], modulus=1): """ Add a value predicate function for the specified field. N.B., everything you can do with value predicates can also be done with value check functions, whether you use one or the other is a matter of style. Arguments --------- `field_name` - the name of the field to attach the value predicate function to `value_predicate` - a function that accepts a single argument (a value) and returns False if the value is not valid `code` - problem code to report if a value is not valid, defaults to `VALUE_PREDICATE_FALSE` `message` - problem message to report if a value is not valid `modulus` - apply the check to every nth record, defaults to 1 (check every record) """ assert field_name in self._field_names, 'unexpected field name: %s' % field_name assert callable(value_predicate), 'value predicate must be a callable function' t = field_name, value_predicate, code, message, modulus self._value_predicates.append(t)
Add a record check function.
def add_record_check(self, record_check, modulus=1): """ Add a record check function. Arguments --------- `record_check` - a function that accepts a single argument (a record as a dictionary of values indexed by field name) and raises a `RecordError` if the record is not valid `modulus` - apply the check to every nth record, defaults to 1 (check every record) """ assert callable(record_check), 'record check must be a callable function' t = record_check, modulus self._record_checks.append(t)
Add a record predicate function.
def add_record_predicate(self, record_predicate, code=RECORD_PREDICATE_FALSE, message=MESSAGES[RECORD_PREDICATE_FALSE], modulus=1): """ Add a record predicate function. N.B., everything you can do with record predicates can also be done with record check functions, whether you use one or the other is a matter of style. Arguments --------- `record_predicate` - a function that accepts a single argument (a record as a dictionary of values indexed by field name) and returns False if the value is not valid `code` - problem code to report if a record is not valid, defaults to `RECORD_PREDICATE_FALSE` `message` - problem message to report if a record is not valid `modulus` - apply the check to every nth record, defaults to 1 (check every record) """ assert callable(record_predicate), 'record predicate must be a callable function' t = record_predicate, code, message, modulus self._record_predicates.append(t)
Add a unique check on a single column or combination of columns.
def add_unique_check(self, key, code=UNIQUE_CHECK_FAILED, message=MESSAGES[UNIQUE_CHECK_FAILED]): """ Add a unique check on a single column or combination of columns. Arguments --------- `key` - a single field name (string) specifying a field in which all values are expected to be unique, or a sequence of field names (tuple or list of strings) specifying a compound key `code` - problem code to report if a record is not valid, defaults to `UNIQUE_CHECK_FAILED` `message` - problem message to report if a record is not valid """ if isinstance(key, basestring): assert key in self._field_names, 'unexpected field name: %s' % key else: for f in key: assert f in self._field_names, 'unexpected field name: %s' % key t = key, code, message self._unique_checks.append(t)
Validate data and return a list of validation problems found.
def validate(self, data, expect_header_row=True, ignore_lines=0, summarize=False, limit=0, context=None, report_unexpected_exceptions=True): """ Validate `data` and return a list of validation problems found. Arguments --------- `data` - any source of row-oriented data, e.g., as provided by a `csv.reader`, or a list of lists of strings, or ... `expect_header_row` - does the data contain a header row (i.e., the first record is a list of field names)? Defaults to True. `ignore_lines` - ignore n lines (rows) at the beginning of the data `summarize` - only report problem codes, no other details `limit` - report at most n problems `context` - a dictionary of any additional information to be added to any problems found - useful if problems are being aggregated from multiple validators `report_unexpected_exceptions` - value check function, value predicates, record check functions, record predicates, and other user-supplied validation functions may raise unexpected exceptions. If this argument is true, any unexpected exceptions will be reported as validation problems; if False, unexpected exceptions will be handled silently. """ problems = list() problem_generator = self.ivalidate(data, expect_header_row, ignore_lines, summarize, context, report_unexpected_exceptions) for i, p in enumerate(problem_generator): if not limit or i < limit: problems.append(p) return problems
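A hedged sketch of how the add_*_check methods and validate() fit together. It assumes the surrounding class is the csvvalidator-style CSVValidator constructed from the expected field names and importable as shown (the constructor and import are not shown above, so that wiring is an assumption); data.csv is a made-up input file:

    import csv
    from csvvalidator import CSVValidator, match_pattern  # assumed import path

    field_names = ('id', 'date', 'score')
    validator = CSVValidator(field_names)
    validator.add_header_check()
    validator.add_record_length_check()
    validator.add_value_check('score', int)  # int() raises ValueError on bad input
    validator.add_value_check('date', match_pattern(r'\d{4}-\d{2}-\d{2}'))

    with open('data.csv', newline='') as f:
        problems = validator.validate(csv.reader(f))

    for p in problems:
        print(p['code'], p.get('row'), p.get('message'))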
Validate data and return an iterator over problems found.
def ivalidate(self, data, expect_header_row=True, ignore_lines=0, summarize=False, context=None, report_unexpected_exceptions=True): """ Validate `data` and return a iterator over problems found. Use this function rather than validate() if you expect a large number of problems. Arguments --------- `data` - any source of row-oriented data, e.g., as provided by a `csv.reader`, or a list of lists of strings, or ... `expect_header_row` - does the data contain a header row (i.e., the first record is a list of field names)? Defaults to True. `ignore_lines` - ignore n lines (rows) at the beginning of the data `summarize` - only report problem codes, no other details `context` - a dictionary of any additional information to be added to any problems found - useful if problems are being aggregated from multiple validators `report_unexpected_exceptions` - value check function, value predicates, record check functions, record predicates, and other user-supplied validation functions may raise unexpected exceptions. If this argument is true, any unexpected exceptions will be reported as validation problems; if False, unexpected exceptions will be handled silently. """ unique_sets = self._init_unique_sets() # used for unique checks for i, r in enumerate(data): if expect_header_row and i == ignore_lines: # r is the header row for p in self._apply_header_checks(i, r, summarize, context): yield p elif i >= ignore_lines: # r is a data row skip = False for p in self._apply_skips(i, r, summarize, report_unexpected_exceptions, context): if p is True: skip = True else: yield p if not skip: for p in self._apply_each_methods(i, r, summarize, report_unexpected_exceptions, context): yield p # may yield a problem if an exception is raised for p in self._apply_value_checks(i, r, summarize, report_unexpected_exceptions, context): yield p for p in self._apply_record_length_checks(i, r, summarize, context): yield p for p in self._apply_value_predicates(i, r, summarize, report_unexpected_exceptions, context): yield p for p in self._apply_record_checks(i, r, summarize, report_unexpected_exceptions, context): yield p for p in self._apply_record_predicates(i, r, summarize, report_unexpected_exceptions, context): yield p for p in self._apply_unique_checks(i, r, unique_sets, summarize): yield p for p in self._apply_check_methods(i, r, summarize, report_unexpected_exceptions, context): yield p for p in self._apply_assert_methods(i, r, summarize, report_unexpected_exceptions, context): yield p for p in self._apply_finally_assert_methods(summarize, report_unexpected_exceptions, context): yield p
Initialise sets used for uniqueness checking.
def _init_unique_sets(self): """Initialise sets used for uniqueness checking.""" ks = dict() for t in self._unique_checks: key = t[0] ks[key] = set() # empty set return ks
Apply value check functions on the given record r.
def _apply_value_checks(self, i, r, summarize=False, report_unexpected_exceptions=True, context=None): """Apply value check functions on the given record `r`.""" for field_name, check, code, message, modulus in self._value_checks: if i % modulus == 0: # support sampling fi = self._field_names.index(field_name) if fi < len(r): # only apply checks if there is a value value = r[fi] try: check(value) except ValueError: p = {'code': code} if not summarize: p['message'] = message p['row'] = i + 1 p['column'] = fi + 1 p['field'] = field_name p['value'] = value p['record'] = r if context is not None: p['context'] = context yield p except Exception as e: if report_unexpected_exceptions: p = {'code': UNEXPECTED_EXCEPTION} if not summarize: p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e) p['row'] = i + 1 p['column'] = fi + 1 p['field'] = field_name p['value'] = value p['record'] = r p['exception'] = e p['function'] = '%s: %s' % (check.__name__, check.__doc__) if context is not None: p['context'] = context yield p
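The modulus unpacked above supports sampling, i.e. applying a check only to every n-th data row. A brief sketch of how such checks might be registered; the `modulus` keyword argument is assumed from the tuple layout used in the method above:

validator = CSVValidator(('study_id', 'gender'))
# check study_id on every row, but only spot-check gender on every 10th row
validator.add_value_check('study_id', int, 'EX3', 'study id must be an integer')
validator.add_value_check('gender', enumeration('M', 'F'),
                          'EX5', 'invalid gender', modulus=10)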
Apply header checks on the given record r.
def _apply_header_checks(self, i, r, summarize=False, context=None): """Apply header checks on the given record `r`.""" for code, message in self._header_checks: if tuple(r) != self._field_names: p = {'code': code} if not summarize: p['message'] = message p['row'] = i + 1 p['record'] = tuple(r) p['missing'] = set(self._field_names) - set(r) p['unexpected'] = set(r) - set(self._field_names) if context is not None: p['context'] = context yield p
Apply record length checks on the given record r.
def _apply_record_length_checks(self, i, r, summarize=False, context=None): """Apply record length checks on the given record `r`.""" for code, message, modulus in self._record_length_checks: if i % modulus == 0: # support sampling if len(r) != len(self._field_names): p = {'code': code} if not summarize: p['message'] = message p['row'] = i + 1 p['record'] = r p['length'] = len(r) if context is not None: p['context'] = context yield p
Apply value predicates on the given record r.
def _apply_value_predicates(self, i, r, summarize=False, report_unexpected_exceptions=True, context=None): """Apply value predicates on the given record `r`.""" for field_name, predicate, code, message, modulus in self._value_predicates: if i % modulus == 0: # support sampling fi = self._field_names.index(field_name) if fi < len(r): # only apply predicate if there is a value value = r[fi] try: valid = predicate(value) if not valid: p = {'code': code} if not summarize: p['message'] = message p['row'] = i + 1 p['column'] = fi + 1 p['field'] = field_name p['value'] = value p['record'] = r if context is not None: p['context'] = context yield p except Exception as e: if report_unexpected_exceptions: p = {'code': UNEXPECTED_EXCEPTION} if not summarize: p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e) p['row'] = i + 1 p['column'] = fi + 1 p['field'] = field_name p['value'] = value p['record'] = r p['exception'] = e p['function'] = '%s: %s' % (predicate.__name__, predicate.__doc__) if context is not None: p['context'] = context yield p
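A value predicate is any callable that returns a truthy value for acceptable data; unlike a value check it signals failure by returning False rather than raising ValueError. A short sketch, assuming an add_value_predicate registration method matching the tuples unpacked above; the field and code are made up for illustration:

validator = CSVValidator(('patient_id',))
# reject empty or overly long patient ids without raising an exception
validator.add_value_predicate('patient_id',
                              lambda v: v.strip() != '' and len(v) <= 12,
                              'EX9', 'patient id missing or too long')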
Apply record checks on r.
def _apply_record_checks(self, i, r, summarize=False, report_unexpected_exceptions=True, context=None): """Apply record checks on `r`.""" for check, modulus in self._record_checks: if i % modulus == 0: # support sampling rdict = self._as_dict(r) try: check(rdict) except RecordError as e: code = e.code if e.code is not None else RECORD_CHECK_FAILED p = {'code': code} if not summarize: message = e.message if e.message is not None else MESSAGES[RECORD_CHECK_FAILED] p['message'] = message p['row'] = i + 1 p['record'] = r if context is not None: p['context'] = context if e.details is not None: p['details'] = e.details yield p except Exception as e: if report_unexpected_exceptions: p = {'code': UNEXPECTED_EXCEPTION} if not summarize: p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e) p['row'] = i + 1 p['record'] = r p['exception'] = e p['function'] = '%s: %s' % (check.__name__, check.__doc__) if context is not None: p['context'] = context yield p
Apply record predicates on r.
def _apply_record_predicates(self, i, r, summarize=False, report_unexpected_exceptions=True, context=None): """Apply record predicates on `r`.""" for predicate, code, message, modulus in self._record_predicates: if i % modulus == 0: # support sampling rdict = self._as_dict(r) try: valid = predicate(rdict) if not valid: p = {'code': code} if not summarize: p['message'] = message p['row'] = i + 1 p['record'] = r if context is not None: p['context'] = context yield p except Exception as e: if report_unexpected_exceptions: p = {'code': UNEXPECTED_EXCEPTION} if not summarize: p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e) p['row'] = i + 1 p['record'] = r p['exception'] = e p['function'] = '%s: %s' % (predicate.__name__, predicate.__doc__) if context is not None: p['context'] = context yield p
Apply unique checks on r.
def _apply_unique_checks(self, i, r, unique_sets, summarize=False, context=None): """Apply unique checks on `r`.""" for key, code, message in self._unique_checks: value = None values = unique_sets[key] if isinstance(key, basestring): # assume key is a field name fi = self._field_names.index(key) if fi >= len(r): continue value = r[fi] else: # assume key is a list or tuple, i.e., compound key value = [] for f in key: fi = self._field_names.index(f) if fi >= len(r): break value.append(r[fi]) value = tuple(value) # enable hashing if value in values: p = {'code': code} if not summarize: p['message'] = message p['row'] = i + 1 p['record'] = r p['key'] = key p['value'] = value if context is not None: p['context'] = context yield p values.add(value)
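Uniqueness can be enforced on a single field or on a compound key (a tuple of field names), matching the two branches above. A sketch assuming an add_unique_check registration method; field names and codes are illustrative:

validator = CSVValidator(('study_id', 'patient_id', 'visit'))
# no two rows may share the same patient_id
validator.add_unique_check('patient_id', 'EX10', 'duplicate patient id')
# (patient_id, visit) pairs must also be unique across the file
validator.add_unique_check(('patient_id', 'visit'),
                           'EX11', 'duplicate patient visit')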
Invoke 'each' methods on r.
def _apply_each_methods(self, i, r, summarize=False, report_unexpected_exceptions=True, context=None): """Invoke 'each' methods on `r`.""" for a in dir(self): if a.startswith('each'): rdict = self._as_dict(r) f = getattr(self, a) try: f(rdict) except Exception as e: if report_unexpected_exceptions: p = {'code': UNEXPECTED_EXCEPTION} if not summarize: p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e) p['row'] = i + 1 p['record'] = r p['exception'] = e p['function'] = '%s: %s' % (f.__name__, f.__doc__) if context is not None: p['context'] = context yield p
Apply assert methods on r.
def _apply_assert_methods(self, i, r, summarize=False, report_unexpected_exceptions=True, context=None): """Apply 'assert' methods on `r`.""" for a in dir(self): if a.startswith('assert'): rdict = self._as_dict(r) f = getattr(self, a) try: f(rdict) except AssertionError as e: code = ASSERT_CHECK_FAILED message = MESSAGES[ASSERT_CHECK_FAILED] if len(e.args) > 0: custom = e.args[0] if isinstance(custom, (list, tuple)): if len(custom) > 0: code = custom[0] if len(custom) > 1: message = custom[1] else: code = custom p = {'code': code} if not summarize: p['message'] = message p['row'] = i + 1 p['record'] = r if context is not None: p['context'] = context yield p except Exception as e: if report_unexpected_exceptions: p = {'code': UNEXPECTED_EXCEPTION} if not summarize: p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e) p['row'] = i + 1 p['record'] = r p['exception'] = e p['function'] = '%s: %s' % (f.__name__, f.__doc__) if context is not None: p['context'] = context yield p
Apply check methods on r.
def _apply_check_methods(self, i, r, summarize=False, report_unexpected_exceptions=True, context=None): """Apply 'check' methods on `r`.""" for a in dir(self): if a.startswith('check'): rdict = self._as_dict(r) f = getattr(self, a) try: f(rdict) except RecordError as e: code = e.code if e.code is not None else RECORD_CHECK_FAILED p = {'code': code} if not summarize: message = e.message if e.message is not None else MESSAGES[RECORD_CHECK_FAILED] p['message'] = message p['row'] = i + 1 p['record'] = r if context is not None: p['context'] = context if e.details is not None: p['details'] = e.details yield p except Exception as e: if report_unexpected_exceptions: p = {'code': UNEXPECTED_EXCEPTION} if not summarize: p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e) p['row'] = i + 1 p['record'] = r p['exception'] = e p['function'] = '%s: %s' % (f.__name__, f.__doc__) if context is not None: p['context'] = context yield p
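The three method-prefix hooks above ('each', 'assert' and 'check') let validation logic live on a subclass instead of being registered one function at a time. A sketch of a subclass using all three, assuming the CSVValidator and RecordError names defined in this module; the method bodies and codes are made up for illustration:

class DemographicsValidator(CSVValidator):

    def each_log_row(self, rdict):
        # runs for every data row; any exception raised here is reported
        # as an UNEXPECTED_EXCEPTION problem
        pass

    def assert_age_not_negative(self, rdict):
        # AssertionError args may carry a (code, message) pair, as handled above
        assert int(rdict['age_years']) >= 0, ('EX12', 'negative age')

    def check_gender_known(self, rdict):
        # RecordError carries its own code and message
        if rdict['gender'] not in ('M', 'F'):
            raise RecordError('EX13', 'unknown gender')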
Apply skip functions on r.
def _apply_skips(self, i, r, summarize=False, report_unexpected_exceptions=True, context=None): """Apply skip functions on `r`.""" for skip in self._skips: try: result = skip(r) if result is True: yield True except Exception as e: if report_unexpected_exceptions: p = {'code': UNEXPECTED_EXCEPTION} if not summarize: p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e) p['row'] = i + 1 p['record'] = r p['exception'] = e p['function'] = '%s: %s' % (skip.__name__, skip.__doc__) if context is not None: p['context'] = context yield p
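Skip functions receive the raw record and return True to exclude it from all further checks, which is how the skip flag in ivalidate() above gets set. A sketch assuming an add_skip registration method; the comment-line convention is an illustrative choice:

validator = CSVValidator(('study_id', 'patient_id'))
# ignore completely empty rows and lines whose first cell starts with '#'
validator.add_skip(lambda r: len(r) == 0 or r[0].startswith('#'))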
Convert the record to a dictionary using field names as keys.
def _as_dict(self, r): """Convert the record to a dictionary using field names as keys.""" d = dict() for i, f in enumerate(self._field_names): d[f] = r[i] if i < len(r) else None return d
Create an example CSV validator for patient demographic data.
def create_validator():
    """Create an example CSV validator for patient demographic data."""
    field_names = (
        'study_id',
        'patient_id',
        'gender',
        'age_years',
        'age_months',
        'date_inclusion'
    )
    validator = CSVValidator(field_names)

    # basic header and record length checks
    validator.add_header_check('EX1', 'bad header')
    validator.add_record_length_check('EX2', 'unexpected record length')

    # some simple value checks
    validator.add_value_check('study_id', int,
                              'EX3', 'study id must be an integer')
    validator.add_value_check('patient_id', int,
                              'EX4', 'patient id must be an integer')
    validator.add_value_check('gender', enumeration('M', 'F'),
                              'EX5', 'invalid gender')
    validator.add_value_check('age_years', number_range_inclusive(0, 120, int),
                              'EX6', 'invalid age in years')
    validator.add_value_check('date_inclusion', datetime_string('%Y-%m-%d'),
                              'EX7', 'invalid date')

    # a more complicated record check: age in months must fall within the
    # twelve months following the age in years (avoids the division-by-zero
    # risk of a modulus check when age_years is 0)
    def check_age_variables(r):
        age_years = int(r['age_years'])
        age_months = int(r['age_months'])
        valid = (age_years * 12 <= age_months < (age_years + 1) * 12)
        if not valid:
            raise RecordError('EX8', 'invalid age variables')
    validator.add_record_check(check_age_variables)

    return validator
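The example validator can be exercised without a file by passing any iterable of rows, e.g. a list of lists; the rows below are made up purely for illustration:

rows = [
    ['study_id', 'patient_id', 'gender', 'age_years', 'age_months', 'date_inclusion'],
    ['1', '101', 'M', '34', '410', '2016-01-10'],
    ['1', 'abc', 'X', '150', '12', 'not-a-date'],  # deliberately bad row
]
validator = create_validator()
for problem in validator.validate(rows):
    print '%s row %s: %s' % (problem['code'], problem.get('row'), problem.get('message'))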
Main function.
def main(): """Main function.""" # define a command-line argument parser description = 'Validate a CSV data file.' parser = argparse.ArgumentParser(description=description) parser.add_argument('file', metavar='FILE', help='a file to be validated') parser.add_argument('-l', '--limit', dest='limit', type=int, action='store', default=0, help='limit the number of problems reported' ) parser.add_argument('-s', '--summarize', dest='summarize', action='store_true', default=False, help='output only a summary of the different types of problem found' ) parser.add_argument('-e', '--report-unexpected-exceptions', dest='report_unexpected_exceptions', action='store_true', default=False, help='report any unexpected exceptions as problems' ) # parse arguments args = parser.parse_args() # sanity check arguments if not os.path.isfile(args.file): print '%s is not a file' % args.file sys.exit(1) with open(args.file, 'r') as f: # set up a csv reader for the data data = csv.reader(f, delimiter='\t') # create a validator validator = create_validator() # validate the data from the csv reader # N.B., validate() returns a list of problems; # if you expect a large number of problems, use ivalidate() instead # of validate(), but bear in mind that ivalidate() returns an iterator # so there is no len() problems = validator.validate(data, summarize=args.summarize, report_unexpected_exceptions=args.report_unexpected_exceptions, context={'file': args.file}) # write problems to stdout as restructured text write_problems(problems, sys.stdout, summarize=args.summarize, limit=args.limit) # decide how to exit if problems: # will not work with ivalidate() because it returns an iterator sys.exit(1) else: sys.exit(0)
Convert markdown tables to html, since recommonmark can't. This requires 3 steps: snip out table sections from the markdown, convert them to html, and replace the old markdown table with an html table.
def process_tables(app, docname, source): """ Convert markdown tables to html, since recommonmark can't. This requires 3 steps: Snip out table sections from the markdown Convert them to html Replace the old markdown table with an html table This function is called by sphinx for each document. `source` is a 1-item list. To update the document, replace element 0 in `source`. """ import markdown md = markdown.Markdown(extensions=['markdown.extensions.tables']) table_processor = markdown.extensions.tables.TableProcessor(md.parser) raw_markdown = source[0] blocks = re.split(r'\n{2,}', raw_markdown) for i, block in enumerate(blocks): if table_processor.test(None, block): html = md.convert(block) styled = html.replace('<table>', '<table border="1" class="docutils">', 1) # apply styling blocks[i] = styled # re-assemble into markdown-with-tables-replaced # must replace element 0 for changes to persist source[0] = '\n\n'.join(blocks)
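For process_tables to run, it must be registered for Sphinx's source-read event, whose handler signature is (app, docname, source) as used above. A minimal conf.py hook:

def setup(app):
    # called by Sphinx at startup; run the table converter on every document read
    app.connect('source-read', process_tables)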
Pack given values v1, v2, ... into given bytearray buf, starting at given bit offset offset. Pack according to given format string fmt. Give fill_padding as False to leave padding bits in buf unmodified.
def pack_into(fmt, buf, offset, *args, **kwargs): """Pack given values v1, v2, ... into given bytearray `buf`, starting at given bit offset `offset`. Pack according to given format string `fmt`. Give `fill_padding` as ``False`` to leave padding bits in `buf` unmodified. """ return CompiledFormat(fmt).pack_into(buf, offset, *args, **kwargs)
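A short usage sketch in the style of the bitstruct package, which this helper mirrors; the format string packs a 1-bit, a 3-bit and a 4-bit unsigned value into the first byte of the buffer (treat the exact format codes as an assumption if this library's format grammar differs):

buf = bytearray(2)
# 'u1u3u4' -> one 1-bit, one 3-bit and one 4-bit unsigned field, MSB first
pack_into('u1u3u4', buf, 0, 1, 2, 0xF)
# buf[0] is now 0b10101111 == 0xAF; buf[1] is left untouched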