Dataset columns:

    partition           stringclasses   3 values
    func_name           stringlengths   1 to 134
    docstring           stringlengths   1 to 46.9k
    path                stringlengths   4 to 223
    original_string     stringlengths   75 to 104k
    code                stringlengths   75 to 104k
    docstring_tokens    listlengths     1 to 1.97k
    repo                stringlengths   7 to 55
    language            stringclasses   1 value
    url                 stringlengths   87 to 315
    code_tokens         listlengths     19 to 28.4k
    sha                 stringlengths   40 to 40
test
_form_onset_offset_fronts
Takes an array of onsets or offsets (shape = [nfrequencies, nsamples], where a 1 corresponds to an on/offset, and samples are 0 otherwise), and returns a new array of the same shape, where each 1 has been replaced by either a 0, if the on/offset has been discarded, or a non-zero positive integer, such that each front within the array has a unique ID - for example, all 2s in the array will be the front for on/offset front 2, and all the 15s will be the front for on/offset front 15, etc. Due to implementation details, there will be no 1 IDs.
algorithms/asa.py
def _form_onset_offset_fronts(ons_or_offs, sample_rate_hz, threshold_ms=20):
    """
    Takes an array of onsets or offsets (shape = [nfrequencies, nsamples], where a 1 corresponds to
    an on/offset, and samples are 0 otherwise), and returns a new array of the same shape, where each
    1 has been replaced by either a 0, if the on/offset has been discarded, or a non-zero positive
    integer, such that each front within the array has a unique ID - for example, all 2s in the array
    will be the front for on/offset front 2, and all the 15s will be the front for on/offset front 15,
    etc. Due to implementation details, there will be no 1 IDs.
    """
    threshold_s = threshold_ms / 1000
    threshold_samples = sample_rate_hz * threshold_s

    ons_or_offs = np.copy(ons_or_offs)

    claimed = []
    this_id = 2
    # For each frequency,
    for frequency_index, row in enumerate(ons_or_offs[:, :]):
        ones = np.reshape(np.where(row == 1), (-1,))

        # for each 1 in that frequency,
        for top_level_frequency_one_index in ones:
            claimed.append((frequency_index, top_level_frequency_one_index))
            found_a_front = False

            # for each frequencies[i:],
            for other_frequency_index, other_row in enumerate(ons_or_offs[frequency_index + 1:, :], start=frequency_index + 1):
                # for each non-claimed 1 which is less than threshold_ms away in time,
                upper_limit_index = top_level_frequency_one_index + threshold_samples
                lower_limit_index = top_level_frequency_one_index - threshold_samples
                other_ones = np.reshape(np.where(other_row == 1), (-1,))  # Get the indexes of all the 1s in row
                tmp = np.reshape(np.where((other_ones >= lower_limit_index)  # Get the indexes in the other_ones array of all items in bounds
                                          & (other_ones <= upper_limit_index)), (-1,))
                other_ones = other_ones[tmp]  # Get the indexes of all the 1s in the row that are in bounds

                if len(other_ones) > 0:
                    unclaimed_idx = other_ones[0]  # Take the first one
                    claimed.append((other_frequency_index, unclaimed_idx))
                elif len(claimed) < 3:
                    # revert the top-most 1 to 0
                    ons_or_offs[frequency_index, top_level_frequency_one_index] = 0
                    claimed = []
                    break  # Break from the for-each-frequencies[i:] loop so we can move on to the next item in the top-most freq
                elif len(claimed) >= 3:
                    found_a_front = True
                    # this group of so-far-claimed forms a front
                    claimed_as_indexes = tuple(np.array(claimed).T)
                    ons_or_offs[claimed_as_indexes] = this_id
                    this_id += 1
                    claimed = []
                    break  # Move on to the next item in the top-most array

            # If we never found a frequency that did not have a matching offset, handle that case here
            if len(claimed) >= 3:
                claimed_as_indexes = tuple(np.array(claimed).T)
                ons_or_offs[claimed_as_indexes] = this_id
                this_id += 1
                claimed = []
            elif found_a_front:
                this_id += 1
            else:
                ons_or_offs[frequency_index, top_level_frequency_one_index] = 0
                claimed = []

    return ons_or_offs
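A minimal usage sketch for the function above; the toy onset map, sample rate, and threshold are invented for illustration, and the helper is assumed to be in scope (for example, pasted from the code above or imported from algorithms/asa.py).

import numpy as np
# Assumes _form_onset_offset_fronts (above) is in scope.

# Toy onset map: 4 frequency channels x 10 samples, with onsets (1s) roughly
# aligned in time across channels 0-3 around sample 3.
onsets = np.zeros((4, 10), dtype=np.int64)
onsets[0, 3] = 1
onsets[1, 3] = 1
onsets[2, 4] = 1
onsets[3, 2] = 1

# With a 1000 Hz sample rate and the default 20 ms threshold, onsets within
# 20 samples of one another can be grouped into a single front.
fronts = _form_onset_offset_fronts(onsets, sample_rate_hz=1000, threshold_ms=20)
print(fronts)  # the grouped onsets now share a front ID of 2 or higher; 1 is never used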
[ "Takes", "an", "array", "of", "onsets", "or", "offsets", "(", "shape", "=", "[", "nfrequencies", "nsamples", "]", "where", "a", "1", "corresponds", "to", "an", "on", "/", "offset", "and", "samples", "are", "0", "otherwise", ")", "and", "returns", "a", "new", "array", "of", "the", "same", "shape", "where", "each", "1", "has", "been", "replaced", "by", "either", "a", "0", "if", "the", "on", "/", "offset", "has", "been", "discarded", "or", "a", "non", "-", "zero", "positive", "integer", "such", "that", "each", "front", "within", "the", "array", "has", "a", "unique", "ID", "-", "for", "example", "all", "2s", "in", "the", "array", "will", "be", "the", "front", "for", "on", "/", "offset", "front", "2", "and", "all", "the", "15s", "will", "be", "the", "front", "for", "on", "/", "offset", "front", "15", "etc", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L197-L261
[ "def", "_form_onset_offset_fronts", "(", "ons_or_offs", ",", "sample_rate_hz", ",", "threshold_ms", "=", "20", ")", ":", "threshold_s", "=", "threshold_ms", "/", "1000", "threshold_samples", "=", "sample_rate_hz", "*", "threshold_s", "ons_or_offs", "=", "np", ".", "copy", "(", "ons_or_offs", ")", "claimed", "=", "[", "]", "this_id", "=", "2", "# For each frequency,", "for", "frequency_index", ",", "row", "in", "enumerate", "(", "ons_or_offs", "[", ":", ",", ":", "]", ")", ":", "ones", "=", "np", ".", "reshape", "(", "np", ".", "where", "(", "row", "==", "1", ")", ",", "(", "-", "1", ",", ")", ")", "# for each 1 in that frequency,", "for", "top_level_frequency_one_index", "in", "ones", ":", "claimed", ".", "append", "(", "(", "frequency_index", ",", "top_level_frequency_one_index", ")", ")", "found_a_front", "=", "False", "# for each frequencies[i:],", "for", "other_frequency_index", ",", "other_row", "in", "enumerate", "(", "ons_or_offs", "[", "frequency_index", "+", "1", ":", ",", ":", "]", ",", "start", "=", "frequency_index", "+", "1", ")", ":", "# for each non-claimed 1 which is less than theshold_ms away in time,", "upper_limit_index", "=", "top_level_frequency_one_index", "+", "threshold_samples", "lower_limit_index", "=", "top_level_frequency_one_index", "-", "threshold_samples", "other_ones", "=", "np", ".", "reshape", "(", "np", ".", "where", "(", "other_row", "==", "1", ")", ",", "(", "-", "1", ",", ")", ")", "# Get the indexes of all the 1s in row", "tmp", "=", "np", ".", "reshape", "(", "np", ".", "where", "(", "(", "other_ones", ">=", "lower_limit_index", ")", "# Get the indexes in the other_ones array of all items in bounds", "&", "(", "other_ones", "<=", "upper_limit_index", ")", ")", ",", "(", "-", "1", ",", ")", ")", "other_ones", "=", "other_ones", "[", "tmp", "]", "# Get the indexes of all the 1s in the row that are in bounds", "if", "len", "(", "other_ones", ")", ">", "0", ":", "unclaimed_idx", "=", "other_ones", "[", "0", "]", "# Take the first one", "claimed", ".", "append", "(", "(", "other_frequency_index", ",", "unclaimed_idx", ")", ")", "elif", "len", "(", "claimed", ")", "<", "3", ":", "# revert the top-most 1 to 0", "ons_or_offs", "[", "frequency_index", ",", "top_level_frequency_one_index", "]", "=", "0", "claimed", "=", "[", "]", "break", "# Break from the for-each-frequencies[i:] loop so we can move on to the next item in the top-most freq", "elif", "len", "(", "claimed", ")", ">=", "3", ":", "found_a_front", "=", "True", "# this group of so-far-claimed forms a front", "claimed_as_indexes", "=", "tuple", "(", "np", ".", "array", "(", "claimed", ")", ".", "T", ")", "ons_or_offs", "[", "claimed_as_indexes", "]", "=", "this_id", "this_id", "+=", "1", "claimed", "=", "[", "]", "break", "# Move on to the next item in the top-most array", "# If we never found a frequency that did not have a matching offset, handle that case here", "if", "len", "(", "claimed", ")", ">=", "3", ":", "claimed_as_indexes", "=", "tuple", "(", "np", ".", "array", "(", "claimed", ")", ".", "T", ")", "ons_or_offs", "[", "claimed_as_indexes", "]", "=", "this_id", "this_id", "+=", "1", "claimed", "=", "[", "]", "elif", "found_a_front", ":", "this_id", "+=", "1", "else", ":", "ons_or_offs", "[", "frequency_index", ",", "top_level_frequency_one_index", "]", "=", "0", "claimed", "=", "[", "]", "return", "ons_or_offs" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_lookup_offset_by_onset_idx
Takes an onset index (freq, sample) and returns the offset index (freq, sample) such that frequency index is the same, and sample index is the minimum of all offsets occurring after the given onset. If there are no offsets after the given onset in that frequency channel, the final sample in that channel is returned.
algorithms/asa.py
def _lookup_offset_by_onset_idx(onset_idx, onsets, offsets):
    """
    Takes an onset index (freq, sample) and returns the offset index (freq, sample) such that
    frequency index is the same, and sample index is the minimum of all offsets occurring after
    the given onset. If there are no offsets after the given onset in that frequency channel,
    the final sample in that channel is returned.
    """
    assert len(onset_idx) == 2, "Onset_idx must be a tuple of the form (freq_idx, sample_idx)"
    frequency_idx, sample_idx = onset_idx
    offset_sample_idxs = np.reshape(np.where(offsets[frequency_idx, :] == 1), (-1,))

    # get the offsets which occur after onset
    offset_sample_idxs = offset_sample_idxs[offset_sample_idxs > sample_idx]

    if len(offset_sample_idxs) == 0:
        # There is no offset in this frequency that occurs after the onset, just return the last sample
        chosen_offset_sample_idx = offsets.shape[1] - 1
        assert offsets[frequency_idx, chosen_offset_sample_idx] == 0
    else:
        # Return the closest offset to the onset
        chosen_offset_sample_idx = offset_sample_idxs[0]
        assert offsets[frequency_idx, chosen_offset_sample_idx] != 0

    return frequency_idx, chosen_offset_sample_idx
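A small illustrative call, assuming numpy is imported as np and the helper above is in scope; the arrays are toy values.

import numpy as np
# Assumes _lookup_offset_by_onset_idx (above) is in scope.

onsets = np.zeros((2, 8), dtype=np.int64)
offsets = np.zeros((2, 8), dtype=np.int64)
onsets[0, 2] = 1      # (the onsets argument is accepted but not used by this helper)
offsets[0, 5] = 1     # first offset after the onset in channel 0

print(_lookup_offset_by_onset_idx((0, 2), onsets, offsets))  # channel 0, sample 5
print(_lookup_offset_by_onset_idx((1, 3), onsets, offsets))  # no offset after sample 3 in channel 1, so the final sample (1, 7) is returned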
[ "Takes", "an", "onset", "index", "(", "freq", "sample", ")", "and", "returns", "the", "offset", "index", "(", "freq", "sample", ")", "such", "that", "frequency", "index", "is", "the", "same", "and", "sample", "index", "is", "the", "minimum", "of", "all", "offsets", "ocurring", "after", "the", "given", "onset", ".", "If", "there", "are", "no", "offsets", "after", "the", "given", "onset", "in", "that", "frequency", "channel", "the", "final", "sample", "in", "that", "channel", "is", "returned", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L263-L283
[ "def", "_lookup_offset_by_onset_idx", "(", "onset_idx", ",", "onsets", ",", "offsets", ")", ":", "assert", "len", "(", "onset_idx", ")", "==", "2", ",", "\"Onset_idx must be a tuple of the form (freq_idx, sample_idx)\"", "frequency_idx", ",", "sample_idx", "=", "onset_idx", "offset_sample_idxs", "=", "np", ".", "reshape", "(", "np", ".", "where", "(", "offsets", "[", "frequency_idx", ",", ":", "]", "==", "1", ")", ",", "(", "-", "1", ",", ")", ")", "# get the offsets which occur after onset", "offset_sample_idxs", "=", "offset_sample_idxs", "[", "offset_sample_idxs", ">", "sample_idx", "]", "if", "len", "(", "offset_sample_idxs", ")", "==", "0", ":", "# There is no offset in this frequency that occurs after the onset, just return the last sample", "chosen_offset_sample_idx", "=", "offsets", ".", "shape", "[", "1", "]", "-", "1", "assert", "offsets", "[", "frequency_idx", ",", "chosen_offset_sample_idx", "]", "==", "0", "else", ":", "# Return the closest offset to the onset", "chosen_offset_sample_idx", "=", "offset_sample_idxs", "[", "0", "]", "assert", "offsets", "[", "frequency_idx", ",", "chosen_offset_sample_idx", "]", "!=", "0", "return", "frequency_idx", ",", "chosen_offset_sample_idx" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_get_front_idxs_from_id
Return a list of tuples of the form (frequency_idx, sample_idx), corresponding to all the indexes of the given front.
algorithms/asa.py
def _get_front_idxs_from_id(fronts, id):
    """
    Return a list of tuples of the form (frequency_idx, sample_idx),
    corresponding to all the indexes of the given front.
    """
    if id == -1:
        # This is the only special case.
        # -1 is the index of the catch-all final column offset front.
        freq_idxs = np.arange(fronts.shape[0], dtype=np.int64)
        sample_idxs = np.ones(len(freq_idxs), dtype=np.int64) * (fronts.shape[1] - 1)
    else:
        freq_idxs, sample_idxs = np.where(fronts == id)
    return [(f, i) for f, i in zip(freq_idxs, sample_idxs)]
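A toy example of looking up front indexes, including the special catch-all ID of -1; assumes the helper above is in scope.

import numpy as np
# Assumes _get_front_idxs_from_id (above) is in scope.

fronts = np.zeros((3, 6), dtype=np.int64)
fronts[0, 1] = 2
fronts[1, 2] = 2
fronts[2, 2] = 3

print(_get_front_idxs_from_id(fronts, 2))   # front 2 occupies (0, 1) and (1, 2)
print(_get_front_idxs_from_id(fronts, -1))  # the catch-all -1 front: the last column of every channel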
[ "Return", "a", "list", "of", "tuples", "of", "the", "form", "(", "frequency_idx", "sample_idx", ")", "corresponding", "to", "all", "the", "indexes", "of", "the", "given", "front", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L285-L297
[ "def", "_get_front_idxs_from_id", "(", "fronts", ",", "id", ")", ":", "if", "id", "==", "-", "1", ":", "# This is the only special case.", "# -1 is the index of the catch-all final column offset front.", "freq_idxs", "=", "np", ".", "arange", "(", "fronts", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "int64", ")", "sample_idxs", "=", "np", ".", "ones", "(", "len", "(", "freq_idxs", ")", ",", "dtype", "=", "np", ".", "int64", ")", "*", "(", "fronts", ".", "shape", "[", "1", "]", "-", "1", ")", "else", ":", "freq_idxs", ",", "sample_idxs", "=", "np", ".", "where", "(", "fronts", "==", "id", ")", "return", "[", "(", "f", ",", "i", ")", "for", "f", ",", "i", "in", "zip", "(", "freq_idxs", ",", "sample_idxs", ")", "]" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_choose_front_id_from_candidates
Returns a front ID which is the id of the offset front that contains the most overlap with offsets that correspond to the given onset front ID.
algorithms/asa.py
def _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offsets_corresponding_to_onsets):
    """
    Returns a front ID which is the id of the offset front that contains the most overlap
    with offsets that correspond to the given onset front ID.
    """
    noverlaps = []  # will contain tuples of the form (number_overlapping, offset_front_id)
    for offset_front_id in candidate_offset_front_ids:
        offset_front_f_idxs, offset_front_s_idxs = np.where(offset_fronts == offset_front_id)
        offset_front_idxs = [(f, i) for f, i in zip(offset_front_f_idxs, offset_front_s_idxs)]
        noverlap_this_id = len(set(offset_front_idxs).symmetric_difference(set(offsets_corresponding_to_onsets)))
        noverlaps.append((noverlap_this_id, offset_front_id))

    _overlapped, chosen_offset_front_id = max(noverlaps, key=lambda t: t[0])

    return int(chosen_offset_front_id)
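A toy call assuming the helper above is in scope; the front layout is invented. Note that although the docstring speaks of the most overlap, the body maximizes the size of the symmetric difference, so in this invented example the sparser candidate front wins; whether that is intended is not clear from the source.

import numpy as np
# Assumes _choose_front_id_from_candidates (above) is in scope.

offset_fronts = np.zeros((3, 6), dtype=np.int64)
offset_fronts[0, 4] = 2   # offset front 2 occupies (0, 4) and (1, 4)
offset_fronts[1, 4] = 2
offset_fronts[2, 5] = 3   # offset front 3 occupies only (2, 5)

# Offsets that were looked up for some onset front:
offsets_for_onsets = [(0, 4), (1, 4), (2, 5)]

chosen = _choose_front_id_from_candidates([2, 3], offset_fronts, offsets_for_onsets)
print(chosen)  # 3 with this implementation (it maximizes the symmetric difference, not the intersection)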
[ "Returns", "a", "front", "ID", "which", "is", "the", "id", "of", "the", "offset", "front", "that", "contains", "the", "most", "overlap", "with", "offsets", "that", "correspond", "to", "the", "given", "onset", "front", "ID", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L299-L311
[ "def", "_choose_front_id_from_candidates", "(", "candidate_offset_front_ids", ",", "offset_fronts", ",", "offsets_corresponding_to_onsets", ")", ":", "noverlaps", "=", "[", "]", "# will contain tuples of the form (number_overlapping, offset_front_id)", "for", "offset_front_id", "in", "candidate_offset_front_ids", ":", "offset_front_f_idxs", ",", "offset_front_s_idxs", "=", "np", ".", "where", "(", "offset_fronts", "==", "offset_front_id", ")", "offset_front_idxs", "=", "[", "(", "f", ",", "i", ")", "for", "f", ",", "i", "in", "zip", "(", "offset_front_f_idxs", ",", "offset_front_s_idxs", ")", "]", "noverlap_this_id", "=", "len", "(", "set", "(", "offset_front_idxs", ")", ".", "symmetric_difference", "(", "set", "(", "offsets_corresponding_to_onsets", ")", ")", ")", "noverlaps", ".", "append", "(", "(", "noverlap_this_id", ",", "offset_front_id", ")", ")", "_overlapped", ",", "chosen_offset_front_id", "=", "max", "(", "noverlaps", ",", "key", "=", "lambda", "t", ":", "t", "[", "0", "]", ")", "return", "int", "(", "chosen_offset_front_id", ")" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_get_offset_front_id_after_onset_sample_idx
Returns the offset_front_id which corresponds to the offset front which occurs first entirely after the given onset sample_idx.
algorithms/asa.py
def _get_offset_front_id_after_onset_sample_idx(onset_sample_idx, offset_fronts):
    """
    Returns the offset_front_id which corresponds to the offset front which occurs
    first entirely after the given onset sample_idx.
    """
    # get all the offset_front_ids
    offset_front_ids = [i for i in np.unique(offset_fronts) if i != 0]

    best_id_so_far = -1
    closest_offset_sample_idx = sys.maxsize
    for offset_front_id in offset_front_ids:
        # get all that offset front's indexes
        offset_front_idxs = _get_front_idxs_from_id(offset_fronts, offset_front_id)

        # get the sample indexes
        offset_front_sample_idxs = [s for _f, s in offset_front_idxs]

        # if each sample index is greater than onset_sample_idx, keep this offset front if it is the best one so far
        min_sample_idx = min(offset_front_sample_idxs)
        if min_sample_idx > onset_sample_idx and min_sample_idx < closest_offset_sample_idx:
            closest_offset_sample_idx = min_sample_idx
            best_id_so_far = offset_front_id

    assert best_id_so_far > 1 or best_id_so_far == -1
    return best_id_so_far
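An invented example; it assumes numpy and sys are imported and that _get_front_idxs_from_id (defined earlier in the same module) is also in scope.

import sys
import numpy as np
# Assumes _get_offset_front_id_after_onset_sample_idx and _get_front_idxs_from_id (above) are in scope.

offset_fronts = np.zeros((2, 10), dtype=np.int64)
offset_fronts[0, 6] = 2   # front 2 lives entirely at sample 6
offset_fronts[1, 6] = 2
offset_fronts[0, 3] = 3   # front 3 starts at sample 3
offset_fronts[1, 4] = 3

print(_get_offset_front_id_after_onset_sample_idx(4, offset_fronts))  # 2: the earliest front lying entirely after sample 4
print(_get_offset_front_id_after_onset_sample_idx(8, offset_fronts))  # -1: nothing lies entirely after sample 8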
[ "Returns", "the", "offset_front_id", "which", "corresponds", "to", "the", "offset", "front", "which", "occurs", "first", "entirely", "after", "the", "given", "onset", "sample_idx", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L313-L337
[ "def", "_get_offset_front_id_after_onset_sample_idx", "(", "onset_sample_idx", ",", "offset_fronts", ")", ":", "# get all the offset_front_ids", "offset_front_ids", "=", "[", "i", "for", "i", "in", "np", ".", "unique", "(", "offset_fronts", ")", "if", "i", "!=", "0", "]", "best_id_so_far", "=", "-", "1", "closest_offset_sample_idx", "=", "sys", ".", "maxsize", "for", "offset_front_id", "in", "offset_front_ids", ":", "# get all that offset front's indexes", "offset_front_idxs", "=", "_get_front_idxs_from_id", "(", "offset_fronts", ",", "offset_front_id", ")", "# get the sample indexes", "offset_front_sample_idxs", "=", "[", "s", "for", "_f", ",", "s", "in", "offset_front_idxs", "]", "# if each sample index is greater than onset_sample_idx, keep this offset front if it is the best one so far", "min_sample_idx", "=", "min", "(", "offset_front_sample_idxs", ")", "if", "min_sample_idx", ">", "onset_sample_idx", "and", "min_sample_idx", "<", "closest_offset_sample_idx", ":", "closest_offset_sample_idx", "=", "min_sample_idx", "best_id_so_far", "=", "offset_front_id", "assert", "best_id_so_far", ">", "1", "or", "best_id_so_far", "==", "-", "1", "return", "best_id_so_far" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_get_offset_front_id_after_onset_front
Get the ID corresponding to the offset which occurs first after the given onset_front_id. By `first` I mean the front which contains the offset which is closest to the latest point in the onset front. By `after`, I mean that the offset must contain only offsets which occur after the latest onset in the onset front. If there is no appropriate offset front, the id returned is -1.
algorithms/asa.py
def _get_offset_front_id_after_onset_front(onset_front_id, onset_fronts, offset_fronts):
    """
    Get the ID corresponding to the offset which occurs first after the given onset_front_id.
    By `first` I mean the front which contains the offset which is closest to the latest point
    in the onset front. By `after`, I mean that the offset must contain only offsets which occur
    after the latest onset in the onset front.

    If there is no appropriate offset front, the id returned is -1.
    """
    # get the onset idxs for this front
    onset_idxs = _get_front_idxs_from_id(onset_fronts, onset_front_id)

    # get the sample idxs for this front
    onset_sample_idxs = [s for _f, s in onset_idxs]

    # get the latest onset in this onset front
    latest_onset_in_front = max(onset_sample_idxs)

    offset_front_id_after_this_onset_front = _get_offset_front_id_after_onset_sample_idx(latest_onset_in_front, offset_fronts)

    return int(offset_front_id_after_this_onset_front)
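A toy call assuming the helpers above (_get_front_idxs_from_id and _get_offset_front_id_after_onset_sample_idx) are in scope; the front layouts are invented.

import sys
import numpy as np
# Assumes the helpers above are in scope.

onset_fronts = np.zeros((2, 10), dtype=np.int64)
onset_fronts[0, 2] = 4    # onset front 4: latest onset at sample 3
onset_fronts[1, 3] = 4

offset_fronts = np.zeros((2, 10), dtype=np.int64)
offset_fronts[0, 7] = 2   # offset front 2 lies entirely after sample 3
offset_fronts[1, 8] = 2

print(_get_offset_front_id_after_onset_front(4, onset_fronts, offset_fronts))  # 2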
[ "Get", "the", "ID", "corresponding", "to", "the", "offset", "which", "occurs", "first", "after", "the", "given", "onset_front_id", ".", "By", "first", "I", "mean", "the", "front", "which", "contains", "the", "offset", "which", "is", "closest", "to", "the", "latest", "point", "in", "the", "onset", "front", ".", "By", "after", "I", "mean", "that", "the", "offset", "must", "contain", "only", "offsets", "which", "occur", "after", "the", "latest", "onset", "in", "the", "onset", "front", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L339-L359
[ "def", "_get_offset_front_id_after_onset_front", "(", "onset_front_id", ",", "onset_fronts", ",", "offset_fronts", ")", ":", "# get the onset idxs for this front", "onset_idxs", "=", "_get_front_idxs_from_id", "(", "onset_fronts", ",", "onset_front_id", ")", "# get the sample idxs for this front", "onset_sample_idxs", "=", "[", "s", "for", "_f", ",", "s", "in", "onset_idxs", "]", "# get the latest onset in this onset front", "latest_onset_in_front", "=", "max", "(", "onset_sample_idxs", ")", "offset_front_id_after_this_onset_front", "=", "_get_offset_front_id_after_onset_sample_idx", "(", "latest_onset_in_front", ",", "offset_fronts", ")", "return", "int", "(", "offset_front_id_after_this_onset_front", ")" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_match_offset_front_id_to_onset_front_id
Find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the given onset front. The offset front which contains the most of such offsets is the match. If there are no such offset fronts, return -1.
algorithms/asa.py
def _match_offset_front_id_to_onset_front_id(onset_front_id, onset_fronts, offset_fronts, onsets, offsets):
    """
    Find all offset fronts which are composed of at least one offset which corresponds to one of the
    onsets in the given onset front. The offset front which contains the most of such offsets is the match.
    If there are no such offset fronts, return -1.
    """
    # find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the onset front
    # the offset front which contains the most of such offsets is the match

    # get the onsets that make up front_id
    onset_idxs = _get_front_idxs_from_id(onset_fronts, onset_front_id)

    # get the offsets that match the onsets in front_id
    offset_idxs = [_lookup_offset_by_onset_idx(i, onsets, offsets) for i in onset_idxs]

    # get all offset_fronts which contain at least one of these offsets
    candidate_offset_front_ids = set([int(offset_fronts[f, i]) for f, i in offset_idxs])

    # It is possible that offset_idxs contains offset indexes that correspond to offsets that did not
    # get formed into a front - those will have a front ID of 0. Remove them.
    candidate_offset_front_ids = [id for id in candidate_offset_front_ids if id != 0]

    if candidate_offset_front_ids:
        chosen_offset_front_id = _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offset_idxs)
    else:
        chosen_offset_front_id = _get_offset_front_id_after_onset_front(onset_front_id, onset_fronts, offset_fronts)

    return chosen_offset_front_id
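A small end-to-end sketch with invented onset/offset maps and fronts; it assumes the helpers defined earlier in this file are all in scope.

import numpy as np
# Assumes the helpers above (from _lookup_offset_by_onset_idx onward) are in scope.

onsets = np.zeros((3, 12), dtype=np.int64)
offsets = np.zeros((3, 12), dtype=np.int64)
onset_fronts = np.zeros((3, 12), dtype=np.int64)
offset_fronts = np.zeros((3, 12), dtype=np.int64)

for f in range(3):
    onsets[f, 2] = 1        # one onset per channel at sample 2
    offsets[f, 6] = 1       # the matching offset per channel at sample 6
    onset_fronts[f, 2] = 2  # the onsets form onset front 2
    offset_fronts[f, 6] = 5 # the offsets form offset front 5

print(_match_offset_front_id_to_onset_front_id(2, onset_fronts, offset_fronts, onsets, offsets))  # 5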
[ "Find", "all", "offset", "fronts", "which", "are", "composed", "of", "at", "least", "one", "offset", "which", "corresponds", "to", "one", "of", "the", "onsets", "in", "the", "given", "onset", "front", ".", "The", "offset", "front", "which", "contains", "the", "most", "of", "such", "offsets", "is", "the", "match", ".", "If", "there", "are", "no", "such", "offset", "fronts", "return", "-", "1", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L361-L389
[ "def", "_match_offset_front_id_to_onset_front_id", "(", "onset_front_id", ",", "onset_fronts", ",", "offset_fronts", ",", "onsets", ",", "offsets", ")", ":", "# find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the onset front", "# the offset front which contains the most of such offsets is the match", "# get the onsets that make up front_id", "onset_idxs", "=", "_get_front_idxs_from_id", "(", "onset_fronts", ",", "onset_front_id", ")", "# get the offsets that match the onsets in front_id", "offset_idxs", "=", "[", "_lookup_offset_by_onset_idx", "(", "i", ",", "onsets", ",", "offsets", ")", "for", "i", "in", "onset_idxs", "]", "# get all offset_fronts which contain at least one of these offsets", "candidate_offset_front_ids", "=", "set", "(", "[", "int", "(", "offset_fronts", "[", "f", ",", "i", "]", ")", "for", "f", ",", "i", "in", "offset_idxs", "]", ")", "# It is possible that offset_idxs contains offset indexes that correspond to offsets that did not", "# get formed into a front - those will have a front ID of 0. Remove them.", "candidate_offset_front_ids", "=", "[", "id", "for", "id", "in", "candidate_offset_front_ids", "if", "id", "!=", "0", "]", "if", "candidate_offset_front_ids", ":", "chosen_offset_front_id", "=", "_choose_front_id_from_candidates", "(", "candidate_offset_front_ids", ",", "offset_fronts", ",", "offset_idxs", ")", "else", ":", "chosen_offset_front_id", "=", "_get_offset_front_id_after_onset_front", "(", "onset_front_id", ",", "onset_fronts", ",", "offset_fronts", ")", "return", "chosen_offset_front_id" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_get_consecutive_portions_of_front
Yields lists of the form [(f, s), (f, s)], one at a time from the given front (which is a list of the same form), such that each list yielded is consecutive in frequency.
algorithms/asa.py
def _get_consecutive_portions_of_front(front):
    """
    Yields lists of the form [(f, s), (f, s)], one at a time from the given front
    (which is a list of the same form), such that each list yielded is consecutive in frequency.
    """
    last_f = None
    ls = []
    for f, s in front:
        if last_f is not None and f != last_f + 1:
            yield ls
            ls = []
        ls.append((f, s))
        last_f = f
    yield ls
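A quick illustration with an invented front containing a gap in frequency; assumes the generator above is in scope.

# Assumes _get_consecutive_portions_of_front (above) is in scope.
front = [(0, 5), (1, 6), (3, 7), (4, 7)]  # a gap between channels 1 and 3
for portion in _get_consecutive_portions_of_front(front):
    print(portion)
# prints [(0, 5), (1, 6)] and then [(3, 7), (4, 7)]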
[ "Yields", "lists", "of", "the", "form", "[", "(", "f", "s", ")", "(", "f", "s", ")", "]", "one", "at", "a", "time", "from", "the", "given", "front", "(", "which", "is", "a", "list", "of", "the", "same", "form", ")", "such", "that", "each", "list", "yielded", "is", "consecutive", "in", "frequency", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L391-L404
[ "def", "_get_consecutive_portions_of_front", "(", "front", ")", ":", "last_f", "=", "None", "ls", "=", "[", "]", "for", "f", ",", "s", "in", "front", ":", "if", "last_f", "is", "not", "None", "and", "f", "!=", "last_f", "+", "1", ":", "yield", "ls", "ls", "=", "[", "]", "ls", ".", "append", "(", "(", "f", ",", "s", ")", ")", "last_f", "=", "f", "yield", "ls" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_get_consecutive_and_overlapping_fronts
Gets an onset_front and an offset_front such that they both occupy at least some of the same frequency channels, then returns the portion of each that overlaps with the other.
algorithms/asa.py
def _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id):
    """
    Gets an onset_front and an offset_front such that they both occupy at least some of the same
    frequency channels, then returns the portion of each that overlaps with the other.
    """
    # Get the onset front of interest
    onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id)

    # Get the offset front of interest
    offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id)

    # Keep trying consecutive portions of this onset front until we find a consecutive portion
    # that overlaps with part of the offset front
    consecutive_portions_of_onset_front = [c for c in _get_consecutive_portions_of_front(onset_front)]
    for consecutive_portion_of_onset_front in consecutive_portions_of_onset_front:
        # Only get the segment of this front that overlaps in frequencies with the onset front of interest
        onset_front_frequency_indexes = [f for f, _ in consecutive_portion_of_onset_front]
        overlapping_offset_front = [(f, s) for f, s in offset_front if f in onset_front_frequency_indexes]

        # Only get as much of this overlapping portion as is actually consecutive
        for consecutive_portion_of_offset_front in _get_consecutive_portions_of_front(overlapping_offset_front):
            if consecutive_portion_of_offset_front:
                # Just return the first one we get - if we get any it means we found a portion of overlap
                return consecutive_portion_of_onset_front, consecutive_portion_of_offset_front

    return [], []
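A toy example with partially overlapping fronts, assuming the helpers above are in scope; the IDs and positions are invented.

import numpy as np
# Assumes _get_front_idxs_from_id, _get_consecutive_portions_of_front and
# _get_consecutive_and_overlapping_fronts (above) are in scope.

onset_fronts = np.zeros((4, 10), dtype=np.int64)
offset_fronts = np.zeros((4, 10), dtype=np.int64)

onset_fronts[0, 1] = 2    # onset front 2 spans channels 0-2
onset_fronts[1, 1] = 2
onset_fronts[2, 2] = 2

offset_fronts[1, 6] = 7   # offset front 7 spans channels 1-3
offset_fronts[2, 6] = 7
offset_fronts[3, 7] = 7

on_part, off_part = _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, 2, 7)
print(on_part)   # the whole consecutive onset front: channels 0-2
print(off_part)  # the overlapping, consecutive piece of the offset front: channels 1-2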
[ "Gets", "an", "onset_front", "and", "an", "offset_front", "such", "that", "they", "both", "occupy", "at", "least", "some", "of", "the", "same", "frequency", "channels", "then", "returns", "the", "portion", "of", "each", "that", "overlaps", "with", "the", "other", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L406-L430
[ "def", "_get_consecutive_and_overlapping_fronts", "(", "onset_fronts", ",", "offset_fronts", ",", "onset_front_id", ",", "offset_front_id", ")", ":", "# Get the onset front of interest", "onset_front", "=", "_get_front_idxs_from_id", "(", "onset_fronts", ",", "onset_front_id", ")", "# Get the offset front of interest", "offset_front", "=", "_get_front_idxs_from_id", "(", "offset_fronts", ",", "offset_front_id", ")", "# Keep trying consecutive portions of this onset front until we find a consecutive portion", "# that overlaps with part of the offset front", "consecutive_portions_of_onset_front", "=", "[", "c", "for", "c", "in", "_get_consecutive_portions_of_front", "(", "onset_front", ")", "]", "for", "consecutive_portion_of_onset_front", "in", "consecutive_portions_of_onset_front", ":", "# Only get the segment of this front that overlaps in frequencies with the onset front of interest", "onset_front_frequency_indexes", "=", "[", "f", "for", "f", ",", "_", "in", "consecutive_portion_of_onset_front", "]", "overlapping_offset_front", "=", "[", "(", "f", ",", "s", ")", "for", "f", ",", "s", "in", "offset_front", "if", "f", "in", "onset_front_frequency_indexes", "]", "# Only get as much of this overlapping portion as is actually consecutive", "for", "consecutive_portion_of_offset_front", "in", "_get_consecutive_portions_of_front", "(", "overlapping_offset_front", ")", ":", "if", "consecutive_portion_of_offset_front", ":", "# Just return the first one we get - if we get any it means we found a portion of overlap", "return", "consecutive_portion_of_onset_front", ",", "consecutive_portion_of_offset_front", "return", "[", "]", ",", "[", "]" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_update_segmentation_mask
Returns an updated segmentation mask such that the input `segmentation_mask` has been updated by segmenting between `onset_front_id` and `offset_front_id`, as found in `onset_fronts` and `offset_fronts`, respectively. This function also returns the onset_fronts and offset_fronts matrices, updated so that any fronts that are of less than 3 channels wide are removed. This function also returns a boolean value indicating whether the onset channel went to completion.

Specifically, segments by doing the following:

- Going across frequencies in the onset_front,
- add the segment mask ID (the onset front ID) to all samples between the onset_front and the offset_front, if the offset_front is in that frequency.

Possible scenarios:

Fronts line up completely:
::

    |   |        S S S
    |   |   =>   S S S
    |   |        S S S
    |   |        S S S

Onset front starts before offset front:
::

    |
    |
    |   |        S S S
    |   |   =>   S S S
    |   |        S S S

Onset front ends after offset front:
::

    |   |        S S S
    |   |   =>   S S S
    |   |        S S S
    |
    |

Onset front starts before and ends after offset front:
::

    |
    |
    |   |   =>   S S S
    |   |        S S S
    |
    |

The above three options in reverse:
::

    |        |S S|      |
    |S S|    |S S|      |S S|
    |S S|    |S S|      |S S|
    |S S|    |          |

There is one last scenario:
::

    |   |
    \   /
     \ /
     / \
    |   |

Where the offset and onset fronts cross one another. If this happens, we simply reverse the indices and accept:
::

    |sss|
    \sss/
     \s/
     /s\
    |sss|

The other option would be to destroy the offset front from the crossover point on, and then search for a new offset front for the rest of the onset front.
algorithms/asa.py
def _update_segmentation_mask(segmentation_mask, onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap):
    """
    Returns an updated segmentation mask such that the input `segmentation_mask` has been updated by
    segmenting between `onset_front_id` and `offset_front_id`, as found in `onset_fronts` and
    `offset_fronts`, respectively.

    This function also returns the onset_fronts and offset_fronts matrices, updated so that any fronts
    that are of less than 3 channels wide are removed.

    This function also returns a boolean value indicating whether the onset channel went to completion.

    Specifically, segments by doing the following:

    - Going across frequencies in the onset_front,
    - add the segment mask ID (the onset front ID) to all samples between the onset_front and the
      offset_front, if the offset_front is in that frequency.

    Possible scenarios:

    Fronts line up completely:
    ::

        |   |        S S S
        |   |   =>   S S S
        |   |        S S S
        |   |        S S S

    Onset front starts before offset front:
    ::

        |
        |
        |   |        S S S
        |   |   =>   S S S
        |   |        S S S

    Onset front ends after offset front:
    ::

        |   |        S S S
        |   |   =>   S S S
        |   |        S S S
        |
        |

    Onset front starts before and ends after offset front:
    ::

        |
        |
        |   |   =>   S S S
        |   |        S S S
        |
        |

    The above three options in reverse:
    ::

        |        |S S|      |
        |S S|    |S S|      |S S|
        |S S|    |S S|      |S S|
        |S S|    |          |

    There is one last scenario:
    ::

        |   |
        \   /
         \ /
         / \
        |   |

    Where the offset and onset fronts cross one another. If this happens, we simply reverse the
    indices and accept:
    ::

        |sss|
        \sss/
         \s/
         /s\
        |sss|

    The other option would be to destroy the offset front from the crossover point on, and then
    search for a new offset front for the rest of the onset front.
    """
    # Get the portions of the onset and offset fronts that overlap and are consecutive
    onset_front_overlap, offset_front_overlap = _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap)
    onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id)
    offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id_most_overlap)
    msg = "Onset front {} and offset front {} result in consecutive overlapping portions of (on) {} and (off) {}, one of which is empty".format(
        onset_front, offset_front, onset_front_overlap, offset_front_overlap
    )
    assert onset_front_overlap, msg
    assert offset_front_overlap, msg
    onset_front = onset_front_overlap
    offset_front = offset_front_overlap

    # Figure out which frequencies will go in the segment
    flow_on, _slow_on = onset_front[0]
    fhigh_on, _shigh_on = onset_front[-1]
    flow_off, _slow_off = offset_front[0]
    fhigh_off, _shigh_off = offset_front[-1]
    flow = max(flow_on, flow_off)
    fhigh = min(fhigh_on, fhigh_off)

    # Update all the masks with the segment
    for fidx, _freqchan in enumerate(segmentation_mask[flow:fhigh + 1, :], start=flow):
        assert fidx >= flow, "Frequency index is {}, but we should have started at {}".format(fidx, flow)
        assert (fidx - flow) < len(onset_front), "Frequency index {} minus starting frequency {} is too large for nfrequencies {} in onset front {}".format(
            fidx, flow, len(onset_front), onset_front
        )
        assert (fidx - flow) < len(offset_front), "Frequency index {} minus starting frequency {} is too large for nfrequencies {} in offset front {}".format(
            fidx, flow, len(offset_front), offset_front
        )
        _, beg = onset_front[fidx - flow]
        _, end = offset_front[fidx - flow]
        if beg > end:
            end, beg = beg, end
        assert end >= beg
        segmentation_mask[fidx, beg:end + 1] = onset_front_id
        onset_fronts[fidx, (beg + 1):(end + 1)] = 0
        offset_fronts[fidx, (beg + 1):(end + 1)] = 0
        nfreqs_used_in_onset_front = (fidx - flow) + 1

    # Update the other masks to delete fronts that have been used
    indexes = np.arange(flow, fhigh + 1, 1, dtype=np.int64)
    onset_front_sample_idxs_across_freqs = np.array([s for _, s in onset_front])
    onset_front_sample_idxs_across_freqs_up_to_break = onset_front_sample_idxs_across_freqs[:nfreqs_used_in_onset_front]
    offset_front_sample_idxs_across_freqs = np.array([s for _, s in offset_front])
    offset_front_sample_idxs_across_freqs_up_to_break = offset_front_sample_idxs_across_freqs[:nfreqs_used_in_onset_front]

    ## Remove the offset front from where we started to where we ended
    offset_fronts[indexes[:nfreqs_used_in_onset_front], offset_front_sample_idxs_across_freqs_up_to_break] = 0

    ## Remove the onset front from where we started to where we ended
    onset_fronts[indexes[:nfreqs_used_in_onset_front], onset_front_sample_idxs_across_freqs_up_to_break] = 0

    # Determine if we matched the entire onset front by checking if there is any more of this onset front in onset_fronts
    whole_onset_front_matched = onset_front_id not in np.unique(onset_fronts)

    return whole_onset_front_matched
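A sketch that exercises the happy path (fronts overlap over the same channels, onsets precede offsets) with invented data; it assumes the helpers defined earlier are in scope.

import numpy as np
# Assumes _get_front_idxs_from_id, _get_consecutive_portions_of_front,
# _get_consecutive_and_overlapping_fronts and _update_segmentation_mask (above) are in scope.

nchan, nsamp = 3, 10
segmentation_mask = np.zeros((nchan, nsamp), dtype=np.int64)
onset_fronts = np.zeros((nchan, nsamp), dtype=np.int64)
offset_fronts = np.zeros((nchan, nsamp), dtype=np.int64)

# Onset front 2 and offset front 7, both spanning channels 0-2.
for chan, s_on, s_off in [(0, 1, 6), (1, 1, 6), (2, 2, 7)]:
    onset_fronts[chan, s_on] = 2
    offset_fronts[chan, s_off] = 7

done = _update_segmentation_mask(segmentation_mask, onset_fronts, offset_fronts, 2, 7)
print(done)               # True: the whole onset front was consumed
print(segmentation_mask)  # samples between each onset and its offset are now labeled 2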
[ "Returns", "an", "updated", "segmentation", "mask", "such", "that", "the", "input", "segmentation_mask", "has", "been", "updated", "by", "segmenting", "between", "onset_front_id", "and", "offset_front_id", "as", "found", "in", "onset_fronts", "and", "offset_fronts", "respectively", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L433-L575
[ "def", "_update_segmentation_mask", "(", "segmentation_mask", ",", "onset_fronts", ",", "offset_fronts", ",", "onset_front_id", ",", "offset_front_id_most_overlap", ")", ":", "# Get the portions of the onset and offset fronts that overlap and are consecutive", "onset_front_overlap", ",", "offset_front_overlap", "=", "_get_consecutive_and_overlapping_fronts", "(", "onset_fronts", ",", "offset_fronts", ",", "onset_front_id", ",", "offset_front_id_most_overlap", ")", "onset_front", "=", "_get_front_idxs_from_id", "(", "onset_fronts", ",", "onset_front_id", ")", "offset_front", "=", "_get_front_idxs_from_id", "(", "offset_fronts", ",", "offset_front_id_most_overlap", ")", "msg", "=", "\"Onset front {} and offset front {} result in consecutive overlapping portions of (on) {} and (off) {}, one of which is empty\"", ".", "format", "(", "onset_front", ",", "offset_front", ",", "onset_front_overlap", ",", "offset_front_overlap", ")", "assert", "onset_front_overlap", ",", "msg", "assert", "offset_front_overlap", ",", "msg", "onset_front", "=", "onset_front_overlap", "offset_front", "=", "offset_front_overlap", "# Figure out which frequencies will go in the segment", "flow_on", ",", "_slow_on", "=", "onset_front", "[", "0", "]", "fhigh_on", ",", "_shigh_on", "=", "onset_front", "[", "-", "1", "]", "flow_off", ",", "_slow_off", "=", "offset_front", "[", "0", "]", "fhigh_off", ",", "_shigh_off", "=", "offset_front", "[", "-", "1", "]", "flow", "=", "max", "(", "flow_on", ",", "flow_off", ")", "fhigh", "=", "min", "(", "fhigh_on", ",", "fhigh_off", ")", "# Update all the masks with the segment", "for", "fidx", ",", "_freqchan", "in", "enumerate", "(", "segmentation_mask", "[", "flow", ":", "fhigh", "+", "1", ",", ":", "]", ",", "start", "=", "flow", ")", ":", "assert", "fidx", ">=", "flow", ",", "\"Frequency index is {}, but we should have started at {}\"", ".", "format", "(", "fidx", ",", "flow", ")", "assert", "(", "fidx", "-", "flow", ")", "<", "len", "(", "onset_front", ")", ",", "\"Frequency index {} minus starting frequency {} is too large for nfrequencies {} in onset front {}\"", ".", "format", "(", "fidx", ",", "flow", ",", "len", "(", "onset_front", ")", ",", "onset_front", ")", "assert", "(", "fidx", "-", "flow", ")", "<", "len", "(", "offset_front", ")", ",", "\"Frequency index {} minus starting frequency {} is too large for nfrequencies {} in offset front {}\"", ".", "format", "(", "fidx", ",", "flow", ",", "len", "(", "offset_front", ")", ",", "offset_front", ")", "_", ",", "beg", "=", "onset_front", "[", "fidx", "-", "flow", "]", "_", ",", "end", "=", "offset_front", "[", "fidx", "-", "flow", "]", "if", "beg", ">", "end", ":", "end", ",", "beg", "=", "beg", ",", "end", "assert", "end", ">=", "beg", "segmentation_mask", "[", "fidx", ",", "beg", ":", "end", "+", "1", "]", "=", "onset_front_id", "onset_fronts", "[", "fidx", ",", "(", "beg", "+", "1", ")", ":", "(", "end", "+", "1", ")", "]", "=", "0", "offset_fronts", "[", "fidx", ",", "(", "beg", "+", "1", ")", ":", "(", "end", "+", "1", ")", "]", "=", "0", "nfreqs_used_in_onset_front", "=", "(", "fidx", "-", "flow", ")", "+", "1", "# Update the other masks to delete fronts that have been used", "indexes", "=", "np", ".", "arange", "(", "flow", ",", "fhigh", "+", "1", ",", "1", ",", "dtype", "=", "np", ".", "int64", ")", "onset_front_sample_idxs_across_freqs", "=", "np", ".", "array", "(", "[", "s", "for", "_", ",", "s", "in", "onset_front", "]", ")", "onset_front_sample_idxs_across_freqs_up_to_break", "=", 
"onset_front_sample_idxs_across_freqs", "[", ":", "nfreqs_used_in_onset_front", "]", "offset_front_sample_idxs_across_freqs", "=", "np", ".", "array", "(", "[", "s", "for", "_", ",", "s", "in", "offset_front", "]", ")", "offset_front_sample_idxs_across_freqs_up_to_break", "=", "offset_front_sample_idxs_across_freqs", "[", ":", "nfreqs_used_in_onset_front", "]", "## Remove the offset front from where we started to where we ended", "offset_fronts", "[", "indexes", "[", ":", "nfreqs_used_in_onset_front", "]", ",", "offset_front_sample_idxs_across_freqs_up_to_break", "]", "=", "0", "## Remove the onset front from where we started to where we ended", "onset_fronts", "[", "indexes", "[", ":", "nfreqs_used_in_onset_front", "]", ",", "onset_front_sample_idxs_across_freqs_up_to_break", "]", "=", "0", "# Determine if we matched the entire onset front by checking if there is any more of this onset front in onset_fronts", "whole_onset_front_matched", "=", "onset_front_id", "not", "in", "np", ".", "unique", "(", "onset_fronts", ")", "return", "whole_onset_front_matched" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
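The token stream above belongs to the segmentation-mask update helper; stripped of its bookkeeping and assertions, the per-frequency fill it performs can be sketched in a few lines (the front coordinates below are invented for illustration)::

    import numpy as np

    segmentation_mask = np.zeros((3, 10), dtype=np.int64)
    onset_front = [(0, 2), (1, 3), (2, 3)]    # (frequency, sample) of the onset front
    offset_front = [(0, 6), (1, 7), (2, 5)]   # matching offset front, same frequencies
    onset_front_id = 4

    # For each frequency row, fill from the onset sample to the offset sample (inclusive)
    for (f, beg), (_f, end) in zip(onset_front, offset_front):
        if beg > end:
            beg, end = end, beg
        segmentation_mask[f, beg:end + 1] = onset_front_id

    print(segmentation_mask)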
test
_front_id_from_idx
Returns the front ID found in `front` at the given `index`. :param front: An onset or offset front array of shape [nfrequencies, nsamples] :param index: A tuple of the form (frequency index, sample index) :returns: The ID of the front at `front[index]`, or -1 if there is no front at that index.
algorithms/asa.py
def _front_id_from_idx(front, index): """ Returns the front ID found in `front` at the given `index`. :param front: An onset or offset front array of shape [nfrequencies, nsamples] :index: A tuple of the form (frequency index, sample index) :returns: The ID of the front or -1 if not found in `front` and the item at `onsets_or_offsets[index]` is not a 1. """ fidx, sidx = index id = front[fidx, sidx] if id == 0: return -1 else: return id
def _front_id_from_idx(front, index): """ Returns the front ID found in `front` at the given `index`. :param front: An onset or offset front array of shape [nfrequencies, nsamples] :index: A tuple of the form (frequency index, sample index) :returns: The ID of the front or -1 if not found in `front` and the item at `onsets_or_offsets[index]` is not a 1. """ fidx, sidx = index id = front[fidx, sidx] if id == 0: return -1 else: return id
[ "Returns", "the", "front", "ID", "found", "in", "front", "at", "the", "given", "index", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L577-L591
[ "def", "_front_id_from_idx", "(", "front", ",", "index", ")", ":", "fidx", ",", "sidx", "=", "index", "id", "=", "front", "[", "fidx", ",", "sidx", "]", "if", "id", "==", "0", ":", "return", "-", "1", "else", ":", "return", "id" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_get_front_ids_one_at_a_time
Yields one onset front ID at a time until they are gone. All the onset fronts from a frequency channel are yielded, then all of the next channel's, etc., though one at a time.
algorithms/asa.py
def _get_front_ids_one_at_a_time(onset_fronts): """ Yields one onset front ID at a time until they are gone. All the onset fronts from a frequency channel are yielded, then all of the next channel's, etc., though one at a time. """ yielded_so_far = set() for row in onset_fronts: for id in row: if id != 0 and id not in yielded_so_far: yield id yielded_so_far.add(id)
def _get_front_ids_one_at_a_time(onset_fronts): """ Yields one onset front ID at a time until they are gone. All the onset fronts from a frequency channel are yielded, then all of the next channel's, etc., though one at a time. """ yielded_so_far = set() for row in onset_fronts: for id in row: if id != 0 and id not in yielded_so_far: yield id yielded_so_far.add(id)
[ "Yields", "one", "onset", "front", "ID", "at", "a", "time", "until", "they", "are", "gone", ".", "All", "the", "onset", "fronts", "from", "a", "frequency", "channel", "are", "yielded", "then", "all", "of", "the", "next", "channel", "s", "etc", ".", "though", "one", "at", "a", "time", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L593-L603
[ "def", "_get_front_ids_one_at_a_time", "(", "onset_fronts", ")", ":", "yielded_so_far", "=", "set", "(", ")", "for", "row", "in", "onset_fronts", ":", "for", "id", "in", "row", ":", "if", "id", "!=", "0", "and", "id", "not", "in", "yielded_so_far", ":", "yield", "id", "yielded_so_far", ".", "add", "(", "id", ")" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
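A minimal, self-contained illustration of the ID-iteration order described above, on an invented toy front matrix::

    import numpy as np

    # Toy onset-front matrix: rows are frequency channels, values are front IDs (0 = no front).
    onset_fronts = np.array([
        [0, 2, 0, 0, 3],
        [0, 2, 0, 3, 0],
        [4, 0, 2, 0, 3],
    ])

    def front_ids_one_at_a_time(fronts):
        """Yield each non-zero front ID once, in row-major order of first appearance."""
        seen = set()
        for row in fronts:
            for front_id in row:
                if front_id != 0 and front_id not in seen:
                    seen.add(front_id)
                    yield front_id

    print(list(front_ids_one_at_a_time(onset_fronts)))  # [2, 3, 4]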
test
_get_corresponding_offsets
Gets the offsets that occur as close as possible to the onsets in the given onset-front.
algorithms/asa.py
def _get_corresponding_offsets(onset_fronts, onset_front_id, onsets, offsets): """ Gets the offsets that occur as close as possible to the onsets in the given onset-front. """ corresponding_offsets = [] for index in _get_front_idxs_from_id(onset_fronts, onset_front_id): offset_fidx, offset_sidx = _lookup_offset_by_onset_idx(index, onsets, offsets) corresponding_offsets.append((offset_fidx, offset_sidx)) return corresponding_offsets
def _get_corresponding_offsets(onset_fronts, onset_front_id, onsets, offsets): """ Gets the offsets that occur as close as possible to the onsets in the given onset-front. """ corresponding_offsets = [] for index in _get_front_idxs_from_id(onset_fronts, onset_front_id): offset_fidx, offset_sidx = _lookup_offset_by_onset_idx(index, onsets, offsets) corresponding_offsets.append((offset_fidx, offset_sidx)) return corresponding_offsets
[ "Gets", "the", "offsets", "that", "occur", "as", "close", "as", "possible", "to", "the", "onsets", "in", "the", "given", "onset", "-", "front", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L605-L613
[ "def", "_get_corresponding_offsets", "(", "onset_fronts", ",", "onset_front_id", ",", "onsets", ",", "offsets", ")", ":", "corresponding_offsets", "=", "[", "]", "for", "index", "in", "_get_front_idxs_from_id", "(", "onset_fronts", ",", "onset_front_id", ")", ":", "offset_fidx", ",", "offset_sidx", "=", "_lookup_offset_by_onset_idx", "(", "index", ",", "onsets", ",", "offsets", ")", "corresponding_offsets", ".", "append", "(", "(", "offset_fidx", ",", "offset_sidx", ")", ")", "return", "corresponding_offsets" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_get_all_offset_fronts_from_offsets
Returns all the offset fronts that are composed of at least one of the given offset indexes. Also returns a dict of the form {offset_front_id: ntimes saw}
algorithms/asa.py
def _get_all_offset_fronts_from_offsets(offset_fronts, corresponding_offsets): """ Returns all the offset fronts that are composed of at least one of the given offset indexes. Also returns a dict of the form {offset_front_id: ntimes saw} """ all_offset_fronts_of_interest = [] ids_ntimes_seen = {} for offset_index in corresponding_offsets: offset_id = _front_id_from_idx(offset_fronts, offset_index) if offset_id not in ids_ntimes_seen: offset_front_idxs = _get_front_idxs_from_id(offset_fronts, offset_id) all_offset_fronts_of_interest.append(offset_front_idxs) ids_ntimes_seen[offset_id] = 1 else: ids_ntimes_seen[offset_id] += 1 return all_offset_fronts_of_interest, ids_ntimes_seen
def _get_all_offset_fronts_from_offsets(offset_fronts, corresponding_offsets): """ Returns all the offset fronts that are composed of at least one of the given offset indexes. Also returns a dict of the form {offset_front_id: ntimes saw} """ all_offset_fronts_of_interest = [] ids_ntimes_seen = {} for offset_index in corresponding_offsets: offset_id = _front_id_from_idx(offset_fronts, offset_index) if offset_id not in ids_ntimes_seen: offset_front_idxs = _get_front_idxs_from_id(offset_fronts, offset_id) all_offset_fronts_of_interest.append(offset_front_idxs) ids_ntimes_seen[offset_id] = 1 else: ids_ntimes_seen[offset_id] += 1 return all_offset_fronts_of_interest, ids_ntimes_seen
[ "Returns", "all", "the", "offset", "fronts", "that", "are", "composed", "of", "at", "least", "one", "of", "the", "given", "offset", "indexes", ".", "Also", "returns", "a", "dict", "of", "the", "form", "{", "offset_front_id", ":", "ntimes", "saw", "}" ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L615-L630
[ "def", "_get_all_offset_fronts_from_offsets", "(", "offset_fronts", ",", "corresponding_offsets", ")", ":", "all_offset_fronts_of_interest", "=", "[", "]", "ids_ntimes_seen", "=", "{", "}", "for", "offset_index", "in", "corresponding_offsets", ":", "offset_id", "=", "_front_id_from_idx", "(", "offset_fronts", ",", "offset_index", ")", "if", "offset_id", "not", "in", "ids_ntimes_seen", ":", "offset_front_idxs", "=", "_get_front_idxs_from_id", "(", "offset_fronts", ",", "offset_id", ")", "all_offset_fronts_of_interest", ".", "append", "(", "offset_front_idxs", ")", "ids_ntimes_seen", "[", "offset_id", "]", "=", "1", "else", ":", "ids_ntimes_seen", "[", "offset_id", "]", "+=", "1", "return", "all_offset_fronts_of_interest", ",", "ids_ntimes_seen" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_remove_overlaps
Removes all points in the fronts that overlap with the segmentation mask.
algorithms/asa.py
def _remove_overlaps(segmentation_mask, fronts): """ Removes all points in the fronts that overlap with the segmentation mask. """ fidxs, sidxs = np.where((segmentation_mask != fronts) & (segmentation_mask != 0) & (fronts != 0)) fronts[fidxs, sidxs] = 0
def _remove_overlaps(segmentation_mask, fronts): """ Removes all points in the fronts that overlap with the segmentation mask. """ fidxs, sidxs = np.where((segmentation_mask != fronts) & (segmentation_mask != 0) & (fronts != 0)) fronts[fidxs, sidxs] = 0
[ "Removes", "all", "points", "in", "the", "fronts", "that", "overlap", "with", "the", "segmentation", "mask", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L632-L637
[ "def", "_remove_overlaps", "(", "segmentation_mask", ",", "fronts", ")", ":", "fidxs", ",", "sidxs", "=", "np", ".", "where", "(", "(", "segmentation_mask", "!=", "fronts", ")", "&", "(", "segmentation_mask", "!=", "0", ")", "&", "(", "fronts", "!=", "0", ")", ")", "fronts", "[", "fidxs", ",", "sidxs", "]", "=", "0" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
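A small self-contained illustration of the overlap-removal rule above (toy arrays invented for the example): front cells that disagree with a non-zero cell of the segmentation mask are zeroed in place::

    import numpy as np

    segmentation_mask = np.array([
        [0, 7, 7, 7, 0],
        [0, 0, 7, 7, 7],
    ])
    fronts = np.array([
        [0, 2, 0, 0, 3],   # the 2 overlaps segment 7 and will be removed
        [2, 0, 0, 0, 3],   # the 3 overlaps segment 7 and will be removed
    ])

    fidxs, sidxs = np.where((segmentation_mask != fronts) & (segmentation_mask != 0) & (fronts != 0))
    fronts[fidxs, sidxs] = 0
    print(fronts)
    # [[0 0 0 0 3]
    #  [2 0 0 0 0]]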
test
_match_fronts
Returns a segmentation mask, which looks like this: frequency 1: 0 0 4 4 4 4 4 0 0 5 5 5 frequency 2: 0 4 4 4 4 4 0 0 0 0 5 5 frequency 3: 0 4 4 4 4 4 4 4 5 5 5 5 That is, each item in the array is either a 0 (not part of a segment) or a positive integer which indicates which segment the sample in that frequency band belongs to.
algorithms/asa.py
def _match_fronts(onset_fronts, offset_fronts, onsets, offsets, debug=False): """ Returns a segmentation mask, which looks like this: frequency 1: 0 0 4 4 4 4 4 0 0 5 5 5 frequency 2: 0 4 4 4 4 4 0 0 0 0 5 5 frequency 3: 0 4 4 4 4 4 4 4 5 5 5 5 That is, each item in the array is either a 0 (not part of a segment) or a positive integer which indicates which segment the sample in that frequency band belongs to. """ def printd(*args, **kwargs): if debug: print(*args, **kwargs) # Make copies of everything, so we can do whatever we want with them onset_fronts = np.copy(onset_fronts) offset_fronts = np.copy(offset_fronts) onsets = np.copy(onsets) offsets = np.copy(offsets) # This is what we will return segmentation_mask = np.zeros_like(onset_fronts) # - Take the first frequency in the onset_fronts matrix # [ s s s s s s s s s] <-- This frequency # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # - Follow it along in time like this: # first sample last sample # v --> v # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # until you get to the first onset front in that frequency # Here it is! # v # [ . O . . . . . . .] # [ . . O . . . . . .] # [ . . O . . . . . .] # [ . O . . . . . . .] # [ O . . . . . . . .] resulting_onset_fronts = np.copy(onset_fronts) printd(" -> Dealing with onset fronts...") for onset_front_id in _get_front_ids_one_at_a_time(onset_fronts): printd(" -> Dealing with onset front", int(onset_front_id)) front_is_complete = False while not front_is_complete: # - Now, starting at this onset front in each frequency, find that onset's corresponding offset # [ . O . . . . F . .] # [ . . O . . . F . .] # [ . . O . F . . . .] # [ . O . F . . . . .] # [ O F . . . . . . .] corresponding_offsets = _get_corresponding_offsets(resulting_onset_fronts, onset_front_id, onsets, offsets) # It is possible that onset_front_id has been removed from resulting_onset_fronts, # if so, skip it and move on to the next onset front (we are iterating over the original # to keep the iterator valid) if not corresponding_offsets: break # - Get all the offset fronts that are composed of at least one of these offset times # [ . O . . . . 1 . .] # [ . . O 3 . . 1 . .] # [ . . O 3 F . 1 . .] # [ . O . 3 . . . 1 .] # [ O F 3 . . . . . .] _all_offset_fronts_of_interest, ids_ntimes_seen = _get_all_offset_fronts_from_offsets(offset_fronts, corresponding_offsets) # - Check how many of these offset times each of the offset fronts are composed of: # [ . O . . . . Y . .] # [ . . O 3 . . Y . .] # [ . . O 3 F . 1 . .] # [ . O . X . . . 1 .] # [ O F 3 . . . . . .] # In this example, offset front 1 is made up of 4 offset times, 2 of which (the Y's) are offset times # that correspond to onsets in the onset front we are currently dealing with. Meanwhile, offset # front 3 is made up of 4 offset times, only one of which (the X) is one of the offsets that corresponds # to the onset front. # - Choose the offset front which matches the most offset time candidates. In this example, offset front 1 # is chosen because it has 2 of these offset times. 
# If there is a tie, we choose the ID with the lower number ntimes_seen_sorted = sorted([(k, v) for k, v in ids_ntimes_seen.items()], key=lambda tup: (-1 * tup[1], tup[0])) assert len(ntimes_seen_sorted) > 0, "We somehow got an empty dict of offset front IDs" # Only use the special final front (the -1, catch-all front composed of final samples in each frequency) if necessary offset_front_id, _ntimes_seen = ntimes_seen_sorted[0] if offset_front_id == -1 and len(ntimes_seen_sorted) > 1: offset_front_id, _ntimes_seen = ntimes_seen_sorted[1] offset_front_id_most_overlap = offset_front_id # - Finally, update the segmentation mask to follow the offset # front from where it first overlaps in frequency with the onset front to where it ends or to where # the onset front ends, whichever happens first. # [ . S S S S S S . .] # [ . . S S S S S . .] # [ . . S S S S S . .] # [ . S S S S S S S .] # [ O F 3 . . . . . .] <-- This frequency has not yet been matched with an offset front front_is_complete = _update_segmentation_mask(segmentation_mask, resulting_onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap) # Remove any onsets that are covered by the new segmentation mask _remove_overlaps(segmentation_mask, resulting_onset_fronts) # Remove any offsets that are covered by the new segmentaion mask _remove_overlaps(segmentation_mask, offset_fronts) # - Repeat this algorithm, restarting in the first frequency channel that did not match (the last frequency in # the above example). Do this until you have finished with this onset front. # - Repeat for each onset front in the rest of this frequency # - Repeat for each frequency return segmentation_mask
def _match_fronts(onset_fronts, offset_fronts, onsets, offsets, debug=False): """ Returns a segmentation mask, which looks like this: frequency 1: 0 0 4 4 4 4 4 0 0 5 5 5 frequency 2: 0 4 4 4 4 4 0 0 0 0 5 5 frequency 3: 0 4 4 4 4 4 4 4 5 5 5 5 That is, each item in the array is either a 0 (not part of a segment) or a positive integer which indicates which segment the sample in that frequency band belongs to. """ def printd(*args, **kwargs): if debug: print(*args, **kwargs) # Make copies of everything, so we can do whatever we want with them onset_fronts = np.copy(onset_fronts) offset_fronts = np.copy(offset_fronts) onsets = np.copy(onsets) offsets = np.copy(offsets) # This is what we will return segmentation_mask = np.zeros_like(onset_fronts) # - Take the first frequency in the onset_fronts matrix # [ s s s s s s s s s] <-- This frequency # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # - Follow it along in time like this: # first sample last sample # v --> v # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # [ s s s s s s s s s] # until you get to the first onset front in that frequency # Here it is! # v # [ . O . . . . . . .] # [ . . O . . . . . .] # [ . . O . . . . . .] # [ . O . . . . . . .] # [ O . . . . . . . .] resulting_onset_fronts = np.copy(onset_fronts) printd(" -> Dealing with onset fronts...") for onset_front_id in _get_front_ids_one_at_a_time(onset_fronts): printd(" -> Dealing with onset front", int(onset_front_id)) front_is_complete = False while not front_is_complete: # - Now, starting at this onset front in each frequency, find that onset's corresponding offset # [ . O . . . . F . .] # [ . . O . . . F . .] # [ . . O . F . . . .] # [ . O . F . . . . .] # [ O F . . . . . . .] corresponding_offsets = _get_corresponding_offsets(resulting_onset_fronts, onset_front_id, onsets, offsets) # It is possible that onset_front_id has been removed from resulting_onset_fronts, # if so, skip it and move on to the next onset front (we are iterating over the original # to keep the iterator valid) if not corresponding_offsets: break # - Get all the offset fronts that are composed of at least one of these offset times # [ . O . . . . 1 . .] # [ . . O 3 . . 1 . .] # [ . . O 3 F . 1 . .] # [ . O . 3 . . . 1 .] # [ O F 3 . . . . . .] _all_offset_fronts_of_interest, ids_ntimes_seen = _get_all_offset_fronts_from_offsets(offset_fronts, corresponding_offsets) # - Check how many of these offset times each of the offset fronts are composed of: # [ . O . . . . Y . .] # [ . . O 3 . . Y . .] # [ . . O 3 F . 1 . .] # [ . O . X . . . 1 .] # [ O F 3 . . . . . .] # In this example, offset front 1 is made up of 4 offset times, 2 of which (the Y's) are offset times # that correspond to onsets in the onset front we are currently dealing with. Meanwhile, offset # front 3 is made up of 4 offset times, only one of which (the X) is one of the offsets that corresponds # to the onset front. # - Choose the offset front which matches the most offset time candidates. In this example, offset front 1 # is chosen because it has 2 of these offset times. 
# If there is a tie, we choose the ID with the lower number ntimes_seen_sorted = sorted([(k, v) for k, v in ids_ntimes_seen.items()], key=lambda tup: (-1 * tup[1], tup[0])) assert len(ntimes_seen_sorted) > 0, "We somehow got an empty dict of offset front IDs" # Only use the special final front (the -1, catch-all front composed of final samples in each frequency) if necessary offset_front_id, _ntimes_seen = ntimes_seen_sorted[0] if offset_front_id == -1 and len(ntimes_seen_sorted) > 1: offset_front_id, _ntimes_seen = ntimes_seen_sorted[1] offset_front_id_most_overlap = offset_front_id # - Finally, update the segmentation mask to follow the offset # front from where it first overlaps in frequency with the onset front to where it ends or to where # the onset front ends, whichever happens first. # [ . S S S S S S . .] # [ . . S S S S S . .] # [ . . S S S S S . .] # [ . S S S S S S S .] # [ O F 3 . . . . . .] <-- This frequency has not yet been matched with an offset front front_is_complete = _update_segmentation_mask(segmentation_mask, resulting_onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap) # Remove any onsets that are covered by the new segmentation mask _remove_overlaps(segmentation_mask, resulting_onset_fronts) # Remove any offsets that are covered by the new segmentaion mask _remove_overlaps(segmentation_mask, offset_fronts) # - Repeat this algorithm, restarting in the first frequency channel that did not match (the last frequency in # the above example). Do this until you have finished with this onset front. # - Repeat for each onset front in the rest of this frequency # - Repeat for each frequency return segmentation_mask
[ "Returns", "a", "segmentation", "mask", "which", "looks", "like", "this", ":", "frequency", "1", ":", "0", "0", "4", "4", "4", "4", "4", "0", "0", "5", "5", "5", "frequency", "2", ":", "0", "4", "4", "4", "4", "4", "0", "0", "0", "0", "5", "5", "frequency", "3", ":", "0", "4", "4", "4", "4", "4", "4", "4", "5", "5", "5", "5" ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L639-L773
[ "def", "_match_fronts", "(", "onset_fronts", ",", "offset_fronts", ",", "onsets", ",", "offsets", ",", "debug", "=", "False", ")", ":", "def", "printd", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "debug", ":", "print", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# Make copies of everything, so we can do whatever we want with them", "onset_fronts", "=", "np", ".", "copy", "(", "onset_fronts", ")", "offset_fronts", "=", "np", ".", "copy", "(", "offset_fronts", ")", "onsets", "=", "np", ".", "copy", "(", "onsets", ")", "offsets", "=", "np", ".", "copy", "(", "offsets", ")", "# This is what we will return", "segmentation_mask", "=", "np", ".", "zeros_like", "(", "onset_fronts", ")", "# - Take the first frequency in the onset_fronts matrix", "# [ s s s s s s s s s] <-- This frequency", "# [ s s s s s s s s s]", "# [ s s s s s s s s s]", "# [ s s s s s s s s s]", "# [ s s s s s s s s s]", "# - Follow it along in time like this:", "# first sample last sample", "# v --> v", "# [ s s s s s s s s s]", "# [ s s s s s s s s s]", "# [ s s s s s s s s s]", "# [ s s s s s s s s s]", "# [ s s s s s s s s s]", "# until you get to the first onset front in that frequency", "# Here it is!", "# v", "# [ . O . . . . . . .]", "# [ . . O . . . . . .]", "# [ . . O . . . . . .]", "# [ . O . . . . . . .]", "# [ O . . . . . . . .]", "resulting_onset_fronts", "=", "np", ".", "copy", "(", "onset_fronts", ")", "printd", "(", "\" -> Dealing with onset fronts...\"", ")", "for", "onset_front_id", "in", "_get_front_ids_one_at_a_time", "(", "onset_fronts", ")", ":", "printd", "(", "\" -> Dealing with onset front\"", ",", "int", "(", "onset_front_id", ")", ")", "front_is_complete", "=", "False", "while", "not", "front_is_complete", ":", "# - Now, starting at this onset front in each frequency, find that onset's corresponding offset", "# [ . O . . . . F . .]", "# [ . . O . . . F . .]", "# [ . . O . F . . . .]", "# [ . O . F . . . . .]", "# [ O F . . . . . . .]", "corresponding_offsets", "=", "_get_corresponding_offsets", "(", "resulting_onset_fronts", ",", "onset_front_id", ",", "onsets", ",", "offsets", ")", "# It is possible that onset_front_id has been removed from resulting_onset_fronts,", "# if so, skip it and move on to the next onset front (we are iterating over the original", "# to keep the iterator valid)", "if", "not", "corresponding_offsets", ":", "break", "# - Get all the offset fronts that are composed of at least one of these offset times", "# [ . O . . . . 1 . .]", "# [ . . O 3 . . 1 . .]", "# [ . . O 3 F . 1 . .]", "# [ . O . 3 . . . 1 .]", "# [ O F 3 . . . . . .]", "_all_offset_fronts_of_interest", ",", "ids_ntimes_seen", "=", "_get_all_offset_fronts_from_offsets", "(", "offset_fronts", ",", "corresponding_offsets", ")", "# - Check how many of these offset times each of the offset fronts are composed of:", "# [ . O . . . . Y . .]", "# [ . . O 3 . . Y . .]", "# [ . . O 3 F . 1 . .]", "# [ . O . X . . . 1 .]", "# [ O F 3 . . . . . .]", "# In this example, offset front 1 is made up of 4 offset times, 2 of which (the Y's) are offset times", "# that correspond to onsets in the onset front we are currently dealing with. Meanwhile, offset", "# front 3 is made up of 4 offset times, only one of which (the X) is one of the offsets that corresponds", "# to the onset front.", "# - Choose the offset front which matches the most offset time candidates. 
In this example, offset front 1", "# is chosen because it has 2 of these offset times.", "# If there is a tie, we choose the ID with the lower number", "ntimes_seen_sorted", "=", "sorted", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "ids_ntimes_seen", ".", "items", "(", ")", "]", ",", "key", "=", "lambda", "tup", ":", "(", "-", "1", "*", "tup", "[", "1", "]", ",", "tup", "[", "0", "]", ")", ")", "assert", "len", "(", "ntimes_seen_sorted", ")", ">", "0", ",", "\"We somehow got an empty dict of offset front IDs\"", "# Only use the special final front (the -1, catch-all front composed of final samples in each frequency) if necessary", "offset_front_id", ",", "_ntimes_seen", "=", "ntimes_seen_sorted", "[", "0", "]", "if", "offset_front_id", "==", "-", "1", "and", "len", "(", "ntimes_seen_sorted", ")", ">", "1", ":", "offset_front_id", ",", "_ntimes_seen", "=", "ntimes_seen_sorted", "[", "1", "]", "offset_front_id_most_overlap", "=", "offset_front_id", "# - Finally, update the segmentation mask to follow the offset", "# front from where it first overlaps in frequency with the onset front to where it ends or to where", "# the onset front ends, whichever happens first.", "# [ . S S S S S S . .]", "# [ . . S S S S S . .]", "# [ . . S S S S S . .]", "# [ . S S S S S S S .]", "# [ O F 3 . . . . . .] <-- This frequency has not yet been matched with an offset front", "front_is_complete", "=", "_update_segmentation_mask", "(", "segmentation_mask", ",", "resulting_onset_fronts", ",", "offset_fronts", ",", "onset_front_id", ",", "offset_front_id_most_overlap", ")", "# Remove any onsets that are covered by the new segmentation mask", "_remove_overlaps", "(", "segmentation_mask", ",", "resulting_onset_fronts", ")", "# Remove any offsets that are covered by the new segmentaion mask", "_remove_overlaps", "(", "segmentation_mask", ",", "offset_fronts", ")", "# - Repeat this algorithm, restarting in the first frequency channel that did not match (the last frequency in", "# the above example). Do this until you have finished with this onset front.", "# - Repeat for each onset front in the rest of this frequency", "# - Repeat for each frequency", "return", "segmentation_mask" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
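The offset-front selection buried in the flattened code above is easier to see in isolation. The sketch below (with invented counts) reproduces just that tie-breaking rule: the highest overlap count wins, ties go to the lower ID, and the catch-all front with ID -1 is used only when no other front is available::

    # Invented example: offset front ID -> how many of the onset front's offsets it contains.
    ids_ntimes_seen = {3: 2, -1: 2, 5: 2, 8: 1}

    ranked = sorted(ids_ntimes_seen.items(), key=lambda kv: (-kv[1], kv[0]))
    offset_front_id, _count = ranked[0]
    if offset_front_id == -1 and len(ranked) > 1:
        offset_front_id, _count = ranked[1]

    print(ranked)           # [(-1, 2), (3, 2), (5, 2), (8, 1)]
    print(offset_front_id)  # 3 -- the -1 catch-all is skipped because real fronts tie it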
test
_remove_fronts_that_are_too_small
Removes all fronts from `fronts` which are strictly smaller than `size` consecutive frequencies in length.
algorithms/asa.py
def _remove_fronts_that_are_too_small(fronts, size): """ Removes all fronts from `fronts` which are strictly smaller than `size` consecutive frequencies in length. """ ids = np.unique(fronts) for id in ids: if id == 0 or id == -1: continue front = _get_front_idxs_from_id(fronts, id) if len(front) < size: indexes = ([f for f, _ in front], [s for _, s in front]) fronts[indexes] = 0
def _remove_fronts_that_are_too_small(fronts, size): """ Removes all fronts from `fronts` which are strictly smaller than `size` consecutive frequencies in length. """ ids = np.unique(fronts) for id in ids: if id == 0 or id == -1: continue front = _get_front_idxs_from_id(fronts, id) if len(front) < size: indexes = ([f for f, _ in front], [s for _, s in front]) fronts[indexes] = 0
[ "Removes", "all", "fronts", "from", "fronts", "which", "are", "strictly", "smaller", "than", "size", "consecutive", "frequencies", "in", "length", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L776-L788
[ "def", "_remove_fronts_that_are_too_small", "(", "fronts", ",", "size", ")", ":", "ids", "=", "np", ".", "unique", "(", "fronts", ")", "for", "id", "in", "ids", ":", "if", "id", "==", "0", "or", "id", "==", "-", "1", ":", "continue", "front", "=", "_get_front_idxs_from_id", "(", "fronts", ",", "id", ")", "if", "len", "(", "front", ")", "<", "size", ":", "indexes", "=", "(", "[", "f", "for", "f", ",", "_", "in", "front", "]", ",", "[", "s", "for", "_", ",", "s", "in", "front", "]", ")", "fronts", "[", "indexes", "]", "=", "0" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
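A toy demonstration of the size filter above, with an invented fronts array; the `np.where` lookup stands in for the repo's `_get_front_idxs_from_id` helper::

    import numpy as np

    fronts = np.array([
        [2,  0, 3, 0],
        [2,  0, 3, 0],
        [0, -1, 3, 0],
    ])

    def remove_small_fronts(fronts, size):
        for front_id in np.unique(fronts):
            if front_id in (0, -1):        # 0 = background, -1 = catch-all front
                continue
            fidxs, sidxs = np.where(fronts == front_id)
            if len(fidxs) < size:
                fronts[fidxs, sidxs] = 0

    remove_small_fronts(fronts, size=3)
    print(fronts)
    # [[ 0  0  3  0]
    #  [ 0  0  3  0]
    #  [ 0 -1  3  0]]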
test
_break_poorly_matched_fronts
For each onset front, for each frequency in that front, break the onset front if the signals between this frequency's onset and the next frequency's onset are not similar enough. Specifically: If we have the following two frequency channels, and the two O's are part of the same onset front, :: [ . O . . . . . . . . . . ] [ . . . . O . . . . . . . ] We compare the signals x and y: :: [ . x x x x . . . . . . . ] [ . y y y y . . . . . . . ] And if they are not sufficiently similar (via a DSP correlation algorithm), we break the onset front between these two channels. Once this is done, remove any onset fronts that are less than 3 channels wide.
algorithms/asa.py
def _break_poorly_matched_fronts(fronts, threshold=0.1, threshold_overlap_samples=3): """ For each onset front, for each frequency in that front, break the onset front if the signals between this frequency's onset and the next frequency's onset are not similar enough. Specifically: If we have the following two frequency channels, and the two O's are part of the same onset front, :: [ . O . . . . . . . . . . ] [ . . . . O . . . . . . . ] We compare the signals x and y: :: [ . x x x x . . . . . . . ] [ . y y y y . . . . . . . ] And if they are not sufficiently similar (via a DSP correlation algorithm), we break the onset front between these two channels. Once this is done, remove any onset fronts that are less than 3 channels wide. """ assert threshold_overlap_samples > 0, "Number of samples of overlap must be greater than zero" breaks_after = {} for front_id in _get_front_ids_one_at_a_time(fronts): front = _get_front_idxs_from_id(fronts, front_id) for i, (f, s) in enumerate(front): if i < len(front) - 1: # Get the signal from f, s to f, s+1 and the signal from f+1, s to f+1, s+1 next_f, next_s = front[i + 1] low_s = min(s, next_s) high_s = max(s, next_s) sig_this_f = fronts[f, low_s:high_s] sig_next_f = fronts[next_f, low_s:high_s] assert len(sig_next_f) == len(sig_this_f) if len(sig_next_f) > threshold_overlap_samples: # If these two signals are not sufficiently close in form, this front should be broken up correlation = signal.correlate(sig_this_f, sig_next_f, mode='same') assert len(correlation) > 0 correlation = correlation / max(correlation + 1E-9) similarity = np.sum(correlation) / len(correlation) # TODO: the above stuff probably needs to be figured out if similarity < threshold: if front_id in breaks_after: breaks_after[front_id].append((f, s)) else: breaks_after[front_id] = [] # Now update the fronts matrix by breaking up any fronts at the points we just identified # and assign the newly created fronts new IDs taken_ids = sorted(np.unique(fronts)) next_id = taken_ids[-1] + 1 for id in breaks_after.keys(): for f, s in breaks_after[id]: fidxs, sidxs = np.where(fronts == id) idxs_greater_than_f = [fidx for fidx in fidxs if fidx > f] start = len(sidxs) - len(idxs_greater_than_f) indexes = (idxs_greater_than_f, sidxs[start:]) fronts[indexes] = next_id next_id += 1 _remove_fronts_that_are_too_small(fronts, 3)
def _break_poorly_matched_fronts(fronts, threshold=0.1, threshold_overlap_samples=3): """ For each onset front, for each frequency in that front, break the onset front if the signals between this frequency's onset and the next frequency's onset are not similar enough. Specifically: If we have the following two frequency channels, and the two O's are part of the same onset front, :: [ . O . . . . . . . . . . ] [ . . . . O . . . . . . . ] We compare the signals x and y: :: [ . x x x x . . . . . . . ] [ . y y y y . . . . . . . ] And if they are not sufficiently similar (via a DSP correlation algorithm), we break the onset front between these two channels. Once this is done, remove any onset fronts that are less than 3 channels wide. """ assert threshold_overlap_samples > 0, "Number of samples of overlap must be greater than zero" breaks_after = {} for front_id in _get_front_ids_one_at_a_time(fronts): front = _get_front_idxs_from_id(fronts, front_id) for i, (f, s) in enumerate(front): if i < len(front) - 1: # Get the signal from f, s to f, s+1 and the signal from f+1, s to f+1, s+1 next_f, next_s = front[i + 1] low_s = min(s, next_s) high_s = max(s, next_s) sig_this_f = fronts[f, low_s:high_s] sig_next_f = fronts[next_f, low_s:high_s] assert len(sig_next_f) == len(sig_this_f) if len(sig_next_f) > threshold_overlap_samples: # If these two signals are not sufficiently close in form, this front should be broken up correlation = signal.correlate(sig_this_f, sig_next_f, mode='same') assert len(correlation) > 0 correlation = correlation / max(correlation + 1E-9) similarity = np.sum(correlation) / len(correlation) # TODO: the above stuff probably needs to be figured out if similarity < threshold: if front_id in breaks_after: breaks_after[front_id].append((f, s)) else: breaks_after[front_id] = [] # Now update the fronts matrix by breaking up any fronts at the points we just identified # and assign the newly created fronts new IDs taken_ids = sorted(np.unique(fronts)) next_id = taken_ids[-1] + 1 for id in breaks_after.keys(): for f, s in breaks_after[id]: fidxs, sidxs = np.where(fronts == id) idxs_greater_than_f = [fidx for fidx in fidxs if fidx > f] start = len(sidxs) - len(idxs_greater_than_f) indexes = (idxs_greater_than_f, sidxs[start:]) fronts[indexes] = next_id next_id += 1 _remove_fronts_that_are_too_small(fronts, 3)
[ "For", "each", "onset", "front", "for", "each", "frequency", "in", "that", "front", "break", "the", "onset", "front", "if", "the", "signals", "between", "this", "frequency", "s", "onset", "and", "the", "next", "frequency", "s", "onset", "are", "not", "similar", "enough", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L790-L855
[ "def", "_break_poorly_matched_fronts", "(", "fronts", ",", "threshold", "=", "0.1", ",", "threshold_overlap_samples", "=", "3", ")", ":", "assert", "threshold_overlap_samples", ">", "0", ",", "\"Number of samples of overlap must be greater than zero\"", "breaks_after", "=", "{", "}", "for", "front_id", "in", "_get_front_ids_one_at_a_time", "(", "fronts", ")", ":", "front", "=", "_get_front_idxs_from_id", "(", "fronts", ",", "front_id", ")", "for", "i", ",", "(", "f", ",", "s", ")", "in", "enumerate", "(", "front", ")", ":", "if", "i", "<", "len", "(", "front", ")", "-", "1", ":", "# Get the signal from f, s to f, s+1 and the signal from f+1, s to f+1, s+1", "next_f", ",", "next_s", "=", "front", "[", "i", "+", "1", "]", "low_s", "=", "min", "(", "s", ",", "next_s", ")", "high_s", "=", "max", "(", "s", ",", "next_s", ")", "sig_this_f", "=", "fronts", "[", "f", ",", "low_s", ":", "high_s", "]", "sig_next_f", "=", "fronts", "[", "next_f", ",", "low_s", ":", "high_s", "]", "assert", "len", "(", "sig_next_f", ")", "==", "len", "(", "sig_this_f", ")", "if", "len", "(", "sig_next_f", ")", ">", "threshold_overlap_samples", ":", "# If these two signals are not sufficiently close in form, this front should be broken up", "correlation", "=", "signal", ".", "correlate", "(", "sig_this_f", ",", "sig_next_f", ",", "mode", "=", "'same'", ")", "assert", "len", "(", "correlation", ")", ">", "0", "correlation", "=", "correlation", "/", "max", "(", "correlation", "+", "1E-9", ")", "similarity", "=", "np", ".", "sum", "(", "correlation", ")", "/", "len", "(", "correlation", ")", "# TODO: the above stuff probably needs to be figured out", "if", "similarity", "<", "threshold", ":", "if", "front_id", "in", "breaks_after", ":", "breaks_after", "[", "front_id", "]", ".", "append", "(", "(", "f", ",", "s", ")", ")", "else", ":", "breaks_after", "[", "front_id", "]", "=", "[", "]", "# Now update the fronts matrix by breaking up any fronts at the points we just identified", "# and assign the newly created fronts new IDs", "taken_ids", "=", "sorted", "(", "np", ".", "unique", "(", "fronts", ")", ")", "next_id", "=", "taken_ids", "[", "-", "1", "]", "+", "1", "for", "id", "in", "breaks_after", ".", "keys", "(", ")", ":", "for", "f", ",", "s", "in", "breaks_after", "[", "id", "]", ":", "fidxs", ",", "sidxs", "=", "np", ".", "where", "(", "fronts", "==", "id", ")", "idxs_greater_than_f", "=", "[", "fidx", "for", "fidx", "in", "fidxs", "if", "fidx", ">", "f", "]", "start", "=", "len", "(", "sidxs", ")", "-", "len", "(", "idxs_greater_than_f", ")", "indexes", "=", "(", "idxs_greater_than_f", ",", "sidxs", "[", "start", ":", "]", ")", "fronts", "[", "indexes", "]", "=", "next_id", "next_id", "+=", "1", "_remove_fronts_that_are_too_small", "(", "fronts", ",", "3", ")" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
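The similarity test inside the front-breaking routine above reduces to a peak-normalized cross-correlation averaged over all lags. A stand-alone sketch of just that scoring step, on invented one-dimensional snippets, might look like this (the repo applies it to slices taken between neighbouring onsets and compares the score against `threshold`)::

    import numpy as np
    from scipy import signal

    def correlation_similarity(sig_a, sig_b):
        """Peak-normalized cross-correlation averaged over all lags (the score used above)."""
        correlation = signal.correlate(sig_a, sig_b, mode='same')
        correlation = correlation / max(correlation + 1e-9)
        return np.sum(correlation) / len(correlation)

    a = np.array([0.0, 1.0, 2.0, 3.0, 2.0])
    b = np.array([0.0, 1.1, 2.1, 2.9, 2.0])
    score = correlation_similarity(a, b)
    print(score)   # a scalar; the caller breaks the front where score < threshold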
test
_update_segmentation_mask_if_overlap
Merges the segments specified by `id` (found in `toupdate`) and `otherid` (found in `other`) if they overlap at all. Updates `toupdate` accordingly.
algorithms/asa.py
def _update_segmentation_mask_if_overlap(toupdate, other, id, otherid): """ Merges the segments specified by `id` (found in `toupdate`) and `otherid` (found in `other`) if they overlap at all. Updates `toupdate` accordingly. """ # If there is any overlap or touching, merge the two, otherwise just return yourmask = other == otherid mymask = toupdate == id overlap_exists = np.any(yourmask & mymask) if not overlap_exists: return yourfidxs, yoursidxs = np.where(other == otherid) toupdate[yourfidxs, yoursidxs] = id
def _update_segmentation_mask_if_overlap(toupdate, other, id, otherid): """ Merges the segments specified by `id` (found in `toupdate`) and `otherid` (found in `other`) if they overlap at all. Updates `toupdate` accordingly. """ # If there is any overlap or touching, merge the two, otherwise just return yourmask = other == otherid mymask = toupdate == id overlap_exists = np.any(yourmask & mymask) if not overlap_exists: return yourfidxs, yoursidxs = np.where(other == otherid) toupdate[yourfidxs, yoursidxs] = id
[ "Merges", "the", "segments", "specified", "by", "id", "(", "found", "in", "toupdate", ")", "and", "otherid", "(", "found", "in", "other", ")", "if", "they", "overlap", "at", "all", ".", "Updates", "toupdate", "accordingly", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L857-L870
[ "def", "_update_segmentation_mask_if_overlap", "(", "toupdate", ",", "other", ",", "id", ",", "otherid", ")", ":", "# If there is any overlap or touching, merge the two, otherwise just return", "yourmask", "=", "other", "==", "otherid", "mymask", "=", "toupdate", "==", "id", "overlap_exists", "=", "np", ".", "any", "(", "yourmask", "&", "mymask", ")", "if", "not", "overlap_exists", ":", "return", "yourfidxs", ",", "yoursidxs", "=", "np", ".", "where", "(", "other", "==", "otherid", ")", "toupdate", "[", "yourfidxs", ",", "yoursidxs", "]", "=", "id" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_segments_are_adjacent
Checks if seg1 and seg2 are adjacent at any point. Each is a tuple of the form (fidxs, sidxs).
algorithms/asa.py
def _segments_are_adjacent(seg1, seg2): """ Checks if seg1 and seg2 are adjacent at any point. Each is a tuple of the form (fidxs, sidxs). """ # TODO: This is unnacceptably slow lsf1, lss1 = seg1 lsf2, lss2 = seg2 for i, f1 in enumerate(lsf1): for j, f2 in enumerate(lsf2): if f1 <= f2 + 1 and f1 >= f2 - 1: # Frequencies are a match, are samples? if lss1[i] <= lss2[j] + 1 and lss1[i] >= lss2[j] - 1: return True return False
def _segments_are_adjacent(seg1, seg2): """ Checks if seg1 and seg2 are adjacent at any point. Each is a tuple of the form (fidxs, sidxs). """ # TODO: This is unnacceptably slow lsf1, lss1 = seg1 lsf2, lss2 = seg2 for i, f1 in enumerate(lsf1): for j, f2 in enumerate(lsf2): if f1 <= f2 + 1 and f1 >= f2 - 1: # Frequencies are a match, are samples? if lss1[i] <= lss2[j] + 1 and lss1[i] >= lss2[j] - 1: return True return False
[ "Checks", "if", "seg1", "and", "seg2", "are", "adjacent", "at", "any", "point", ".", "Each", "is", "a", "tuple", "of", "the", "form", "(", "fidxs", "sidxs", ")", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L872-L886
[ "def", "_segments_are_adjacent", "(", "seg1", ",", "seg2", ")", ":", "# TODO: This is unnacceptably slow", "lsf1", ",", "lss1", "=", "seg1", "lsf2", ",", "lss2", "=", "seg2", "for", "i", ",", "f1", "in", "enumerate", "(", "lsf1", ")", ":", "for", "j", ",", "f2", "in", "enumerate", "(", "lsf2", ")", ":", "if", "f1", "<=", "f2", "+", "1", "and", "f1", ">=", "f2", "-", "1", ":", "# Frequencies are a match, are samples?", "if", "lss1", "[", "i", "]", "<=", "lss2", "[", "j", "]", "+", "1", "and", "lss1", "[", "i", "]", ">=", "lss2", "[", "j", "]", "-", "1", ":", "return", "True", "return", "False" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
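The adjacency test above is quadratic in pure Python, as its own TODO notes. As a hedged aside, the same check can be expressed with numpy broadcasting; this is an alternative sketch, not the repo's implementation::

    import numpy as np

    def segments_are_adjacent(seg1, seg2):
        """True if any (freq, sample) point of seg1 is within +/-1 of a point of seg2."""
        f1, s1 = (np.asarray(x) for x in seg1)
        f2, s2 = (np.asarray(x) for x in seg2)
        close_f = np.abs(f1[:, None] - f2[None, :]) <= 1
        close_s = np.abs(s1[:, None] - s2[None, :]) <= 1
        return bool(np.any(close_f & close_s))

    seg_a = (np.array([3, 3, 4]), np.array([10, 11, 11]))
    seg_b = (np.array([5, 5]), np.array([12, 13]))
    print(segments_are_adjacent(seg_a, seg_b))  # True: (4, 11) touches (5, 12)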
test
_merge_adjacent_segments
Merges all segments in `mask` which are touching.
algorithms/asa.py
def _merge_adjacent_segments(mask): """ Merges all segments in `mask` which are touching. """ mask_ids = [id for id in np.unique(mask) if id != 0] for id in mask_ids: myfidxs, mysidxs = np.where(mask == id) for other in mask_ids: # Ugh, brute force O(N^2) algorithm.. gross.. if id == other: continue else: other_fidxs, other_sidxs = np.where(mask == other) if _segments_are_adjacent((myfidxs, mysidxs), (other_fidxs, other_sidxs)): mask[other_fidxs, other_sidxs] = id
def _merge_adjacent_segments(mask): """ Merges all segments in `mask` which are touching. """ mask_ids = [id for id in np.unique(mask) if id != 0] for id in mask_ids: myfidxs, mysidxs = np.where(mask == id) for other in mask_ids: # Ugh, brute force O(N^2) algorithm.. gross.. if id == other: continue else: other_fidxs, other_sidxs = np.where(mask == other) if _segments_are_adjacent((myfidxs, mysidxs), (other_fidxs, other_sidxs)): mask[other_fidxs, other_sidxs] = id
[ "Merges", "all", "segments", "in", "mask", "which", "are", "touching", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L888-L901
[ "def", "_merge_adjacent_segments", "(", "mask", ")", ":", "mask_ids", "=", "[", "id", "for", "id", "in", "np", ".", "unique", "(", "mask", ")", "if", "id", "!=", "0", "]", "for", "id", "in", "mask_ids", ":", "myfidxs", ",", "mysidxs", "=", "np", ".", "where", "(", "mask", "==", "id", ")", "for", "other", "in", "mask_ids", ":", "# Ugh, brute force O(N^2) algorithm.. gross..", "if", "id", "==", "other", ":", "continue", "else", ":", "other_fidxs", ",", "other_sidxs", "=", "np", ".", "where", "(", "mask", "==", "other", ")", "if", "_segments_are_adjacent", "(", "(", "myfidxs", ",", "mysidxs", ")", ",", "(", "other_fidxs", ",", "other_sidxs", ")", ")", ":", "mask", "[", "other_fidxs", ",", "other_sidxs", "]", "=", "id" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
test
_integrate_segmentation_masks
`segmasks` should be in sorted order of [coarsest, ..., finest]. Integrates the given list of segmentation masks together to form one segmentation mask by having each segment subsume ones that exist in the finer masks.
algorithms/asa.py
def _integrate_segmentation_masks(segmasks): """ `segmasks` should be in sorted order of [coarsest, ..., finest]. Integrates the given list of segmentation masks together to form one segmentation mask by having each segment subsume ones that exist in the finer masks. """ if len(segmasks) == 1: return segmasks assert len(segmasks) > 0, "Passed in empty list of segmentation masks" coarse_mask = np.copy(segmasks[0]) mask_ids = [id for id in np.unique(coarse_mask) if id != 0] for id in mask_ids: for mask in segmasks[1:]: finer_ids = [i for i in np.unique(mask) if i != 0] for finer_id in finer_ids: _update_segmentation_mask_if_overlap(coarse_mask, mask, id, finer_id) # Lastly, merge all adjacent blocks, but just kidding, since this algorithm is waaaay to slow #_merge_adjacent_segments(coarse_mask) return coarse_mask
def _integrate_segmentation_masks(segmasks): """ `segmasks` should be in sorted order of [coarsest, ..., finest]. Integrates the given list of segmentation masks together to form one segmentation mask by having each segment subsume ones that exist in the finer masks. """ if len(segmasks) == 1: return segmasks assert len(segmasks) > 0, "Passed in empty list of segmentation masks" coarse_mask = np.copy(segmasks[0]) mask_ids = [id for id in np.unique(coarse_mask) if id != 0] for id in mask_ids: for mask in segmasks[1:]: finer_ids = [i for i in np.unique(mask) if i != 0] for finer_id in finer_ids: _update_segmentation_mask_if_overlap(coarse_mask, mask, id, finer_id) # Lastly, merge all adjacent blocks, but just kidding, since this algorithm is waaaay to slow #_merge_adjacent_segments(coarse_mask) return coarse_mask
[ "segmasks", "should", "be", "in", "sorted", "order", "of", "[", "coarsest", "...", "finest", "]", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L903-L924
[ "def", "_integrate_segmentation_masks", "(", "segmasks", ")", ":", "if", "len", "(", "segmasks", ")", "==", "1", ":", "return", "segmasks", "assert", "len", "(", "segmasks", ")", ">", "0", ",", "\"Passed in empty list of segmentation masks\"", "coarse_mask", "=", "np", ".", "copy", "(", "segmasks", "[", "0", "]", ")", "mask_ids", "=", "[", "id", "for", "id", "in", "np", ".", "unique", "(", "coarse_mask", ")", "if", "id", "!=", "0", "]", "for", "id", "in", "mask_ids", ":", "for", "mask", "in", "segmasks", "[", "1", ":", "]", ":", "finer_ids", "=", "[", "i", "for", "i", "in", "np", ".", "unique", "(", "mask", ")", "if", "i", "!=", "0", "]", "for", "finer_id", "in", "finer_ids", ":", "_update_segmentation_mask_if_overlap", "(", "coarse_mask", ",", "mask", ",", "id", ",", "finer_id", ")", "# Lastly, merge all adjacent blocks, but just kidding, since this algorithm is waaaay to slow", "#_merge_adjacent_segments(coarse_mask)", "return", "coarse_mask" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
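A compact, invented example of the integration idea above: a finer segment that overlaps a coarse segment is absorbed into it. The small helper here plays the role of `_update_segmentation_mask_if_overlap`::

    import numpy as np

    coarse = np.array([
        [0, 4, 4, 4, 0],
        [0, 4, 4, 0, 0],
    ])
    fine = np.array([
        [0, 0, 0, 9, 9],   # segment 9 overlaps coarse segment 4 at (0, 3)
        [7, 0, 0, 0, 0],   # segment 7 does not overlap anything
    ])

    def absorb_if_overlap(coarse, fine, coarse_id, fine_id):
        if np.any((coarse == coarse_id) & (fine == fine_id)):
            coarse[fine == fine_id] = coarse_id

    for fine_id in (7, 9):
        absorb_if_overlap(coarse, fine, 4, fine_id)

    print(coarse)
    # [[0 4 4 4 4]
    #  [0 4 4 0 0]]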
test
_separate_masks
Returns a list of segmentation masks each of the same dimension as the input one, but where they each have exactly one segment in them and all other samples in them are zeroed. Only bothers to return segments that are larger in total area than `threshold * mask.size`.
algorithms/asa.py
def _separate_masks(mask, threshold=0.025): """ Returns a list of segmentation masks each of the same dimension as the input one, but where they each have exactly one segment in them and all other samples in them are zeroed. Only bothers to return segments that are larger in total area than `threshold * mask.size`. """ try: ncpus = multiprocessing.cpu_count() except NotImplementedError: ncpus = 2 with multiprocessing.Pool(processes=ncpus) as pool: mask_ids = [id for id in np.unique(mask) if id != 0] thresholds = [threshold * mask.size for _ in range(len(mask_ids))] masks = [mask for _ in range(len(mask_ids))] ms = pool.starmap(_separate_masks_task, zip(mask_ids, thresholds, masks)) return [m for m in ms if m is not None]
def _separate_masks(mask, threshold=0.025): """ Returns a list of segmentation masks each of the same dimension as the input one, but where they each have exactly one segment in them and all other samples in them are zeroed. Only bothers to return segments that are larger in total area than `threshold * mask.size`. """ try: ncpus = multiprocessing.cpu_count() except NotImplementedError: ncpus = 2 with multiprocessing.Pool(processes=ncpus) as pool: mask_ids = [id for id in np.unique(mask) if id != 0] thresholds = [threshold * mask.size for _ in range(len(mask_ids))] masks = [mask for _ in range(len(mask_ids))] ms = pool.starmap(_separate_masks_task, zip(mask_ids, thresholds, masks)) return [m for m in ms if m is not None]
[ "Returns", "a", "list", "of", "segmentation", "masks", "each", "of", "the", "same", "dimension", "as", "the", "input", "one", "but", "where", "they", "each", "have", "exactly", "one", "segment", "in", "them", "and", "all", "other", "samples", "in", "them", "are", "zeroed", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L935-L953
[ "def", "_separate_masks", "(", "mask", ",", "threshold", "=", "0.025", ")", ":", "try", ":", "ncpus", "=", "multiprocessing", ".", "cpu_count", "(", ")", "except", "NotImplementedError", ":", "ncpus", "=", "2", "with", "multiprocessing", ".", "Pool", "(", "processes", "=", "ncpus", ")", "as", "pool", ":", "mask_ids", "=", "[", "id", "for", "id", "in", "np", ".", "unique", "(", "mask", ")", "if", "id", "!=", "0", "]", "thresholds", "=", "[", "threshold", "*", "mask", ".", "size", "for", "_", "in", "range", "(", "len", "(", "mask_ids", ")", ")", "]", "masks", "=", "[", "mask", "for", "_", "in", "range", "(", "len", "(", "mask_ids", ")", ")", "]", "ms", "=", "pool", ".", "starmap", "(", "_separate_masks_task", ",", "zip", "(", "mask_ids", ",", "thresholds", ",", "masks", ")", ")", "return", "[", "m", "for", "m", "in", "ms", "if", "m", "is", "not", "None", "]" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
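The record above farms the per-segment split out to a process pool, but `_separate_masks_task` itself is not included in this dump. The sketch below substitutes a hypothetical worker (`separate_one_mask`, invented name and area rule) purely to show the `Pool.starmap` wiring::

    import multiprocessing
    import numpy as np

    def separate_one_mask(mask_id, min_area, mask):
        """Hypothetical worker: keep only `mask_id`, or drop it if its area is too small."""
        single = np.where(mask == mask_id, mask_id, 0)
        return single if np.count_nonzero(single) > min_area else None

    def separate_masks(mask, threshold=0.025):
        mask_ids = [i for i in np.unique(mask) if i != 0]
        args = [(i, threshold * mask.size, mask) for i in mask_ids]
        with multiprocessing.Pool(processes=2) as pool:
            results = pool.starmap(separate_one_mask, args)
        return [m for m in results if m is not None]

    if __name__ == "__main__":
        toy = np.zeros((4, 8), dtype=np.int64)
        toy[0:3, 0:4] = 5    # a large segment: 12 cells
        toy[3, 7] = 6        # a tiny segment: 1 cell
        print(len(separate_masks(toy, threshold=0.1)))  # 1 -- only the large segment survives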
test
_downsample_one_or_the_other
Takes the given `mask` and `stft`, which must be matrices of shape `frequencies, times` and downsamples one of them into the other one's times, so that the time dimensions are equal. Leaves the frequency dimension untouched.
algorithms/asa.py
def _downsample_one_or_the_other(mask, mask_indexes, stft, stft_indexes): """ Takes the given `mask` and `stft`, which must be matrices of shape `frequencies, times` and downsamples one of them into the other one's times, so that the time dimensions are equal. Leaves the frequency dimension untouched. """ assert len(mask.shape) == 2, "Expected a two-dimensional `mask`, but got one of {} dimensions.".format(len(mask.shape)) assert len(stft.shape) == 2, "Expected a two-dimensional `stft`, but got one of {} dimensions.".format(len(stft.shape)) if mask.shape[1] > stft.shape[1]: downsample_factor = mask.shape[1] / stft.shape[1] indexes = _get_downsampled_indexes(mask, downsample_factor) mask = mask[:, indexes] mask_indexes = np.array(indexes) elif mask.shape[1] < stft.shape[1]: downsample_factor = stft.shape[1] / mask.shape[1] indexes = _get_downsampled_indexes(stft, downsample_factor) stft = stft[:, indexes] stft_indexes = np.array(indexes) return mask, mask_indexes, stft, stft_indexes
def _downsample_one_or_the_other(mask, mask_indexes, stft, stft_indexes): """ Takes the given `mask` and `stft`, which must be matrices of shape `frequencies, times` and downsamples one of them into the other one's times, so that the time dimensions are equal. Leaves the frequency dimension untouched. """ assert len(mask.shape) == 2, "Expected a two-dimensional `mask`, but got one of {} dimensions.".format(len(mask.shape)) assert len(stft.shape) == 2, "Expected a two-dimensional `stft`, but got one of {} dimensions.".format(len(stft.shape)) if mask.shape[1] > stft.shape[1]: downsample_factor = mask.shape[1] / stft.shape[1] indexes = _get_downsampled_indexes(mask, downsample_factor) mask = mask[:, indexes] mask_indexes = np.array(indexes) elif mask.shape[1] < stft.shape[1]: downsample_factor = stft.shape[1] / mask.shape[1] indexes = _get_downsampled_indexes(stft, downsample_factor) stft = stft[:, indexes] stft_indexes = np.array(indexes) return mask, mask_indexes, stft, stft_indexes
[ "Takes", "the", "given", "mask", "and", "stft", "which", "must", "be", "matrices", "of", "shape", "frequencies", "times", "and", "downsamples", "one", "of", "them", "into", "the", "other", "one", "s", "times", "so", "that", "the", "time", "dimensions", "are", "equal", ".", "Leaves", "the", "frequency", "dimension", "untouched", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L976-L996
[ "def", "_downsample_one_or_the_other", "(", "mask", ",", "mask_indexes", ",", "stft", ",", "stft_indexes", ")", ":", "assert", "len", "(", "mask", ".", "shape", ")", "==", "2", ",", "\"Expected a two-dimensional `mask`, but got one of {} dimensions.\"", ".", "format", "(", "len", "(", "mask", ".", "shape", ")", ")", "assert", "len", "(", "stft", ".", "shape", ")", "==", "2", ",", "\"Expected a two-dimensional `stft`, but got one of {} dimensions.\"", ".", "format", "(", "len", "(", "stft", ".", "shape", ")", ")", "if", "mask", ".", "shape", "[", "1", "]", ">", "stft", ".", "shape", "[", "1", "]", ":", "downsample_factor", "=", "mask", ".", "shape", "[", "1", "]", "/", "stft", ".", "shape", "[", "1", "]", "indexes", "=", "_get_downsampled_indexes", "(", "mask", ",", "downsample_factor", ")", "mask", "=", "mask", "[", ":", ",", "indexes", "]", "mask_indexes", "=", "np", ".", "array", "(", "indexes", ")", "elif", "mask", ".", "shape", "[", "1", "]", "<", "stft", ".", "shape", "[", "1", "]", ":", "downsample_factor", "=", "stft", ".", "shape", "[", "1", "]", "/", "mask", ".", "shape", "[", "1", "]", "indexes", "=", "_get_downsampled_indexes", "(", "stft", ",", "downsample_factor", ")", "stft", "=", "stft", "[", ":", ",", "indexes", "]", "stft_indexes", "=", "np", ".", "array", "(", "indexes", ")", "return", "mask", ",", "mask_indexes", ",", "stft", ",", "stft_indexes" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
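`_get_downsampled_indexes` is not part of this dump, so the sketch below assumes it simply picks evenly spaced column indexes; under that assumption, the shape-matching step above can be illustrated like this::

    import numpy as np

    def evenly_spaced_indexes(n_from, n_to):
        """Assumed stand-in for _get_downsampled_indexes: pick n_to columns out of n_from."""
        return np.linspace(0, n_from - 1, num=n_to).astype(int)

    mask = np.arange(3 * 10).reshape(3, 10)   # 10 time steps
    stft = np.arange(3 * 4).reshape(3, 4)     # 4 time steps

    # Downsample whichever matrix has more time steps, leaving frequencies untouched
    if mask.shape[1] > stft.shape[1]:
        cols = evenly_spaced_indexes(mask.shape[1], stft.shape[1])
        mask = mask[:, cols]
    elif mask.shape[1] < stft.shape[1]:
        cols = evenly_spaced_indexes(stft.shape[1], mask.shape[1])
        stft = stft[:, cols]

    print(mask.shape, stft.shape)  # (3, 4) (3, 4)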
test
_map_segmentation_mask_to_stft_domain
Maps the given `mask`, which is in domain (`frequencies`, `times`) to the new domain (`stft_frequencies`, `stft_times`) and returns the result.
algorithms/asa.py
def _map_segmentation_mask_to_stft_domain(mask, times, frequencies, stft_times, stft_frequencies): """ Maps the given `mask`, which is in domain (`frequencies`, `times`) to the new domain (`stft_frequencies`, `stft_times`) and returns the result. """ assert mask.shape == (frequencies.shape[0], times.shape[0]), "Times is shape {} and frequencies is shape {}, but mask is shaped {}".format( times.shape, frequencies.shape, mask.shape ) result = np.zeros((stft_frequencies.shape[0], stft_times.shape[0])) if len(stft_times) > len(times): all_j = [j for j in range(len(stft_times))] idxs = [int(i) for i in np.linspace(0, len(times) - 1, num=len(stft_times))] all_i = [all_j[idx] for idx in idxs] else: all_i = [i for i in range(len(times))] idxs = [int(i) for i in np.linspace(0, len(stft_times) - 1, num=len(times))] all_j = [all_i[idx] for idx in idxs] for i, j in zip(all_i, all_j): result[:, j] = np.interp(stft_frequencies, frequencies, mask[:, i]) return result
def _map_segmentation_mask_to_stft_domain(mask, times, frequencies, stft_times, stft_frequencies): """ Maps the given `mask`, which is in domain (`frequencies`, `times`) to the new domain (`stft_frequencies`, `stft_times`) and returns the result. """ assert mask.shape == (frequencies.shape[0], times.shape[0]), "Times is shape {} and frequencies is shape {}, but mask is shaped {}".format( times.shape, frequencies.shape, mask.shape ) result = np.zeros((stft_frequencies.shape[0], stft_times.shape[0])) if len(stft_times) > len(times): all_j = [j for j in range(len(stft_times))] idxs = [int(i) for i in np.linspace(0, len(times) - 1, num=len(stft_times))] all_i = [all_j[idx] for idx in idxs] else: all_i = [i for i in range(len(times))] idxs = [int(i) for i in np.linspace(0, len(stft_times) - 1, num=len(times))] all_j = [all_i[idx] for idx in idxs] for i, j in zip(all_i, all_j): result[:, j] = np.interp(stft_frequencies, frequencies, mask[:, i]) return result
[ "Maps", "the", "given", "mask", "which", "is", "in", "domain", "(", "frequencies", "times", ")", "to", "the", "new", "domain", "(", "stft_frequencies", "stft_times", ")", "and", "returns", "the", "result", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L998-L1020
[ "def", "_map_segmentation_mask_to_stft_domain", "(", "mask", ",", "times", ",", "frequencies", ",", "stft_times", ",", "stft_frequencies", ")", ":", "assert", "mask", ".", "shape", "==", "(", "frequencies", ".", "shape", "[", "0", "]", ",", "times", ".", "shape", "[", "0", "]", ")", ",", "\"Times is shape {} and frequencies is shape {}, but mask is shaped {}\"", ".", "format", "(", "times", ".", "shape", ",", "frequencies", ".", "shape", ",", "mask", ".", "shape", ")", "result", "=", "np", ".", "zeros", "(", "(", "stft_frequencies", ".", "shape", "[", "0", "]", ",", "stft_times", ".", "shape", "[", "0", "]", ")", ")", "if", "len", "(", "stft_times", ")", ">", "len", "(", "times", ")", ":", "all_j", "=", "[", "j", "for", "j", "in", "range", "(", "len", "(", "stft_times", ")", ")", "]", "idxs", "=", "[", "int", "(", "i", ")", "for", "i", "in", "np", ".", "linspace", "(", "0", ",", "len", "(", "times", ")", "-", "1", ",", "num", "=", "len", "(", "stft_times", ")", ")", "]", "all_i", "=", "[", "all_j", "[", "idx", "]", "for", "idx", "in", "idxs", "]", "else", ":", "all_i", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "times", ")", ")", "]", "idxs", "=", "[", "int", "(", "i", ")", "for", "i", "in", "np", ".", "linspace", "(", "0", ",", "len", "(", "stft_times", ")", "-", "1", ",", "num", "=", "len", "(", "times", ")", ")", "]", "all_j", "=", "[", "all_i", "[", "idx", "]", "for", "idx", "in", "idxs", "]", "for", "i", ",", "j", "in", "zip", "(", "all_i", ",", "all_j", ")", ":", "result", "[", ":", ",", "j", "]", "=", "np", ".", "interp", "(", "stft_frequencies", ",", "frequencies", ",", "mask", "[", ":", ",", "i", "]", ")", "return", "result" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
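A minimal, self-contained sketch of the resampling idea used by `_map_segmentation_mask_to_stft_domain` above: pick a nearby source column for each target time step, then linearly interpolate that column along the frequency axis with `np.interp`. The grid sizes and mask contents below are made up for illustration; this is a simplified rendering of the same idea, not the function itself.

import numpy as np

# Hypothetical small grids: a (3 frequencies x 4 times) mask mapped onto a denser STFT grid.
frequencies = np.array([0.0, 100.0, 200.0])
times = np.array([0.0, 0.1, 0.2, 0.3])
stft_frequencies = np.linspace(0.0, 200.0, 7)
stft_times = np.linspace(0.0, 0.3, 10)
mask = np.zeros((len(frequencies), len(times)))
mask[1, 1:3] = 1.0  # a small blob of energy in the middle of the coarse grid

result = np.zeros((len(stft_frequencies), len(stft_times)))
# The STFT time grid is denser here, so each STFT column pulls from a nearby mask column...
src_cols = [int(i) for i in np.linspace(0, len(times) - 1, num=len(stft_times))]
for j, i in enumerate(src_cols):
    # ...and each pulled column is interpolated along the frequency axis.
    result[:, j] = np.interp(stft_frequencies, frequencies, mask[:, i])
print(result.shape)  # (7, 10)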
test
_asa_task
Worker for the ASA algorithm's multiprocessing step.
algorithms/asa.py
def _asa_task(q, masks, stft, sample_width, frame_rate, nsamples_for_each_fft): """ Worker for the ASA algorithm's multiprocessing step. """ # Convert each mask to (1 or 0) rather than (ID or 0) masks = [np.where(mask > 0, 1, 0) for mask in masks] # Multiply the masks against STFTs masks = [mask * stft for mask in masks] nparrs = [] dtype_dict = {1: np.int8, 2: np.int16, 4: np.int32} dtype = dtype_dict[sample_width] for m in masks: _times, nparr = signal.istft(m, frame_rate, nperseg=nsamples_for_each_fft) nparr = nparr.astype(dtype) nparrs.append(nparr) for m in nparrs: q.put(m) q.put("DONE")
def _asa_task(q, masks, stft, sample_width, frame_rate, nsamples_for_each_fft): """ Worker for the ASA algorithm's multiprocessing step. """ # Convert each mask to (1 or 0) rather than (ID or 0) masks = [np.where(mask > 0, 1, 0) for mask in masks] # Multiply the masks against STFTs masks = [mask * stft for mask in masks] nparrs = [] dtype_dict = {1: np.int8, 2: np.int16, 4: np.int32} dtype = dtype_dict[sample_width] for m in masks: _times, nparr = signal.istft(m, frame_rate, nperseg=nsamples_for_each_fft) nparr = nparr.astype(dtype) nparrs.append(nparr) for m in nparrs: q.put(m) q.put("DONE")
[ "Worker", "for", "the", "ASA", "algorithm", "s", "multiprocessing", "step", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L1022-L1043
[ "def", "_asa_task", "(", "q", ",", "masks", ",", "stft", ",", "sample_width", ",", "frame_rate", ",", "nsamples_for_each_fft", ")", ":", "# Convert each mask to (1 or 0) rather than (ID or 0)", "for", "mask", "in", "masks", ":", "mask", "=", "np", ".", "where", "(", "mask", ">", "0", ",", "1", ",", "0", ")", "# Multiply the masks against STFTs", "masks", "=", "[", "mask", "*", "stft", "for", "mask", "in", "masks", "]", "nparrs", "=", "[", "]", "dtype_dict", "=", "{", "1", ":", "np", ".", "int8", ",", "2", ":", "np", ".", "int16", ",", "4", ":", "np", ".", "int32", "}", "dtype", "=", "dtype_dict", "[", "sample_width", "]", "for", "m", "in", "masks", ":", "_times", ",", "nparr", "=", "signal", ".", "istft", "(", "m", ",", "frame_rate", ",", "nperseg", "=", "nsamples_for_each_fft", ")", "nparr", "=", "nparr", ".", "astype", "(", "dtype", ")", "nparrs", ".", "append", "(", "nparr", ")", "for", "m", "in", "nparrs", ":", "q", ".", "put", "(", "m", ")", "q", ".", "put", "(", "\"DONE\"", ")" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
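`_asa_task` above streams its reconstructed waveforms back through a queue and terminates the stream with a "DONE" sentinel. Below is a small sketch of the consumer side of that protocol, with stand-in lists instead of real iSTFT output; the helper name `collect_results` is invented for this example.

import multiprocessing as mp

def collect_results(q):
    # Drain the queue until the "DONE" sentinel arrives.
    results = []
    while True:
        item = q.get()
        if isinstance(item, str) and item == "DONE":
            break
        results.append(item)
    return results

if __name__ == "__main__":
    q = mp.Queue()
    # Stand-ins for the reconstructed waveforms a worker would put on the queue.
    q.put([1, 2, 3])
    q.put([4, 5, 6])
    q.put("DONE")
    print(collect_results(q))  # [[1, 2, 3], [4, 5, 6]]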
test
_get_filter_indices
Runs a Markov Decision Process over the given `seg` in chunks of `ms_per_input`, yielding `True` if this `ms_per_input` chunk has been classified as positive (1) and `False` if this chunk has been classified as negative (0). :param seg: The AudioSegment to apply this algorithm to. :param start_as_yes: If True, the first `ms_per_input` chunk will be classified as positive. :param prob_raw_yes: The raw probability of finding the event in any given independently sampled `ms_per_input`. :param ms_per_input: The number of ms of AudioSegment to be fed into the model at a time. :param model: The model, which must have a predict() function, which takes an AudioSegment of `ms_per_input` number of ms and which outputs 1 if the audio event is detected in that input, 0 if not. :param transition_matrix: An iterable of the form: [p(yes->no), p(no->yes)]. :param model_stats: An iterable of the form: [p(reality=1|output=1), p(reality=1|output=0)]. :yields: `True` if the event has been classified in this chunk, `False` otherwise.
algorithms/eventdetection.py
def _get_filter_indices(seg, start_as_yes, prob_raw_yes, ms_per_input, model, transition_matrix, model_stats): """ Runs a Markov Decision Process over the given `seg` in chunks of `ms_per_input`, yielding `True` if this `ms_per_input` chunk has been classified as positive (1) and `False` if this chunk has been classified as negative (0). :param seg: The AudioSegment to apply this algorithm to. :param start_as_yes: If True, the first `ms_per_input` chunk will be classified as positive. :param prob_raw_yes: The raw probability of finding the event in any given independently sampled `ms_per_input`. :param ms_per_input: The number of ms of AudioSegment to be fed into the model at a time. :param model: The model, which must hava predict() function, which takes an AudioSegment of `ms_per_input` number of ms and which outputs 1 if the audio event is detected in that input, 0 if not. :param transition_matrix: An iterable of the form: [p(yes->no), p(no->yes)]. :param model_stats: An iterable of the form: [p(reality=1|output=1), p(reality=1|output=0)]. :yields: `True` if the event has been classified in this chunk, `False` otherwise. """ filter_triggered = 1 if start_as_yes else 0 prob_raw_no = 1.0 - prob_raw_yes for segment, _timestamp in seg.generate_frames_as_segments(ms_per_input): yield filter_triggered observation = int(round(model.predict(segment))) assert observation == 1 or observation == 0, "The given model did not output a 1 or a 0, output: "\ + str(observation) prob_hyp_yes_given_last_hyp = 1.0 - transition_matrix[0] if filter_triggered else transition_matrix[1] prob_hyp_no_given_last_hyp = transition_matrix[0] if filter_triggered else 1.0 - transition_matrix[1] prob_hyp_yes_given_data = model_stats[0] if observation == 1 else model_stats[1] prob_hyp_no_given_data = 1.0 - model_stats[0] if observation == 1 else 1.0 - model_stats[1] hypothesis_yes = prob_raw_yes * prob_hyp_yes_given_last_hyp * prob_hyp_yes_given_data hypothesis_no = prob_raw_no * prob_hyp_no_given_last_hyp * prob_hyp_no_given_data # make a list of ints - each is 0 or 1. The number of 1s is hypotheis_yes * 100 # the number of 0s is hypothesis_no * 100 distribution = [1 for i in range(int(round(hypothesis_yes * 100)))] distribution.extend([0 for i in range(int(round(hypothesis_no * 100)))]) # shuffle random.shuffle(distribution) filter_triggered = random.choice(distribution)
def _get_filter_indices(seg, start_as_yes, prob_raw_yes, ms_per_input, model, transition_matrix, model_stats): """ Runs a Markov Decision Process over the given `seg` in chunks of `ms_per_input`, yielding `True` if this `ms_per_input` chunk has been classified as positive (1) and `False` if this chunk has been classified as negative (0). :param seg: The AudioSegment to apply this algorithm to. :param start_as_yes: If True, the first `ms_per_input` chunk will be classified as positive. :param prob_raw_yes: The raw probability of finding the event in any given independently sampled `ms_per_input`. :param ms_per_input: The number of ms of AudioSegment to be fed into the model at a time. :param model: The model, which must hava predict() function, which takes an AudioSegment of `ms_per_input` number of ms and which outputs 1 if the audio event is detected in that input, 0 if not. :param transition_matrix: An iterable of the form: [p(yes->no), p(no->yes)]. :param model_stats: An iterable of the form: [p(reality=1|output=1), p(reality=1|output=0)]. :yields: `True` if the event has been classified in this chunk, `False` otherwise. """ filter_triggered = 1 if start_as_yes else 0 prob_raw_no = 1.0 - prob_raw_yes for segment, _timestamp in seg.generate_frames_as_segments(ms_per_input): yield filter_triggered observation = int(round(model.predict(segment))) assert observation == 1 or observation == 0, "The given model did not output a 1 or a 0, output: "\ + str(observation) prob_hyp_yes_given_last_hyp = 1.0 - transition_matrix[0] if filter_triggered else transition_matrix[1] prob_hyp_no_given_last_hyp = transition_matrix[0] if filter_triggered else 1.0 - transition_matrix[1] prob_hyp_yes_given_data = model_stats[0] if observation == 1 else model_stats[1] prob_hyp_no_given_data = 1.0 - model_stats[0] if observation == 1 else 1.0 - model_stats[1] hypothesis_yes = prob_raw_yes * prob_hyp_yes_given_last_hyp * prob_hyp_yes_given_data hypothesis_no = prob_raw_no * prob_hyp_no_given_last_hyp * prob_hyp_no_given_data # make a list of ints - each is 0 or 1. The number of 1s is hypotheis_yes * 100 # the number of 0s is hypothesis_no * 100 distribution = [1 for i in range(int(round(hypothesis_yes * 100)))] distribution.extend([0 for i in range(int(round(hypothesis_no * 100)))]) # shuffle random.shuffle(distribution) filter_triggered = random.choice(distribution)
[ "Runs", "a", "Markov", "Decision", "Process", "over", "the", "given", "seg", "in", "chunks", "of", "ms_per_input", "yielding", "True", "if", "this", "ms_per_input", "chunk", "has", "been", "classified", "as", "positive", "(", "1", ")", "and", "False", "if", "this", "chunk", "has", "been", "classified", "as", "negative", "(", "0", ")", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/eventdetection.py#L9-L44
[ "def", "_get_filter_indices", "(", "seg", ",", "start_as_yes", ",", "prob_raw_yes", ",", "ms_per_input", ",", "model", ",", "transition_matrix", ",", "model_stats", ")", ":", "filter_triggered", "=", "1", "if", "start_as_yes", "else", "0", "prob_raw_no", "=", "1.0", "-", "prob_raw_yes", "for", "segment", ",", "_timestamp", "in", "seg", ".", "generate_frames_as_segments", "(", "ms_per_input", ")", ":", "yield", "filter_triggered", "observation", "=", "int", "(", "round", "(", "model", ".", "predict", "(", "segment", ")", ")", ")", "assert", "observation", "==", "1", "or", "observation", "==", "0", ",", "\"The given model did not output a 1 or a 0, output: \"", "+", "str", "(", "observation", ")", "prob_hyp_yes_given_last_hyp", "=", "1.0", "-", "transition_matrix", "[", "0", "]", "if", "filter_triggered", "else", "transition_matrix", "[", "1", "]", "prob_hyp_no_given_last_hyp", "=", "transition_matrix", "[", "0", "]", "if", "filter_triggered", "else", "1.0", "-", "transition_matrix", "[", "1", "]", "prob_hyp_yes_given_data", "=", "model_stats", "[", "0", "]", "if", "observation", "==", "1", "else", "model_stats", "[", "1", "]", "prob_hyp_no_given_data", "=", "1.0", "-", "model_stats", "[", "0", "]", "if", "observation", "==", "1", "else", "1.0", "-", "model_stats", "[", "1", "]", "hypothesis_yes", "=", "prob_raw_yes", "*", "prob_hyp_yes_given_last_hyp", "*", "prob_hyp_yes_given_data", "hypothesis_no", "=", "prob_raw_no", "*", "prob_hyp_no_given_last_hyp", "*", "prob_hyp_no_given_data", "# make a list of ints - each is 0 or 1. The number of 1s is hypotheis_yes * 100", "# the number of 0s is hypothesis_no * 100", "distribution", "=", "[", "1", "for", "i", "in", "range", "(", "int", "(", "round", "(", "hypothesis_yes", "*", "100", ")", ")", ")", "]", "distribution", ".", "extend", "(", "[", "0", "for", "i", "in", "range", "(", "int", "(", "round", "(", "hypothesis_no", "*", "100", ")", ")", ")", "]", ")", "# shuffle", "random", ".", "shuffle", "(", "distribution", ")", "filter_triggered", "=", "random", ".", "choice", "(", "distribution", ")" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
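One update step of the probabilistic filter in `_get_filter_indices`, written out with made-up probabilities so the arithmetic is visible. The final sampling here uses a normalized probability instead of the 100-element distribution list in the original, which has the same effect.

import random

prob_raw_yes = 0.2
transition_matrix = [0.1, 0.05]  # [p(yes->no), p(no->yes)]
model_stats = [0.9, 0.3]         # [p(reality=1|output=1), p(reality=1|output=0)]
filter_triggered = 0             # previous state: "no event"
observation = 1                  # pretend the model fired on this chunk

prob_yes_given_last = 1.0 - transition_matrix[0] if filter_triggered else transition_matrix[1]
prob_no_given_last = transition_matrix[0] if filter_triggered else 1.0 - transition_matrix[1]
prob_yes_given_data = model_stats[0] if observation == 1 else model_stats[1]
prob_no_given_data = 1.0 - prob_yes_given_data

hypothesis_yes = prob_raw_yes * prob_yes_given_last * prob_yes_given_data
hypothesis_no = (1.0 - prob_raw_yes) * prob_no_given_last * prob_no_given_data
# Sample the next state in proportion to the two unnormalized hypothesis weights.
p_yes = hypothesis_yes / (hypothesis_yes + hypothesis_no)
filter_triggered = 1 if random.random() < p_yes else 0
print(p_yes, filter_triggered)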
test
_group_filter_values
Takes a list of 1s and 0s and returns a list of two-element lists of the form ['y'|'n', timestamp], one entry per contiguous run of identical values, where the timestamp is the last timestamp seen in that run.
algorithms/eventdetection.py
def _group_filter_values(seg, filter_indices, ms_per_input): """ Takes a list of 1s and 0s and returns a list of tuples of the form: ['y/n', timestamp]. """ ret = [] for filter_value, (_segment, timestamp) in zip(filter_indices, seg.generate_frames_as_segments(ms_per_input)): if filter_value == 1: if len(ret) > 0 and ret[-1][0] == 'n': ret.append(['y', timestamp]) # The last one was different, so we create a new one elif len(ret) > 0 and ret[-1][0] == 'y': ret[-1][1] = timestamp # The last one was the same as this one, so just update the timestamp else: ret.append(['y', timestamp]) # This is the first one else: if len(ret) > 0 and ret[-1][0] == 'n': ret[-1][1] = timestamp elif len(ret) > 0 and ret[-1][0] == 'y': ret.append(['n', timestamp]) else: ret.append(['n', timestamp]) return ret
def _group_filter_values(seg, filter_indices, ms_per_input): """ Takes a list of 1s and 0s and returns a list of tuples of the form: ['y/n', timestamp]. """ ret = [] for filter_value, (_segment, timestamp) in zip(filter_indices, seg.generate_frames_as_segments(ms_per_input)): if filter_value == 1: if len(ret) > 0 and ret[-1][0] == 'n': ret.append(['y', timestamp]) # The last one was different, so we create a new one elif len(ret) > 0 and ret[-1][0] == 'y': ret[-1][1] = timestamp # The last one was the same as this one, so just update the timestamp else: ret.append(['y', timestamp]) # This is the first one else: if len(ret) > 0 and ret[-1][0] == 'n': ret[-1][1] = timestamp elif len(ret) > 0 and ret[-1][0] == 'y': ret.append(['n', timestamp]) else: ret.append(['n', timestamp]) return ret
[ "Takes", "a", "list", "of", "1s", "and", "0s", "and", "returns", "a", "list", "of", "tuples", "of", "the", "form", ":", "[", "y", "/", "n", "timestamp", "]", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/eventdetection.py#L46-L67
[ "def", "_group_filter_values", "(", "seg", ",", "filter_indices", ",", "ms_per_input", ")", ":", "ret", "=", "[", "]", "for", "filter_value", ",", "(", "_segment", ",", "timestamp", ")", "in", "zip", "(", "filter_indices", ",", "seg", ".", "generate_frames_as_segments", "(", "ms_per_input", ")", ")", ":", "if", "filter_value", "==", "1", ":", "if", "len", "(", "ret", ")", ">", "0", "and", "ret", "[", "-", "1", "]", "[", "0", "]", "==", "'n'", ":", "ret", ".", "append", "(", "[", "'y'", ",", "timestamp", "]", ")", "# The last one was different, so we create a new one", "elif", "len", "(", "ret", ")", ">", "0", "and", "ret", "[", "-", "1", "]", "[", "0", "]", "==", "'y'", ":", "ret", "[", "-", "1", "]", "[", "1", "]", "=", "timestamp", "# The last one was the same as this one, so just update the timestamp", "else", ":", "ret", ".", "append", "(", "[", "'y'", ",", "timestamp", "]", ")", "# This is the first one", "else", ":", "if", "len", "(", "ret", ")", ">", "0", "and", "ret", "[", "-", "1", "]", "[", "0", "]", "==", "'n'", ":", "ret", "[", "-", "1", "]", "[", "1", "]", "=", "timestamp", "elif", "len", "(", "ret", ")", ">", "0", "and", "ret", "[", "-", "1", "]", "[", "0", "]", "==", "'y'", ":", "ret", ".", "append", "(", "[", "'n'", ",", "timestamp", "]", ")", "else", ":", "ret", ".", "append", "(", "[", "'n'", ",", "timestamp", "]", ")", "return", "ret" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
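The grouping in `_group_filter_values` is essentially run-length encoding where each run keeps its last timestamp. A seg-free sketch of the same logic, driven by plain (value, timestamp) pairs; the helper name `group_values` is invented for this example.

def group_values(pairs):
    """Collapse (0/1, timestamp) pairs into ['y'/'n', last_timestamp] runs."""
    ret = []
    for filter_value, timestamp in pairs:
        label = 'y' if filter_value == 1 else 'n'
        if ret and ret[-1][0] == label:
            ret[-1][1] = timestamp          # same run: just advance its end timestamp
        else:
            ret.append([label, timestamp])  # state changed (or first item): start a new run
    return ret

print(group_values([(1, 0.5), (1, 1.0), (0, 1.5), (0, 2.0), (1, 2.5)]))
# [['y', 1.0], ['n', 2.0], ['y', 2.5]]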
test
_homogeneity_filter
Takes `ls` (a list of 1s and 0s) and smoothes it so that adjacent values are more likely to be the same. :param ls: A list of 1s and 0s to smooth. :param window_size: How large the smoothing kernel is. :returns: A list of 1s and 0s, but smoother.
algorithms/eventdetection.py
def _homogeneity_filter(ls, window_size): """ Takes `ls` (a list of 1s and 0s) and smoothes it so that adjacent values are more likely to be the same. :param ls: A list of 1s and 0s to smooth. :param window_size: How large the smoothing kernel is. :returns: A list of 1s and 0s, but smoother. """ # TODO: This is a fine way to do this, but it seems like it might be faster and better to do a Gaussian convolution followed by rounding k = window_size i = k while i <= len(ls) - k: # Get a window of k items window = [ls[i + j] for j in range(k)] # Change the items in the window to be more like the mode of that window mode = 1 if sum(window) >= k / 2 else 0 for j in range(k): ls[i+j] = mode i += k return ls
def _homogeneity_filter(ls, window_size): """ Takes `ls` (a list of 1s and 0s) and smoothes it so that adjacent values are more likely to be the same. :param ls: A list of 1s and 0s to smooth. :param window_size: How large the smoothing kernel is. :returns: A list of 1s and 0s, but smoother. """ # TODO: This is a fine way to do this, but it seems like it might be faster and better to do a Gaussian convolution followed by rounding k = window_size i = k while i <= len(ls) - k: # Get a window of k items window = [ls[i + j] for j in range(k)] # Change the items in the window to be more like the mode of that window mode = 1 if sum(window) >= k / 2 else 0 for j in range(k): ls[i+j] = mode i += k return ls
[ "Takes", "ls", "(", "a", "list", "of", "1s", "and", "0s", ")", "and", "smoothes", "it", "so", "that", "adjacent", "values", "are", "more", "likely", "to", "be", "the", "same", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/eventdetection.py#L69-L89
[ "def", "_homogeneity_filter", "(", "ls", ",", "window_size", ")", ":", "# TODO: This is fine way to do this, but it seems like it might be faster and better to do a Gaussian convolution followed by rounding", "k", "=", "window_size", "i", "=", "k", "while", "i", "<=", "len", "(", "ls", ")", "-", "k", ":", "# Get a window of k items", "window", "=", "[", "ls", "[", "i", "+", "j", "]", "for", "j", "in", "range", "(", "k", ")", "]", "# Change the items in the window to be more like the mode of that window", "mode", "=", "1", "if", "sum", "(", "window", ")", ">=", "k", "/", "2", "else", "0", "for", "j", "in", "range", "(", "k", ")", ":", "ls", "[", "i", "+", "j", "]", "=", "mode", "i", "+=", "k", "return", "ls" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
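A quick usage sketch for `_homogeneity_filter`, assuming the AudioSegment repository root is on `sys.path` so that `algorithms.eventdetection` is importable. Note that the first `window_size` entries are left untouched because the loop starts at index `k`.

from algorithms.eventdetection import _homogeneity_filter  # assumes repo root on sys.path

noisy = [1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1]
smoothed = _homogeneity_filter(list(noisy), window_size=3)
print(smoothed)
# [1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]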
test
bandpass_filter
Does a bandpass filter over the given data. :param data: The data (numpy array) to be filtered. :param low: The low cutoff in Hz. :param high: The high cutoff in Hz. :param fs: The sample rate (in Hz) of the data. :param order: The order of the filter. The higher the order, the tighter the roll-off. :returns: Filtered data (numpy array).
algorithms/filters.py
def bandpass_filter(data, low, high, fs, order=5): """ Does a bandpass filter over the given data. :param data: The data (numpy array) to be filtered. :param low: The low cutoff in Hz. :param high: The high cutoff in Hz. :param fs: The sample rate (in Hz) of the data. :param order: The order of the filter. The higher the order, the tighter the roll-off. :returns: Filtered data (numpy array). """ nyq = 0.5 * fs low = low / nyq high = high / nyq b, a = signal.butter(order, [low, high], btype='band') y = signal.lfilter(b, a, data) return y
def bandpass_filter(data, low, high, fs, order=5): """ Does a bandpass filter over the given data. :param data: The data (numpy array) to be filtered. :param low: The low cutoff in Hz. :param high: The high cutoff in Hz. :param fs: The sample rate (in Hz) of the data. :param order: The order of the filter. The higher the order, the tighter the roll-off. :returns: Filtered data (numpy array). """ nyq = 0.5 * fs low = low / nyq high = high / nyq b, a = signal.butter(order, [low, high], btype='band') y = signal.lfilter(b, a, data) return y
[ "Does", "a", "bandpass", "filter", "over", "the", "given", "data", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/filters.py#L7-L23
[ "def", "bandpass_filter", "(", "data", ",", "low", ",", "high", ",", "fs", ",", "order", "=", "5", ")", ":", "nyq", "=", "0.5", "*", "fs", "low", "=", "low", "/", "nyq", "high", "=", "high", "/", "nyq", "b", ",", "a", "=", "signal", ".", "butter", "(", "order", ",", "[", "low", ",", "high", "]", ",", "btype", "=", "'band'", ")", "y", "=", "signal", ".", "lfilter", "(", "b", ",", "a", ",", "data", ")", "return", "y" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
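A usage sketch for `bandpass_filter`, again assuming the repository root is on `sys.path`; the test signal and cutoff frequencies below are made up.

import numpy as np
from algorithms.filters import bandpass_filter  # assumes repo root on sys.path

fs = 8000
t = np.arange(0, 1.0, 1.0 / fs)
# A 50 Hz rumble plus a 2 kHz tone; keep only the band around the tone.
data = np.sin(2 * np.pi * 50 * t) + 0.5 * np.sin(2 * np.pi * 2000 * t)
filtered = bandpass_filter(data, low=1500, high=2500, fs=fs, order=5)
print(filtered.shape)  # (8000,)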
test
lowpass_filter
Does a lowpass filter over the given data. :param data: The data (numpy array) to be filtered. :param cutoff: The high cutoff in Hz. :param fs: The sample rate in Hz of the data. :param order: The order of the filter. The higher the order, the tighter the roll-off. :returns: Filtered data (numpy array).
algorithms/filters.py
def lowpass_filter(data, cutoff, fs, order=5): """ Does a lowpass filter over the given data. :param data: The data (numpy array) to be filtered. :param cutoff: The high cutoff in Hz. :param fs: The sample rate in Hz of the data. :param order: The order of the filter. The higher the order, the tighter the roll-off. :returns: Filtered data (numpy array). """ nyq = 0.5 * fs normal_cutoff = cutoff / nyq b, a = signal.butter(order, normal_cutoff, btype='low', analog=False) y = signal.lfilter(b, a, data) return y
def lowpass_filter(data, cutoff, fs, order=5): """ Does a lowpass filter over the given data. :param data: The data (numpy array) to be filtered. :param cutoff: The high cutoff in Hz. :param fs: The sample rate in Hz of the data. :param order: The order of the filter. The higher the order, the tighter the roll-off. :returns: Filtered data (numpy array). """ nyq = 0.5 * fs normal_cutoff = cutoff / nyq b, a = signal.butter(order, normal_cutoff, btype='low', analog=False) y = signal.lfilter(b, a, data) return y
[ "Does", "a", "lowpass", "filter", "over", "the", "given", "data", "." ]
MaxStrange/AudioSegment
python
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/filters.py#L25-L39
[ "def", "lowpass_filter", "(", "data", ",", "cutoff", ",", "fs", ",", "order", "=", "5", ")", ":", "nyq", "=", "0.5", "*", "fs", "normal_cutoff", "=", "cutoff", "/", "nyq", "b", ",", "a", "=", "signal", ".", "butter", "(", "order", ",", "normal_cutoff", ",", "btype", "=", "'low'", ",", "analog", "=", "False", ")", "y", "=", "signal", ".", "lfilter", "(", "b", ",", "a", ",", "data", ")", "return", "y" ]
1daefb8de626ddff3ff7016697c3ad31d262ecd6
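The same pattern for `lowpass_filter`. Because both helpers use `signal.lfilter`, the output is causally filtered and carries some phase delay; `scipy.signal.filtfilt` would give a zero-phase result if that matters for a given application. The signal below is invented.

import numpy as np
from algorithms.filters import lowpass_filter  # assumes repo root on sys.path

fs = 8000
t = np.arange(0, 1.0, 1.0 / fs)
rng = np.random.default_rng(0)
noisy = np.sin(2 * np.pi * 100 * t) + 0.3 * rng.standard_normal(t.size)
smooth = lowpass_filter(noisy, cutoff=300, fs=fs, order=5)
print(smooth.shape)  # (8000,)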
test
list_to_tf_input
Separates the outcome feature from the data and creates the onehot vector for each row.
BlackBoxAuditing/model_factories/DecisionTree.py
def list_to_tf_input(data, response_index, num_outcomes): """ Separates the outcome feature from the data and creates the onehot vector for each row. """ matrix = np.matrix([row[:response_index] + row[response_index+1:] for row in data]) outcomes = np.asarray([row[response_index] for row in data], dtype=np.uint8) outcomes_onehot = (np.arange(num_outcomes) == outcomes[:, None]).astype(np.float32) return matrix, outcomes_onehot
def list_to_tf_input(data, response_index, num_outcomes): """ Separates the outcome feature from the data and creates the onehot vector for each row. """ matrix = np.matrix([row[:response_index] + row[response_index+1:] for row in data]) outcomes = np.asarray([row[response_index] for row in data], dtype=np.uint8) outcomes_onehot = (np.arange(num_outcomes) == outcomes[:, None]).astype(np.float32) return matrix, outcomes_onehot
[ "Separates", "the", "outcome", "feature", "from", "the", "data", "and", "creates", "the", "onehot", "vector", "for", "each", "row", "." ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/BlackBoxAuditing/model_factories/DecisionTree.py#L132-L140
[ "def", "list_to_tf_input", "(", "data", ",", "response_index", ",", "num_outcomes", ")", ":", "matrix", "=", "np", ".", "matrix", "(", "[", "row", "[", ":", "response_index", "]", "+", "row", "[", "response_index", "+", "1", ":", "]", "for", "row", "in", "data", "]", ")", "outcomes", "=", "np", ".", "asarray", "(", "[", "row", "[", "response_index", "]", "for", "row", "in", "data", "]", ",", "dtype", "=", "np", ".", "uint8", ")", "outcomes_onehot", "=", "(", "np", ".", "arange", "(", "num_outcomes", ")", "==", "outcomes", "[", ":", ",", "None", "]", ")", ".", "astype", "(", "np", ".", "float32", ")", "return", "matrix", ",", "outcomes_onehot" ]
b06c4faed5591cd7088475b2a203127bc5820483
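The separation performed by `list_to_tf_input` can be shown standalone with only NumPy, on a made-up three-row dataset whose last column is the outcome, to make the returned shapes concrete.

import numpy as np

# Hypothetical three-row dataset; the last column (index 2) is the outcome, taking values 0..2.
data = [[0.5, 1.2, 0], [0.1, 3.4, 2], [0.9, 0.7, 1]]
response_index, num_outcomes = 2, 3

matrix = np.matrix([row[:response_index] + row[response_index + 1:] for row in data])
outcomes = np.asarray([row[response_index] for row in data], dtype=np.uint8)
onehot = (np.arange(num_outcomes) == outcomes[:, None]).astype(np.float32)
print(matrix.shape)  # (3, 2)
print(onehot)        # row 0 -> [1,0,0], row 1 -> [0,0,1], row 2 -> [0,1,0]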
test
expand_and_standardize_dataset
Standardizes continuous features and expands categorical features.
BlackBoxAuditing/model_factories/DecisionTree.py
def expand_and_standardize_dataset(response_index, response_header, data_set, col_vals, headers, standardizers, feats_to_ignore, columns_to_expand, outcome_trans_dict): """ Standardizes continuous features and expands categorical features. """ # expand and standardize modified_set = [] for row_index, row in enumerate(data_set): new_row = [] for col_index, val in enumerate(row): header = headers[col_index] # Outcome feature -> index outcome if col_index == response_index: new_outcome = outcome_trans_dict[val] new_row.append(new_outcome) # Ignored feature -> pass elif header in feats_to_ignore: pass # Categorical feature -> create new binary column for each possible value of the column elif header in columns_to_expand: for poss_val in col_vals[header]: if val == poss_val: new_cat_val = 1.0 else: new_cat_val = -1.0 new_row.append(new_cat_val) # Continuous feature -> standardize value with respect to its column else: new_cont_val = float((val - standardizers[header]['mean']) / standardizers[header]['std_dev']) new_row.append(new_cont_val) modified_set.append(new_row) # update headers to reflect column expansion expanded_headers = [] for header in headers: if header in feats_to_ignore: pass elif (header in columns_to_expand) and (header is not response_header): for poss_val in col_vals[header]: new_header = '{}_{}'.format(header,poss_val) expanded_headers.append(new_header) else: expanded_headers.append(header) return modified_set, expanded_headers
def expand_and_standardize_dataset(response_index, response_header, data_set, col_vals, headers, standardizers, feats_to_ignore, columns_to_expand, outcome_trans_dict): """ Standardizes continuous features and expands categorical features. """ # expand and standardize modified_set = [] for row_index, row in enumerate(data_set): new_row = [] for col_index, val in enumerate(row): header = headers[col_index] # Outcome feature -> index outcome if col_index == response_index: new_outcome = outcome_trans_dict[val] new_row.append(new_outcome) # Ignored feature -> pass elif header in feats_to_ignore: pass # Categorical feature -> create new binary column for each possible value of the column elif header in columns_to_expand: for poss_val in col_vals[header]: if val == poss_val: new_cat_val = 1.0 else: new_cat_val = -1.0 new_row.append(new_cat_val) # Continuous feature -> standardize value with respect to its column else: new_cont_val = float((val - standardizers[header]['mean']) / standardizers[header]['std_dev']) new_row.append(new_cont_val) modified_set.append(new_row) # update headers to reflect column expansion expanded_headers = [] for header in headers: if header in feats_to_ignore: pass elif (header in columns_to_expand) and (header is not response_header): for poss_val in col_vals[header]: new_header = '{}_{}'.format(header,poss_val) expanded_headers.append(new_header) else: expanded_headers.append(header) return modified_set, expanded_headers
[ "Standardizes", "continuous", "features", "and", "expands", "categorical", "features", "." ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/BlackBoxAuditing/model_factories/DecisionTree.py#L142-L190
[ "def", "expand_and_standardize_dataset", "(", "response_index", ",", "response_header", ",", "data_set", ",", "col_vals", ",", "headers", ",", "standardizers", ",", "feats_to_ignore", ",", "columns_to_expand", ",", "outcome_trans_dict", ")", ":", "# expand and standardize", "modified_set", "=", "[", "]", "for", "row_index", ",", "row", "in", "enumerate", "(", "data_set", ")", ":", "new_row", "=", "[", "]", "for", "col_index", ",", "val", "in", "enumerate", "(", "row", ")", ":", "header", "=", "headers", "[", "col_index", "]", "# Outcome feature -> index outcome", "if", "col_index", "==", "response_index", ":", "new_outcome", "=", "outcome_trans_dict", "[", "val", "]", "new_row", ".", "append", "(", "new_outcome", ")", "# Ignored feature -> pass", "elif", "header", "in", "feats_to_ignore", ":", "pass", "# Categorical feature -> create new binary column for each possible value of the column", "elif", "header", "in", "columns_to_expand", ":", "for", "poss_val", "in", "col_vals", "[", "header", "]", ":", "if", "val", "==", "poss_val", ":", "new_cat_val", "=", "1.0", "else", ":", "new_cat_val", "=", "-", "1.0", "new_row", ".", "append", "(", "new_cat_val", ")", "# Continuous feature -> standardize value with respect to its column", "else", ":", "new_cont_val", "=", "float", "(", "(", "val", "-", "standardizers", "[", "header", "]", "[", "'mean'", "]", ")", "/", "standardizers", "[", "header", "]", "[", "'std_dev'", "]", ")", "new_row", ".", "append", "(", "new_cont_val", ")", "modified_set", ".", "append", "(", "new_row", ")", "# update headers to reflect column expansion", "expanded_headers", "=", "[", "]", "for", "header", "in", "headers", ":", "if", "header", "in", "feats_to_ignore", ":", "pass", "elif", "(", "header", "in", "columns_to_expand", ")", "and", "(", "header", "is", "not", "response_header", ")", ":", "for", "poss_val", "in", "col_vals", "[", "header", "]", ":", "new_header", "=", "'{}_{}'", ".", "format", "(", "header", ",", "poss_val", ")", "expanded_headers", ".", "append", "(", "new_header", ")", "else", ":", "expanded_headers", ".", "append", "(", "header", ")", "return", "modified_set", ",", "expanded_headers" ]
b06c4faed5591cd7088475b2a203127bc5820483
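A standalone sketch of the two per-cell transformations `expand_and_standardize_dataset` applies: a categorical value becomes one +1/-1 column per possible value, and a continuous value is z-scored against its column statistics. The column names, statistics, and helper names below are made up for illustration.

col_vals = {"color": ["blue", "green", "red"]}             # possible values per categorical column
standardizers = {"age": {"mean": 40.0, "std_dev": 12.0}}   # per-column mean / std-dev

def expand_categorical(header, val):
    # One new column per possible value: +1 where it matches, -1 everywhere else.
    return [1.0 if val == poss_val else -1.0 for poss_val in col_vals[header]]

def standardize_continuous(header, val):
    s = standardizers[header]
    return (val - s["mean"]) / s["std_dev"]

print(expand_categorical("color", "green"))  # [-1.0, 1.0, -1.0]
print(standardize_continuous("age", 52.0))   # 1.0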
test
equal_ignore_order
Used to check whether the two edge lists have the same edges when elements are neither hashable nor sortable.
BlackBoxAuditing/repairers/CategoricalFeature.py
def equal_ignore_order(a, b): """ Used to check whether the two edge lists have the same edges when elements are neither hashable nor sortable. """ unmatched = list(b) for element in a: try: unmatched.remove(element) except ValueError: return False return not unmatched
def equal_ignore_order(a, b): """ Used to check whether the two edge lists have the same edges when elements are neither hashable nor sortable. """ unmatched = list(b) for element in a: try: unmatched.remove(element) except ValueError: return False return not unmatched
[ "Used", "to", "check", "whether", "the", "two", "edge", "lists", "have", "the", "same", "edges", "when", "elements", "are", "neither", "hashable", "nor", "sortable", "." ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/BlackBoxAuditing/repairers/CategoricalFeature.py#L111-L122
[ "def", "equal_ignore_order", "(", "a", ",", "b", ")", ":", "unmatched", "=", "list", "(", "b", ")", "for", "element", "in", "a", ":", "try", ":", "unmatched", ".", "remove", "(", "element", ")", "except", "ValueError", ":", "return", "False", "return", "not", "unmatched" ]
b06c4faed5591cd7088475b2a203127bc5820483
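`equal_ignore_order` exists because its elements (graph edges) may be objects that can be neither hashed into a set nor sorted. A quick check with dict elements, assuming the definition above is in scope.

# With the definition of equal_ignore_order above in scope:
a = [{"u": 1, "v": 2}, {"u": 2, "v": 3}]   # dicts: unhashable, and unsortable in Python 3
b = [{"u": 2, "v": 3}, {"u": 1, "v": 2}]
print(equal_ignore_order(a, b))            # True  (same edges, different order)
print(equal_ignore_order(a, b + [{}]))     # False (b has an extra, unmatched element)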
test
Repairer.repair
Create unique value structures: When performing repairs, we choose median values. If repair is partial, then values will be modified to some intermediate value between the original and the median value. However, the partially repaired value will only be chosen out of values that exist in the data set. This prevents choosing values that might not make any sense in the data's context. To do this, for each column, we need to sort all unique values and create two data structures: a list of values, and a dict mapping values to their positions in that list. Example: There are unique_col_vals[col] = [1, 2, 5, 7, 10, 14, 20] in the column. A value 2 must be repaired to 14, but the user requests that data only be repaired by 50%. We do this by finding the value at the right index: index_lookup[col][2] = 1 index_lookup[col][14] = 5 this tells us that unique_col_vals[col][3] = 7 is 50% of the way from 2 to 14.
python2_source/BlackBoxAuditing/repairers/CategoricRepairer.py
def repair(self, data_to_repair): num_cols = len(data_to_repair[0]) col_ids = range(num_cols) # Get column type information col_types = ["Y"]*len(col_ids) for i, col in enumerate(col_ids): if i in self.features_to_ignore: col_types[i] = "I" elif i == self.feature_to_repair: col_types[i] = "X" col_type_dict = {col_id: col_type for col_id, col_type in zip(col_ids, col_types)} not_I_col_ids = filter(lambda x: col_type_dict[x] != "I", col_ids) if self.kdd: cols_to_repair = filter(lambda x: col_type_dict[x] == "Y", col_ids) else: cols_to_repair = filter(lambda x: col_type_dict[x] in "YX", col_ids) # To prevent potential perils with user-provided column names, map them to safe column names safe_stratify_cols = [self.feature_to_repair] # Extract column values for each attribute in data # Begin by initializing keys and values in dictionary data_dict = {col_id: [] for col_id in col_ids} # Populate each attribute with its column values for row in data_to_repair: for i in col_ids: data_dict[i].append(row[i]) repair_types = {} for col_id, values in data_dict.items(): if all(isinstance(value, float) for value in values): repair_types[col_id] = float elif all(isinstance(value, int) for value in values): repair_types[col_id] = int else: repair_types[col_id] = str """ Create unique value structures: When performing repairs, we choose median values. If repair is partial, then values will be modified to some intermediate value between the original and the median value. However, the partially repaired value will only be chosen out of values that exist in the data set. This prevents choosing values that might not make any sense in the data's context. To do this, for each column, we need to sort all unique values and create two data structures: a list of values, and a dict mapping values to their positions in that list. Example: There are unique_col_vals[col] = [1, 2, 5, 7, 10, 14, 20] in the column. A value 2 must be repaired to 14, but the user requests that data only be repaired by 50%. We do this by finding the value at the right index: index_lookup[col][2] = 1 index_lookup[col][14] = 5 this tells us that unique_col_vals[col][3] = 7 is 50% of the way from 2 to 14. """ unique_col_vals = {} index_lookup = {} for col_id in not_I_col_ids: col_values = data_dict[col_id] # extract unique values from column and sort col_values = sorted(list(set(col_values))) unique_col_vals[col_id] = col_values # look up a value, get its position index_lookup[col_id] = {col_values[i]: i for i in range(len(col_values))} """ Make a list of unique values per each stratified column. Then make a list of combinations of stratified groups. Example: race and gender cols are stratified: [(white, female), (white, male), (black, female), (black, male)] The combinations are tuples because they can be hashed and used as dictionary keys. From these, find the sizes of these groups. """ unique_stratify_values = [unique_col_vals[i] for i in safe_stratify_cols] all_stratified_groups = list(product(*unique_stratify_values)) # look up a stratified group, and get a list of indices corresponding to that group in the data stratified_group_indices = defaultdict(list) # Find the number of unique values for each strat-group, organized per column.
val_sets = {group: {col_id:set() for col_id in cols_to_repair} for group in all_stratified_groups} for i, row in enumerate(data_to_repair): group = tuple(row[col] for col in safe_stratify_cols) for col_id in cols_to_repair: val_sets[group][col_id].add(row[col_id]) # Also remember that this row pertains to this strat-group. stratified_group_indices[group].append(i) """ Separate data by stratified group to perform repair on each Y column's values given that their corresponding protected attribute is a particular stratified group. We need to keep track of each Y column's values corresponding to each particular stratified group, as well as each value's index, so that when we repair the data, we can modify the correct value in the original data. Example: Supposing there is a Y column, "Score1", in which the 3rd and 5th scores, 70 and 90 respectively, belonged to black women, the data structure would look like: {("Black", "Woman"): {Score1: [(70,2),(90,4)]}} """ stratified_group_data = {group: {} for group in all_stratified_groups} for group in all_stratified_groups: for col_id, col_dict in data_dict.items(): # Get the indices at which each value occurs. indices = {} for i in stratified_group_indices[group]: value = col_dict[i] if value not in indices: indices[value] = [] indices[value].append(i) stratified_col_values = [(occurs, val) for val, occurs in indices.items()] stratified_col_values.sort(key=lambda tup: tup[1]) stratified_group_data[group][col_id] = stratified_col_values mode_feature_to_repair = get_mode(data_dict[self.feature_to_repair]) # Repair Data and retrieve the results for col_id in cols_to_repair: # which bucket value we're repairing group_offsets = {group: 0 for group in all_stratified_groups} col = data_dict[col_id] num_quantiles = min(len(val_sets[group][col_id]) for group in all_stratified_groups) quantile_unit = 1.0/num_quantiles if repair_types[col_id] in {int, float}: for quantile in range(num_quantiles): median_at_quantiles = [] indices_per_group = {} for group in all_stratified_groups: group_data_at_col = stratified_group_data[group][col_id] num_vals = len(group_data_at_col) offset = int(round(group_offsets[group]*num_vals)) number_to_get = int(round((group_offsets[group] + quantile_unit)*num_vals) - offset) group_offsets[group] += quantile_unit if number_to_get > 0: # Get data at this quantile from this Y column such that stratified X = group offset_data = group_data_at_col[offset:offset+number_to_get] indices_per_group[group] = [i for val_indices, _ in offset_data for i in val_indices] values = sorted([float(val) for _, val in offset_data]) # Find this group's median value at this quantile median_at_quantiles.append( get_median(values, self.kdd) ) # Find the median value of all groups at this quantile (chosen from each group's medians) median = get_median(median_at_quantiles, self.kdd) median_val_pos = index_lookup[col_id][median] # Update values to repair the dataset. 
for group in all_stratified_groups: for index in indices_per_group[group]: original_value = col[index] current_val_pos = index_lookup[col_id][original_value] distance = median_val_pos - current_val_pos # distance between indices distance_to_repair = int(round(distance * self.repair_level)) index_of_repair_value = current_val_pos + distance_to_repair repaired_value = unique_col_vals[col_id][index_of_repair_value] # Update data to repaired valued data_dict[col_id][index] = repaired_value #Categorical Repair is done below elif repair_types[col_id] in {str}: feature = CategoricalFeature(col) categories = feature.bin_index_dict.keys() group_features = get_group_data(all_stratified_groups, stratified_group_data, col_id) categories_count = get_categories_count(categories, all_stratified_groups, group_features) categories_count_norm = get_categories_count_norm(categories, all_stratified_groups, categories_count, group_features) median = get_median_per_category(categories, categories_count_norm) # Partially fill-out the generator functions to simplify later calls. dist_generator = lambda group_index, category : gen_desired_dist(group_index, category, col_id, median, self.repair_level, categories_count_norm, self.feature_to_repair, mode_feature_to_repair) count_generator = lambda group_index, group, category : gen_desired_count(group_index, group, category, median, group_features, self.repair_level, categories_count) group_features, overflow = flow_on_group_features(all_stratified_groups, group_features, count_generator) group_features, assigned_overflow, distribution = assign_overflow(all_stratified_groups, categories, overflow, group_features, dist_generator) # Return our repaired feature in the form of our original dataset for group in all_stratified_groups: indices = stratified_group_indices[group] for i, index in enumerate(indices): repaired_value = group_features[group].data[i] data_dict[col_id][index] = repaired_value # Replace stratified groups with their mode value, to remove it's information repaired_data = [] for i, orig_row in enumerate(data_to_repair): new_row = [orig_row[j] if j not in cols_to_repair else data_dict[j][i] for j in col_ids] repaired_data.append(new_row) return repaired_data
def repair(self, data_to_repair): num_cols = len(data_to_repair[0]) col_ids = range(num_cols) # Get column type information col_types = ["Y"]*len(col_ids) for i, col in enumerate(col_ids): if i in self.features_to_ignore: col_types[i] = "I" elif i == self.feature_to_repair: col_types[i] = "X" col_type_dict = {col_id: col_type for col_id, col_type in zip(col_ids, col_types)} not_I_col_ids = filter(lambda x: col_type_dict[x] != "I", col_ids) if self.kdd: cols_to_repair = filter(lambda x: col_type_dict[x] == "Y", col_ids) else: cols_to_repair = filter(lambda x: col_type_dict[x] in "YX", col_ids) # To prevent potential perils with user-provided column names, map them to safe column names safe_stratify_cols = [self.feature_to_repair] # Extract column values for each attribute in data # Begin by initializing keys and values in dictionary data_dict = {col_id: [] for col_id in col_ids} # Populate each attribute with its column values for row in data_to_repair: for i in col_ids: data_dict[i].append(row[i]) repair_types = {} for col_id, values in data_dict.items(): if all(isinstance(value, float) for value in values): repair_types[col_id] = float elif all(isinstance(value, int) for value in values): repair_types[col_id] = int else: repair_types[col_id] = str """ Create unique value structures: When performing repairs, we choose median values. If repair is partial, then values will be modified to some intermediate value between the original and the median value. However, the partially repaired value will only be chosen out of values that exist in the data set. This prevents choosing values that might not make any sense in the data's context. To do this, for each column, we need to sort all unique values and create two data structures: a list of values, and a dict mapping values to their positions in that list. Example: There are unique_col_vals[col] = [1, 2, 5, 7, 10, 14, 20] in the column. A value 2 must be repaired to 14, but the user requests that data only be repaired by 50%. We do this by finding the value at the right index: index_lookup[col][2] = 1 index_lookup[col][14] = 5 this tells us that unique_col_vals[col][3] = 7 is 50% of the way from 2 to 14. """ unique_col_vals = {} index_lookup = {} for col_id in not_I_col_ids: col_values = data_dict[col_id] # extract unique values from column and sort col_values = sorted(list(set(col_values))) unique_col_vals[col_id] = col_values # look up a value, get its position index_lookup[col_id] = {col_values[i]: i for i in range(len(col_values))} """ Make a list of unique values per each stratified column. Then make a list of combinations of stratified groups. Example: race and gender cols are stratified: [(white, female), (white, male), (black, female), (black, male)] The combinations are tuples because they can be hashed and used as dictionary keys. From these, find the sizes of these groups. """ unique_stratify_values = [unique_col_vals[i] for i in safe_stratify_cols] all_stratified_groups = list(product(*unique_stratify_values)) # look up a stratified group, and get a list of indices corresponding to that group in the data stratified_group_indices = defaultdict(list) # Find the number of unique values for each strat-group, organized per column.
val_sets = {group: {col_id:set() for col_id in cols_to_repair} for group in all_stratified_groups} for i, row in enumerate(data_to_repair): group = tuple(row[col] for col in safe_stratify_cols) for col_id in cols_to_repair: val_sets[group][col_id].add(row[col_id]) # Also remember that this row pertains to this strat-group. stratified_group_indices[group].append(i) """ Separate data by stratified group to perform repair on each Y column's values given that their corresponding protected attribute is a particular stratified group. We need to keep track of each Y column's values corresponding to each particular stratified group, as well as each value's index, so that when we repair the data, we can modify the correct value in the original data. Example: Supposing there is a Y column, "Score1", in which the 3rd and 5th scores, 70 and 90 respectively, belonged to black women, the data structure would look like: {("Black", "Woman"): {Score1: [(70,2),(90,4)]}} """ stratified_group_data = {group: {} for group in all_stratified_groups} for group in all_stratified_groups: for col_id, col_dict in data_dict.items(): # Get the indices at which each value occurs. indices = {} for i in stratified_group_indices[group]: value = col_dict[i] if value not in indices: indices[value] = [] indices[value].append(i) stratified_col_values = [(occurs, val) for val, occurs in indices.items()] stratified_col_values.sort(key=lambda tup: tup[1]) stratified_group_data[group][col_id] = stratified_col_values mode_feature_to_repair = get_mode(data_dict[self.feature_to_repair]) # Repair Data and retrieve the results for col_id in cols_to_repair: # which bucket value we're repairing group_offsets = {group: 0 for group in all_stratified_groups} col = data_dict[col_id] num_quantiles = min(len(val_sets[group][col_id]) for group in all_stratified_groups) quantile_unit = 1.0/num_quantiles if repair_types[col_id] in {int, float}: for quantile in range(num_quantiles): median_at_quantiles = [] indices_per_group = {} for group in all_stratified_groups: group_data_at_col = stratified_group_data[group][col_id] num_vals = len(group_data_at_col) offset = int(round(group_offsets[group]*num_vals)) number_to_get = int(round((group_offsets[group] + quantile_unit)*num_vals) - offset) group_offsets[group] += quantile_unit if number_to_get > 0: # Get data at this quantile from this Y column such that stratified X = group offset_data = group_data_at_col[offset:offset+number_to_get] indices_per_group[group] = [i for val_indices, _ in offset_data for i in val_indices] values = sorted([float(val) for _, val in offset_data]) # Find this group's median value at this quantile median_at_quantiles.append( get_median(values, self.kdd) ) # Find the median value of all groups at this quantile (chosen from each group's medians) median = get_median(median_at_quantiles, self.kdd) median_val_pos = index_lookup[col_id][median] # Update values to repair the dataset. 
for group in all_stratified_groups: for index in indices_per_group[group]: original_value = col[index] current_val_pos = index_lookup[col_id][original_value] distance = median_val_pos - current_val_pos # distance between indices distance_to_repair = int(round(distance * self.repair_level)) index_of_repair_value = current_val_pos + distance_to_repair repaired_value = unique_col_vals[col_id][index_of_repair_value] # Update data to repaired valued data_dict[col_id][index] = repaired_value #Categorical Repair is done below elif repair_types[col_id] in {str}: feature = CategoricalFeature(col) categories = feature.bin_index_dict.keys() group_features = get_group_data(all_stratified_groups, stratified_group_data, col_id) categories_count = get_categories_count(categories, all_stratified_groups, group_features) categories_count_norm = get_categories_count_norm(categories, all_stratified_groups, categories_count, group_features) median = get_median_per_category(categories, categories_count_norm) # Partially fill-out the generator functions to simplify later calls. dist_generator = lambda group_index, category : gen_desired_dist(group_index, category, col_id, median, self.repair_level, categories_count_norm, self.feature_to_repair, mode_feature_to_repair) count_generator = lambda group_index, group, category : gen_desired_count(group_index, group, category, median, group_features, self.repair_level, categories_count) group_features, overflow = flow_on_group_features(all_stratified_groups, group_features, count_generator) group_features, assigned_overflow, distribution = assign_overflow(all_stratified_groups, categories, overflow, group_features, dist_generator) # Return our repaired feature in the form of our original dataset for group in all_stratified_groups: indices = stratified_group_indices[group] for i, index in enumerate(indices): repaired_value = group_features[group].data[i] data_dict[col_id][index] = repaired_value # Replace stratified groups with their mode value, to remove it's information repaired_data = [] for i, orig_row in enumerate(data_to_repair): new_row = [orig_row[j] if j not in cols_to_repair else data_dict[j][i] for j in col_ids] repaired_data.append(new_row) return repaired_data
[ "Create", "unique", "value", "structures", ":", "When", "performing", "repairs", "we", "choose", "median", "values", ".", "If", "repair", "is", "partial", "then", "values", "will", "be", "modified", "to", "some", "intermediate", "value", "between", "the", "original", "and", "the", "median", "value", ".", "However", "the", "partially", "repaired", "value", "will", "only", "be", "chosen", "out", "of", "values", "that", "exist", "in", "the", "data", "set", ".", "This", "prevents", "choosing", "values", "that", "might", "not", "make", "any", "sense", "in", "the", "data", "s", "context", ".", "To", "do", "this", "for", "each", "column", "we", "need", "to", "sort", "all", "unique", "values", "and", "create", "two", "data", "structures", ":", "a", "list", "of", "values", "and", "a", "dict", "mapping", "values", "to", "their", "positions", "in", "that", "list", ".", "Example", ":", "There", "are", "unique_col_vals", "[", "col", "]", "=", "[", "1", "2", "5", "7", "10", "14", "20", "]", "in", "the", "column", ".", "A", "value", "2", "must", "be", "repaired", "to", "14", "but", "the", "user", "requests", "that", "data", "only", "be", "repaired", "by", "50%", ".", "We", "do", "this", "by", "finding", "the", "value", "at", "the", "right", "index", ":", "index_lookup", "[", "col", "]", "[", "2", "]", "=", "1", "index_lookup", "[", "col", "]", "[", "14", "]", "=", "5", "this", "tells", "us", "that", "unique_col_vals", "[", "col", "]", "[", "3", "]", "=", "7", "is", "50%", "of", "the", "way", "from", "2", "to", "14", "." ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/python2_source/BlackBoxAuditing/repairers/CategoricRepairer.py#L15-L197
[ "def", "repair", "(", "self", ",", "data_to_repair", ")", ":", "num_cols", "=", "len", "(", "data_to_repair", "[", "0", "]", ")", "col_ids", "=", "range", "(", "num_cols", ")", "# Get column type information", "col_types", "=", "[", "\"Y\"", "]", "*", "len", "(", "col_ids", ")", "for", "i", ",", "col", "in", "enumerate", "(", "col_ids", ")", ":", "if", "i", "in", "self", ".", "features_to_ignore", ":", "col_types", "[", "i", "]", "=", "\"I\"", "elif", "i", "==", "self", ".", "feature_to_repair", ":", "col_types", "[", "i", "]", "=", "\"X\"", "col_type_dict", "=", "{", "col_id", ":", "col_type", "for", "col_id", ",", "col_type", "in", "zip", "(", "col_ids", ",", "col_types", ")", "}", "not_I_col_ids", "=", "filter", "(", "lambda", "x", ":", "col_type_dict", "[", "x", "]", "!=", "\"I\"", ",", "col_ids", ")", "if", "self", ".", "kdd", ":", "cols_to_repair", "=", "filter", "(", "lambda", "x", ":", "col_type_dict", "[", "x", "]", "==", "\"Y\"", ",", "col_ids", ")", "else", ":", "cols_to_repair", "=", "filter", "(", "lambda", "x", ":", "col_type_dict", "[", "x", "]", "in", "\"YX\"", ",", "col_ids", ")", "# To prevent potential perils with user-provided column names, map them to safe column names", "safe_stratify_cols", "=", "[", "self", ".", "feature_to_repair", "]", "# Extract column values for each attribute in data", "# Begin byled code will usually be created in the same directory as the .py file. initializing keys and values in dictionary", "data_dict", "=", "{", "col_id", ":", "[", "]", "for", "col_id", "in", "col_ids", "}", "# Populate each attribute with its column values", "for", "row", "in", "data_to_repair", ":", "for", "i", "in", "col_ids", ":", "data_dict", "[", "i", "]", ".", "append", "(", "row", "[", "i", "]", ")", "repair_types", "=", "{", "}", "for", "col_id", ",", "values", "in", "data_dict", ".", "items", "(", ")", ":", "if", "all", "(", "isinstance", "(", "value", ",", "float", ")", "for", "value", "in", "values", ")", ":", "repair_types", "[", "col_id", "]", "=", "float", "elif", "all", "(", "isinstance", "(", "value", ",", "int", ")", "for", "value", "in", "values", ")", ":", "repair_types", "[", "col_id", "]", "=", "int", "else", ":", "repair_types", "[", "col_id", "]", "=", "str", "unique_col_vals", "=", "{", "}", "index_lookup", "=", "{", "}", "for", "col_id", "in", "not_I_col_ids", ":", "col_values", "=", "data_dict", "[", "col_id", "]", "# extract unique values from column and sort", "col_values", "=", "sorted", "(", "list", "(", "set", "(", "col_values", ")", ")", ")", "unique_col_vals", "[", "col_id", "]", "=", "col_values", "# look up a value, get its position", "index_lookup", "[", "col_id", "]", "=", "{", "col_values", "[", "i", "]", ":", "i", "for", "i", "in", "range", "(", "len", "(", "col_values", ")", ")", "}", "\"\"\"\n Make a list of unique values per each stratified column. Then make a list of combinations of stratified groups. Example: race and gender cols are stratified: [(white, female), (white, male), (black, female), (black, male)] The combinations are tuples because they can be hashed and used as dictionary keys. 
From these, find the sizes of these groups.\n \"\"\"", "unique_stratify_values", "=", "[", "unique_col_vals", "[", "i", "]", "for", "i", "in", "safe_stratify_cols", "]", "all_stratified_groups", "=", "list", "(", "product", "(", "*", "unique_stratify_values", ")", ")", "# look up a stratified group, and get a list of indices corresponding to that group in the data", "stratified_group_indices", "=", "defaultdict", "(", "list", ")", "# Find the number of unique values for each strat-group, organized per column.", "val_sets", "=", "{", "group", ":", "{", "col_id", ":", "set", "(", ")", "for", "col_id", "in", "cols_to_repair", "}", "for", "group", "in", "all_stratified_groups", "}", "for", "i", ",", "row", "in", "enumerate", "(", "data_to_repair", ")", ":", "group", "=", "tuple", "(", "row", "[", "col", "]", "for", "col", "in", "safe_stratify_cols", ")", "for", "col_id", "in", "cols_to_repair", ":", "val_sets", "[", "group", "]", "[", "col_id", "]", ".", "add", "(", "row", "[", "col_id", "]", ")", "# Also remember that this row pertains to this strat-group.", "stratified_group_indices", "[", "group", "]", ".", "append", "(", "i", ")", "\"\"\"\n Separate data by stratified group to perform repair on each Y column's values given that their corresponding protected attribute is a particular stratified group. We need to keep track of each Y column's values corresponding to each particular stratified group, as well as each value's index, so that when we repair the data, we can modify the correct value in the original data. Example: Supposing there is a Y column, \"Score1\", in which the 3rd and 5th scores, 70 and 90 respectively, belonged to black women, the data structure would look like: {(\"Black\", \"Woman\"): {Score1: [(70,2),(90,4)]}}\n \"\"\"", "stratified_group_data", "=", "{", "group", ":", "{", "}", "for", "group", "in", "all_stratified_groups", "}", "for", "group", "in", "all_stratified_groups", ":", "for", "col_id", ",", "col_dict", "in", "data_dict", ".", "items", "(", ")", ":", "# Get the indices at which each value occurs.", "indices", "=", "{", "}", "for", "i", "in", "stratified_group_indices", "[", "group", "]", ":", "value", "=", "col_dict", "[", "i", "]", "if", "value", "not", "in", "indices", ":", "indices", "[", "value", "]", "=", "[", "]", "indices", "[", "value", "]", ".", "append", "(", "i", ")", "stratified_col_values", "=", "[", "(", "occurs", ",", "val", ")", "for", "val", ",", "occurs", "in", "indices", ".", "items", "(", ")", "]", "stratified_col_values", ".", "sort", "(", "key", "=", "lambda", "tup", ":", "tup", "[", "1", "]", ")", "stratified_group_data", "[", "group", "]", "[", "col_id", "]", "=", "stratified_col_values", "mode_feature_to_repair", "=", "get_mode", "(", "data_dict", "[", "self", ".", "feature_to_repair", "]", ")", "# Repair Data and retrieve the results", "for", "col_id", "in", "cols_to_repair", ":", "# which bucket value we're repairing", "group_offsets", "=", "{", "group", ":", "0", "for", "group", "in", "all_stratified_groups", "}", "col", "=", "data_dict", "[", "col_id", "]", "num_quantiles", "=", "min", "(", "len", "(", "val_sets", "[", "group", "]", "[", "col_id", "]", ")", "for", "group", "in", "all_stratified_groups", ")", "quantile_unit", "=", "1.0", "/", "num_quantiles", "if", "repair_types", "[", "col_id", "]", "in", "{", "int", ",", "float", "}", ":", "for", "quantile", "in", "range", "(", "num_quantiles", ")", ":", "median_at_quantiles", "=", "[", "]", "indices_per_group", "=", "{", "}", "for", "group", "in", "all_stratified_groups", ":", 
"group_data_at_col", "=", "stratified_group_data", "[", "group", "]", "[", "col_id", "]", "num_vals", "=", "len", "(", "group_data_at_col", ")", "offset", "=", "int", "(", "round", "(", "group_offsets", "[", "group", "]", "*", "num_vals", ")", ")", "number_to_get", "=", "int", "(", "round", "(", "(", "group_offsets", "[", "group", "]", "+", "quantile_unit", ")", "*", "num_vals", ")", "-", "offset", ")", "group_offsets", "[", "group", "]", "+=", "quantile_unit", "if", "number_to_get", ">", "0", ":", "# Get data at this quantile from this Y column such that stratified X = group", "offset_data", "=", "group_data_at_col", "[", "offset", ":", "offset", "+", "number_to_get", "]", "indices_per_group", "[", "group", "]", "=", "[", "i", "for", "val_indices", ",", "_", "in", "offset_data", "for", "i", "in", "val_indices", "]", "values", "=", "sorted", "(", "[", "float", "(", "val", ")", "for", "_", ",", "val", "in", "offset_data", "]", ")", "# Find this group's median value at this quantile", "median_at_quantiles", ".", "append", "(", "get_median", "(", "values", ",", "self", ".", "kdd", ")", ")", "# Find the median value of all groups at this quantile (chosen from each group's medians)", "median", "=", "get_median", "(", "median_at_quantiles", ",", "self", ".", "kdd", ")", "median_val_pos", "=", "index_lookup", "[", "col_id", "]", "[", "median", "]", "# Update values to repair the dataset.", "for", "group", "in", "all_stratified_groups", ":", "for", "index", "in", "indices_per_group", "[", "group", "]", ":", "original_value", "=", "col", "[", "index", "]", "current_val_pos", "=", "index_lookup", "[", "col_id", "]", "[", "original_value", "]", "distance", "=", "median_val_pos", "-", "current_val_pos", "# distance between indices", "distance_to_repair", "=", "int", "(", "round", "(", "distance", "*", "self", ".", "repair_level", ")", ")", "index_of_repair_value", "=", "current_val_pos", "+", "distance_to_repair", "repaired_value", "=", "unique_col_vals", "[", "col_id", "]", "[", "index_of_repair_value", "]", "# Update data to repaired valued", "data_dict", "[", "col_id", "]", "[", "index", "]", "=", "repaired_value", "#Categorical Repair is done below", "elif", "repair_types", "[", "col_id", "]", "in", "{", "str", "}", ":", "feature", "=", "CategoricalFeature", "(", "col", ")", "categories", "=", "feature", ".", "bin_index_dict", ".", "keys", "(", ")", "group_features", "=", "get_group_data", "(", "all_stratified_groups", ",", "stratified_group_data", ",", "col_id", ")", "categories_count", "=", "get_categories_count", "(", "categories", ",", "all_stratified_groups", ",", "group_features", ")", "categories_count_norm", "=", "get_categories_count_norm", "(", "categories", ",", "all_stratified_groups", ",", "categories_count", ",", "group_features", ")", "median", "=", "get_median_per_category", "(", "categories", ",", "categories_count_norm", ")", "# Partially fill-out the generator functions to simplify later calls.", "dist_generator", "=", "lambda", "group_index", ",", "category", ":", "gen_desired_dist", "(", "group_index", ",", "category", ",", "col_id", ",", "median", ",", "self", ".", "repair_level", ",", "categories_count_norm", ",", "self", ".", "feature_to_repair", ",", "mode_feature_to_repair", ")", "count_generator", "=", "lambda", "group_index", ",", "group", ",", "category", ":", "gen_desired_count", "(", "group_index", ",", "group", ",", "category", ",", "median", ",", "group_features", ",", "self", ".", "repair_level", ",", "categories_count", ")", "group_features", ",", "overflow", 
"=", "flow_on_group_features", "(", "all_stratified_groups", ",", "group_features", ",", "count_generator", ")", "group_features", ",", "assigned_overflow", ",", "distribution", "=", "assign_overflow", "(", "all_stratified_groups", ",", "categories", ",", "overflow", ",", "group_features", ",", "dist_generator", ")", "# Return our repaired feature in the form of our original dataset", "for", "group", "in", "all_stratified_groups", ":", "indices", "=", "stratified_group_indices", "[", "group", "]", "for", "i", ",", "index", "in", "enumerate", "(", "indices", ")", ":", "repaired_value", "=", "group_features", "[", "group", "]", ".", "data", "[", "i", "]", "data_dict", "[", "col_id", "]", "[", "index", "]", "=", "repaired_value", "# Replace stratified groups with their mode value, to remove it's information", "repaired_data", "=", "[", "]", "for", "i", ",", "orig_row", "in", "enumerate", "(", "data_to_repair", ")", ":", "new_row", "=", "[", "orig_row", "[", "j", "]", "if", "j", "not", "in", "cols_to_repair", "else", "data_dict", "[", "j", "]", "[", "i", "]", "for", "j", "in", "col_ids", "]", "repaired_data", ".", "append", "(", "new_row", ")", "return", "repaired_data" ]
b06c4faed5591cd7088475b2a203127bc5820483
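The tokenized repair routine above aligns each stratified group's numeric values to a shared median, quantile by quantile, scaled by repair_level. The sketch below is a simplified, self-contained illustration of that idea only: it interpolates values directly instead of snapping to existing values via index_lookup, and every name in it is illustrative rather than part of the library's API.

def toy_quantile_repair(groups, repair_level):
    # groups: dict mapping a stratified-group label to an equal-length, sorted list of numbers
    num_quantiles = min(len(vals) for vals in groups.values())
    repaired = {g: list(vals) for g, vals in groups.items()}
    for q in range(num_quantiles):
        # one representative value per group at this quantile
        at_quantile = {g: vals[q * len(vals) // num_quantiles] for g, vals in groups.items()}
        ordered = sorted(at_quantile.values())
        target = ordered[(len(ordered) - 1) // 2]  # left-of-center median, as in get_median
        for g, vals in groups.items():
            i = q * len(vals) // num_quantiles
            # move each group's value toward the shared target, scaled by repair_level
            repaired[g][i] = vals[i] + repair_level * (target - vals[i])
    return repaired

print(toy_quantile_repair({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}, repair_level=1.0))
# full repair (repair_level=1.0) collapses both groups onto the same values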
test
group_audit_ranks
Given a list of audit files, rank them using the `measurer` and return groups of features whose scores never deviate from one another by more than `similarity_bound` at any repair level.
BlackBoxAuditing/audit_reading.py
def group_audit_ranks(filenames, measurer, similarity_bound=0.05): """ Given a list of audit files, rank them using the `measurer` and return the features that never deviate more than `similarity_bound` across repairs. """ def _partition_groups(feature_scores): groups = [] for feature, score in feature_scores: added_to_group = False # Check to see if the feature belongs in a group with any other features. for i, group in enumerate(groups): mean_score, group_feature_scores = group if abs(mean_score - score) < similarity_bound: groups[i][1].append( (feature, score) ) # Recalculate the representative mean. groups[i][0] = sum([s for _, s in group_feature_scores])/len(group_feature_scores) added_to_group = True break # If this feature did not much with the current groups, create another group. if not added_to_group: groups.append( [score, [(feature,score)]] ) # Return just the features. return [[feature for feature, score in group] for _, group in groups] score_dict = {} features = [] for filename in filenames: with open(filename) as audit_file: header_line = audit_file.readline()[:-1] # Remove the trailing endline. feature = header_line[header_line.index(":")+1:] features.append(feature) confusion_matrices = load_audit_confusion_matrices(filename) for rep_level, matrix in confusion_matrices: score = measurer(matrix) if rep_level not in score_dict: score_dict[rep_level] = {} score_dict[rep_level][feature] = score # Sort by repair level increasing repair level. score_keys = sorted(score_dict.keys()) groups = [features] while score_keys: key = score_keys.pop() new_groups = [] for group in groups: group_features = [(f, score_dict[key][f]) for f in group] sub_groups = _partition_groups(group_features) new_groups.extend(sub_groups) groups = new_groups return groups
def group_audit_ranks(filenames, measurer, similarity_bound=0.05): """ Given a list of audit files, rank them using the `measurer` and return the features that never deviate more than `similarity_bound` across repairs. """ def _partition_groups(feature_scores): groups = [] for feature, score in feature_scores: added_to_group = False # Check to see if the feature belongs in a group with any other features. for i, group in enumerate(groups): mean_score, group_feature_scores = group if abs(mean_score - score) < similarity_bound: groups[i][1].append( (feature, score) ) # Recalculate the representative mean. groups[i][0] = sum([s for _, s in group_feature_scores])/len(group_feature_scores) added_to_group = True break # If this feature did not much with the current groups, create another group. if not added_to_group: groups.append( [score, [(feature,score)]] ) # Return just the features. return [[feature for feature, score in group] for _, group in groups] score_dict = {} features = [] for filename in filenames: with open(filename) as audit_file: header_line = audit_file.readline()[:-1] # Remove the trailing endline. feature = header_line[header_line.index(":")+1:] features.append(feature) confusion_matrices = load_audit_confusion_matrices(filename) for rep_level, matrix in confusion_matrices: score = measurer(matrix) if rep_level not in score_dict: score_dict[rep_level] = {} score_dict[rep_level][feature] = score # Sort by repair level increasing repair level. score_keys = sorted(score_dict.keys()) groups = [features] while score_keys: key = score_keys.pop() new_groups = [] for group in groups: group_features = [(f, score_dict[key][f]) for f in group] sub_groups = _partition_groups(group_features) new_groups.extend(sub_groups) groups = new_groups return groups
[ "Given", "a", "list", "of", "audit", "files", "rank", "them", "using", "the", "measurer", "and", "return", "the", "features", "that", "never", "deviate", "more", "than", "similarity_bound", "across", "repairs", "." ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/BlackBoxAuditing/audit_reading.py#L124-L183
[ "def", "group_audit_ranks", "(", "filenames", ",", "measurer", ",", "similarity_bound", "=", "0.05", ")", ":", "def", "_partition_groups", "(", "feature_scores", ")", ":", "groups", "=", "[", "]", "for", "feature", ",", "score", "in", "feature_scores", ":", "added_to_group", "=", "False", "# Check to see if the feature belongs in a group with any other features.", "for", "i", ",", "group", "in", "enumerate", "(", "groups", ")", ":", "mean_score", ",", "group_feature_scores", "=", "group", "if", "abs", "(", "mean_score", "-", "score", ")", "<", "similarity_bound", ":", "groups", "[", "i", "]", "[", "1", "]", ".", "append", "(", "(", "feature", ",", "score", ")", ")", "# Recalculate the representative mean.", "groups", "[", "i", "]", "[", "0", "]", "=", "sum", "(", "[", "s", "for", "_", ",", "s", "in", "group_feature_scores", "]", ")", "/", "len", "(", "group_feature_scores", ")", "added_to_group", "=", "True", "break", "# If this feature did not much with the current groups, create another group.", "if", "not", "added_to_group", ":", "groups", ".", "append", "(", "[", "score", ",", "[", "(", "feature", ",", "score", ")", "]", "]", ")", "# Return just the features.", "return", "[", "[", "feature", "for", "feature", ",", "score", "in", "group", "]", "for", "_", ",", "group", "in", "groups", "]", "score_dict", "=", "{", "}", "features", "=", "[", "]", "for", "filename", "in", "filenames", ":", "with", "open", "(", "filename", ")", "as", "audit_file", ":", "header_line", "=", "audit_file", ".", "readline", "(", ")", "[", ":", "-", "1", "]", "# Remove the trailing endline.", "feature", "=", "header_line", "[", "header_line", ".", "index", "(", "\":\"", ")", "+", "1", ":", "]", "features", ".", "append", "(", "feature", ")", "confusion_matrices", "=", "load_audit_confusion_matrices", "(", "filename", ")", "for", "rep_level", ",", "matrix", "in", "confusion_matrices", ":", "score", "=", "measurer", "(", "matrix", ")", "if", "rep_level", "not", "in", "score_dict", ":", "score_dict", "[", "rep_level", "]", "=", "{", "}", "score_dict", "[", "rep_level", "]", "[", "feature", "]", "=", "score", "# Sort by repair level increasing repair level.", "score_keys", "=", "sorted", "(", "score_dict", ".", "keys", "(", ")", ")", "groups", "=", "[", "features", "]", "while", "score_keys", ":", "key", "=", "score_keys", ".", "pop", "(", ")", "new_groups", "=", "[", "]", "for", "group", "in", "groups", ":", "group_features", "=", "[", "(", "f", ",", "score_dict", "[", "key", "]", "[", "f", "]", ")", "for", "f", "in", "group", "]", "sub_groups", "=", "_partition_groups", "(", "group_features", ")", "new_groups", ".", "extend", "(", "sub_groups", ")", "groups", "=", "new_groups", "return", "groups" ]
b06c4faed5591cd7088475b2a203127bc5820483
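The grouping rule inside group_audit_ranks can be seen in isolation below. This is a minimal re-statement of its _partition_groups helper run on made-up feature scores, not a call into the package itself: features whose scores stay within similarity_bound of a group's running mean end up in the same group.

def partition_by_score(feature_scores, similarity_bound=0.05):
    groups = []  # each entry: [running_mean, [(feature, score), ...]]
    for feature, score in feature_scores:
        for group in groups:
            if abs(group[0] - score) < similarity_bound:
                group[1].append((feature, score))
                group[0] = sum(s for _, s in group[1]) / len(group[1])
                break
        else:
            groups.append([score, [(feature, score)]])
    return [[f for f, _ in members] for _, members in groups]

print(partition_by_score([("age", 0.81), ("zip", 0.80), ("race", 0.55)]))
# -> [['age', 'zip'], ['race']]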
test
accuracy
Given a confusion matrix, returns the accuracy. Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml
python2_source/BlackBoxAuditing/measurements.py
def accuracy(conf_matrix): """ Given a confusion matrix, returns the accuracy. Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml """ total, correct = 0.0, 0.0 for true_response, guess_dict in conf_matrix.items(): for guess, count in guess_dict.items(): if true_response == guess: correct += count total += count return correct/total
def accuracy(conf_matrix): """ Given a confusion matrix, returns the accuracy. Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml """ total, correct = 0.0, 0.0 for true_response, guess_dict in conf_matrix.items(): for guess, count in guess_dict.items(): if true_response == guess: correct += count total += count return correct/total
[ "Given", "a", "confusion", "matrix", "returns", "the", "accuracy", ".", "Accuracy", "Definition", ":", "http", ":", "//", "research", ".", "ics", ".", "aalto", ".", "fi", "/", "events", "/", "eyechallenge2005", "/", "evaluation", ".", "shtml" ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/python2_source/BlackBoxAuditing/measurements.py#L1-L12
[ "def", "accuracy", "(", "conf_matrix", ")", ":", "total", ",", "correct", "=", "0.0", ",", "0.0", "for", "true_response", ",", "guess_dict", "in", "conf_matrix", ".", "items", "(", ")", ":", "for", "guess", ",", "count", "in", "guess_dict", ".", "items", "(", ")", ":", "if", "true_response", "==", "guess", ":", "correct", "+=", "count", "total", "+=", "count", "return", "correct", "/", "total" ]
b06c4faed5591cd7088475b2a203127bc5820483
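A quick check of accuracy() against the example confusion matrix quoted in the load_audit_confusion_matrices docstring elsewhere in this file: 15 of 20 predictions land on the diagonal, giving 0.75. The snippet recomputes that by hand rather than importing the module.

conf_matrix = {"A": {"A": 10, "B": 5}, "B": {"B": 5}}
total = sum(count for guesses in conf_matrix.values() for count in guesses.values())  # 20
correct = sum(guesses.get(truth, 0) for truth, guesses in conf_matrix.items())        # 15
print(correct / total)  # 0.75, the same value accuracy(conf_matrix) returns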
test
BCR
Given a confusion matrix, returns Balanced Classification Rate. BCR is (1 - Balanced Error Rate). BER Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml
python2_source/BlackBoxAuditing/measurements.py
def BCR(conf_matrix): """ Given a confusion matrix, returns Balanced Classification Rate. BCR is (1 - Balanced Error Rate). BER Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml """ parts = [] for true_response, guess_dict in conf_matrix.items(): error = 0.0 total = 0.0 for guess, count in guess_dict.items(): if true_response != guess: error += count total += count parts.append(error/total) BER = sum(parts)/len(parts) return 1 - BER
def BCR(conf_matrix): """ Given a confusion matrix, returns Balanced Classification Rate. BCR is (1 - Balanced Error Rate). BER Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml """ parts = [] for true_response, guess_dict in conf_matrix.items(): error = 0.0 total = 0.0 for guess, count in guess_dict.items(): if true_response != guess: error += count total += count parts.append(error/total) BER = sum(parts)/len(parts) return 1 - BER
[ "Given", "a", "confusion", "matrix", "returns", "Balanced", "Classification", "Rate", ".", "BCR", "is", "(", "1", "-", "Balanced", "Error", "Rate", ")", ".", "BER", "Definition", ":", "http", ":", "//", "research", ".", "ics", ".", "aalto", ".", "fi", "/", "events", "/", "eyechallenge2005", "/", "evaluation", ".", "shtml" ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/python2_source/BlackBoxAuditing/measurements.py#L14-L30
[ "def", "BCR", "(", "conf_matrix", ")", ":", "parts", "=", "[", "]", "for", "true_response", ",", "guess_dict", "in", "conf_matrix", ".", "items", "(", ")", ":", "error", "=", "0.0", "total", "=", "0.0", "for", "guess", ",", "count", "in", "guess_dict", ".", "items", "(", ")", ":", "if", "true_response", "!=", "guess", ":", "error", "+=", "count", "total", "+=", "count", "parts", ".", "append", "(", "error", "/", "total", ")", "BER", "=", "sum", "(", "parts", ")", "/", "len", "(", "parts", ")", "return", "1", "-", "BER" ]
b06c4faed5591cd7088475b2a203127bc5820483
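Working BCR by hand for the same example matrix: class "A" mis-classifies 5 of 15 (error 1/3), class "B" mis-classifies 0 of 5, so BER = (1/3 + 0) / 2 and BCR = 1 - BER, roughly 0.833.

conf_matrix = {"A": {"A": 10, "B": 5}, "B": {"B": 5}}
per_class_error = []
for truth, guesses in conf_matrix.items():
    wrong = sum(count for guess, count in guesses.items() if guess != truth)
    per_class_error.append(wrong / sum(guesses.values()))
print(1 - sum(per_class_error) / len(per_class_error))  # ~0.8333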
test
get_median
Given an unsorted list of numeric values, return the median value. Note that in the case of even-length lists of values, we take the value to the left of the center as the median (so the median is always one of the values in the list); when kdd is True, the value at index len//2 of the sorted list is returned instead. Eg: get_median([1,2,3,4]) == 2, not 2.5.
BlackBoxAuditing/repairers/calculators.py
def get_median(values, kdd): """ Given an unsorted list of numeric values, return median value (as a float). Note that in the case of even-length lists of values, we apply the value to the left of the center to be the median (such that the median can only be a value from the list of values). Eg: get_median([1,2,3,4]) == 2, not 2.5. """ if not values: raise Exception("Cannot calculate median of list with no values!") sorted_values = deepcopy(values) sorted_values.sort() # Not calling `sorted` b/c `sorted_values` may not be list. if kdd: return sorted_values[len(values)//2] else: if len(values) % 2 == 0: return sorted_values[len(values)//2-1] else: return sorted_values[len(values)//2]
def get_median(values, kdd): """ Given an unsorted list of numeric values, return median value (as a float). Note that in the case of even-length lists of values, we apply the value to the left of the center to be the median (such that the median can only be a value from the list of values). Eg: get_median([1,2,3,4]) == 2, not 2.5. """ if not values: raise Exception("Cannot calculate median of list with no values!") sorted_values = deepcopy(values) sorted_values.sort() # Not calling `sorted` b/c `sorted_values` may not be list. if kdd: return sorted_values[len(values)//2] else: if len(values) % 2 == 0: return sorted_values[len(values)//2-1] else: return sorted_values[len(values)//2]
[ "Given", "an", "unsorted", "list", "of", "numeric", "values", "return", "median", "value", "(", "as", "a", "float", ")", ".", "Note", "that", "in", "the", "case", "of", "even", "-", "length", "lists", "of", "values", "we", "apply", "the", "value", "to", "the", "left", "of", "the", "center", "to", "be", "the", "median", "(", "such", "that", "the", "median", "can", "only", "be", "a", "value", "from", "the", "list", "of", "values", ")", ".", "Eg", ":", "get_median", "(", "[", "1", "2", "3", "4", "]", ")", "==", "2", "not", "2", ".", "5", "." ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/BlackBoxAuditing/repairers/calculators.py#L3-L24
[ "def", "get_median", "(", "values", ",", "kdd", ")", ":", "if", "not", "values", ":", "raise", "Exception", "(", "\"Cannot calculate median of list with no values!\"", ")", "sorted_values", "=", "deepcopy", "(", "values", ")", "sorted_values", ".", "sort", "(", ")", "# Not calling `sorted` b/c `sorted_values` may not be list.", "if", "kdd", ":", "return", "sorted_values", "[", "len", "(", "values", ")", "//", "2", "]", "else", ":", "if", "len", "(", "values", ")", "%", "2", "==", "0", ":", "return", "sorted_values", "[", "len", "(", "values", ")", "//", "2", "-", "1", "]", "else", ":", "return", "sorted_values", "[", "len", "(", "values", ")", "//", "2", "]" ]
b06c4faed5591cd7088475b2a203127bc5820483
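Usage sketch for get_median, assuming the BlackBoxAuditing package is installed so the calculators module shown above is importable; the kdd flag only matters for even-length input, where it switches from the value left of center to the value at sorted[len//2].

from BlackBoxAuditing.repairers.calculators import get_median  # assumed installed
print(get_median([4, 2, 1, 3], kdd=False))  # 2  (value left of center)
print(get_median([4, 2, 1, 3], kdd=True))   # 3  (sorted[len//2])
print(get_median([3, 1, 2], kdd=False))     # 2  (odd length: the true middle)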
test
expand_to_one_hot
with open("brandon_testing/test_"+str(time.clock())+".csv","w") as f: writer = csv.writer(f,delimiter=",") for row in fin: writer.writerow(row)
python2_source/BlackBoxAuditing/model_factories/RecidivismTensorFlowModelFactory.py
def expand_to_one_hot(data,expand = True,use_alternative=False): header_dict = {'ALCABUS':0,'PRIRCAT':1,'TMSRVC':2,'SEX1':3,'RACE':4,'RELTYP':5,'age_1st_arrest':6,'DRUGAB':7,'Class':8,'RLAGE':9,'NFRCTNS':10} new_data = [] for entry in data: temp = {} if expand == True: if entry[header_dict["SEX1"]] == "FEMALE": temp['female'] = 1 else: temp['female'] = 0 if entry[header_dict["ALCABUS"]] == 'INMATE IS AN ALCOHOL ABUSER': temp['prior_alcohol_abuse'] = 1 else: temp['prior_alcohol_abuse'] = 0 if entry[header_dict['DRUGAB']] == 'INMATE IS A DRUG ABUSER': temp['prior_drug_abuse'] = 1 else: temp['prior_drug_abuse'] = 0 if entry[header_dict['NFRCTNS']] == 'INMATE HAS RECORD': temp['infraction_in_prison'] = 1 else: temp['infraction_in_prison'] = 0 race_cats = ['WHITE','BLACK','AMERICAN INDIAN/ALEUTIAN','ASIAN/PACIFIC ISLANDER','OTHER','UNKNOWN'] for cat in race_cats: if entry[header_dict['RACE']] == cat: temp['race_'+cat] = 1 else: temp['race_'+cat] = 0 release_age_cats = ['14 TO 17 YEARS OLD','18 TO 24 YEARS OLD', '25 TO 29 YEARS OLD', \ '30 TO 34 YEARS OLD','35 TO 39 YEARS OLD','40 TO 44 YEARS OLD','45 YEARS OLD AND OLDER'] for cat in release_age_cats: if entry[header_dict['RLAGE']] == cat: temp['release_age_'+cat] = 1 else: temp['release_age_'+cat] = 0 time_served_cats = ['None','1 TO 6 MONTHS','13 TO 18 MONTHS','19 TO 24 MONTHS','25 TO 30 MONTHS', \ '31 TO 36 MONTHS','37 TO 60 MONTHS','61 MONTHS AND HIGHER','7 TO 12 MONTHS'] for cat in time_served_cats: if entry[header_dict['TMSRVC']] == cat: temp['time_served_'+cat] = 1 else: temp['time_served_'+cat] = 0 prior_arrest_cats = ['None','1 PRIOR ARREST','11 TO 15 PRIOR ARRESTS','16 TO HI PRIOR ARRESTS','2 PRIOR ARRESTS', \ '3 PRIOR ARRESTS','4 PRIOR ARRESTS','5 PRIOR ARRESTS','6 PRIOR ARRESTS','7 TO 10 PRIOR ARRESTS'] for cat in prior_arrest_cats: if entry[header_dict['PRIRCAT']] == cat: temp['prior_arrest_'+cat] = 1 else: temp['prior_arrest_'+cat] = 0 conditional_release =['PAROLE BOARD DECISION-SERVED NO MINIMUM','MANDATORY PAROLE RELEASE', 'PROBATION RELEASE-SHOCK PROBATION', \ 'OTHER CONDITIONAL RELEASE'] unconditional_release = ['EXPIRATION OF SENTENCE','COMMUTATION-PARDON','RELEASE TO CUSTODY, DETAINER, OR WARRANT', \ 'OTHER UNCONDITIONAL RELEASE'] other_release = ['NATURAL CAUSES','SUICIDE','HOMICIDE BY ANOTHER INMATE','OTHER HOMICIDE','EXECUTION','OTHER TYPE OF DEATH', \ 'TRANSFER','RELEASE ON APPEAL OR BOND','OTHER TYPE OF RELEASE','ESCAPE','ACCIDENTAL INJURY TO SELF','UNKNOWN'] if entry[header_dict['RELTYP']] in conditional_release: temp['released_conditional'] = 1 temp['released_unconditional'] = 0 temp['released_other'] = 0 elif entry[header_dict['RELTYP']] in unconditional_release: temp['released_conditional'] = 0 temp['released_unconditional'] = 1 temp['released_other'] = 0 else: temp['released_conditional'] = 0 temp['released_unconditional'] = 0 temp['released_other'] = 1 first_arrest_cats = ['UNDER 17','BETWEEN 18 AND 24','BETWEEN 25 AND 29','BETWEEN 30 AND 39','OVER 40'] for cat in first_arrest_cats: if entry[header_dict['age_1st_arrest']] == cat: temp['age_first_arrest_'+cat] = 1 else: temp['age_first_arrest_'+cat] = 0 else: temp['SEX1'] = entry['SEX1'] temp['RELTYP'] = entry['RELTYP'] temp['PRIRCAT'] = entry['PRIRCAT'] temp['ALCABUS'] = entry['ALCABUS'] temp['DRUGAB'] = entry['DRUGAB'] temp['RLAGE'] = entry['RLAGE'] temp['TMSRVC'] = entry['TMSRVC'] temp['NFRCTNS'] = entry['NFRCTNS'] temp['RACE'] = entry['RACE'] try: bdate = datetime.date(int(entry['YEAROB2']),int(entry['MNTHOB2']), int(entry['DAYOB2'])) first_arrest = 
datetime.date(int(entry['A001YR']),int(entry['A001MO']),int(entry['A001DA'])) first_arrest_age = first_arrest - bdate temp['age_1st_arrest'] = first_arrest_age.days except: temp['age_1st_arrest'] = 0 new_data.append(temp) # convert from dictionary to list of lists fin = [[int(entry[key]) for key in entry.keys()] for entry in new_data] """ with open("brandon_testing/test_"+str(time.clock())+".csv","w") as f: writer = csv.writer(f,delimiter=",") for row in fin: writer.writerow(row) """ return fin
def expand_to_one_hot(data,expand = True,use_alternative=False): header_dict = {'ALCABUS':0,'PRIRCAT':1,'TMSRVC':2,'SEX1':3,'RACE':4,'RELTYP':5,'age_1st_arrest':6,'DRUGAB':7,'Class':8,'RLAGE':9,'NFRCTNS':10} new_data = [] for entry in data: temp = {} if expand == True: if entry[header_dict["SEX1"]] == "FEMALE": temp['female'] = 1 else: temp['female'] = 0 if entry[header_dict["ALCABUS"]] == 'INMATE IS AN ALCOHOL ABUSER': temp['prior_alcohol_abuse'] = 1 else: temp['prior_alcohol_abuse'] = 0 if entry[header_dict['DRUGAB']] == 'INMATE IS A DRUG ABUSER': temp['prior_drug_abuse'] = 1 else: temp['prior_drug_abuse'] = 0 if entry[header_dict['NFRCTNS']] == 'INMATE HAS RECORD': temp['infraction_in_prison'] = 1 else: temp['infraction_in_prison'] = 0 race_cats = ['WHITE','BLACK','AMERICAN INDIAN/ALEUTIAN','ASIAN/PACIFIC ISLANDER','OTHER','UNKNOWN'] for cat in race_cats: if entry[header_dict['RACE']] == cat: temp['race_'+cat] = 1 else: temp['race_'+cat] = 0 release_age_cats = ['14 TO 17 YEARS OLD','18 TO 24 YEARS OLD', '25 TO 29 YEARS OLD', \ '30 TO 34 YEARS OLD','35 TO 39 YEARS OLD','40 TO 44 YEARS OLD','45 YEARS OLD AND OLDER'] for cat in release_age_cats: if entry[header_dict['RLAGE']] == cat: temp['release_age_'+cat] = 1 else: temp['release_age_'+cat] = 0 time_served_cats = ['None','1 TO 6 MONTHS','13 TO 18 MONTHS','19 TO 24 MONTHS','25 TO 30 MONTHS', \ '31 TO 36 MONTHS','37 TO 60 MONTHS','61 MONTHS AND HIGHER','7 TO 12 MONTHS'] for cat in time_served_cats: if entry[header_dict['TMSRVC']] == cat: temp['time_served_'+cat] = 1 else: temp['time_served_'+cat] = 0 prior_arrest_cats = ['None','1 PRIOR ARREST','11 TO 15 PRIOR ARRESTS','16 TO HI PRIOR ARRESTS','2 PRIOR ARRESTS', \ '3 PRIOR ARRESTS','4 PRIOR ARRESTS','5 PRIOR ARRESTS','6 PRIOR ARRESTS','7 TO 10 PRIOR ARRESTS'] for cat in prior_arrest_cats: if entry[header_dict['PRIRCAT']] == cat: temp['prior_arrest_'+cat] = 1 else: temp['prior_arrest_'+cat] = 0 conditional_release =['PAROLE BOARD DECISION-SERVED NO MINIMUM','MANDATORY PAROLE RELEASE', 'PROBATION RELEASE-SHOCK PROBATION', \ 'OTHER CONDITIONAL RELEASE'] unconditional_release = ['EXPIRATION OF SENTENCE','COMMUTATION-PARDON','RELEASE TO CUSTODY, DETAINER, OR WARRANT', \ 'OTHER UNCONDITIONAL RELEASE'] other_release = ['NATURAL CAUSES','SUICIDE','HOMICIDE BY ANOTHER INMATE','OTHER HOMICIDE','EXECUTION','OTHER TYPE OF DEATH', \ 'TRANSFER','RELEASE ON APPEAL OR BOND','OTHER TYPE OF RELEASE','ESCAPE','ACCIDENTAL INJURY TO SELF','UNKNOWN'] if entry[header_dict['RELTYP']] in conditional_release: temp['released_conditional'] = 1 temp['released_unconditional'] = 0 temp['released_other'] = 0 elif entry[header_dict['RELTYP']] in unconditional_release: temp['released_conditional'] = 0 temp['released_unconditional'] = 1 temp['released_other'] = 0 else: temp['released_conditional'] = 0 temp['released_unconditional'] = 0 temp['released_other'] = 1 first_arrest_cats = ['UNDER 17','BETWEEN 18 AND 24','BETWEEN 25 AND 29','BETWEEN 30 AND 39','OVER 40'] for cat in first_arrest_cats: if entry[header_dict['age_1st_arrest']] == cat: temp['age_first_arrest_'+cat] = 1 else: temp['age_first_arrest_'+cat] = 0 else: temp['SEX1'] = entry['SEX1'] temp['RELTYP'] = entry['RELTYP'] temp['PRIRCAT'] = entry['PRIRCAT'] temp['ALCABUS'] = entry['ALCABUS'] temp['DRUGAB'] = entry['DRUGAB'] temp['RLAGE'] = entry['RLAGE'] temp['TMSRVC'] = entry['TMSRVC'] temp['NFRCTNS'] = entry['NFRCTNS'] temp['RACE'] = entry['RACE'] try: bdate = datetime.date(int(entry['YEAROB2']),int(entry['MNTHOB2']), int(entry['DAYOB2'])) first_arrest = 
datetime.date(int(entry['A001YR']),int(entry['A001MO']),int(entry['A001DA'])) first_arrest_age = first_arrest - bdate temp['age_1st_arrest'] = first_arrest_age.days except: temp['age_1st_arrest'] = 0 new_data.append(temp) # convert from dictionary to list of lists fin = [[int(entry[key]) for key in entry.keys()] for entry in new_data] """ with open("brandon_testing/test_"+str(time.clock())+".csv","w") as f: writer = csv.writer(f,delimiter=",") for row in fin: writer.writerow(row) """ return fin
[ "with", "open", "(", "brandon_testing", "/", "test_", "+", "str", "(", "time", ".", "clock", "()", ")", "+", ".", "csv", "w", ")", "as", "f", ":", "writer", "=", "csv", ".", "writer", "(", "f", "delimiter", "=", ")", "for", "row", "in", "fin", ":", "writer", ".", "writerow", "(", "row", ")" ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/python2_source/BlackBoxAuditing/model_factories/RecidivismTensorFlowModelFactory.py#L139-L253
[ "def", "expand_to_one_hot", "(", "data", ",", "expand", "=", "True", ",", "use_alternative", "=", "False", ")", ":", "header_dict", "=", "{", "'ALCABUS'", ":", "0", ",", "'PRIRCAT'", ":", "1", ",", "'TMSRVC'", ":", "2", ",", "'SEX1'", ":", "3", ",", "'RACE'", ":", "4", ",", "'RELTYP'", ":", "5", ",", "'age_1st_arrest'", ":", "6", ",", "'DRUGAB'", ":", "7", ",", "'Class'", ":", "8", ",", "'RLAGE'", ":", "9", ",", "'NFRCTNS'", ":", "10", "}", "new_data", "=", "[", "]", "for", "entry", "in", "data", ":", "temp", "=", "{", "}", "if", "expand", "==", "True", ":", "if", "entry", "[", "header_dict", "[", "\"SEX1\"", "]", "]", "==", "\"FEMALE\"", ":", "temp", "[", "'female'", "]", "=", "1", "else", ":", "temp", "[", "'female'", "]", "=", "0", "if", "entry", "[", "header_dict", "[", "\"ALCABUS\"", "]", "]", "==", "'INMATE IS AN ALCOHOL ABUSER'", ":", "temp", "[", "'prior_alcohol_abuse'", "]", "=", "1", "else", ":", "temp", "[", "'prior_alcohol_abuse'", "]", "=", "0", "if", "entry", "[", "header_dict", "[", "'DRUGAB'", "]", "]", "==", "'INMATE IS A DRUG ABUSER'", ":", "temp", "[", "'prior_drug_abuse'", "]", "=", "1", "else", ":", "temp", "[", "'prior_drug_abuse'", "]", "=", "0", "if", "entry", "[", "header_dict", "[", "'NFRCTNS'", "]", "]", "==", "'INMATE HAS RECORD'", ":", "temp", "[", "'infraction_in_prison'", "]", "=", "1", "else", ":", "temp", "[", "'infraction_in_prison'", "]", "=", "0", "race_cats", "=", "[", "'WHITE'", ",", "'BLACK'", ",", "'AMERICAN INDIAN/ALEUTIAN'", ",", "'ASIAN/PACIFIC ISLANDER'", ",", "'OTHER'", ",", "'UNKNOWN'", "]", "for", "cat", "in", "race_cats", ":", "if", "entry", "[", "header_dict", "[", "'RACE'", "]", "]", "==", "cat", ":", "temp", "[", "'race_'", "+", "cat", "]", "=", "1", "else", ":", "temp", "[", "'race_'", "+", "cat", "]", "=", "0", "release_age_cats", "=", "[", "'14 TO 17 YEARS OLD'", ",", "'18 TO 24 YEARS OLD'", ",", "'25 TO 29 YEARS OLD'", ",", "'30 TO 34 YEARS OLD'", ",", "'35 TO 39 YEARS OLD'", ",", "'40 TO 44 YEARS OLD'", ",", "'45 YEARS OLD AND OLDER'", "]", "for", "cat", "in", "release_age_cats", ":", "if", "entry", "[", "header_dict", "[", "'RLAGE'", "]", "]", "==", "cat", ":", "temp", "[", "'release_age_'", "+", "cat", "]", "=", "1", "else", ":", "temp", "[", "'release_age_'", "+", "cat", "]", "=", "0", "time_served_cats", "=", "[", "'None'", ",", "'1 TO 6 MONTHS'", ",", "'13 TO 18 MONTHS'", ",", "'19 TO 24 MONTHS'", ",", "'25 TO 30 MONTHS'", ",", "'31 TO 36 MONTHS'", ",", "'37 TO 60 MONTHS'", ",", "'61 MONTHS AND HIGHER'", ",", "'7 TO 12 MONTHS'", "]", "for", "cat", "in", "time_served_cats", ":", "if", "entry", "[", "header_dict", "[", "'TMSRVC'", "]", "]", "==", "cat", ":", "temp", "[", "'time_served_'", "+", "cat", "]", "=", "1", "else", ":", "temp", "[", "'time_served_'", "+", "cat", "]", "=", "0", "prior_arrest_cats", "=", "[", "'None'", ",", "'1 PRIOR ARREST'", ",", "'11 TO 15 PRIOR ARRESTS'", ",", "'16 TO HI PRIOR ARRESTS'", ",", "'2 PRIOR ARRESTS'", ",", "'3 PRIOR ARRESTS'", ",", "'4 PRIOR ARRESTS'", ",", "'5 PRIOR ARRESTS'", ",", "'6 PRIOR ARRESTS'", ",", "'7 TO 10 PRIOR ARRESTS'", "]", "for", "cat", "in", "prior_arrest_cats", ":", "if", "entry", "[", "header_dict", "[", "'PRIRCAT'", "]", "]", "==", "cat", ":", "temp", "[", "'prior_arrest_'", "+", "cat", "]", "=", "1", "else", ":", "temp", "[", "'prior_arrest_'", "+", "cat", "]", "=", "0", "conditional_release", "=", "[", "'PAROLE BOARD DECISION-SERVED NO MINIMUM'", ",", "'MANDATORY PAROLE RELEASE'", ",", "'PROBATION RELEASE-SHOCK PROBATION'", ",", "'OTHER CONDITIONAL RELEASE'", "]", 
"unconditional_release", "=", "[", "'EXPIRATION OF SENTENCE'", ",", "'COMMUTATION-PARDON'", ",", "'RELEASE TO CUSTODY, DETAINER, OR WARRANT'", ",", "'OTHER UNCONDITIONAL RELEASE'", "]", "other_release", "=", "[", "'NATURAL CAUSES'", ",", "'SUICIDE'", ",", "'HOMICIDE BY ANOTHER INMATE'", ",", "'OTHER HOMICIDE'", ",", "'EXECUTION'", ",", "'OTHER TYPE OF DEATH'", ",", "'TRANSFER'", ",", "'RELEASE ON APPEAL OR BOND'", ",", "'OTHER TYPE OF RELEASE'", ",", "'ESCAPE'", ",", "'ACCIDENTAL INJURY TO SELF'", ",", "'UNKNOWN'", "]", "if", "entry", "[", "header_dict", "[", "'RELTYP'", "]", "]", "in", "conditional_release", ":", "temp", "[", "'released_conditional'", "]", "=", "1", "temp", "[", "'released_unconditional'", "]", "=", "0", "temp", "[", "'released_other'", "]", "=", "0", "elif", "entry", "[", "header_dict", "[", "'RELTYP'", "]", "]", "in", "unconditional_release", ":", "temp", "[", "'released_conditional'", "]", "=", "0", "temp", "[", "'released_unconditional'", "]", "=", "1", "temp", "[", "'released_other'", "]", "=", "0", "else", ":", "temp", "[", "'released_conditional'", "]", "=", "0", "temp", "[", "'released_unconditional'", "]", "=", "0", "temp", "[", "'released_other'", "]", "=", "1", "first_arrest_cats", "=", "[", "'UNDER 17'", ",", "'BETWEEN 18 AND 24'", ",", "'BETWEEN 25 AND 29'", ",", "'BETWEEN 30 AND 39'", ",", "'OVER 40'", "]", "for", "cat", "in", "first_arrest_cats", ":", "if", "entry", "[", "header_dict", "[", "'age_1st_arrest'", "]", "]", "==", "cat", ":", "temp", "[", "'age_first_arrest_'", "+", "cat", "]", "=", "1", "else", ":", "temp", "[", "'age_first_arrest_'", "+", "cat", "]", "=", "0", "else", ":", "temp", "[", "'SEX1'", "]", "=", "entry", "[", "'SEX1'", "]", "temp", "[", "'RELTYP'", "]", "=", "entry", "[", "'RELTYP'", "]", "temp", "[", "'PRIRCAT'", "]", "=", "entry", "[", "'PRIRCAT'", "]", "temp", "[", "'ALCABUS'", "]", "=", "entry", "[", "'ALCABUS'", "]", "temp", "[", "'DRUGAB'", "]", "=", "entry", "[", "'DRUGAB'", "]", "temp", "[", "'RLAGE'", "]", "=", "entry", "[", "'RLAGE'", "]", "temp", "[", "'TMSRVC'", "]", "=", "entry", "[", "'TMSRVC'", "]", "temp", "[", "'NFRCTNS'", "]", "=", "entry", "[", "'NFRCTNS'", "]", "temp", "[", "'RACE'", "]", "=", "entry", "[", "'RACE'", "]", "try", ":", "bdate", "=", "datetime", ".", "date", "(", "int", "(", "entry", "[", "'YEAROB2'", "]", ")", ",", "int", "(", "entry", "[", "'MNTHOB2'", "]", ")", ",", "int", "(", "entry", "[", "'DAYOB2'", "]", ")", ")", "first_arrest", "=", "datetime", ".", "date", "(", "int", "(", "entry", "[", "'A001YR'", "]", ")", ",", "int", "(", "entry", "[", "'A001MO'", "]", ")", ",", "int", "(", "entry", "[", "'A001DA'", "]", ")", ")", "first_arrest_age", "=", "first_arrest", "-", "bdate", "temp", "[", "'age_1st_arrest'", "]", "=", "first_arrest_age", ".", "days", "except", ":", "temp", "[", "'age_1st_arrest'", "]", "=", "0", "new_data", ".", "append", "(", "temp", ")", "# convert from dictionary to list of lists", "fin", "=", "[", "[", "int", "(", "entry", "[", "key", "]", ")", "for", "key", "in", "entry", ".", "keys", "(", ")", "]", "for", "entry", "in", "new_data", "]", "return", "fin" ]
b06c4faed5591cd7088475b2a203127bc5820483
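The encoder above hard-codes one 0/1 column per category for each recidivism field. A generic sketch of that pattern is below, with an illustrative field name and category list rather than the real ones.

def one_hot_row(row, field, categories, prefix):
    # one 0/1 entry per known category, exactly one of which is set
    return {prefix + cat: int(row[field] == cat) for cat in categories}

print(one_hot_row({"RACE": "BLACK"}, "RACE", ["WHITE", "BLACK", "OTHER"], "race_"))
# -> {'race_WHITE': 0, 'race_BLACK': 1, 'race_OTHER': 0}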
test
load_audit_confusion_matrices
Loads the confusion matrices (paired with their repair levels) from an audit file, each in a two-level dictionary format. For example, the confusion matrix of a 75%-accurate model that classified 15 of its 20 values correctly (mis-classifying 5) may look like: {"A": {"A":10, "B": 5}, "B": {"B":5}} Note that raw boolean values are translated into strings, such that a value that was the boolean True will be returned as the string "True".
python2_source/BlackBoxAuditing/audit_reading.py
def load_audit_confusion_matrices(filename): """ Loads a confusion matrix in a two-level dictionary format. For example, the confusion matrix of a 75%-accurate model that predicted 15 values (and mis-classified 5) may look like: {"A": {"A":10, "B": 5}, "B": {"B":5}} Note that raw boolean values are translated into strings, such that a value that was the boolean True will be returned as the string "True". """ with open(filename) as audit_file: audit_file.next() # Skip the first line. # Extract the confusion matrices and repair levels from the audit file. confusion_matrices = [] for line in audit_file: separator = ":" separator_index = line.index(separator) comma_index = line.index(',') repair_level = float(line[separator_index+2:comma_index]) raw_confusion_matrix = line[comma_index+2:-2] confusion_matrix = json.loads( raw_confusion_matrix.replace("'","\"") ) confusion_matrices.append( (repair_level, confusion_matrix) ) # Sort the repair levels in case they are out of order for whatever reason. confusion_matrices.sort(key = lambda pair: pair[0]) return confusion_matrices
def load_audit_confusion_matrices(filename): """ Loads a confusion matrix in a two-level dictionary format. For example, the confusion matrix of a 75%-accurate model that predicted 15 values (and mis-classified 5) may look like: {"A": {"A":10, "B": 5}, "B": {"B":5}} Note that raw boolean values are translated into strings, such that a value that was the boolean True will be returned as the string "True". """ with open(filename) as audit_file: audit_file.next() # Skip the first line. # Extract the confusion matrices and repair levels from the audit file. confusion_matrices = [] for line in audit_file: separator = ":" separator_index = line.index(separator) comma_index = line.index(',') repair_level = float(line[separator_index+2:comma_index]) raw_confusion_matrix = line[comma_index+2:-2] confusion_matrix = json.loads( raw_confusion_matrix.replace("'","\"") ) confusion_matrices.append( (repair_level, confusion_matrix) ) # Sort the repair levels in case they are out of order for whatever reason. confusion_matrices.sort(key = lambda pair: pair[0]) return confusion_matrices
[ "Loads", "a", "confusion", "matrix", "in", "a", "two", "-", "level", "dictionary", "format", "." ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/python2_source/BlackBoxAuditing/audit_reading.py#L11-L40
[ "def", "load_audit_confusion_matrices", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "audit_file", ":", "audit_file", ".", "next", "(", ")", "# Skip the first line.", "# Extract the confusion matrices and repair levels from the audit file.", "confusion_matrices", "=", "[", "]", "for", "line", "in", "audit_file", ":", "separator", "=", "\":\"", "separator_index", "=", "line", ".", "index", "(", "separator", ")", "comma_index", "=", "line", ".", "index", "(", "','", ")", "repair_level", "=", "float", "(", "line", "[", "separator_index", "+", "2", ":", "comma_index", "]", ")", "raw_confusion_matrix", "=", "line", "[", "comma_index", "+", "2", ":", "-", "2", "]", "confusion_matrix", "=", "json", ".", "loads", "(", "raw_confusion_matrix", ".", "replace", "(", "\"'\"", ",", "\"\\\"\"", ")", ")", "confusion_matrices", ".", "append", "(", "(", "repair_level", ",", "confusion_matrix", ")", ")", "# Sort the repair levels in case they are out of order for whatever reason.", "confusion_matrices", ".", "sort", "(", "key", "=", "lambda", "pair", ":", "pair", "[", "0", "]", ")", "return", "confusion_matrices" ]
b06c4faed5591cd7088475b2a203127bc5820483
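Downstream, the (repair_level, confusion_matrix) pairs this loader returns are scored with a measurer. The pairs below are hand-written stand-ins for what a real audit file would yield, and the measurer repeats the accuracy computation so the snippet stays self-contained.

matrices = [(0.0, {"A": {"A": 10, "B": 5}, "B": {"B": 5}}),
            (1.0, {"A": {"A": 8, "B": 7}, "B": {"B": 5}})]

def measurer(conf_matrix):  # same computation as accuracy()
    total = sum(c for guesses in conf_matrix.values() for c in guesses.values())
    correct = sum(guesses.get(truth, 0) for truth, guesses in conf_matrix.items())
    return correct / total

print({level: round(measurer(matrix), 3) for level, matrix in matrices})
# -> {0.0: 0.75, 1.0: 0.65}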
test
list_to_tf_input
Separates the outcome feature from the data.
BlackBoxAuditing/model_factories/SVM.py
def list_to_tf_input(data, response_index, num_outcomes): """ Separates the outcome feature from the data. """ matrix = np.matrix([row[:response_index] + row[response_index+1:] for row in data]) outcomes = np.asarray([row[response_index] for row in data], dtype=np.uint8) return matrix, outcomes
def list_to_tf_input(data, response_index, num_outcomes): """ Separates the outcome feature from the data. """ matrix = np.matrix([row[:response_index] + row[response_index+1:] for row in data]) outcomes = np.asarray([row[response_index] for row in data], dtype=np.uint8) return matrix, outcomes
[ "Separates", "the", "outcome", "feature", "from", "the", "data", "." ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/BlackBoxAuditing/model_factories/SVM.py#L138-L145
[ "def", "list_to_tf_input", "(", "data", ",", "response_index", ",", "num_outcomes", ")", ":", "matrix", "=", "np", ".", "matrix", "(", "[", "row", "[", ":", "response_index", "]", "+", "row", "[", "response_index", "+", "1", ":", "]", "for", "row", "in", "data", "]", ")", "outcomes", "=", "np", ".", "asarray", "(", "[", "row", "[", "response_index", "]", "for", "row", "in", "data", "]", ",", "dtype", "=", "np", ".", "uint8", ")", "return", "matrix", ",", "outcomes" ]
b06c4faed5591cd7088475b2a203127bc5820483
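What list_to_tf_input does to row-major data, shown on two tiny made-up rows with the label in the third column (response_index=2); note that num_outcomes is accepted but unused by the function.

import numpy as np

data = [[0.1, 0.2, 1], [0.3, 0.4, 0]]
response_index = 2
matrix = np.matrix([row[:response_index] + row[response_index + 1:] for row in data])
outcomes = np.asarray([row[response_index] for row in data], dtype=np.uint8)
print(matrix.shape, outcomes)  # (2, 2) [1 0]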
test
FreedmanDiaconisBinSize
The bin size in FD-binning is given by size = 2 * IQR(x) * n^(-1/3) More Info: https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule If the BinSize ends up being 0 (in the case that all values are the same), return a BinSize of 1.
python2_source/BlackBoxAuditing/repairers/binning/BinSizes.py
def FreedmanDiaconisBinSize(feature_values): """ The bin size in FD-binning is given by size = 2 * IQR(x) * n^(-1/3) More Info: https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule If the BinSize ends up being 0 (in the case that all values are the same), return a BinSize of 1. """ q75, q25 = numpy.percentile(feature_values, [75, 25]) IQR = q75 - q25 bin_size = 2.0 * IQR * len(feature_values) ** (-1.0/3.0) return bin_size if bin_size > 0 else 1
def FreedmanDiaconisBinSize(feature_values): """ The bin size in FD-binning is given by size = 2 * IQR(x) * n^(-1/3) More Info: https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule If the BinSize ends up being 0 (in the case that all values are the same), return a BinSize of 1. """ q75, q25 = numpy.percentile(feature_values, [75, 25]) IQR = q75 - q25 bin_size = 2.0 * IQR * len(feature_values) ** (-1.0/3.0) return bin_size if bin_size > 0 else 1
[ "The", "bin", "size", "in", "FD", "-", "binning", "is", "given", "by", "size", "=", "2", "*", "IQR", "(", "x", ")", "*", "n^", "(", "-", "1", "/", "3", ")", "More", "Info", ":", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Freedman%E2%80%93Diaconis_rule" ]
algofairness/BlackBoxAuditing
python
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/python2_source/BlackBoxAuditing/repairers/binning/BinSizes.py#L3-L15
[ "def", "FreedmanDiaconisBinSize", "(", "feature_values", ")", ":", "q75", ",", "q25", "=", "numpy", ".", "percentile", "(", "feature_values", ",", "[", "75", ",", "25", "]", ")", "IQR", "=", "q75", "-", "q25", "return", "2.0", "*", "IQR", "*", "len", "(", "feature_values", ")", "**", "(", "-", "1.0", "/", "3.0", ")" ]
b06c4faed5591cd7088475b2a203127bc5820483
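A worked Freedman-Diaconis example: for the eight values below, IQR = 6.25 - 2.75 = 3.5 and n^(-1/3) = 8^(-1/3) = 0.5, so the bin size is 2 * 3.5 * 0.5 = 3.5.

import numpy

feature_values = [1, 2, 3, 4, 5, 6, 7, 8]
q75, q25 = numpy.percentile(feature_values, [75, 25])
print(2.0 * (q75 - q25) * len(feature_values) ** (-1.0 / 3.0))  # 3.5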
test
PackagesStatusDetector._update_index_url_from_configs
Checks for alternative index-url in pip.conf
pip_upgrader/packages_status_detector.py
def _update_index_url_from_configs(self): """ Checks for alternative index-url in pip.conf """ if 'VIRTUAL_ENV' in os.environ: self.pip_config_locations.append(os.path.join(os.environ['VIRTUAL_ENV'], 'pip.conf')) self.pip_config_locations.append(os.path.join(os.environ['VIRTUAL_ENV'], 'pip.ini')) if site_config_files: self.pip_config_locations.extend(site_config_files) index_url = None custom_config = None if 'PIP_INDEX_URL' in os.environ and os.environ['PIP_INDEX_URL']: # environ variable takes priority index_url = os.environ['PIP_INDEX_URL'] custom_config = 'PIP_INDEX_URL environment variable' else: for pip_config_filename in self.pip_config_locations: if pip_config_filename.startswith('~'): pip_config_filename = os.path.expanduser(pip_config_filename) if os.path.isfile(pip_config_filename): config = ConfigParser() config.read([pip_config_filename]) try: index_url = config.get('global', 'index-url') custom_config = pip_config_filename break # stop on first detected, because config locations have a priority except (NoOptionError, NoSectionError): # pragma: nocover pass if index_url: self.PYPI_API_URL = self._prepare_api_url(index_url) print(Color('Setting API url to {{autoyellow}}{}{{/autoyellow}} as found in {{autoyellow}}{}{{/autoyellow}}' '. Use --default-index-url to use pypi default index'.format(self.PYPI_API_URL, custom_config)))
def _update_index_url_from_configs(self): """ Checks for alternative index-url in pip.conf """ if 'VIRTUAL_ENV' in os.environ: self.pip_config_locations.append(os.path.join(os.environ['VIRTUAL_ENV'], 'pip.conf')) self.pip_config_locations.append(os.path.join(os.environ['VIRTUAL_ENV'], 'pip.ini')) if site_config_files: self.pip_config_locations.extend(site_config_files) index_url = None custom_config = None if 'PIP_INDEX_URL' in os.environ and os.environ['PIP_INDEX_URL']: # environ variable takes priority index_url = os.environ['PIP_INDEX_URL'] custom_config = 'PIP_INDEX_URL environment variable' else: for pip_config_filename in self.pip_config_locations: if pip_config_filename.startswith('~'): pip_config_filename = os.path.expanduser(pip_config_filename) if os.path.isfile(pip_config_filename): config = ConfigParser() config.read([pip_config_filename]) try: index_url = config.get('global', 'index-url') custom_config = pip_config_filename break # stop on first detected, because config locations have a priority except (NoOptionError, NoSectionError): # pragma: nocover pass if index_url: self.PYPI_API_URL = self._prepare_api_url(index_url) print(Color('Setting API url to {{autoyellow}}{}{{/autoyellow}} as found in {{autoyellow}}{}{{/autoyellow}}' '. Use --default-index-url to use pypi default index'.format(self.PYPI_API_URL, custom_config)))
[ "Checks", "for", "alternative", "index", "-", "url", "in", "pip", ".", "conf" ]
simion/pip-upgrader
python
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/packages_status_detector.py#L55-L90
[ "def", "_update_index_url_from_configs", "(", "self", ")", ":", "if", "'VIRTUAL_ENV'", "in", "os", ".", "environ", ":", "self", ".", "pip_config_locations", ".", "append", "(", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'VIRTUAL_ENV'", "]", ",", "'pip.conf'", ")", ")", "self", ".", "pip_config_locations", ".", "append", "(", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'VIRTUAL_ENV'", "]", ",", "'pip.ini'", ")", ")", "if", "site_config_files", ":", "self", ".", "pip_config_locations", ".", "extend", "(", "site_config_files", ")", "index_url", "=", "None", "custom_config", "=", "None", "if", "'PIP_INDEX_URL'", "in", "os", ".", "environ", "and", "os", ".", "environ", "[", "'PIP_INDEX_URL'", "]", ":", "# environ variable takes priority", "index_url", "=", "os", ".", "environ", "[", "'PIP_INDEX_URL'", "]", "custom_config", "=", "'PIP_INDEX_URL environment variable'", "else", ":", "for", "pip_config_filename", "in", "self", ".", "pip_config_locations", ":", "if", "pip_config_filename", ".", "startswith", "(", "'~'", ")", ":", "pip_config_filename", "=", "os", ".", "path", ".", "expanduser", "(", "pip_config_filename", ")", "if", "os", ".", "path", ".", "isfile", "(", "pip_config_filename", ")", ":", "config", "=", "ConfigParser", "(", ")", "config", ".", "read", "(", "[", "pip_config_filename", "]", ")", "try", ":", "index_url", "=", "config", ".", "get", "(", "'global'", ",", "'index-url'", ")", "custom_config", "=", "pip_config_filename", "break", "# stop on first detected, because config locations have a priority", "except", "(", "NoOptionError", ",", "NoSectionError", ")", ":", "# pragma: nocover", "pass", "if", "index_url", ":", "self", ".", "PYPI_API_URL", "=", "self", ".", "_prepare_api_url", "(", "index_url", ")", "print", "(", "Color", "(", "'Setting API url to {{autoyellow}}{}{{/autoyellow}} as found in {{autoyellow}}{}{{/autoyellow}}'", "'. Use --default-index-url to use pypi default index'", ".", "format", "(", "self", ".", "PYPI_API_URL", ",", "custom_config", ")", ")", ")" ]
716adca65d9ed56d4d416f94ede8a8e4fa8d640a
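The config lookup above reads the [global] index-url key with ConfigParser. A self-contained illustration follows, using read_string instead of a pip.conf on disk and a purely illustrative index URL.

from configparser import ConfigParser

config = ConfigParser()
config.read_string("[global]\nindex-url = https://example.com/simple/\n")
print(config.get("global", "index-url"))  # https://example.com/simple/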
test
PackagesStatusDetector._fetch_index_package_info
:type package_name: str :type current_version: version.Version
pip_upgrader/packages_status_detector.py
def _fetch_index_package_info(self, package_name, current_version): """ :type package_name: str :type current_version: version.Version """ try: package_canonical_name = package_name if self.PYPI_API_TYPE == 'simple_html': package_canonical_name = canonicalize_name(package_name) response = requests.get(self.PYPI_API_URL.format(package=package_canonical_name), timeout=15) except HTTPError as e: # pragma: nocover return False, e.message if not response.ok: # pragma: nocover return False, 'API error: {}'.format(response.reason) if self.PYPI_API_TYPE == 'pypi_json': return self._parse_pypi_json_package_info(package_name, current_version, response) elif self.PYPI_API_TYPE == 'simple_html': return self._parse_simple_html_package_info(package_name, current_version, response) else: # pragma: nocover raise NotImplementedError('This type of PYPI_API_TYPE type is not supported')
def _fetch_index_package_info(self, package_name, current_version): """ :type package_name: str :type current_version: version.Version """ try: package_canonical_name = package_name if self.PYPI_API_TYPE == 'simple_html': package_canonical_name = canonicalize_name(package_name) response = requests.get(self.PYPI_API_URL.format(package=package_canonical_name), timeout=15) except HTTPError as e: # pragma: nocover return False, e.message if not response.ok: # pragma: nocover return False, 'API error: {}'.format(response.reason) if self.PYPI_API_TYPE == 'pypi_json': return self._parse_pypi_json_package_info(package_name, current_version, response) elif self.PYPI_API_TYPE == 'simple_html': return self._parse_simple_html_package_info(package_name, current_version, response) else: # pragma: nocover raise NotImplementedError('This type of PYPI_API_TYPE type is not supported')
[ ":", "type", "package_name", ":", "str", ":", "type", "current_version", ":", "version", ".", "Version" ]
simion/pip-upgrader
python
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/packages_status_detector.py#L153-L175
[ "def", "_fetch_index_package_info", "(", "self", ",", "package_name", ",", "current_version", ")", ":", "try", ":", "package_canonical_name", "=", "package_name", "if", "self", ".", "PYPI_API_TYPE", "==", "'simple_html'", ":", "package_canonical_name", "=", "canonicalize_name", "(", "package_name", ")", "response", "=", "requests", ".", "get", "(", "self", ".", "PYPI_API_URL", ".", "format", "(", "package", "=", "package_canonical_name", ")", ",", "timeout", "=", "15", ")", "except", "HTTPError", "as", "e", ":", "# pragma: nocover", "return", "False", ",", "e", ".", "message", "if", "not", "response", ".", "ok", ":", "# pragma: nocover", "return", "False", ",", "'API error: {}'", ".", "format", "(", "response", ".", "reason", ")", "if", "self", ".", "PYPI_API_TYPE", "==", "'pypi_json'", ":", "return", "self", ".", "_parse_pypi_json_package_info", "(", "package_name", ",", "current_version", ",", "response", ")", "elif", "self", ".", "PYPI_API_TYPE", "==", "'simple_html'", ":", "return", "self", ".", "_parse_simple_html_package_info", "(", "package_name", ",", "current_version", ",", "response", ")", "else", ":", "# pragma: nocover", "raise", "NotImplementedError", "(", "'This type of PYPI_API_TYPE type is not supported'", ")" ]
716adca65d9ed56d4d416f94ede8a8e4fa8d640a
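The fetch step only formats a {package} placeholder into the configured API URL before issuing the GET. The template below assumes the standard PyPI JSON API layout; the class's real PYPI_API_URL default is defined outside the excerpt above.

PYPI_API_URL = "https://pypi.org/pypi/{package}/json"  # assumed JSON-API template
print(PYPI_API_URL.format(package="requests"))  # https://pypi.org/pypi/requests/json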
test
PackagesStatusDetector._parse_pypi_json_package_info
:type package_name: str :type current_version: version.Version :type response: requests.models.Response
pip_upgrader/packages_status_detector.py
def _parse_pypi_json_package_info(self, package_name, current_version, response): """ :type package_name: str :type current_version: version.Version :type response: requests.models.Response """ data = response.json() all_versions = [version.parse(vers) for vers in data['releases'].keys()] filtered_versions = [vers for vers in all_versions if not vers.is_prerelease and not vers.is_postrelease] if not filtered_versions: # pragma: nocover return False, 'error while parsing version' latest_version = max(filtered_versions) # even if user did not choose prerelease, if the package from requirements is pre/post release, use it if self._prerelease or current_version.is_postrelease or current_version.is_prerelease: prerelease_versions = [vers for vers in all_versions if vers.is_prerelease or vers.is_postrelease] if prerelease_versions: latest_version = max(prerelease_versions) try: try: latest_version_info = data['releases'][str(latest_version)][0] except KeyError: # pragma: nocover # non-RFC versions, get the latest from pypi response latest_version = version.parse(data['info']['version']) latest_version_info = data['releases'][str(latest_version)][0] except Exception: # pragma: nocover return False, 'error while parsing version' upload_time = latest_version_info['upload_time'].replace('T', ' ') return { 'name': package_name, 'current_version': current_version, 'latest_version': latest_version, 'upgrade_available': current_version < latest_version, 'upload_time': upload_time }, 'success'
def _parse_pypi_json_package_info(self, package_name, current_version, response): """ :type package_name: str :type current_version: version.Version :type response: requests.models.Response """ data = response.json() all_versions = [version.parse(vers) for vers in data['releases'].keys()] filtered_versions = [vers for vers in all_versions if not vers.is_prerelease and not vers.is_postrelease] if not filtered_versions: # pragma: nocover return False, 'error while parsing version' latest_version = max(filtered_versions) # even if user did not choose prerelease, if the package from requirements is pre/post release, use it if self._prerelease or current_version.is_postrelease or current_version.is_prerelease: prerelease_versions = [vers for vers in all_versions if vers.is_prerelease or vers.is_postrelease] if prerelease_versions: latest_version = max(prerelease_versions) try: try: latest_version_info = data['releases'][str(latest_version)][0] except KeyError: # pragma: nocover # non-RFC versions, get the latest from pypi response latest_version = version.parse(data['info']['version']) latest_version_info = data['releases'][str(latest_version)][0] except Exception: # pragma: nocover return False, 'error while parsing version' upload_time = latest_version_info['upload_time'].replace('T', ' ') return { 'name': package_name, 'current_version': current_version, 'latest_version': latest_version, 'upgrade_available': current_version < latest_version, 'upload_time': upload_time }, 'success'
[ ":", "type", "package_name", ":", "str", ":", "type", "current_version", ":", "version", ".", "Version", ":", "type", "response", ":", "requests", ".", "models", ".", "Response" ]
simion/pip-upgrader
python
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/packages_status_detector.py#L188-L227
[ "def", "_parse_pypi_json_package_info", "(", "self", ",", "package_name", ",", "current_version", ",", "response", ")", ":", "data", "=", "response", ".", "json", "(", ")", "all_versions", "=", "[", "version", ".", "parse", "(", "vers", ")", "for", "vers", "in", "data", "[", "'releases'", "]", ".", "keys", "(", ")", "]", "filtered_versions", "=", "[", "vers", "for", "vers", "in", "all_versions", "if", "not", "vers", ".", "is_prerelease", "and", "not", "vers", ".", "is_postrelease", "]", "if", "not", "filtered_versions", ":", "# pragma: nocover", "return", "False", ",", "'error while parsing version'", "latest_version", "=", "max", "(", "filtered_versions", ")", "# even if user did not choose prerelease, if the package from requirements is pre/post release, use it", "if", "self", ".", "_prerelease", "or", "current_version", ".", "is_postrelease", "or", "current_version", ".", "is_prerelease", ":", "prerelease_versions", "=", "[", "vers", "for", "vers", "in", "all_versions", "if", "vers", ".", "is_prerelease", "or", "vers", ".", "is_postrelease", "]", "if", "prerelease_versions", ":", "latest_version", "=", "max", "(", "prerelease_versions", ")", "try", ":", "try", ":", "latest_version_info", "=", "data", "[", "'releases'", "]", "[", "str", "(", "latest_version", ")", "]", "[", "0", "]", "except", "KeyError", ":", "# pragma: nocover", "# non-RFC versions, get the latest from pypi response", "latest_version", "=", "version", ".", "parse", "(", "data", "[", "'info'", "]", "[", "'version'", "]", ")", "latest_version_info", "=", "data", "[", "'releases'", "]", "[", "str", "(", "latest_version", ")", "]", "[", "0", "]", "except", "Exception", ":", "# pragma: nocover", "return", "False", ",", "'error while parsing version'", "upload_time", "=", "latest_version_info", "[", "'upload_time'", "]", ".", "replace", "(", "'T'", ",", "' '", ")", "return", "{", "'name'", ":", "package_name", ",", "'current_version'", ":", "current_version", ",", "'latest_version'", ":", "latest_version", ",", "'upgrade_available'", ":", "current_version", "<", "latest_version", ",", "'upload_time'", ":", "upload_time", "}", ",", "'success'" ]
716adca65d9ed56d4d416f94ede8a8e4fa8d640a
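The version-selection rule in isolation, using packaging.version as the code above does: stable releases are preferred, and the pre/post-release pool is consulted only when prereleases were requested or the pinned version is itself a pre/post release. The version strings are made up for illustration.

from packaging import version

all_versions = [version.parse(v) for v in ["1.0", "1.1", "2.0rc1"]]
stable = [v for v in all_versions if not v.is_prerelease and not v.is_postrelease]
pre = [v for v in all_versions if v.is_prerelease or v.is_postrelease]
print(max(stable))  # 1.1    -> latest_version in the default case
print(max(pre))     # 2.0rc1 -> used instead when pre/post releases are in play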
test
PackagesStatusDetector._parse_simple_html_package_info
:type package_name: str :type current_version: version.Version :type response: requests.models.Response
pip_upgrader/packages_status_detector.py
def _parse_simple_html_package_info(self, package_name, current_version, response): """ :type package_name: str :type current_version: version.Version :type response: requests.models.Response """ pattern = r'<a.*>.*{name}-([A-z0-9\.-]*)(?:-py|\.tar).*<\/a>'.format(name=re.escape(package_name)) versions_match = re.findall(pattern, response.content.decode('utf-8'), flags=re.IGNORECASE) all_versions = [version.parse(vers) for vers in versions_match] filtered_versions = [vers for vers in all_versions if not vers.is_prerelease and not vers.is_postrelease] if not filtered_versions: # pragma: nocover return False, 'error while parsing version' latest_version = max(filtered_versions) # even if user did not choose prerelease, if the package from requirements is pre/post release, use it if self._prerelease or current_version.is_postrelease or current_version.is_prerelease: prerelease_versions = [vers for vers in all_versions if vers.is_prerelease or vers.is_postrelease] if prerelease_versions: latest_version = max(prerelease_versions) return { 'name': package_name, 'current_version': current_version, 'latest_version': latest_version, 'upgrade_available': current_version < latest_version, 'upload_time': '-' }, 'success'
def _parse_simple_html_package_info(self, package_name, current_version, response): """ :type package_name: str :type current_version: version.Version :type response: requests.models.Response """ pattern = r'<a.*>.*{name}-([A-z0-9\.-]*)(?:-py|\.tar).*<\/a>'.format(name=re.escape(package_name)) versions_match = re.findall(pattern, response.content.decode('utf-8'), flags=re.IGNORECASE) all_versions = [version.parse(vers) for vers in versions_match] filtered_versions = [vers for vers in all_versions if not vers.is_prerelease and not vers.is_postrelease] if not filtered_versions: # pragma: nocover return False, 'error while parsing version' latest_version = max(filtered_versions) # even if user did not choose prerelease, if the package from requirements is pre/post release, use it if self._prerelease or current_version.is_postrelease or current_version.is_prerelease: prerelease_versions = [vers for vers in all_versions if vers.is_prerelease or vers.is_postrelease] if prerelease_versions: latest_version = max(prerelease_versions) return { 'name': package_name, 'current_version': current_version, 'latest_version': latest_version, 'upgrade_available': current_version < latest_version, 'upload_time': '-' }, 'success'
[ ":", "type", "package_name", ":", "str", ":", "type", "current_version", ":", "version", ".", "Version", ":", "type", "response", ":", "requests", ".", "models", ".", "Response" ]
simion/pip-upgrader
python
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/packages_status_detector.py#L229-L258
[ "def", "_parse_simple_html_package_info", "(", "self", ",", "package_name", ",", "current_version", ",", "response", ")", ":", "pattern", "=", "r'<a.*>.*{name}-([A-z0-9\\.-]*)(?:-py|\\.tar).*<\\/a>'", ".", "format", "(", "name", "=", "re", ".", "escape", "(", "package_name", ")", ")", "versions_match", "=", "re", ".", "findall", "(", "pattern", ",", "response", ".", "content", ".", "decode", "(", "'utf-8'", ")", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "all_versions", "=", "[", "version", ".", "parse", "(", "vers", ")", "for", "vers", "in", "versions_match", "]", "filtered_versions", "=", "[", "vers", "for", "vers", "in", "all_versions", "if", "not", "vers", ".", "is_prerelease", "and", "not", "vers", ".", "is_postrelease", "]", "if", "not", "filtered_versions", ":", "# pragma: nocover", "return", "False", ",", "'error while parsing version'", "latest_version", "=", "max", "(", "filtered_versions", ")", "# even if user did not choose prerelease, if the package from requirements is pre/post release, use it", "if", "self", ".", "_prerelease", "or", "current_version", ".", "is_postrelease", "or", "current_version", ".", "is_prerelease", ":", "prerelease_versions", "=", "[", "vers", "for", "vers", "in", "all_versions", "if", "vers", ".", "is_prerelease", "or", "vers", ".", "is_postrelease", "]", "if", "prerelease_versions", ":", "latest_version", "=", "max", "(", "prerelease_versions", ")", "return", "{", "'name'", ":", "package_name", ",", "'current_version'", ":", "current_version", ",", "'latest_version'", ":", "latest_version", ",", "'upgrade_available'", ":", "current_version", "<", "latest_version", ",", "'upload_time'", ":", "'-'", "}", ",", "'success'" ]
716adca65d9ed56d4d416f94ede8a8e4fa8d640a
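A small sketch of how the regex above behaves against a hand-written fragment in the style of a PEP 503 "simple" index page (the HTML below is illustrative, not a real index response):

import re
from packaging import version

package_name = 'requests'
html = (
    '<a href="#">requests-2.21.0.tar.gz</a>\n'
    '<a href="#">requests-2.22.0-py2.py3-none-any.whl</a>\n'
)

# Same pattern as in _parse_simple_html_package_info: capture the version between
# "<name>-" and either "-py" (wheels) or ".tar" (sdists).
pattern = r'<a.*>.*{name}-([A-z0-9\.-]*)(?:-py|\.tar).*<\/a>'.format(name=re.escape(package_name))
found = re.findall(pattern, html, flags=re.IGNORECASE)
print(found)                                   # ['2.21.0', '2.22.0']
print(max(version.parse(v) for v in found))    # 2.22.0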
test
main
Main CLI entrypoint.
pip_upgrader/cli.py
def main(): """ Main CLI entrypoint. """ options = get_options() Windows.enable(auto_colors=True, reset_atexit=True) try: # maybe check if virtualenv is not activated check_for_virtualenv(options) # 1. detect requirements files filenames = RequirementsDetector(options.get('<requirements_file>')).get_filenames() if filenames: print(Color('{{autoyellow}}Found valid requirements file(s):{{/autoyellow}} ' '{{autocyan}}\n{}{{/autocyan}}'.format('\n'.join(filenames)))) else: # pragma: nocover print(Color('{autoyellow}No requirements files found in current directory. CD into your project ' 'or manually specify requirements files as arguments.{/autoyellow}')) return # 2. detect all packages inside requirements packages = PackagesDetector(filenames).get_packages() # 3. query pypi API, see which package has a newer version vs the one in requirements (or current env) packages_status_map = PackagesStatusDetector( packages, options.get('--use-default-index')).detect_available_upgrades(options) # 4. [optionally], show interactive screen when user can choose which packages to upgrade selected_packages = PackageInteractiveSelector(packages_status_map, options).get_packages() # 5. having the list of packages, do the actual upgrade and replace the version inside all filenames upgraded_packages = PackagesUpgrader(selected_packages, filenames, options).do_upgrade() print(Color('{{autogreen}}Successfully upgraded (and updated requirements) for the following packages: ' '{}{{/autogreen}}'.format(','.join([package['name'] for package in upgraded_packages])))) if options['--dry-run']: print(Color('{automagenta}Actually, no, because this was a simulation using --dry-run{/automagenta}')) except KeyboardInterrupt: # pragma: nocover print(Color('\n{autored}Upgrade interrupted.{/autored}'))
def main(): """ Main CLI entrypoint. """ options = get_options() Windows.enable(auto_colors=True, reset_atexit=True) try: # maybe check if virtualenv is not activated check_for_virtualenv(options) # 1. detect requirements files filenames = RequirementsDetector(options.get('<requirements_file>')).get_filenames() if filenames: print(Color('{{autoyellow}}Found valid requirements file(s):{{/autoyellow}} ' '{{autocyan}}\n{}{{/autocyan}}'.format('\n'.join(filenames)))) else: # pragma: nocover print(Color('{autoyellow}No requirements files found in current directory. CD into your project ' 'or manually specify requirements files as arguments.{/autoyellow}')) return # 2. detect all packages inside requirements packages = PackagesDetector(filenames).get_packages() # 3. query pypi API, see which package has a newer version vs the one in requirements (or current env) packages_status_map = PackagesStatusDetector( packages, options.get('--use-default-index')).detect_available_upgrades(options) # 4. [optionally], show interactive screen when user can choose which packages to upgrade selected_packages = PackageInteractiveSelector(packages_status_map, options).get_packages() # 5. having the list of packages, do the actual upgrade and replace the version inside all filenames upgraded_packages = PackagesUpgrader(selected_packages, filenames, options).do_upgrade() print(Color('{{autogreen}}Successfully upgraded (and updated requirements) for the following packages: ' '{}{{/autogreen}}'.format(','.join([package['name'] for package in upgraded_packages])))) if options['--dry-run']: print(Color('{automagenta}Actually, no, because this was a simulation using --dry-run{/automagenta}')) except KeyboardInterrupt: # pragma: nocover print(Color('\n{autored}Upgrade interrupted.{/autored}'))
[ "Main", "CLI", "entrypoint", "." ]
simion/pip-upgrader
python
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/cli.py#L47-L84
[ "def", "main", "(", ")", ":", "options", "=", "get_options", "(", ")", "Windows", ".", "enable", "(", "auto_colors", "=", "True", ",", "reset_atexit", "=", "True", ")", "try", ":", "# maybe check if virtualenv is not activated", "check_for_virtualenv", "(", "options", ")", "# 1. detect requirements files", "filenames", "=", "RequirementsDetector", "(", "options", ".", "get", "(", "'<requirements_file>'", ")", ")", ".", "get_filenames", "(", ")", "if", "filenames", ":", "print", "(", "Color", "(", "'{{autoyellow}}Found valid requirements file(s):{{/autoyellow}} '", "'{{autocyan}}\\n{}{{/autocyan}}'", ".", "format", "(", "'\\n'", ".", "join", "(", "filenames", ")", ")", ")", ")", "else", ":", "# pragma: nocover", "print", "(", "Color", "(", "'{autoyellow}No requirements files found in current directory. CD into your project '", "'or manually specify requirements files as arguments.{/autoyellow}'", ")", ")", "return", "# 2. detect all packages inside requirements", "packages", "=", "PackagesDetector", "(", "filenames", ")", ".", "get_packages", "(", ")", "# 3. query pypi API, see which package has a newer version vs the one in requirements (or current env)", "packages_status_map", "=", "PackagesStatusDetector", "(", "packages", ",", "options", ".", "get", "(", "'--use-default-index'", ")", ")", ".", "detect_available_upgrades", "(", "options", ")", "# 4. [optionally], show interactive screen when user can choose which packages to upgrade", "selected_packages", "=", "PackageInteractiveSelector", "(", "packages_status_map", ",", "options", ")", ".", "get_packages", "(", ")", "# 5. having the list of packages, do the actual upgrade and replace the version inside all filenames", "upgraded_packages", "=", "PackagesUpgrader", "(", "selected_packages", ",", "filenames", ",", "options", ")", ".", "do_upgrade", "(", ")", "print", "(", "Color", "(", "'{{autogreen}}Successfully upgraded (and updated requirements) for the following packages: '", "'{}{{/autogreen}}'", ".", "format", "(", "','", ".", "join", "(", "[", "package", "[", "'name'", "]", "for", "package", "in", "upgraded_packages", "]", ")", ")", ")", ")", "if", "options", "[", "'--dry-run'", "]", ":", "print", "(", "Color", "(", "'{automagenta}Actually, no, because this was a simulation using --dry-run{/automagenta}'", ")", ")", "except", "KeyboardInterrupt", ":", "# pragma: nocover", "print", "(", "Color", "(", "'\\n{autored}Upgrade interrupted.{/autored}'", ")", ")" ]
716adca65d9ed56d4d416f94ede8a8e4fa8d640a
test
PackagesUpgrader._update_package
Update (install) the package in the current environment and, if successful, also replace the version in the requirements file
pip_upgrader/packages_upgrader.py
def _update_package(self, package): """ Update (install) the package in current environment, and if success, also replace version in file """ try: if not self.dry_run and not self.skip_package_installation: # pragma: nocover subprocess.check_call(['pip', 'install', '{}=={}'.format(package['name'], package['latest_version'])]) else: print('[Dry Run]: skipping package installation:', package['name']) # update only if installation success self._update_requirements_package(package) except CalledProcessError: # pragma: nocover print(Color('{{autored}}Failed to install package "{}"{{/autored}}'.format(package['name'])))
def _update_package(self, package): """ Update (install) the package in current environment, and if success, also replace version in file """ try: if not self.dry_run and not self.skip_package_installation: # pragma: nocover subprocess.check_call(['pip', 'install', '{}=={}'.format(package['name'], package['latest_version'])]) else: print('[Dry Run]: skipping package installation:', package['name']) # update only if installation success self._update_requirements_package(package) except CalledProcessError: # pragma: nocover print(Color('{{autored}}Failed to install package "{}"{{/autored}}'.format(package['name'])))
[ "Update", "(", "install", ")", "the", "package", "in", "current", "environment", "and", "if", "success", "also", "replace", "version", "in", "file" ]
simion/pip-upgrader
python
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/packages_upgrader.py#L30-L41
[ "def", "_update_package", "(", "self", ",", "package", ")", ":", "try", ":", "if", "not", "self", ".", "dry_run", "and", "not", "self", ".", "skip_package_installation", ":", "# pragma: nocover", "subprocess", ".", "check_call", "(", "[", "'pip'", ",", "'install'", ",", "'{}=={}'", ".", "format", "(", "package", "[", "'name'", "]", ",", "package", "[", "'latest_version'", "]", ")", "]", ")", "else", ":", "print", "(", "'[Dry Run]: skipping package installation:'", ",", "package", "[", "'name'", "]", ")", "# update only if installation success", "self", ".", "_update_requirements_package", "(", "package", ")", "except", "CalledProcessError", ":", "# pragma: nocover", "print", "(", "Color", "(", "'{{autored}}Failed to install package \"{}\"{{/autored}}'", ".", "format", "(", "package", "[", "'name'", "]", ")", ")", ")" ]
716adca65d9ed56d4d416f94ede8a8e4fa8d640a
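The same install-or-skip pattern, reduced to a standalone sketch (install_pinned is a hypothetical helper; with dry_run=True nothing is actually installed):

import subprocess
from subprocess import CalledProcessError

def install_pinned(name, latest_version, dry_run=True):
    try:
        if not dry_run:
            # Pin the exact version, exactly as the upgrader does.
            subprocess.check_call(['pip', 'install', '{}=={}'.format(name, latest_version)])
        else:
            print('[Dry Run]: skipping package installation:', name)
    except CalledProcessError:
        print('Failed to install package "{}"'.format(name))

install_pinned('requests', '2.22.0')                      # dry run: only prints a message
# install_pinned('requests', '2.22.0', dry_run=False)     # would actually shell out to pip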
test
RequirementsDetector.autodetect_files
Attempt to detect requirements files in the current working directory
pip_upgrader/requirements_detector.py
def autodetect_files(self): """ Attempt to detect requirements files in the current working directory """ if self._is_valid_requirements_file('requirements.txt'): self.filenames.append('requirements.txt') if self._is_valid_requirements_file('requirements.pip'): # pragma: nocover self.filenames.append('requirements.pip') if os.path.isdir('requirements'): for filename in os.listdir('requirements'): file_path = os.path.join('requirements', filename) if self._is_valid_requirements_file(file_path): self.filenames.append(file_path) self._check_inclusions_recursively()
def autodetect_files(self): """ Attempt to detect requirements files in the current working directory """ if self._is_valid_requirements_file('requirements.txt'): self.filenames.append('requirements.txt') if self._is_valid_requirements_file('requirements.pip'): # pragma: nocover self.filenames.append('requirements.pip') if os.path.isdir('requirements'): for filename in os.listdir('requirements'): file_path = os.path.join('requirements', filename) if self._is_valid_requirements_file(file_path): self.filenames.append(file_path) self._check_inclusions_recursively()
[ "Attempt", "to", "detect", "requirements", "files", "in", "the", "current", "working", "directory" ]
simion/pip-upgrader
python
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/requirements_detector.py#L32-L45
[ "def", "autodetect_files", "(", "self", ")", ":", "if", "self", ".", "_is_valid_requirements_file", "(", "'requirements.txt'", ")", ":", "self", ".", "filenames", ".", "append", "(", "'requirements.txt'", ")", "if", "self", ".", "_is_valid_requirements_file", "(", "'requirements.pip'", ")", ":", "# pragma: nocover", "self", ".", "filenames", ".", "append", "(", "'requirements.pip'", ")", "if", "os", ".", "path", ".", "isdir", "(", "'requirements'", ")", ":", "for", "filename", "in", "os", ".", "listdir", "(", "'requirements'", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "'requirements'", ",", "filename", ")", "if", "self", ".", "_is_valid_requirements_file", "(", "file_path", ")", ":", "self", ".", "filenames", ".", "append", "(", "file_path", ")", "self", ".", "_check_inclusions_recursively", "(", ")" ]
716adca65d9ed56d4d416f94ede8a8e4fa8d640a
test
resolve_streams
Resolve all streams on the network. This function returns all currently available streams from any outlet on the network. The network is usually the subnet specified at the local router, but may also include a group of machines visible to each other via multicast packets (given that the network supports it), or list of hostnames. These details may optionally be customized by the experimenter in a configuration file (see Network Connectivity in the LSL wiki). Keyword arguments: wait_time -- The waiting time for the operation, in seconds, to search for streams. Warning: If this is too short (<0.5s) only a subset (or none) of the outlets that are present on the network may be returned. (default 1.0) Returns a list of StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. The full description can be retrieved from the inlet.
pylsl/pylsl.py
def resolve_streams(wait_time=1.0): """Resolve all streams on the network. This function returns all currently available streams from any outlet on the network. The network is usually the subnet specified at the local router, but may also include a group of machines visible to each other via multicast packets (given that the network supports it), or list of hostnames. These details may optionally be customized by the experimenter in a configuration file (see Network Connectivity in the LSL wiki). Keyword arguments: wait_time -- The waiting time for the operation, in seconds, to search for streams. Warning: If this is too short (<0.5s) only a subset (or none) of the outlets that are present on the network may be returned. (default 1.0) Returns a list of StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. The full description can be retrieved from the inlet. """ # noinspection PyCallingNonCallable buffer = (c_void_p*1024)() num_found = lib.lsl_resolve_all(byref(buffer), 1024, c_double(wait_time)) return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
def resolve_streams(wait_time=1.0): """Resolve all streams on the network. This function returns all currently available streams from any outlet on the network. The network is usually the subnet specified at the local router, but may also include a group of machines visible to each other via multicast packets (given that the network supports it), or list of hostnames. These details may optionally be customized by the experimenter in a configuration file (see Network Connectivity in the LSL wiki). Keyword arguments: wait_time -- The waiting time for the operation, in seconds, to search for streams. Warning: If this is too short (<0.5s) only a subset (or none) of the outlets that are present on the network may be returned. (default 1.0) Returns a list of StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. The full description can be retrieved from the inlet. """ # noinspection PyCallingNonCallable buffer = (c_void_p*1024)() num_found = lib.lsl_resolve_all(byref(buffer), 1024, c_double(wait_time)) return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
[ "Resolve", "all", "streams", "on", "the", "network", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L519-L543
[ "def", "resolve_streams", "(", "wait_time", "=", "1.0", ")", ":", "# noinspection PyCallingNonCallable", "buffer", "=", "(", "c_void_p", "*", "1024", ")", "(", ")", "num_found", "=", "lib", ".", "lsl_resolve_all", "(", "byref", "(", "buffer", ")", ",", "1024", ",", "c_double", "(", "wait_time", ")", ")", "return", "[", "StreamInfo", "(", "handle", "=", "buffer", "[", "k", "]", ")", "for", "k", "in", "range", "(", "num_found", ")", "]" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
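A typical use of resolve_streams, assuming pylsl plus the liblsl binary are installed and at least one outlet is active on the network:

from pylsl import StreamInlet, resolve_streams

streams = resolve_streams(wait_time=2.0)   # a bit more than the 1.0 s default for slow networks
for info in streams:
    # desc is empty at this point; open an inlet and call inlet.info() for the full description
    print(info.name(), info.type(), info.channel_count())

if streams:
    inlet = StreamInlet(streams[0])
    sample, timestamp = inlet.pull_sample(timeout=1.0)
    print(timestamp, sample)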
test
resolve_byprop
Resolve all streams with a specific value for a given property. If the goal is to resolve a specific stream, this method is preferred over resolving all streams and then selecting the desired one. Keyword arguments: prop -- The StreamInfo property that should have a specific value (e.g., "name", "type", "source_id", or "desc/manufacturer"). value -- The string value that the property should have (e.g., "EEG" as the type property). minimum -- Return at least this many streams. (default 1) timeout -- Optionally a timeout of the operation, in seconds. If the timeout expires, less than the desired number of streams (possibly none) will be returned. (default FOREVER) Returns a list of matching StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. Example: results = resolve_byprop("type", "EEG")
pylsl/pylsl.py
def resolve_byprop(prop, value, minimum=1, timeout=FOREVER): """Resolve all streams with a specific value for a given property. If the goal is to resolve a specific stream, this method is preferred over resolving all streams and then selecting the desired one. Keyword arguments: prop -- The StreamInfo property that should have a specific value (e.g., "name", "type", "source_id", or "desc/manufaturer"). value -- The string value that the property should have (e.g., "EEG" as the type property). minimum -- Return at least this many streams. (default 1) timeout -- Optionally a timeout of the operation, in seconds. If the timeout expires, less than the desired number of streams (possibly none) will be returned. (default FOREVER) Returns a list of matching StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. Example: results = resolve_Stream_byprop("type","EEG") """ # noinspection PyCallingNonCallable buffer = (c_void_p*1024)() num_found = lib.lsl_resolve_byprop(byref(buffer), 1024, c_char_p(str.encode(prop)), c_char_p(str.encode(value)), minimum, c_double(timeout)) return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
def resolve_byprop(prop, value, minimum=1, timeout=FOREVER): """Resolve all streams with a specific value for a given property. If the goal is to resolve a specific stream, this method is preferred over resolving all streams and then selecting the desired one. Keyword arguments: prop -- The StreamInfo property that should have a specific value (e.g., "name", "type", "source_id", or "desc/manufaturer"). value -- The string value that the property should have (e.g., "EEG" as the type property). minimum -- Return at least this many streams. (default 1) timeout -- Optionally a timeout of the operation, in seconds. If the timeout expires, less than the desired number of streams (possibly none) will be returned. (default FOREVER) Returns a list of matching StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. Example: results = resolve_Stream_byprop("type","EEG") """ # noinspection PyCallingNonCallable buffer = (c_void_p*1024)() num_found = lib.lsl_resolve_byprop(byref(buffer), 1024, c_char_p(str.encode(prop)), c_char_p(str.encode(value)), minimum, c_double(timeout)) return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
[ "Resolve", "all", "streams", "with", "a", "specific", "value", "for", "a", "given", "property", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L546-L575
[ "def", "resolve_byprop", "(", "prop", ",", "value", ",", "minimum", "=", "1", ",", "timeout", "=", "FOREVER", ")", ":", "# noinspection PyCallingNonCallable", "buffer", "=", "(", "c_void_p", "*", "1024", ")", "(", ")", "num_found", "=", "lib", ".", "lsl_resolve_byprop", "(", "byref", "(", "buffer", ")", ",", "1024", ",", "c_char_p", "(", "str", ".", "encode", "(", "prop", ")", ")", ",", "c_char_p", "(", "str", ".", "encode", "(", "value", ")", ")", ",", "minimum", ",", "c_double", "(", "timeout", ")", ")", "return", "[", "StreamInfo", "(", "handle", "=", "buffer", "[", "k", "]", ")", "for", "k", "in", "range", "(", "num_found", ")", "]" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
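Typical usage, resolving only EEG-typed streams instead of everything on the network (assumes a running EEG outlet):

from pylsl import StreamInlet, resolve_byprop

# Wait up to 5 s for at least one stream whose type property equals "EEG".
results = resolve_byprop('type', 'EEG', minimum=1, timeout=5.0)
if results:
    inlet = StreamInlet(results[0])
    print(inlet.info().name())
else:
    print('no EEG stream found within 5 s')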
test
resolve_bypred
Resolve all streams that match a given predicate. Advanced query that allows one to impose more conditions on the retrieved streams; the given string is an XPath 1.0 predicate for the <description> node (omitting the surrounding []'s), see also http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951. Keyword arguments: predicate -- The predicate string, e.g. "name='BioSemi'" or "type='EEG' and starts-with(name,'BioSemi') and count(description/desc/channels/channel)=32" minimum -- Return at least this many streams. (default 1) timeout -- Optionally a timeout of the operation, in seconds. If the timeout expires, less than the desired number of streams (possibly none) will be returned. (default FOREVER) Returns a list of matching StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet.
pylsl/pylsl.py
def resolve_bypred(predicate, minimum=1, timeout=FOREVER): """Resolve all streams that match a given predicate. Advanced query that allows to impose more conditions on the retrieved streams; the given string is an XPath 1.0 predicate for the <description> node (omitting the surrounding []'s), see also http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951. Keyword arguments: predicate -- The predicate string, e.g. "name='BioSemi'" or "type='EEG' and starts-with(name,'BioSemi') and count(description/desc/channels/channel)=32" minimum -- Return at least this many streams. (default 1) timeout -- Optionally a timeout of the operation, in seconds. If the timeout expires, less than the desired number of streams (possibly none) will be returned. (default FOREVER) Returns a list of matching StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. """ # noinspection PyCallingNonCallable buffer = (c_void_p*1024)() num_found = lib.lsl_resolve_bypred(byref(buffer), 1024, c_char_p(str.encode(predicate)), minimum, c_double(timeout)) return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
def resolve_bypred(predicate, minimum=1, timeout=FOREVER): """Resolve all streams that match a given predicate. Advanced query that allows to impose more conditions on the retrieved streams; the given string is an XPath 1.0 predicate for the <description> node (omitting the surrounding []'s), see also http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951. Keyword arguments: predicate -- The predicate string, e.g. "name='BioSemi'" or "type='EEG' and starts-with(name,'BioSemi') and count(description/desc/channels/channel)=32" minimum -- Return at least this many streams. (default 1) timeout -- Optionally a timeout of the operation, in seconds. If the timeout expires, less than the desired number of streams (possibly none) will be returned. (default FOREVER) Returns a list of matching StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. """ # noinspection PyCallingNonCallable buffer = (c_void_p*1024)() num_found = lib.lsl_resolve_bypred(byref(buffer), 1024, c_char_p(str.encode(predicate)), minimum, c_double(timeout)) return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
[ "Resolve", "all", "streams", "that", "match", "a", "given", "predicate", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L578-L605
[ "def", "resolve_bypred", "(", "predicate", ",", "minimum", "=", "1", ",", "timeout", "=", "FOREVER", ")", ":", "# noinspection PyCallingNonCallable", "buffer", "=", "(", "c_void_p", "*", "1024", ")", "(", ")", "num_found", "=", "lib", ".", "lsl_resolve_bypred", "(", "byref", "(", "buffer", ")", ",", "1024", ",", "c_char_p", "(", "str", ".", "encode", "(", "predicate", ")", ")", ",", "minimum", ",", "c_double", "(", "timeout", ")", ")", "return", "[", "StreamInfo", "(", "handle", "=", "buffer", "[", "k", "]", ")", "for", "k", "in", "range", "(", "num_found", ")", "]" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
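Usage sketch reusing the predicate example from the docstring (assumes a matching outlet exists):

from pylsl import resolve_bypred

# XPath 1.0 predicate over the stream description, without the surrounding []'s.
matches = resolve_bypred("type='EEG' and starts-with(name,'BioSemi')",
                         minimum=1, timeout=5.0)
for info in matches:
    print(info.name(), info.source_id())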
test
handle_error
Error handler function. Translates an error code into an exception.
pylsl/pylsl.py
def handle_error(errcode): """Error handler function. Translates an error code into an exception.""" if type(errcode) is c_int: errcode = errcode.value if errcode == 0: pass # no error elif errcode == -1: raise TimeoutError("the operation failed due to a timeout.") elif errcode == -2: raise LostError("the stream has been lost.") elif errcode == -3: raise InvalidArgumentError("an argument was incorrectly specified.") elif errcode == -4: raise InternalError("an internal error has occurred.") elif errcode < 0: raise RuntimeError("an unknown error has occurred.")
def handle_error(errcode): """Error handler function. Translates an error code into an exception.""" if type(errcode) is c_int: errcode = errcode.value if errcode == 0: pass # no error elif errcode == -1: raise TimeoutError("the operation failed due to a timeout.") elif errcode == -2: raise LostError("the stream has been lost.") elif errcode == -3: raise InvalidArgumentError("an argument was incorrectly specified.") elif errcode == -4: raise InternalError("an internal error has occurred.") elif errcode < 0: raise RuntimeError("an unknown error has occurred.")
[ "Error", "handler", "function", ".", "Translates", "an", "error", "code", "into", "an", "exception", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1129-L1144
[ "def", "handle_error", "(", "errcode", ")", ":", "if", "type", "(", "errcode", ")", "is", "c_int", ":", "errcode", "=", "errcode", ".", "value", "if", "errcode", "==", "0", ":", "pass", "# no error", "elif", "errcode", "==", "-", "1", ":", "raise", "TimeoutError", "(", "\"the operation failed due to a timeout.\"", ")", "elif", "errcode", "==", "-", "2", ":", "raise", "LostError", "(", "\"the stream has been lost.\"", ")", "elif", "errcode", "==", "-", "3", ":", "raise", "InvalidArgumentError", "(", "\"an argument was incorrectly specified.\"", ")", "elif", "errcode", "==", "-", "4", ":", "raise", "InternalError", "(", "\"an internal error has occurred.\"", ")", "elif", "errcode", "<", "0", ":", "raise", "RuntimeError", "(", "\"an unknown error has occurred.\"", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
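A quick illustration of the mapping, assuming handle_error and the exception classes can be imported from pylsl.pylsl and that the liblsl binary loads on import:

from ctypes import c_int
from pylsl.pylsl import LostError, handle_error

handle_error(c_int(0))        # 0 means "no error": returns silently
try:
    handle_error(c_int(-2))   # simulate the "stream lost" code
except LostError as err:
    print('caught:', err)     # caught: the stream has been lost.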
test
StreamOutlet.push_sample
Push a sample into the outlet. Each entry in the list corresponds to one channel. Keyword arguments: x -- A list of values to push (one per channel). timestamp -- Optionally the capture time of the sample, in agreement with local_clock(); if omitted, the current time is used. (default 0.0) pushthrough -- Whether to push the sample through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True)
pylsl/pylsl.py
def push_sample(self, x, timestamp=0.0, pushthrough=True): """Push a sample into the outlet. Each entry in the list corresponds to one channel. Keyword arguments: x -- A list of values to push (one per channel). timestamp -- Optionally the capture time of the sample, in agreement with local_clock(); if omitted, the current time is used. (default 0.0) pushthrough -- Whether to push the sample through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True) """ if len(x) == self.channel_count: if self.channel_format == cf_string: x = [v.encode('utf-8') for v in x] handle_error(self.do_push_sample(self.obj, self.sample_type(*x), c_double(timestamp), c_int(pushthrough))) else: raise ValueError("length of the data must correspond to the " "stream's channel count.")
def push_sample(self, x, timestamp=0.0, pushthrough=True): """Push a sample into the outlet. Each entry in the list corresponds to one channel. Keyword arguments: x -- A list of values to push (one per channel). timestamp -- Optionally the capture time of the sample, in agreement with local_clock(); if omitted, the current time is used. (default 0.0) pushthrough -- Whether to push the sample through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True) """ if len(x) == self.channel_count: if self.channel_format == cf_string: x = [v.encode('utf-8') for v in x] handle_error(self.do_push_sample(self.obj, self.sample_type(*x), c_double(timestamp), c_int(pushthrough))) else: raise ValueError("length of the data must correspond to the " "stream's channel count.")
[ "Push", "a", "sample", "into", "the", "outlet", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L430-L455
[ "def", "push_sample", "(", "self", ",", "x", ",", "timestamp", "=", "0.0", ",", "pushthrough", "=", "True", ")", ":", "if", "len", "(", "x", ")", "==", "self", ".", "channel_count", ":", "if", "self", ".", "channel_format", "==", "cf_string", ":", "x", "=", "[", "v", ".", "encode", "(", "'utf-8'", ")", "for", "v", "in", "x", "]", "handle_error", "(", "self", ".", "do_push_sample", "(", "self", ".", "obj", ",", "self", ".", "sample_type", "(", "*", "x", ")", ",", "c_double", "(", "timestamp", ")", ",", "c_int", "(", "pushthrough", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"length of the data must correspond to the \"", "\"stream's channel count.\"", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
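A minimal outlet that pushes one sample per channel at roughly 100 Hz (the StreamInfo constructor arguments are assumed from the pylsl API, not shown above):

import math
import time
from pylsl import StreamInfo, StreamOutlet, local_clock

# 8-channel float32 stream, nominally 100 Hz.
info = StreamInfo('ExampleStream', 'EEG', 8, 100, 'float32', 'example-uid-1234')
outlet = StreamOutlet(info)

for i in range(1000):
    sample = [math.sin(0.1 * i + ch) for ch in range(8)]   # one value per channel
    outlet.push_sample(sample, local_clock())              # explicit stamp; 0.0 would mean "now"
    time.sleep(0.01)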
test
StreamOutlet.push_chunk
Push a list of samples into the outlet. x -- A list of samples, either as a list of lists or a list of multiplexed values. timestamp -- Optionally the capture time of the most recent sample, in agreement with local_clock(); if omitted, the current time is used. The time stamps of other samples are automatically derived according to the sampling rate of the stream. (default 0.0) pushthrough -- Whether to push the chunk through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True)
pylsl/pylsl.py
def push_chunk(self, x, timestamp=0.0, pushthrough=True): """Push a list of samples into the outlet. samples -- A list of samples, either as a list of lists or a list of multiplexed values. timestamp -- Optionally the capture time of the most recent sample, in agreement with local_clock(); if omitted, the current time is used. The time stamps of other samples are automatically derived according to the sampling rate of the stream. (default 0.0) pushthrough Whether to push the chunk through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True) """ try: n_values = self.channel_count * len(x) data_buff = (self.value_type * n_values).from_buffer(x) handle_error(self.do_push_chunk(self.obj, data_buff, c_long(n_values), c_double(timestamp), c_int(pushthrough))) except TypeError: if len(x): if type(x[0]) is list: x = [v for sample in x for v in sample] if self.channel_format == cf_string: x = [v.encode('utf-8') for v in x] if len(x) % self.channel_count == 0: constructor = self.value_type*len(x) # noinspection PyCallingNonCallable handle_error(self.do_push_chunk(self.obj, constructor(*x), c_long(len(x)), c_double(timestamp), c_int(pushthrough))) else: raise ValueError("each sample must have the same number of " "channels.")
def push_chunk(self, x, timestamp=0.0, pushthrough=True): """Push a list of samples into the outlet. samples -- A list of samples, either as a list of lists or a list of multiplexed values. timestamp -- Optionally the capture time of the most recent sample, in agreement with local_clock(); if omitted, the current time is used. The time stamps of other samples are automatically derived according to the sampling rate of the stream. (default 0.0) pushthrough Whether to push the chunk through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True) """ try: n_values = self.channel_count * len(x) data_buff = (self.value_type * n_values).from_buffer(x) handle_error(self.do_push_chunk(self.obj, data_buff, c_long(n_values), c_double(timestamp), c_int(pushthrough))) except TypeError: if len(x): if type(x[0]) is list: x = [v for sample in x for v in sample] if self.channel_format == cf_string: x = [v.encode('utf-8') for v in x] if len(x) % self.channel_count == 0: constructor = self.value_type*len(x) # noinspection PyCallingNonCallable handle_error(self.do_push_chunk(self.obj, constructor(*x), c_long(len(x)), c_double(timestamp), c_int(pushthrough))) else: raise ValueError("each sample must have the same number of " "channels.")
[ "Push", "a", "list", "of", "samples", "into", "the", "outlet", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L457-L495
[ "def", "push_chunk", "(", "self", ",", "x", ",", "timestamp", "=", "0.0", ",", "pushthrough", "=", "True", ")", ":", "try", ":", "n_values", "=", "self", ".", "channel_count", "*", "len", "(", "x", ")", "data_buff", "=", "(", "self", ".", "value_type", "*", "n_values", ")", ".", "from_buffer", "(", "x", ")", "handle_error", "(", "self", ".", "do_push_chunk", "(", "self", ".", "obj", ",", "data_buff", ",", "c_long", "(", "n_values", ")", ",", "c_double", "(", "timestamp", ")", ",", "c_int", "(", "pushthrough", ")", ")", ")", "except", "TypeError", ":", "if", "len", "(", "x", ")", ":", "if", "type", "(", "x", "[", "0", "]", ")", "is", "list", ":", "x", "=", "[", "v", "for", "sample", "in", "x", "for", "v", "in", "sample", "]", "if", "self", ".", "channel_format", "==", "cf_string", ":", "x", "=", "[", "v", ".", "encode", "(", "'utf-8'", ")", "for", "v", "in", "x", "]", "if", "len", "(", "x", ")", "%", "self", ".", "channel_count", "==", "0", ":", "constructor", "=", "self", ".", "value_type", "*", "len", "(", "x", ")", "# noinspection PyCallingNonCallable", "handle_error", "(", "self", ".", "do_push_chunk", "(", "self", ".", "obj", ",", "constructor", "(", "*", "x", ")", ",", "c_long", "(", "len", "(", "x", ")", ")", ",", "c_double", "(", "timestamp", ")", ",", "c_int", "(", "pushthrough", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"each sample must have the same number of \"", "\"channels.\"", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
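Both accepted layouts for x, shown on a small 4-channel outlet (the chunk_size argument is assumed from the StreamOutlet constructor):

from pylsl import StreamInfo, StreamOutlet

info = StreamInfo('ChunkExample', 'EEG', 4, 100, 'float32', 'chunk-uid-1')
outlet = StreamOutlet(info, chunk_size=32)

# A list of samples (list of lists, one inner list per sample) ...
outlet.push_chunk([[0.1, 0.2, 0.3, 0.4],
                   [0.5, 0.6, 0.7, 0.8]])
# ... or the same data multiplexed into a flat list whose length is a multiple of the channel count.
outlet.push_chunk([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])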
test
StreamOutlet.wait_for_consumers
Wait until some consumer shows up (without wasting resources). Returns True if the wait was successful, False if the timeout expired.
pylsl/pylsl.py
def wait_for_consumers(self, timeout): """Wait until some consumer shows up (without wasting resources). Returns True if the wait was successful, False if the timeout expired. """ return bool(lib.lsl_wait_for_consumers(self.obj, c_double(timeout)))
def wait_for_consumers(self, timeout): """Wait until some consumer shows up (without wasting resources). Returns True if the wait was successful, False if the timeout expired. """ return bool(lib.lsl_wait_for_consumers(self.obj, c_double(timeout)))
[ "Wait", "until", "some", "consumer", "shows", "up", "(", "without", "wasting", "resources", ")", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L506-L512
[ "def", "wait_for_consumers", "(", "self", ",", "timeout", ")", ":", "return", "bool", "(", "lib", ".", "lsl_wait_for_consumers", "(", "self", ".", "obj", ",", "c_double", "(", "timeout", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
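Useful for marker streams where pushing before anyone listens would be wasted; a short sketch (constructor arguments assumed from the pylsl API):

from pylsl import StreamInfo, StreamOutlet

outlet = StreamOutlet(StreamInfo('WaitExample', 'Markers', 1, 0, 'string', 'wait-uid-1'))

# Block for up to 10 s until at least one inlet connects.
if outlet.wait_for_consumers(timeout=10.0):
    outlet.push_sample(['experiment-start'])
else:
    print('no consumer connected within 10 s')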
test
StreamInlet.info
Retrieve the complete information of the given stream. This includes the extended description. Can be invoked at any time during the stream's lifetime. Keyword arguments: timeout -- Timeout of the operation. (default FOREVER) Throws a TimeoutError (if the timeout expires), or LostError (if the stream source has been lost).
pylsl/pylsl.py
def info(self, timeout=FOREVER): """Retrieve the complete information of the given stream. This includes the extended description. Can be invoked at any time of the stream's lifetime. Keyword arguments: timeout -- Timeout of the operation. (default FOREVER) Throws a TimeoutError (if the timeout expires), or LostError (if the stream source has been lost). """ errcode = c_int() result = lib.lsl_get_fullinfo(self.obj, c_double(timeout), byref(errcode)) handle_error(errcode) return StreamInfo(handle=result)
def info(self, timeout=FOREVER): """Retrieve the complete information of the given stream. This includes the extended description. Can be invoked at any time of the stream's lifetime. Keyword arguments: timeout -- Timeout of the operation. (default FOREVER) Throws a TimeoutError (if the timeout expires), or LostError (if the stream source has been lost). """ errcode = c_int() result = lib.lsl_get_fullinfo(self.obj, c_double(timeout), byref(errcode)) handle_error(errcode) return StreamInfo(handle=result)
[ "Retrieve", "the", "complete", "information", "of", "the", "given", "stream", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L688-L705
[ "def", "info", "(", "self", ",", "timeout", "=", "FOREVER", ")", ":", "errcode", "=", "c_int", "(", ")", "result", "=", "lib", ".", "lsl_get_fullinfo", "(", "self", ".", "obj", ",", "c_double", "(", "timeout", ")", ",", "byref", "(", "errcode", ")", ")", "handle_error", "(", "errcode", ")", "return", "StreamInfo", "(", "handle", "=", "result", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
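Unlike the StreamInfo objects returned by the resolver functions, the one returned here carries the full description; for example (assumes a resolvable EEG stream):

from pylsl import StreamInlet, resolve_byprop

streams = resolve_byprop('type', 'EEG', timeout=5.0)
inlet = StreamInlet(streams[0])

full = inlet.info(timeout=5.0)          # may raise TimeoutError or LostError
print(full.name(), full.nominal_srate())
print(full.as_xml())                    # includes the extended desc element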
test
StreamInlet.open_stream
Subscribe to the data stream. All samples pushed in at the other end from this moment onwards will be queued and eventually be delivered in response to pull_sample() or pull_chunk() calls. Pulling a sample without some preceding open_stream is permitted (the stream will then be opened implicitly). Keyword arguments: timeout -- Optional timeout of the operation (default FOREVER). Throws a TimeoutError (if the timeout expires), or LostError (if the stream source has been lost).
pylsl/pylsl.py
def open_stream(self, timeout=FOREVER): """Subscribe to the data stream. All samples pushed in at the other end from this moment onwards will be queued and eventually be delivered in response to pull_sample() or pull_chunk() calls. Pulling a sample without some preceding open_stream is permitted (the stream will then be opened implicitly). Keyword arguments: timeout -- Optional timeout of the operation (default FOREVER). Throws a TimeoutError (if the timeout expires), or LostError (if the stream source has been lost). """ errcode = c_int() lib.lsl_open_stream(self.obj, c_double(timeout), byref(errcode)) handle_error(errcode)
def open_stream(self, timeout=FOREVER): """Subscribe to the data stream. All samples pushed in at the other end from this moment onwards will be queued and eventually be delivered in response to pull_sample() or pull_chunk() calls. Pulling a sample without some preceding open_stream is permitted (the stream will then be opened implicitly). Keyword arguments: timeout -- Optional timeout of the operation (default FOREVER). Throws a TimeoutError (if the timeout expires), or LostError (if the stream source has been lost). """ errcode = c_int() lib.lsl_open_stream(self.obj, c_double(timeout), byref(errcode)) handle_error(errcode)
[ "Subscribe", "to", "the", "data", "stream", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L707-L724
[ "def", "open_stream", "(", "self", ",", "timeout", "=", "FOREVER", ")", ":", "errcode", "=", "c_int", "(", ")", "lib", ".", "lsl_open_stream", "(", "self", ".", "obj", ",", "c_double", "(", "timeout", ")", ",", "byref", "(", "errcode", ")", ")", "handle_error", "(", "errcode", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
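Explicitly opening the stream is optional; calling it up front simply starts buffering before the first pull. A short sketch:

from pylsl import StreamInlet, resolve_byprop

inlet = StreamInlet(resolve_byprop('type', 'EEG', timeout=5.0)[0])

inlet.open_stream(timeout=5.0)      # samples pushed from now on are queued for this inlet
sample, timestamp = inlet.pull_sample()
print(timestamp, sample)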
test
StreamInlet.time_correction
Retrieve an estimated time correction offset for the given stream. The first call to this function takes several milliseconds until a reliable first estimate is obtained. Subsequent calls are instantaneous (and rely on periodic background updates). The precision of these estimates should be below 1 ms (empirically within +/-0.2 ms). Keyword arguments: timeout -- Timeout to acquire the first time-correction estimate (default FOREVER). Returns the current time correction estimate. This is the number that needs to be added to a time stamp that was remotely generated via local_clock() to map it into the local clock domain of this machine. Throws a TimeoutError (if the timeout expires), or LostError (if the stream source has been lost).
pylsl/pylsl.py
def time_correction(self, timeout=FOREVER): """Retrieve an estimated time correction offset for the given stream. The first call to this function takes several miliseconds until a reliable first estimate is obtained. Subsequent calls are instantaneous (and rely on periodic background updates). The precision of these estimates should be below 1 ms (empirically within +/-0.2 ms). Keyword arguments: timeout -- Timeout to acquire the first time-correction estimate (default FOREVER). Returns the current time correction estimate. This is the number that needs to be added to a time stamp that was remotely generated via local_clock() to map it into the local clock domain of this machine. Throws a TimeoutError (if the timeout expires), or LostError (if the stream source has been lost). """ errcode = c_int() result = lib.lsl_time_correction(self.obj, c_double(timeout), byref(errcode)) handle_error(errcode) return result
def time_correction(self, timeout=FOREVER): """Retrieve an estimated time correction offset for the given stream. The first call to this function takes several miliseconds until a reliable first estimate is obtained. Subsequent calls are instantaneous (and rely on periodic background updates). The precision of these estimates should be below 1 ms (empirically within +/-0.2 ms). Keyword arguments: timeout -- Timeout to acquire the first time-correction estimate (default FOREVER). Returns the current time correction estimate. This is the number that needs to be added to a time stamp that was remotely generated via local_clock() to map it into the local clock domain of this machine. Throws a TimeoutError (if the timeout expires), or LostError (if the stream source has been lost). """ errcode = c_int() result = lib.lsl_time_correction(self.obj, c_double(timeout), byref(errcode)) handle_error(errcode) return result
[ "Retrieve", "an", "estimated", "time", "correction", "offset", "for", "the", "given", "stream", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L739-L764
[ "def", "time_correction", "(", "self", ",", "timeout", "=", "FOREVER", ")", ":", "errcode", "=", "c_int", "(", ")", "result", "=", "lib", ".", "lsl_time_correction", "(", "self", ".", "obj", ",", "c_double", "(", "timeout", ")", ",", "byref", "(", "errcode", ")", ")", "handle_error", "(", "errcode", ")", "return", "result" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
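Typical use: add the returned offset to remotely generated timestamps to express them in this machine's local_clock() domain (assumes a resolvable EEG stream):

from pylsl import StreamInlet, resolve_byprop

inlet = StreamInlet(resolve_byprop('type', 'EEG', timeout=5.0)[0])

offset = inlet.time_correction(timeout=5.0)   # first call may take a few milliseconds
sample, remote_ts = inlet.pull_sample()
local_ts = remote_ts + offset                 # remote stamp mapped into the local clock domain
print(local_ts, sample)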
test
StreamInlet.pull_sample
Pull a sample from the inlet and return it. Keyword arguments: timeout -- The timeout for this operation, if any. (default FOREVER) If this is passed as 0.0, then the function returns only a sample if one is buffered for immediate pickup. Returns a tuple (sample,timestamp) where sample is a list of channel values and timestamp is the capture time of the sample on the remote machine, or (None,None) if no new sample was available. To remap this time stamp to the local clock, add the value returned by .time_correction() to it. Throws a LostError if the stream source has been lost. Note that, if the timeout expires, no TimeoutError is thrown (because this case is not considered an error).
pylsl/pylsl.py
def pull_sample(self, timeout=FOREVER, sample=None): """Pull a sample from the inlet and return it. Keyword arguments: timeout -- The timeout for this operation, if any. (default FOREVER) If this is passed as 0.0, then the function returns only a sample if one is buffered for immediate pickup. Returns a tuple (sample,timestamp) where sample is a list of channel values and timestamp is the capture time of the sample on the remote machine, or (None,None) if no new sample was available. To remap this time stamp to the local clock, add the value returned by .time_correction() to it. Throws a LostError if the stream source has been lost. Note that, if the timeout expires, no TimeoutError is thrown (because this case is not considered an error). """ # support for the legacy API if type(timeout) is list: assign_to = timeout timeout = sample if type(sample) is float else 0.0 else: assign_to = None errcode = c_int() timestamp = self.do_pull_sample(self.obj, byref(self.sample), self.channel_count, c_double(timeout), byref(errcode)) handle_error(errcode) if timestamp: sample = [v for v in self.sample] if self.channel_format == cf_string: sample = [v.decode('utf-8') for v in sample] if assign_to is not None: assign_to[:] = sample return sample, timestamp else: return None, None
def pull_sample(self, timeout=FOREVER, sample=None): """Pull a sample from the inlet and return it. Keyword arguments: timeout -- The timeout for this operation, if any. (default FOREVER) If this is passed as 0.0, then the function returns only a sample if one is buffered for immediate pickup. Returns a tuple (sample,timestamp) where sample is a list of channel values and timestamp is the capture time of the sample on the remote machine, or (None,None) if no new sample was available. To remap this time stamp to the local clock, add the value returned by .time_correction() to it. Throws a LostError if the stream source has been lost. Note that, if the timeout expires, no TimeoutError is thrown (because this case is not considered an error). """ # support for the legacy API if type(timeout) is list: assign_to = timeout timeout = sample if type(sample) is float else 0.0 else: assign_to = None errcode = c_int() timestamp = self.do_pull_sample(self.obj, byref(self.sample), self.channel_count, c_double(timeout), byref(errcode)) handle_error(errcode) if timestamp: sample = [v for v in self.sample] if self.channel_format == cf_string: sample = [v.decode('utf-8') for v in sample] if assign_to is not None: assign_to[:] = sample return sample, timestamp else: return None, None
[ "Pull", "a", "sample", "from", "the", "inlet", "and", "return", "it", ".", "Keyword", "arguments", ":", "timeout", "--", "The", "timeout", "for", "this", "operation", "if", "any", ".", "(", "default", "FOREVER", ")", "If", "this", "is", "passed", "as", "0", ".", "0", "then", "the", "function", "returns", "only", "a", "sample", "if", "one", "is", "buffered", "for", "immediate", "pickup", ".", "Returns", "a", "tuple", "(", "sample", "timestamp", ")", "where", "sample", "is", "a", "list", "of", "channel", "values", "and", "timestamp", "is", "the", "capture", "time", "of", "the", "sample", "on", "the", "remote", "machine", "or", "(", "None", "None", ")", "if", "no", "new", "sample", "was", "available", ".", "To", "remap", "this", "time", "stamp", "to", "the", "local", "clock", "add", "the", "value", "returned", "by", ".", "time_correction", "()", "to", "it", ".", "Throws", "a", "LostError", "if", "the", "stream", "source", "has", "been", "lost", ".", "Note", "that", "if", "the", "timeout", "expires", "no", "TimeoutError", "is", "thrown", "(", "because", "this", "case", "is", "not", "considered", "an", "error", ")", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L766-L806
[ "def", "pull_sample", "(", "self", ",", "timeout", "=", "FOREVER", ",", "sample", "=", "None", ")", ":", "# support for the legacy API", "if", "type", "(", "timeout", ")", "is", "list", ":", "assign_to", "=", "timeout", "timeout", "=", "sample", "if", "type", "(", "sample", ")", "is", "float", "else", "0.0", "else", ":", "assign_to", "=", "None", "errcode", "=", "c_int", "(", ")", "timestamp", "=", "self", ".", "do_pull_sample", "(", "self", ".", "obj", ",", "byref", "(", "self", ".", "sample", ")", ",", "self", ".", "channel_count", ",", "c_double", "(", "timeout", ")", ",", "byref", "(", "errcode", ")", ")", "handle_error", "(", "errcode", ")", "if", "timestamp", ":", "sample", "=", "[", "v", "for", "v", "in", "self", ".", "sample", "]", "if", "self", ".", "channel_format", "==", "cf_string", ":", "sample", "=", "[", "v", ".", "decode", "(", "'utf-8'", ")", "for", "v", "in", "sample", "]", "if", "assign_to", "is", "not", "None", ":", "assign_to", "[", ":", "]", "=", "sample", "return", "sample", ",", "timestamp", "else", ":", "return", "None", ",", "None" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
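A simple polling loop; note that an expired timeout yields (None, None) rather than raising:

from pylsl import StreamInlet, resolve_byprop

inlet = StreamInlet(resolve_byprop('type', 'EEG', timeout=5.0)[0])

while True:
    sample, timestamp = inlet.pull_sample(timeout=1.0)
    if timestamp is None:            # timeout expired: not treated as an error
        print('no data for 1 s')
        continue
    print(timestamp, sample)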
test
StreamInlet.pull_chunk
Pull a chunk of samples from the inlet. Keyword arguments: timeout -- The timeout of the operation; if passed as 0.0, then only samples available for immediate pickup will be returned. (default 0.0) max_samples -- Maximum number of samples to return. (default 1024) dest_obj -- A Python object that supports the buffer interface. If this is provided then the dest_obj will be updated in place and the samples list returned by this method will be empty. It is up to the caller to trim the buffer to the appropriate number of samples. A numpy buffer must be order='C' (default None) Returns a tuple (samples,timestamps) where samples is a list of samples (each itself a list of values), and timestamps is a list of time-stamps. Throws a LostError if the stream source has been lost.
pylsl/pylsl.py
def pull_chunk(self, timeout=0.0, max_samples=1024, dest_obj=None): """Pull a chunk of samples from the inlet. Keyword arguments: timeout -- The timeout of the operation; if passed as 0.0, then only samples available for immediate pickup will be returned. (default 0.0) max_samples -- Maximum number of samples to return. (default 1024) dest_obj -- A Python object that supports the buffer interface. If this is provided then the dest_obj will be updated in place and the samples list returned by this method will be empty. It is up to the caller to trim the buffer to the appropriate number of samples. A numpy buffer must be order='C' (default None) Returns a tuple (samples,timestamps) where samples is a list of samples (each itself a list of values), and timestamps is a list of time-stamps. Throws a LostError if the stream source has been lost. """ # look up a pre-allocated buffer of appropriate length num_channels = self.channel_count max_values = max_samples*num_channels if max_samples not in self.buffers: # noinspection PyCallingNonCallable self.buffers[max_samples] = ((self.value_type*max_values)(), (c_double*max_samples)()) if dest_obj is not None: data_buff = (self.value_type * max_values).from_buffer(dest_obj) else: data_buff = self.buffers[max_samples][0] ts_buff = self.buffers[max_samples][1] # read data into it errcode = c_int() # noinspection PyCallingNonCallable num_elements = self.do_pull_chunk(self.obj, byref(data_buff), byref(ts_buff), max_values, max_samples, c_double(timeout), byref(errcode)) handle_error(errcode) # return results (note: could offer a more efficient format in the # future, e.g., a numpy array) num_samples = num_elements/num_channels if dest_obj is None: samples = [[data_buff[s*num_channels+c] for c in range(num_channels)] for s in range(int(num_samples))] if self.channel_format == cf_string: samples = [[v.decode('utf-8') for v in s] for s in samples] free_char_p_array_memory(data_buff, max_values) else: samples = None timestamps = [ts_buff[s] for s in range(int(num_samples))] return samples, timestamps
def pull_chunk(self, timeout=0.0, max_samples=1024, dest_obj=None): """Pull a chunk of samples from the inlet. Keyword arguments: timeout -- The timeout of the operation; if passed as 0.0, then only samples available for immediate pickup will be returned. (default 0.0) max_samples -- Maximum number of samples to return. (default 1024) dest_obj -- A Python object that supports the buffer interface. If this is provided then the dest_obj will be updated in place and the samples list returned by this method will be empty. It is up to the caller to trim the buffer to the appropriate number of samples. A numpy buffer must be order='C' (default None) Returns a tuple (samples,timestamps) where samples is a list of samples (each itself a list of values), and timestamps is a list of time-stamps. Throws a LostError if the stream source has been lost. """ # look up a pre-allocated buffer of appropriate length num_channels = self.channel_count max_values = max_samples*num_channels if max_samples not in self.buffers: # noinspection PyCallingNonCallable self.buffers[max_samples] = ((self.value_type*max_values)(), (c_double*max_samples)()) if dest_obj is not None: data_buff = (self.value_type * max_values).from_buffer(dest_obj) else: data_buff = self.buffers[max_samples][0] ts_buff = self.buffers[max_samples][1] # read data into it errcode = c_int() # noinspection PyCallingNonCallable num_elements = self.do_pull_chunk(self.obj, byref(data_buff), byref(ts_buff), max_values, max_samples, c_double(timeout), byref(errcode)) handle_error(errcode) # return results (note: could offer a more efficient format in the # future, e.g., a numpy array) num_samples = num_elements/num_channels if dest_obj is None: samples = [[data_buff[s*num_channels+c] for c in range(num_channels)] for s in range(int(num_samples))] if self.channel_format == cf_string: samples = [[v.decode('utf-8') for v in s] for s in samples] free_char_p_array_memory(data_buff, max_values) else: samples = None timestamps = [ts_buff[s] for s in range(int(num_samples))] return samples, timestamps
[ "Pull", "a", "chunk", "of", "samples", "from", "the", "inlet", ".", "Keyword", "arguments", ":", "timeout", "--", "The", "timeout", "of", "the", "operation", ";", "if", "passed", "as", "0", ".", "0", "then", "only", "samples", "available", "for", "immediate", "pickup", "will", "be", "returned", ".", "(", "default", "0", ".", "0", ")", "max_samples", "--", "Maximum", "number", "of", "samples", "to", "return", ".", "(", "default", "1024", ")", "dest_obj", "--", "A", "Python", "object", "that", "supports", "the", "buffer", "interface", ".", "If", "this", "is", "provided", "then", "the", "dest_obj", "will", "be", "updated", "in", "place", "and", "the", "samples", "list", "returned", "by", "this", "method", "will", "be", "empty", ".", "It", "is", "up", "to", "the", "caller", "to", "trim", "the", "buffer", "to", "the", "appropriate", "number", "of", "samples", ".", "A", "numpy", "buffer", "must", "be", "order", "=", "C", "(", "default", "None", ")", "Returns", "a", "tuple", "(", "samples", "timestamps", ")", "where", "samples", "is", "a", "list", "of", "samples", "(", "each", "itself", "a", "list", "of", "values", ")", "and", "timestamps", "is", "a", "list", "of", "time", "-", "stamps", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L808-L865
[ "def", "pull_chunk", "(", "self", ",", "timeout", "=", "0.0", ",", "max_samples", "=", "1024", ",", "dest_obj", "=", "None", ")", ":", "# look up a pre-allocated buffer of appropriate length ", "num_channels", "=", "self", ".", "channel_count", "max_values", "=", "max_samples", "*", "num_channels", "if", "max_samples", "not", "in", "self", ".", "buffers", ":", "# noinspection PyCallingNonCallable", "self", ".", "buffers", "[", "max_samples", "]", "=", "(", "(", "self", ".", "value_type", "*", "max_values", ")", "(", ")", ",", "(", "c_double", "*", "max_samples", ")", "(", ")", ")", "if", "dest_obj", "is", "not", "None", ":", "data_buff", "=", "(", "self", ".", "value_type", "*", "max_values", ")", ".", "from_buffer", "(", "dest_obj", ")", "else", ":", "data_buff", "=", "self", ".", "buffers", "[", "max_samples", "]", "[", "0", "]", "ts_buff", "=", "self", ".", "buffers", "[", "max_samples", "]", "[", "1", "]", "# read data into it", "errcode", "=", "c_int", "(", ")", "# noinspection PyCallingNonCallable", "num_elements", "=", "self", ".", "do_pull_chunk", "(", "self", ".", "obj", ",", "byref", "(", "data_buff", ")", ",", "byref", "(", "ts_buff", ")", ",", "max_values", ",", "max_samples", ",", "c_double", "(", "timeout", ")", ",", "byref", "(", "errcode", ")", ")", "handle_error", "(", "errcode", ")", "# return results (note: could offer a more efficient format in the ", "# future, e.g., a numpy array)", "num_samples", "=", "num_elements", "/", "num_channels", "if", "dest_obj", "is", "None", ":", "samples", "=", "[", "[", "data_buff", "[", "s", "*", "num_channels", "+", "c", "]", "for", "c", "in", "range", "(", "num_channels", ")", "]", "for", "s", "in", "range", "(", "int", "(", "num_samples", ")", ")", "]", "if", "self", ".", "channel_format", "==", "cf_string", ":", "samples", "=", "[", "[", "v", ".", "decode", "(", "'utf-8'", ")", "for", "v", "in", "s", "]", "for", "s", "in", "samples", "]", "free_char_p_array_memory", "(", "data_buff", ",", "max_values", ")", "else", ":", "samples", "=", "None", "timestamps", "=", "[", "ts_buff", "[", "s", "]", "for", "s", "in", "range", "(", "int", "(", "num_samples", ")", ")", "]", "return", "samples", ",", "timestamps" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
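Not part of the dataset record above: a minimal usage sketch for the pull_chunk method it documents, assuming the usual pylsl entry points (resolve_stream, StreamInlet) are available and that a stream of type 'EEG' is currently being published on the network.

# Sketch: pulling chunked data from an LSL inlet (assumes a live 'EEG' stream).
from pylsl import StreamInlet, resolve_stream

streams = resolve_stream('type', 'EEG')          # blocks until a matching stream is found
inlet = StreamInlet(streams[0])
for _ in range(10):
    # samples: one list of channel values per sample; timestamps: parallel list of floats
    samples, timestamps = inlet.pull_chunk(timeout=1.0, max_samples=32)
    if samples:
        print(len(samples), 'samples, first:', samples[0], 'at', timestamps[0])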
test
XMLElement.child
Get a child with a specified name.
pylsl/pylsl.py
def child(self, name): """Get a child with a specified name.""" return XMLElement(lib.lsl_child(self.e, str.encode(name)))
def child(self, name): """Get a child with a specified name.""" return XMLElement(lib.lsl_child(self.e, str.encode(name)))
[ "Get", "a", "child", "with", "a", "specified", "name", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L920-L922
[ "def", "child", "(", "self", ",", "name", ")", ":", "return", "XMLElement", "(", "lib", ".", "lsl_child", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.next_sibling
Get the next sibling in the children list of the parent node. If a name is provided, the next sibling with the given name is returned.
pylsl/pylsl.py
def next_sibling(self, name=None): """Get the next sibling in the children list of the parent node. If a name is provided, the next sibling with the given name is returned. """ if name is None: return XMLElement(lib.lsl_next_sibling(self.e)) else: return XMLElement(lib.lsl_next_sibling_n(self.e, str.encode(name)))
def next_sibling(self, name=None): """Get the next sibling in the children list of the parent node. If a name is provided, the next sibling with the given name is returned. """ if name is None: return XMLElement(lib.lsl_next_sibling(self.e)) else: return XMLElement(lib.lsl_next_sibling_n(self.e, str.encode(name)))
[ "Get", "the", "next", "sibling", "in", "the", "children", "list", "of", "the", "parent", "node", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L924-L933
[ "def", "next_sibling", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "return", "XMLElement", "(", "lib", ".", "lsl_next_sibling", "(", "self", ".", "e", ")", ")", "else", ":", "return", "XMLElement", "(", "lib", ".", "lsl_next_sibling_n", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.previous_sibling
Get the previous sibling in the children list of the parent node. If a name is provided, the previous sibling with the given name is returned.
pylsl/pylsl.py
def previous_sibling(self, name=None): """Get the previous sibling in the children list of the parent node. If a name is provided, the previous sibling with the given name is returned. """ if name is None: return XMLElement(lib.lsl_previous_sibling(self.e)) else: return XMLElement(lib.lsl_previous_sibling_n(self.e, str.encode(name)))
def previous_sibling(self, name=None): """Get the previous sibling in the children list of the parent node. If a name is provided, the previous sibling with the given name is returned. """ if name is None: return XMLElement(lib.lsl_previous_sibling(self.e)) else: return XMLElement(lib.lsl_previous_sibling_n(self.e, str.encode(name)))
[ "Get", "the", "previous", "sibling", "in", "the", "children", "list", "of", "the", "parent", "node", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L935-L946
[ "def", "previous_sibling", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "return", "XMLElement", "(", "lib", ".", "lsl_previous_sibling", "(", "self", ".", "e", ")", ")", "else", ":", "return", "XMLElement", "(", "lib", ".", "lsl_previous_sibling_n", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.child_value
Get child value (value of the first child that is text). If a name is provided, then the value of the first child with the given name is returned.
pylsl/pylsl.py
def child_value(self, name=None): """Get child value (value of the first child that is text). If a name is provided, then the value of the first child with the given name is returned. """ if name is None: res = lib.lsl_child_value(self.e) else: res = lib.lsl_child_value_n(self.e, str.encode(name)) return res.decode('utf-8')
def child_value(self, name=None): """Get child value (value of the first child that is text). If a name is provided, then the value of the first child with the given name is returned. """ if name is None: res = lib.lsl_child_value(self.e) else: res = lib.lsl_child_value_n(self.e, str.encode(name)) return res.decode('utf-8')
[ "Get", "child", "value", "(", "value", "of", "the", "first", "child", "that", "is", "text", ")", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L974-L985
[ "def", "child_value", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "res", "=", "lib", ".", "lsl_child_value", "(", "self", ".", "e", ")", "else", ":", "res", "=", "lib", ".", "lsl_child_value_n", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ")", "return", "res", ".", "decode", "(", "'utf-8'", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
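A hedged sketch of how the child, child_value and next_sibling accessors above are typically chained to walk channel metadata. It assumes pylsl is importable, an 'EEG' stream is on the network, and that the stream description uses the common channels/channel/label layout; none of that is guaranteed by the records themselves.

# Sketch: reading channel labels through the XMLElement accessors above.
from pylsl import StreamInlet, resolve_stream

inlet = StreamInlet(resolve_stream('type', 'EEG')[0])
ch = inlet.info().desc().child("channels").child("channel")
labels = []
while not ch.empty():              # empty() turns True once we walk past the last sibling
    labels.append(ch.child_value("label"))
    ch = ch.next_sibling()
print(labels)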
test
XMLElement.append_child_value
Append a child node with a given name, which has a (nameless) plain-text child with the given text value.
pylsl/pylsl.py
def append_child_value(self, name, value): """Append a child node with a given name, which has a (nameless) plain-text child with the given text value.""" return XMLElement(lib.lsl_append_child_value(self.e, str.encode(name), str.encode(value)))
def append_child_value(self, name, value): """Append a child node with a given name, which has a (nameless) plain-text child with the given text value.""" return XMLElement(lib.lsl_append_child_value(self.e, str.encode(name), str.encode(value)))
[ "Append", "a", "child", "node", "with", "a", "given", "name", "which", "has", "a", "(", "nameless", ")", "plain", "-", "text", "child", "with", "the", "given", "text", "value", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L989-L994
[ "def", "append_child_value", "(", "self", ",", "name", ",", "value", ")", ":", "return", "XMLElement", "(", "lib", ".", "lsl_append_child_value", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ",", "str", ".", "encode", "(", "value", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.prepend_child_value
Prepend a child node with a given name, which has a (nameless) plain-text child with the given text value.
pylsl/pylsl.py
def prepend_child_value(self, name, value): """Prepend a child node with a given name, which has a (nameless) plain-text child with the given text value.""" return XMLElement(lib.lsl_prepend_child_value(self.e, str.encode(name), str.encode(value)))
def prepend_child_value(self, name, value): """Prepend a child node with a given name, which has a (nameless) plain-text child with the given text value.""" return XMLElement(lib.lsl_prepend_child_value(self.e, str.encode(name), str.encode(value)))
[ "Prepend", "a", "child", "node", "with", "a", "given", "name", "which", "has", "a", "(", "nameless", ")", "plain", "-", "text", "child", "with", "the", "given", "text", "value", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L996-L1001
[ "def", "prepend_child_value", "(", "self", ",", "name", ",", "value", ")", ":", "return", "XMLElement", "(", "lib", ".", "lsl_prepend_child_value", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ",", "str", ".", "encode", "(", "value", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.set_child_value
Set the text value of the (nameless) plain-text child of a named child node.
pylsl/pylsl.py
def set_child_value(self, name, value): """Set the text value of the (nameless) plain-text child of a named child node.""" return XMLElement(lib.lsl_set_child_value(self.e, str.encode(name), str.encode(value)))
def set_child_value(self, name, value): """Set the text value of the (nameless) plain-text child of a named child node.""" return XMLElement(lib.lsl_set_child_value(self.e, str.encode(name), str.encode(value)))
[ "Set", "the", "text", "value", "of", "the", "(", "nameless", ")", "plain", "-", "text", "child", "of", "a", "named", "child", "node", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1003-L1008
[ "def", "set_child_value", "(", "self", ",", "name", ",", "value", ")", ":", "return", "XMLElement", "(", "lib", ".", "lsl_set_child_value", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ",", "str", ".", "encode", "(", "value", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.set_name
Set the element's name. Returns False if the node is empty.
pylsl/pylsl.py
def set_name(self, name): """Set the element's name. Returns False if the node is empty.""" return bool(lib.lsl_set_name(self.e, str.encode(name)))
def set_name(self, name): """Set the element's name. Returns False if the node is empty.""" return bool(lib.lsl_set_name(self.e, str.encode(name)))
[ "Set", "the", "element", "s", "name", ".", "Returns", "False", "if", "the", "node", "is", "empty", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1010-L1012
[ "def", "set_name", "(", "self", ",", "name", ")", ":", "return", "bool", "(", "lib", ".", "lsl_set_name", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.set_value
Set the element's value. Returns False if the node is empty.
pylsl/pylsl.py
def set_value(self, value): """Set the element's value. Returns False if the node is empty.""" return bool(lib.lsl_set_value(self.e, str.encode(value)))
def set_value(self, value): """Set the element's value. Returns False if the node is empty.""" return bool(lib.lsl_set_value(self.e, str.encode(value)))
[ "Set", "the", "element", "s", "value", ".", "Returns", "False", "if", "the", "node", "is", "empty", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1014-L1016
[ "def", "set_value", "(", "self", ",", "value", ")", ":", "return", "bool", "(", "lib", ".", "lsl_set_value", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "value", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.append_child
Append a child element with the specified name.
pylsl/pylsl.py
def append_child(self, name): """Append a child element with the specified name.""" return XMLElement(lib.lsl_append_child(self.e, str.encode(name)))
def append_child(self, name): """Append a child element with the specified name.""" return XMLElement(lib.lsl_append_child(self.e, str.encode(name)))
[ "Append", "a", "child", "element", "with", "the", "specified", "name", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1018-L1020
[ "def", "append_child", "(", "self", ",", "name", ")", ":", "return", "XMLElement", "(", "lib", ".", "lsl_append_child", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
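A sketch of the complementary write path using the append_child and append_child_value methods shown above. The StreamInfo arguments, element names and channel labels are illustrative assumptions, not taken from the records.

# Sketch: building per-channel metadata with the append_* helpers above.
from pylsl import StreamInfo

info = StreamInfo('BioSemi', 'EEG', 3, 100, 'float32', 'myuid1234')
chns = info.desc().append_child("channels")
for label in ("C3", "Cz", "C4"):
    ch = chns.append_child("channel")
    ch.append_child_value("label", label)
    ch.append_child_value("unit", "microvolts")
    ch.append_child_value("type", "EEG")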
test
XMLElement.prepend_child
Prepend a child element with the specified name.
pylsl/pylsl.py
def prepend_child(self, name): """Prepend a child element with the specified name.""" return XMLElement(lib.lsl_prepend_child(self.e, str.encode(name)))
def prepend_child(self, name): """Prepend a child element with the specified name.""" return XMLElement(lib.lsl_prepend_child(self.e, str.encode(name)))
[ "Prepend", "a", "child", "element", "with", "the", "specified", "name", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1022-L1024
[ "def", "prepend_child", "(", "self", ",", "name", ")", ":", "return", "XMLElement", "(", "lib", ".", "lsl_prepend_child", "(", "self", ".", "e", ",", "str", ".", "encode", "(", "name", ")", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.append_copy
Append a copy of the specified element as a child.
pylsl/pylsl.py
def append_copy(self, elem): """Append a copy of the specified element as a child.""" return XMLElement(lib.lsl_append_copy(self.e, elem.e))
def append_copy(self, elem): """Append a copy of the specified element as a child.""" return XMLElement(lib.lsl_append_copy(self.e, elem.e))
[ "Append", "a", "copy", "of", "the", "specified", "element", "as", "a", "child", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1026-L1028
[ "def", "append_copy", "(", "self", ",", "elem", ")", ":", "return", "XMLElement", "(", "lib", ".", "lsl_append_copy", "(", "self", ".", "e", ",", "elem", ".", "e", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.prepend_copy
Prepend a copy of the specified element as a child.
pylsl/pylsl.py
def prepend_copy(self, elem): """Prepend a copy of the specified element as a child.""" return XMLElement(lib.lsl_prepend_copy(self.e, elem.e))
def prepend_copy(self, elem): """Prepend a copy of the specified element as a child.""" return XMLElement(lib.lsl_prepend_copy(self.e, elem.e))
[ "Prepend", "a", "copy", "of", "the", "specified", "element", "as", "a", "child", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1030-L1032
[ "def", "prepend_copy", "(", "self", ",", "elem", ")", ":", "return", "XMLElement", "(", "lib", ".", "lsl_prepend_copy", "(", "self", ".", "e", ",", "elem", ".", "e", ")", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
XMLElement.remove_child
Remove a given child element, specified by name or as element.
pylsl/pylsl.py
def remove_child(self, rhs): """Remove a given child element, specified by name or as element.""" if type(rhs) is XMLElement: lib.lsl_remove_child(self.e, rhs.e) else: lib.lsl_remove_child_n(self.e, rhs)
def remove_child(self, rhs): """Remove a given child element, specified by name or as element.""" if type(rhs) is XMLElement: lib.lsl_remove_child(self.e, rhs.e) else: lib.lsl_remove_child_n(self.e, rhs)
[ "Remove", "a", "given", "child", "element", "specified", "by", "name", "or", "as", "element", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1034-L1039
[ "def", "remove_child", "(", "self", ",", "rhs", ")", ":", "if", "type", "(", "rhs", ")", "is", "XMLElement", ":", "lib", ".", "lsl_remove_child", "(", "self", ".", "e", ",", "rhs", ".", "e", ")", "else", ":", "lib", ".", "lsl_remove_child_n", "(", "self", ".", "e", ",", "rhs", ")" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
test
ContinuousResolver.results
Obtain the set of currently present streams on the network. Returns a list of matching StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet.
pylsl/pylsl.py
def results(self): """Obtain the set of currently present streams on the network. Returns a list of matching StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. """ # noinspection PyCallingNonCallable buffer = (c_void_p*1024)() num_found = lib.lsl_resolver_results(self.obj, byref(buffer), 1024) return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
def results(self): """Obtain the set of currently present streams on the network. Returns a list of matching StreamInfo objects (with empty desc field), any of which can subsequently be used to open an inlet. """ # noinspection PyCallingNonCallable buffer = (c_void_p*1024)() num_found = lib.lsl_resolver_results(self.obj, byref(buffer), 1024) return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
[ "Obtain", "the", "set", "of", "currently", "present", "streams", "on", "the", "network", "." ]
labstreaminglayer/liblsl-Python
python
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L1092-L1102
[ "def", "results", "(", "self", ")", ":", "# noinspection PyCallingNonCallable", "buffer", "=", "(", "c_void_p", "*", "1024", ")", "(", ")", "num_found", "=", "lib", ".", "lsl_resolver_results", "(", "self", ".", "obj", ",", "byref", "(", "buffer", ")", ",", "1024", ")", "return", "[", "StreamInfo", "(", "handle", "=", "buffer", "[", "k", "]", ")", "for", "k", "in", "range", "(", "num_found", ")", "]" ]
1ff6fe2794f8dba286b7491d1f7a4c915b8a0605
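A minimal sketch of polling the resolver documented above. The prop/value filter and the one-second settle delay are assumptions made for illustration.

# Sketch: listing currently visible streams via ContinuousResolver.results().
import time
from pylsl import ContinuousResolver

resolver = ContinuousResolver(prop='type', value='EEG')
time.sleep(1.0)                      # give the resolver a moment to collect answers
for info in resolver.results():      # StreamInfo objects with empty desc field
    print(info.name(), info.source_id())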
test
pair
See all token associated with a given token. PAIR lilas
addok/pairs.py
def pair(cmd, word): """See all token associated with a given token. PAIR lilas""" word = list(preprocess_query(word))[0] key = pair_key(word) tokens = [t.decode() for t in DB.smembers(key)] tokens.sort() print(white(tokens)) print(magenta('(Total: {})'.format(len(tokens))))
def pair(cmd, word): """See all token associated with a given token. PAIR lilas""" word = list(preprocess_query(word))[0] key = pair_key(word) tokens = [t.decode() for t in DB.smembers(key)] tokens.sort() print(white(tokens)) print(magenta('(Total: {})'.format(len(tokens))))
[ "See", "all", "token", "associated", "with", "a", "given", "token", ".", "PAIR", "lilas" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/pairs.py#L35-L43
[ "def", "pair", "(", "cmd", ",", "word", ")", ":", "word", "=", "list", "(", "preprocess_query", "(", "word", ")", ")", "[", "0", "]", "key", "=", "pair_key", "(", "word", ")", "tokens", "=", "[", "t", ".", "decode", "(", ")", "for", "t", "in", "DB", ".", "smembers", "(", "key", ")", "]", "tokens", ".", "sort", "(", ")", "print", "(", "white", "(", "tokens", ")", ")", "print", "(", "magenta", "(", "'(Total: {})'", ".", "format", "(", "len", "(", "tokens", ")", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
do_AUTOCOMPLETE
Shows autocomplete results for a given token.
addok/autocomplete.py
def do_AUTOCOMPLETE(cmd, s): """Shows autocomplete results for a given token.""" s = list(preprocess_query(s))[0] keys = [k.decode() for k in DB.smembers(edge_ngram_key(s))] print(white(keys)) print(magenta('({} elements)'.format(len(keys))))
def do_AUTOCOMPLETE(cmd, s): """Shows autocomplete results for a given token.""" s = list(preprocess_query(s))[0] keys = [k.decode() for k in DB.smembers(edge_ngram_key(s))] print(white(keys)) print(magenta('({} elements)'.format(len(keys))))
[ "Shows", "autocomplete", "results", "for", "a", "given", "token", "." ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/autocomplete.py#L128-L133
[ "def", "do_AUTOCOMPLETE", "(", "cmd", ",", "s", ")", ":", "s", "=", "list", "(", "preprocess_query", "(", "s", ")", ")", "[", "0", "]", "keys", "=", "[", "k", ".", "decode", "(", ")", "for", "k", "in", "DB", ".", "smembers", "(", "edge_ngram_key", "(", "s", ")", ")", "]", "print", "(", "white", "(", "keys", ")", ")", "print", "(", "magenta", "(", "'({} elements)'", ".", "format", "(", "len", "(", "keys", ")", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
compute_edge_ngrams
Compute edge ngram of token from min. Does not include token itself.
addok/helpers/text.py
def compute_edge_ngrams(token, min=None): """Compute edge ngram of token from min. Does not include token itself.""" if min is None: min = config.MIN_EDGE_NGRAMS token = token[:config.MAX_EDGE_NGRAMS + 1] return [token[:i] for i in range(min, len(token))]
def compute_edge_ngrams(token, min=None): """Compute edge ngram of token from min. Does not include token itself.""" if min is None: min = config.MIN_EDGE_NGRAMS token = token[:config.MAX_EDGE_NGRAMS + 1] return [token[:i] for i in range(min, len(token))]
[ "Compute", "edge", "ngram", "of", "token", "from", "min", ".", "Does", "not", "include", "token", "itself", "." ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/helpers/text.py#L187-L192
[ "def", "compute_edge_ngrams", "(", "token", ",", "min", "=", "None", ")", ":", "if", "min", "is", "None", ":", "min", "=", "config", ".", "MIN_EDGE_NGRAMS", "token", "=", "token", "[", ":", "config", ".", "MAX_EDGE_NGRAMS", "+", "1", "]", "return", "[", "token", "[", ":", "i", "]", "for", "i", "in", "range", "(", "min", ",", "len", "(", "token", ")", ")", "]" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
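A standalone illustration of the edge-ngram expansion performed by compute_edge_ngrams above; the bounds 3 and 20 are assumed stand-ins for config.MIN_EDGE_NGRAMS and config.MAX_EDGE_NGRAMS, which the real helper reads from addok's configuration.

# Illustration only: edge-ngram expansion with hard-coded bounds (assumed values).
def edge_ngrams(token, min_len=3, max_len=20):
    token = token[:max_len + 1]
    return [token[:i] for i in range(min_len, len(token))]

print(edge_ngrams("lilas"))   # ['lil', 'lila'] -- the full token itself is not included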
test
iter_pipe
Allow for iterators to return either an item or an iterator of items.
addok/helpers/__init__.py
def iter_pipe(pipe, processors): """Allow for iterators to return either an item or an iterator of items.""" if isinstance(pipe, str): pipe = [pipe] for it in processors: pipe = it(pipe) yield from pipe
def iter_pipe(pipe, processors): """Allow for iterators to return either an item or an iterator of items.""" if isinstance(pipe, str): pipe = [pipe] for it in processors: pipe = it(pipe) yield from pipe
[ "Allow", "for", "iterators", "to", "return", "either", "an", "item", "or", "an", "iterator", "of", "items", "." ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/helpers/__init__.py#L33-L39
[ "def", "iter_pipe", "(", "pipe", ",", "processors", ")", ":", "if", "isinstance", "(", "pipe", ",", "str", ")", ":", "pipe", "=", "[", "pipe", "]", "for", "it", "in", "processors", ":", "pipe", "=", "it", "(", "pipe", ")", "yield", "from", "pipe" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
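A small usage sketch for iter_pipe above. The two toy processors are invented for illustration (addok's real ones live elsewhere in the package), and the snippet assumes addok is importable.

# Illustration only: chaining generator "processors" the way iter_pipe does.
from addok.helpers import iter_pipe

def lowercase(pipe):
    for s in pipe:
        yield s.lower()

def split_words(pipe):
    for s in pipe:
        yield from s.split()

print(list(iter_pipe("Rue des Lilas", [lowercase, split_words])))
# ['rue', 'des', 'lilas']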
test
import_by_path
Import functions or class by their path. Should be of the form: path.to.module.func
addok/helpers/__init__.py
def import_by_path(path): """ Import functions or class by their path. Should be of the form: path.to.module.func """ if not isinstance(path, str): return path module_path, *name = path.rsplit('.', 1) func = import_module(module_path) if name: func = getattr(func, name[0]) return func
def import_by_path(path): """ Import functions or class by their path. Should be of the form: path.to.module.func """ if not isinstance(path, str): return path module_path, *name = path.rsplit('.', 1) func = import_module(module_path) if name: func = getattr(func, name[0]) return func
[ "Import", "functions", "or", "class", "by", "their", "path", ".", "Should", "be", "of", "the", "form", ":", "path", ".", "to", ".", "module", ".", "func" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/helpers/__init__.py#L42-L53
[ "def", "import_by_path", "(", "path", ")", ":", "if", "not", "isinstance", "(", "path", ",", "str", ")", ":", "return", "path", "module_path", ",", "", "*", "name", "=", "path", ".", "rsplit", "(", "'.'", ",", "1", ")", "func", "=", "import_module", "(", "module_path", ")", "if", "name", ":", "func", "=", "getattr", "(", "func", ",", "name", "[", "0", "]", ")", "return", "func" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
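A usage sketch for import_by_path above, assuming addok is importable; 'os.path.join' is just a convenient dotted path to resolve and has no special meaning in addok.

# Illustration only: resolving dotted paths with import_by_path.
from addok.helpers import import_by_path

join = import_by_path('os.path.join')
print(join('addok', 'helpers'))        # addok/helpers (addok\helpers on Windows)
print(import_by_path(print) is print)  # non-strings are returned untouched -> True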
test
haversine_distance
Calculate the great circle distance between two points on the earth (specified in decimal degrees).
addok/helpers/__init__.py
def haversine_distance(point1, point2): """ Calculate the great circle distance between two points on the earth (specified in decimal degrees). """ lat1, lon1 = point1 lat2, lon2 = point2 # Convert decimal degrees to radians. lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) # Haversine formula. dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 c = 2 * asin(sqrt(a)) # 6367 km is the radius of the Earth. km = 6367 * c return km
def haversine_distance(point1, point2): """ Calculate the great circle distance between two points on the earth (specified in decimal degrees). """ lat1, lon1 = point1 lat2, lon2 = point2 # Convert decimal degrees to radians. lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) # Haversine formula. dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 c = 2 * asin(sqrt(a)) # 6367 km is the radius of the Earth. km = 6367 * c return km
[ "Calculate", "the", "great", "circle", "distance", "between", "two", "points", "on", "the", "earth", "(", "specified", "in", "decimal", "degrees", ")", "." ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/helpers/__init__.py#L64-L83
[ "def", "haversine_distance", "(", "point1", ",", "point2", ")", ":", "lat1", ",", "lon1", "=", "point1", "lat2", ",", "lon2", "=", "point2", "# Convert decimal degrees to radians.", "lon1", ",", "lat1", ",", "lon2", ",", "lat2", "=", "map", "(", "radians", ",", "[", "lon1", ",", "lat1", ",", "lon2", ",", "lat2", "]", ")", "# Haversine formula.", "dlon", "=", "lon2", "-", "lon1", "dlat", "=", "lat2", "-", "lat1", "a", "=", "sin", "(", "dlat", "/", "2", ")", "**", "2", "+", "cos", "(", "lat1", ")", "*", "cos", "(", "lat2", ")", "*", "sin", "(", "dlon", "/", "2", ")", "**", "2", "c", "=", "2", "*", "asin", "(", "sqrt", "(", "a", ")", ")", "# 6367 km is the radius of the Earth.", "km", "=", "6367", "*", "c", "return", "km" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
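A worked example for haversine_distance above, assuming addok is importable. The coordinates are approximate city centres; the printed figure follows from the 6367 km earth radius used in the helper.

# Worked example: great-circle distance between Paris and Lyon.
from addok.helpers import haversine_distance

paris = (48.8566, 2.3522)
lyon = (45.7640, 4.8357)
km = haversine_distance(paris, lyon)
print(round(km))   # about 391 km with the 6367 km radius used above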
test
ChunkedPool.imap_unordered
Customized version of imap_unordered. Directly send chunks to func, instead of iterating in each process and sending one by one. Original: https://hg.python.org/cpython/file/tip/Lib/multiprocessing/pool.py#l271 Other tried options: - map_async: makes a list(iterable), so it loads all the data for each process into RAM - apply_async: needs manual chunking
addok/helpers/__init__.py
def imap_unordered(self, func, iterable, chunksize): """Customized version of imap_unordered. Directly send chunks to func, instead of iterating in each process and sending one by one. Original: https://hg.python.org/cpython/file/tip/Lib/multiprocessing/pool.py#l271 Other tried options: - map_async: makes a list(iterable), so it loads all the data for each process into RAM - apply_async: needs manual chunking """ assert self._state == RUN task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self._cache) tasks = ((result._job, i, func, chunk, {}) for i, (_, chunk) in enumerate(task_batches)) self._taskqueue.put((tasks, result._set_length)) return result
def imap_unordered(self, func, iterable, chunksize): """Customized version of imap_unordered. Directly send chunks to func, instead of iterating in each process and sending one by one. Original: https://hg.python.org/cpython/file/tip/Lib/multiprocessing/pool.py#l271 Other tried options: - map_async: makes a list(iterable), so it loads all the data for each process into RAM - apply_async: needs manual chunking """ assert self._state == RUN task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self._cache) tasks = ((result._job, i, func, chunk, {}) for i, (_, chunk) in enumerate(task_batches)) self._taskqueue.put((tasks, result._set_length)) return result
[ "Customized", "version", "of", "imap_unordered", "." ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/helpers/__init__.py#L144-L164
[ "def", "imap_unordered", "(", "self", ",", "func", ",", "iterable", ",", "chunksize", ")", ":", "assert", "self", ".", "_state", "==", "RUN", "task_batches", "=", "Pool", ".", "_get_tasks", "(", "func", ",", "iterable", ",", "chunksize", ")", "result", "=", "IMapUnorderedIterator", "(", "self", ".", "_cache", ")", "tasks", "=", "(", "(", "result", ".", "_job", ",", "i", ",", "func", ",", "chunk", ",", "{", "}", ")", "for", "i", ",", "(", "_", ",", "chunk", ")", "in", "enumerate", "(", "task_batches", ")", ")", "self", ".", "_taskqueue", ".", "put", "(", "(", "tasks", ",", "result", ".", "_set_length", ")", ")", "return", "result" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
make_fuzzy
Naive neighborhoods algo.
addok/fuzzy.py
def make_fuzzy(word, max=1): """Naive neighborhoods algo.""" # inversions neighbors = [] for i in range(0, len(word) - 1): neighbor = list(word) neighbor[i], neighbor[i+1] = neighbor[i+1], neighbor[i] neighbors.append(''.join(neighbor)) # substitutions for letter in string.ascii_lowercase: for i in range(0, len(word)): neighbor = list(word) if letter != neighbor[i]: neighbor[i] = letter neighbors.append(''.join(neighbor)) # insertions for letter in string.ascii_lowercase: for i in range(0, len(word) + 1): neighbor = list(word) neighbor.insert(i, letter) neighbors.append(''.join(neighbor)) if len(word) > 3: # removal for i in range(0, len(word)): neighbor = list(word) del neighbor[i] neighbors.append(''.join(neighbor)) return neighbors
def make_fuzzy(word, max=1): """Naive neighborhoods algo.""" # inversions neighbors = [] for i in range(0, len(word) - 1): neighbor = list(word) neighbor[i], neighbor[i+1] = neighbor[i+1], neighbor[i] neighbors.append(''.join(neighbor)) # substitutions for letter in string.ascii_lowercase: for i in range(0, len(word)): neighbor = list(word) if letter != neighbor[i]: neighbor[i] = letter neighbors.append(''.join(neighbor)) # insertions for letter in string.ascii_lowercase: for i in range(0, len(word) + 1): neighbor = list(word) neighbor.insert(i, letter) neighbors.append(''.join(neighbor)) if len(word) > 3: # removal for i in range(0, len(word)): neighbor = list(word) del neighbor[i] neighbors.append(''.join(neighbor)) return neighbors
[ "Naive", "neighborhoods", "algo", "." ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/fuzzy.py#L11-L38
[ "def", "make_fuzzy", "(", "word", ",", "max", "=", "1", ")", ":", "# inversions", "neighbors", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "word", ")", "-", "1", ")", ":", "neighbor", "=", "list", "(", "word", ")", "neighbor", "[", "i", "]", ",", "neighbor", "[", "i", "+", "1", "]", "=", "neighbor", "[", "i", "+", "1", "]", ",", "neighbor", "[", "i", "]", "neighbors", ".", "append", "(", "''", ".", "join", "(", "neighbor", ")", ")", "# substitutions", "for", "letter", "in", "string", ".", "ascii_lowercase", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "word", ")", ")", ":", "neighbor", "=", "list", "(", "word", ")", "if", "letter", "!=", "neighbor", "[", "i", "]", ":", "neighbor", "[", "i", "]", "=", "letter", "neighbors", ".", "append", "(", "''", ".", "join", "(", "neighbor", ")", ")", "# insertions", "for", "letter", "in", "string", ".", "ascii_lowercase", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "word", ")", "+", "1", ")", ":", "neighbor", "=", "list", "(", "word", ")", "neighbor", ".", "insert", "(", "i", ",", "letter", ")", "neighbors", ".", "append", "(", "''", ".", "join", "(", "neighbor", ")", ")", "if", "len", "(", "word", ")", ">", "3", ":", "# removal", "for", "i", "in", "range", "(", "0", ",", "len", "(", "word", ")", ")", ":", "neighbor", "=", "list", "(", "word", ")", "del", "neighbor", "[", "i", "]", "neighbors", ".", "append", "(", "''", ".", "join", "(", "neighbor", ")", ")", "return", "neighbors" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
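A quick sanity check on the neighborhoods produced by make_fuzzy above, assuming addok is importable; the breakdown in the comment follows directly from the four loops in the function (duplicates are not removed).

# Illustration only: counting make_fuzzy neighborhoods for a 5-letter word.
from addok.fuzzy import make_fuzzy

neighbors = make_fuzzy('lilas')
print(len(neighbors))          # 290 = 4 swaps + 125 substitutions + 156 insertions + 5 removals
print('lila' in neighbors)     # True -- removals are generated for words longer than 3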
test
do_fuzzy
Compute fuzzy extensions of word. FUZZY lilas
addok/fuzzy.py
def do_fuzzy(self, word): """Compute fuzzy extensions of word. FUZZY lilas""" word = list(preprocess_query(word))[0] print(white(make_fuzzy(word)))
def do_fuzzy(self, word): """Compute fuzzy extensions of word. FUZZY lilas""" word = list(preprocess_query(word))[0] print(white(make_fuzzy(word)))
[ "Compute", "fuzzy", "extensions", "of", "word", ".", "FUZZY", "lilas" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/fuzzy.py#L100-L104
[ "def", "do_fuzzy", "(", "self", ",", "word", ")", ":", "word", "=", "list", "(", "preprocess_query", "(", "word", ")", ")", "[", "0", "]", "print", "(", "white", "(", "make_fuzzy", "(", "word", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
do_fuzzyindex
Compute fuzzy extensions of word that exist in index. FUZZYINDEX lilas
addok/fuzzy.py
def do_fuzzyindex(self, word): """Compute fuzzy extensions of word that exist in index. FUZZYINDEX lilas""" word = list(preprocess_query(word))[0] token = Token(word) neighbors = make_fuzzy(token) neighbors = [(n, DB.zcard(dbkeys.token_key(n))) for n in neighbors] neighbors.sort(key=lambda n: n[1], reverse=True) for token, freq in neighbors: if freq == 0: break print(white(token), blue(freq))
def do_fuzzyindex(self, word): """Compute fuzzy extensions of word that exist in index. FUZZYINDEX lilas""" word = list(preprocess_query(word))[0] token = Token(word) neighbors = make_fuzzy(token) neighbors = [(n, DB.zcard(dbkeys.token_key(n))) for n in neighbors] neighbors.sort(key=lambda n: n[1], reverse=True) for token, freq in neighbors: if freq == 0: break print(white(token), blue(freq))
[ "Compute", "fuzzy", "extensions", "of", "word", "that", "exist", "in", "index", ".", "FUZZYINDEX", "lilas" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/fuzzy.py#L107-L118
[ "def", "do_fuzzyindex", "(", "self", ",", "word", ")", ":", "word", "=", "list", "(", "preprocess_query", "(", "word", ")", ")", "[", "0", "]", "token", "=", "Token", "(", "word", ")", "neighbors", "=", "make_fuzzy", "(", "token", ")", "neighbors", "=", "[", "(", "n", ",", "DB", ".", "zcard", "(", "dbkeys", ".", "token_key", "(", "n", ")", ")", ")", "for", "n", "in", "neighbors", "]", "neighbors", ".", "sort", "(", "key", "=", "lambda", "n", ":", "n", "[", "1", "]", ",", "reverse", "=", "True", ")", "for", "token", ",", "freq", "in", "neighbors", ":", "if", "freq", "==", "0", ":", "break", "print", "(", "white", "(", "token", ")", ",", "blue", "(", "freq", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
extend_results_extrapoling_relations
Try to extract the bigger group of interlinked tokens. Should generally be used at last in the collectors chain.
addok/helpers/collectors.py
def extend_results_extrapoling_relations(helper): """Try to extract the bigger group of interlinked tokens. Should generally be used at last in the collectors chain. """ if not helper.bucket_dry: return # No need. tokens = set(helper.meaningful + helper.common) for relation in _extract_manytomany_relations(tokens): helper.add_to_bucket([t.db_key for t in relation]) if helper.bucket_overflow: break else: helper.debug('No relation extrapolated.')
def extend_results_extrapoling_relations(helper): """Try to extract the bigger group of interlinked tokens. Should generally be used at last in the collectors chain. """ if not helper.bucket_dry: return # No need. tokens = set(helper.meaningful + helper.common) for relation in _extract_manytomany_relations(tokens): helper.add_to_bucket([t.db_key for t in relation]) if helper.bucket_overflow: break else: helper.debug('No relation extrapolated.')
[ "Try", "to", "extract", "the", "bigger", "group", "of", "interlinked", "tokens", "." ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/helpers/collectors.py#L133-L146
[ "def", "extend_results_extrapoling_relations", "(", "helper", ")", ":", "if", "not", "helper", ".", "bucket_dry", ":", "return", "# No need.", "tokens", "=", "set", "(", "helper", ".", "meaningful", "+", "helper", ".", "common", ")", "for", "relation", "in", "_extract_manytomany_relations", "(", "tokens", ")", ":", "helper", ".", "add_to_bucket", "(", "[", "t", ".", "db_key", "for", "t", "in", "relation", "]", ")", "if", "helper", ".", "bucket_overflow", ":", "break", "else", ":", "helper", ".", "debug", "(", "'No relation extrapolated.'", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
Cmd.do_help
Display this help message.
addok/shell.py
def do_help(self, command): """Display this help message.""" if command: doc = getattr(self, 'do_' + command).__doc__ print(cyan(doc.replace(' ' * 8, ''))) else: print(magenta('Available commands:')) print(magenta('Type "HELP <command>" to get more info.')) names = self.get_names() names.sort() for name in names: if name[:3] != 'do_': continue doc = getattr(self, name).__doc__ doc = doc.split('\n')[0] print('{} {}'.format(yellow(name[3:]), cyan(doc.replace(' ' * 8, ' ') .replace('\n', ''))))
def do_help(self, command): """Display this help message.""" if command: doc = getattr(self, 'do_' + command).__doc__ print(cyan(doc.replace(' ' * 8, ''))) else: print(magenta('Available commands:')) print(magenta('Type "HELP <command>" to get more info.')) names = self.get_names() names.sort() for name in names: if name[:3] != 'do_': continue doc = getattr(self, name).__doc__ doc = doc.split('\n')[0] print('{} {}'.format(yellow(name[3:]), cyan(doc.replace(' ' * 8, ' ') .replace('\n', ''))))
[ "Display", "this", "help", "message", "." ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L110-L127
[ "def", "do_help", "(", "self", ",", "command", ")", ":", "if", "command", ":", "doc", "=", "getattr", "(", "self", ",", "'do_'", "+", "command", ")", ".", "__doc__", "print", "(", "cyan", "(", "doc", ".", "replace", "(", "' '", "*", "8", ",", "''", ")", ")", ")", "else", ":", "print", "(", "magenta", "(", "'Available commands:'", ")", ")", "print", "(", "magenta", "(", "'Type \"HELP <command>\" to get more info.'", ")", ")", "names", "=", "self", ".", "get_names", "(", ")", "names", ".", "sort", "(", ")", "for", "name", "in", "names", ":", "if", "name", "[", ":", "3", "]", "!=", "'do_'", ":", "continue", "doc", "=", "getattr", "(", "self", ",", "name", ")", ".", "__doc__", "doc", "=", "doc", ".", "split", "(", "'\\n'", ")", "[", "0", "]", "print", "(", "'{} {}'", ".", "format", "(", "yellow", "(", "name", "[", "3", ":", "]", ")", ",", "cyan", "(", "doc", ".", "replace", "(", "' '", "*", "8", ",", "' '", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
Cmd.do_BENCH
Run a search many times to benchmark it. BENCH [100] rue des Lilas
addok/shell.py
def do_BENCH(self, query): """Run a search many times to benchmark it. BENCH [100] rue des Lilas""" try: count = int(re.match(r'^(\d+).*', query).group(1)) except AttributeError: count = 100 self._search(query, count=count)
def do_BENCH(self, query): """Run a search many times to benchmark it. BENCH [100] rue des Lilas""" try: count = int(re.match(r'^(\d+).*', query).group(1)) except AttributeError: count = 100 self._search(query, count=count)
[ "Run", "a", "search", "many", "times", "to", "benchmark", "it", ".", "BENCH", "[", "100", "]", "rue", "des", "Lilas" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L209-L216
[ "def", "do_BENCH", "(", "self", ",", "query", ")", ":", "try", ":", "count", "=", "int", "(", "re", ".", "match", "(", "r'^(\\d+).*'", ",", "query", ")", ".", "group", "(", "1", ")", ")", "except", "AttributeError", ":", "count", "=", "100", "self", ".", "_search", "(", "query", ",", "count", "=", "count", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
Cmd.do_INTERSECT
Do a raw intersect between tokens (default limit 100). INTERSECT rue des lilas [LIMIT 100]
addok/shell.py
def do_INTERSECT(self, words): """Do a raw intersect between tokens (default limit 100). INTERSECT rue des lilas [LIMIT 100]""" start = time.time() limit = 100 if 'LIMIT' in words: words, limit = words.split('LIMIT') limit = int(limit) tokens = [keys.token_key(w) for w in preprocess_query(words)] DB.zinterstore(words, tokens) results = DB.zrevrange(words, 0, limit, withscores=True) DB.delete(words) for id_, score in results: r = Result(id_) print('{} {} {}'.format(white(r), blue(r._id), cyan(score))) duration = round((time.time() - start) * 1000, 1) print(magenta("({} in {} ms)".format(len(results), duration)))
def do_INTERSECT(self, words): """Do a raw intersect between tokens (default limit 100). INTERSECT rue des lilas [LIMIT 100]""" start = time.time() limit = 100 if 'LIMIT' in words: words, limit = words.split('LIMIT') limit = int(limit) tokens = [keys.token_key(w) for w in preprocess_query(words)] DB.zinterstore(words, tokens) results = DB.zrevrange(words, 0, limit, withscores=True) DB.delete(words) for id_, score in results: r = Result(id_) print('{} {} {}'.format(white(r), blue(r._id), cyan(score))) duration = round((time.time() - start) * 1000, 1) print(magenta("({} in {} ms)".format(len(results), duration)))
[ "Do", "a", "raw", "intersect", "between", "tokens", "(", "default", "limit", "100", ")", ".", "INTERSECT", "rue", "des", "lilas", "[", "LIMIT", "100", "]" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L218-L234
[ "def", "do_INTERSECT", "(", "self", ",", "words", ")", ":", "start", "=", "time", ".", "time", "(", ")", "limit", "=", "100", "if", "'LIMIT'", "in", "words", ":", "words", ",", "limit", "=", "words", ".", "split", "(", "'LIMIT'", ")", "limit", "=", "int", "(", "limit", ")", "tokens", "=", "[", "keys", ".", "token_key", "(", "w", ")", "for", "w", "in", "preprocess_query", "(", "words", ")", "]", "DB", ".", "zinterstore", "(", "words", ",", "tokens", ")", "results", "=", "DB", ".", "zrevrange", "(", "words", ",", "0", ",", "limit", ",", "withscores", "=", "True", ")", "DB", ".", "delete", "(", "words", ")", "for", "id_", ",", "score", "in", "results", ":", "r", "=", "Result", "(", "id_", ")", "print", "(", "'{} {} {}'", ".", "format", "(", "white", "(", "r", ")", ",", "blue", "(", "r", ".", "_id", ")", ",", "cyan", "(", "score", ")", ")", ")", "duration", "=", "round", "(", "(", "time", ".", "time", "(", ")", "-", "start", ")", "*", "1000", ",", "1", ")", "print", "(", "magenta", "(", "\"({} in {} ms)\"", ".", "format", "(", "len", "(", "results", ")", ",", "duration", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
Cmd.do_DBINFO
Print some useful infos from Redis DB.
addok/shell.py
def do_DBINFO(self, *args): """Print some useful infos from Redis DB.""" info = DB.info() keys = [ 'keyspace_misses', 'keyspace_hits', 'used_memory_human', 'total_commands_processed', 'total_connections_received', 'connected_clients'] for key in keys: print('{}: {}'.format(white(key), blue(info[key]))) nb_of_redis_db = int(DB.config_get('databases')['databases']) for db_index in range(nb_of_redis_db - 1): db_name = 'db{}'.format(db_index) if db_name in info: label = white('nb keys (db {})'.format(db_index)) print('{}: {}'.format(label, blue(info[db_name]['keys'])))
def do_DBINFO(self, *args): """Print some useful infos from Redis DB.""" info = DB.info() keys = [ 'keyspace_misses', 'keyspace_hits', 'used_memory_human', 'total_commands_processed', 'total_connections_received', 'connected_clients'] for key in keys: print('{}: {}'.format(white(key), blue(info[key]))) nb_of_redis_db = int(DB.config_get('databases')['databases']) for db_index in range(nb_of_redis_db - 1): db_name = 'db{}'.format(db_index) if db_name in info: label = white('nb keys (db {})'.format(db_index)) print('{}: {}'.format(label, blue(info[db_name]['keys'])))
[ "Print", "some", "useful", "infos", "from", "Redis", "DB", "." ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L236-L250
[ "def", "do_DBINFO", "(", "self", ",", "*", "args", ")", ":", "info", "=", "DB", ".", "info", "(", ")", "keys", "=", "[", "'keyspace_misses'", ",", "'keyspace_hits'", ",", "'used_memory_human'", ",", "'total_commands_processed'", ",", "'total_connections_received'", ",", "'connected_clients'", "]", "for", "key", "in", "keys", ":", "print", "(", "'{}: {}'", ".", "format", "(", "white", "(", "key", ")", ",", "blue", "(", "info", "[", "key", "]", ")", ")", ")", "nb_of_redis_db", "=", "int", "(", "DB", ".", "config_get", "(", "'databases'", ")", "[", "'databases'", "]", ")", "for", "db_index", "in", "range", "(", "nb_of_redis_db", "-", "1", ")", ":", "db_name", "=", "'db{}'", ".", "format", "(", "db_index", ")", "if", "db_name", "in", "info", ":", "label", "=", "white", "(", "'nb keys (db {})'", ".", "format", "(", "db_index", ")", ")", "print", "(", "'{}: {}'", ".", "format", "(", "label", ",", "blue", "(", "info", "[", "db_name", "]", "[", "'keys'", "]", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
Cmd.do_DBKEY
Print raw content of a DB key. DBKEY g|u09tyzfe
addok/shell.py
def do_DBKEY(self, key): """Print raw content of a DB key. DBKEY g|u09tyzfe""" type_ = DB.type(key).decode() if type_ == 'set': out = DB.smembers(key) elif type_ == 'string': out = DB.get(key) else: out = 'Unsupported type {}'.format(type_) print('type:', magenta(type_)) print('value:', white(out))
def do_DBKEY(self, key): """Print raw content of a DB key. DBKEY g|u09tyzfe""" type_ = DB.type(key).decode() if type_ == 'set': out = DB.smembers(key) elif type_ == 'string': out = DB.get(key) else: out = 'Unsupported type {}'.format(type_) print('type:', magenta(type_)) print('value:', white(out))
[ "Print", "raw", "content", "of", "a", "DB", "key", ".", "DBKEY", "g|u09tyzfe" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L252-L263
[ "def", "do_DBKEY", "(", "self", ",", "key", ")", ":", "type_", "=", "DB", ".", "type", "(", "key", ")", ".", "decode", "(", ")", "if", "type_", "==", "'set'", ":", "out", "=", "DB", ".", "smembers", "(", "key", ")", "elif", "type_", "==", "'string'", ":", "out", "=", "DB", ".", "get", "(", "key", ")", "else", ":", "out", "=", "'Unsupported type {}'", ".", "format", "(", "type_", ")", "print", "(", "'type:'", ",", "magenta", "(", "type_", ")", ")", "print", "(", "'value:'", ",", "white", "(", "out", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
Cmd.do_GEODISTANCE
Compute geodistance from a result to a point. GEODISTANCE 772210180J 48.1234 2.9876
addok/shell.py
def do_GEODISTANCE(self, s): """Compute geodistance from a result to a point. GEODISTANCE 772210180J 48.1234 2.9876""" try: _id, lat, lon = s.split() except: return self.error('Malformed query. Use: ID lat lon') try: result = Result(keys.document_key(_id)) except ValueError as e: return self.error(e) center = (float(lat), float(lon)) km = haversine_distance((float(result.lat), float(result.lon)), center) score = km_to_score(km) print('km: {} | score: {}'.format(white(km), blue(score)))
def do_GEODISTANCE(self, s): """Compute geodistance from a result to a point. GEODISTANCE 772210180J 48.1234 2.9876""" try: _id, lat, lon = s.split() except: return self.error('Malformed query. Use: ID lat lon') try: result = Result(keys.document_key(_id)) except ValueError as e: return self.error(e) center = (float(lat), float(lon)) km = haversine_distance((float(result.lat), float(result.lon)), center) score = km_to_score(km) print('km: {} | score: {}'.format(white(km), blue(score)))
[ "Compute", "geodistance", "from", "a", "result", "to", "a", "point", ".", "GEODISTANCE", "772210180J", "48", ".", "1234", "2", ".", "9876" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L265-L279
[ "def", "do_GEODISTANCE", "(", "self", ",", "s", ")", ":", "try", ":", "_id", ",", "lat", ",", "lon", "=", "s", ".", "split", "(", ")", "except", ":", "return", "self", ".", "error", "(", "'Malformed query. Use: ID lat lon'", ")", "try", ":", "result", "=", "Result", "(", "keys", ".", "document_key", "(", "_id", ")", ")", "except", "ValueError", "as", "e", ":", "return", "self", ".", "error", "(", "e", ")", "center", "=", "(", "float", "(", "lat", ")", ",", "float", "(", "lon", ")", ")", "km", "=", "haversine_distance", "(", "(", "float", "(", "result", ".", "lat", ")", ",", "float", "(", "result", ".", "lon", ")", ")", ",", "center", ")", "score", "=", "km_to_score", "(", "km", ")", "print", "(", "'km: {} | score: {}'.", "f", "ormat(", "w", "hite(", "k", "m)", ",", " ", "lue(", "s", "core)", ")", ")", "" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
test
Cmd.do_GEOHASHTOGEOJSON
Build GeoJSON corresponding to geohash given as parameter. GEOHASHTOGEOJSON u09vej04 [NEIGHBORS 0|1|2]
addok/shell.py
def do_GEOHASHTOGEOJSON(self, geoh): """Build GeoJSON corresponding to geohash given as parameter. GEOHASHTOGEOJSON u09vej04 [NEIGHBORS 0|1|2]""" geoh, with_neighbors = self._match_option('NEIGHBORS', geoh) bbox = geohash.bbox(geoh) try: with_neighbors = int(with_neighbors) except TypeError: with_neighbors = 0 def expand(bbox, geoh, depth): neighbors = geohash.neighbors(geoh) for neighbor in neighbors: other = geohash.bbox(neighbor) if with_neighbors > depth: expand(bbox, neighbor, depth + 1) else: if other['n'] > bbox['n']: bbox['n'] = other['n'] if other['s'] < bbox['s']: bbox['s'] = other['s'] if other['e'] > bbox['e']: bbox['e'] = other['e'] if other['w'] < bbox['w']: bbox['w'] = other['w'] if with_neighbors > 0: expand(bbox, geoh, 0) geojson = { "type": "Polygon", "coordinates": [[ [bbox['w'], bbox['n']], [bbox['e'], bbox['n']], [bbox['e'], bbox['s']], [bbox['w'], bbox['s']], [bbox['w'], bbox['n']] ]] } print(white(json.dumps(geojson)))
def do_GEOHASHTOGEOJSON(self, geoh): """Build GeoJSON corresponding to geohash given as parameter. GEOHASHTOGEOJSON u09vej04 [NEIGHBORS 0|1|2]""" geoh, with_neighbors = self._match_option('NEIGHBORS', geoh) bbox = geohash.bbox(geoh) try: with_neighbors = int(with_neighbors) except TypeError: with_neighbors = 0 def expand(bbox, geoh, depth): neighbors = geohash.neighbors(geoh) for neighbor in neighbors: other = geohash.bbox(neighbor) if with_neighbors > depth: expand(bbox, neighbor, depth + 1) else: if other['n'] > bbox['n']: bbox['n'] = other['n'] if other['s'] < bbox['s']: bbox['s'] = other['s'] if other['e'] > bbox['e']: bbox['e'] = other['e'] if other['w'] < bbox['w']: bbox['w'] = other['w'] if with_neighbors > 0: expand(bbox, geoh, 0) geojson = { "type": "Polygon", "coordinates": [[ [bbox['w'], bbox['n']], [bbox['e'], bbox['n']], [bbox['e'], bbox['s']], [bbox['w'], bbox['s']], [bbox['w'], bbox['n']] ]] } print(white(json.dumps(geojson)))
[ "Build", "GeoJSON", "corresponding", "to", "geohash", "given", "as", "parameter", ".", "GEOHASHTOGEOJSON", "u09vej04", "[", "NEIGHBORS", "0|1|2", "]" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L281-L320
[ "def", "do_GEOHASHTOGEOJSON", "(", "self", ",", "geoh", ")", ":", "geoh", ",", "with_neighbors", "=", "self", ".", "_match_option", "(", "'NEIGHBORS'", ",", "geoh", ")", "bbox", "=", "geohash", ".", "bbox", "(", "geoh", ")", "try", ":", "with_neighbors", "=", "int", "(", "with_neighbors", ")", "except", "TypeError", ":", "with_neighbors", "=", "0", "def", "expand", "(", "bbox", ",", "geoh", ",", "depth", ")", ":", "neighbors", "=", "geohash", ".", "neighbors", "(", "geoh", ")", "for", "neighbor", "in", "neighbors", ":", "other", "=", "geohash", ".", "bbox", "(", "neighbor", ")", "if", "with_neighbors", ">", "depth", ":", "expand", "(", "bbox", ",", "neighbor", ",", "depth", "+", "1", ")", "else", ":", "if", "other", "[", "'n'", "]", ">", "bbox", "[", "'n'", "]", ":", "bbox", "[", "'n'", "]", "=", "other", "[", "'n'", "]", "if", "other", "[", "'s'", "]", "<", "bbox", "[", "'s'", "]", ":", "bbox", "[", "'s'", "]", "=", "other", "[", "'s'", "]", "if", "other", "[", "'e'", "]", ">", "bbox", "[", "'e'", "]", ":", "bbox", "[", "'e'", "]", "=", "other", "[", "'e'", "]", "if", "other", "[", "'w'", "]", "<", "bbox", "[", "'w'", "]", ":", "bbox", "[", "'w'", "]", "=", "other", "[", "'w'", "]", "if", "with_neighbors", ">", "0", ":", "expand", "(", "bbox", ",", "geoh", ",", "0", ")", "geojson", "=", "{", "\"type\"", ":", "\"Polygon\"", ",", "\"coordinates\"", ":", "[", "[", "[", "bbox", "[", "'w'", "]", ",", "bbox", "[", "'n'", "]", "]", ",", "[", "bbox", "[", "'e'", "]", ",", "bbox", "[", "'n'", "]", "]", ",", "[", "bbox", "[", "'e'", "]", ",", "bbox", "[", "'s'", "]", "]", ",", "[", "bbox", "[", "'w'", "]", ",", "bbox", "[", "'s'", "]", "]", ",", "[", "bbox", "[", "'w'", "]", ",", "bbox", "[", "'n'", "]", "]", "]", "]", "}", "print", "(", "white", "(", "json", ".", "dumps", "(", "geojson", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
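The GEOHASHTOGEOJSON command above ultimately just turns an n/s/e/w bounding box into a closed GeoJSON polygon ring. Below is a standalone sketch of that single step, with a made-up bbox in place of the geohash.bbox() output the real command uses.

# Standalone sketch of the bbox -> GeoJSON step used by GEOHASHTOGEOJSON above.
import json

def bbox_to_geojson(bbox):
    # GeoJSON rings are [lon, lat] pairs and must close on the starting corner.
    return {
        "type": "Polygon",
        "coordinates": [[
            [bbox['w'], bbox['n']],
            [bbox['e'], bbox['n']],
            [bbox['e'], bbox['s']],
            [bbox['w'], bbox['s']],
            [bbox['w'], bbox['n']],
        ]]
    }

print(json.dumps(bbox_to_geojson({'n': 48.13, 's': 48.12, 'e': 2.99, 'w': 2.98})))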
test
Cmd.do_GEOHASH
Compute a geohash from latitude and longitude. GEOHASH 48.1234 2.9876
addok/shell.py
def do_GEOHASH(self, latlon): """Compute a geohash from latitude and longitude. GEOHASH 48.1234 2.9876""" try: lat, lon = map(float, latlon.split()) except ValueError: print(red('Invalid lat and lon {}'.format(latlon))) else: print(white(geohash.encode(lat, lon, config.GEOHASH_PRECISION)))
def do_GEOHASH(self, latlon): """Compute a geohash from latitude and longitude. GEOHASH 48.1234 2.9876""" try: lat, lon = map(float, latlon.split()) except ValueError: print(red('Invalid lat and lon {}'.format(latlon))) else: print(white(geohash.encode(lat, lon, config.GEOHASH_PRECISION)))
[ "Compute", "a", "geohash", "from", "latitude", "and", "longitude", ".", "GEOHASH", "48", ".", "1234", "2", ".", "9876" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L322-L330
[ "def", "do_GEOHASH", "(", "self", ",", "latlon", ")", ":", "try", ":", "lat", ",", "lon", "=", "map", "(", "float", ",", "latlon", ".", "split", "(", ")", ")", "except", "ValueError", ":", "print", "(", "red", "(", "'Invalid lat and lon {}'", ".", "format", "(", "latlon", ")", ")", ")", "else", ":", "print", "(", "white", "(", "geohash", ".", "encode", "(", "lat", ",", "lon", ",", "config", ".", "GEOHASH_PRECISION", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
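The do_GEOHASH record above is a thin wrapper around geohash.encode. The sketch below shows that call in isolation; it is not addok code, and the hard-coded precision of 8 is an assumption standing in for config.GEOHASH_PRECISION.

# Minimal sketch (not addok code): encode a point to a geohash and decode
# it back, assuming the python-geohash package; precision 8 is an assumption.
import geohash

lat, lon = 48.1234, 2.9876
code = geohash.encode(lat, lon, 8)             # 8-character geohash string
approx_lat, approx_lon = geohash.decode(code)  # decode back to a nearby point
print(code, approx_lat, approx_lon)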
test
Cmd.do_GEOHASHMEMBERS
Return members of a geohash and its neighbors. GEOHASHMEMBERS u09vej04 [NEIGHBORS 0]
addok/shell.py
def do_GEOHASHMEMBERS(self, geoh):
    """Return members of a geohash and its neighbors.
    GEOHASHMEMBERS u09vej04 [NEIGHBORS 0]"""
    geoh, with_neighbors = self._match_option('NEIGHBORS', geoh)
    key = compute_geohash_key(geoh, with_neighbors != '0')
    if key:
        for id_ in DB.smembers(key):
            r = Result(id_)
            print('{} {}'.format(white(r), blue(r._id)))
def do_GEOHASHMEMBERS(self, geoh):
    """Return members of a geohash and its neighbors.
    GEOHASHMEMBERS u09vej04 [NEIGHBORS 0]"""
    geoh, with_neighbors = self._match_option('NEIGHBORS', geoh)
    key = compute_geohash_key(geoh, with_neighbors != '0')
    if key:
        for id_ in DB.smembers(key):
            r = Result(id_)
            print('{} {}'.format(white(r), blue(r._id)))
[ "Return", "members", "of", "a", "geohash", "and", "its", "neighbors", ".", "GEOHASHMEMBERS", "u09vej04", "[", "NEIGHBORS", "0", "]" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L332-L340
[ "def", "do_GEOHASHMEMBERS", "(", "self", ",", "geoh", ")", ":", "geoh", ",", "with_neighbors", "=", "self", ".", "_match_option", "(", "'NEIGHBORS'", ",", "geoh", ")", "key", "=", "compute_geohash_key", "(", "geoh", ",", "with_neighbors", "!=", "'0'", ")", "if", "key", ":", "for", "id_", "in", "DB", ".", "smembers", "(", "key", ")", ":", "r", "=", "Result", "(", "id_", ")", "print", "(", "'{} {}'", ".", "format", "(", "white", "(", "r", ")", ",", "blue", "(", "r", ".", "_id", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
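The do_GEOHASHMEMBERS record above ultimately reads a Redis set of document ids. The sketch below shows that read with redis-py; it is not addok code, and the key pattern 'gh|u09vej04' is purely hypothetical, since addok's real key layout comes from compute_geohash_key, which is not reproduced here.

# Minimal sketch (not addok code): list members of a Redis set keyed by a
# geohash; 'gh|u09vej04' is a hypothetical key pattern for illustration.
import redis

db = redis.Redis()
for member in db.smembers('gh|u09vej04'):
    print(member.decode())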
test
Cmd.do_GET
Get document from index with its id. GET 772210180J
addok/shell.py
def do_GET(self, _id):
    """Get document from index with its id.
    GET 772210180J"""
    doc = doc_by_id(_id)
    if not doc:
        return self.error('id "{}" not found'.format(_id))
    for key, value in doc.items():
        if key == config.HOUSENUMBERS_FIELD:
            continue
        print('{} {}'.format(white(key), magenta(value)))
    if doc.get('housenumbers'):
        def sorter(v):
            try:
                return int(re.match(r'^\d+', v['raw']).group())
            except AttributeError:
                return -1
        housenumbers = sorted(doc['housenumbers'].values(), key=sorter)
        print(white('housenumbers'),
              magenta(', '.join(v['raw'] for v in housenumbers)))
def do_GET(self, _id):
    """Get document from index with its id.
    GET 772210180J"""
    doc = doc_by_id(_id)
    if not doc:
        return self.error('id "{}" not found'.format(_id))
    for key, value in doc.items():
        if key == config.HOUSENUMBERS_FIELD:
            continue
        print('{} {}'.format(white(key), magenta(value)))
    if doc.get('housenumbers'):
        def sorter(v):
            try:
                return int(re.match(r'^\d+', v['raw']).group())
            except AttributeError:
                return -1
        housenumbers = sorted(doc['housenumbers'].values(), key=sorter)
        print(white('housenumbers'),
              magenta(', '.join(v['raw'] for v in housenumbers)))
[ "Get", "document", "from", "index", "with", "its", "id", ".", "GET", "772210180J" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L342-L360
[ "def", "do_GET", "(", "self", ",", "_id", ")", ":", "doc", "=", "doc_by_id", "(", "_id", ")", "if", "not", "doc", ":", "return", "self", ".", "error", "(", "'id \"{}\" not found'", ".", "format", "(", "_id", ")", ")", "for", "key", ",", "value", "in", "doc", ".", "items", "(", ")", ":", "if", "key", "==", "config", ".", "HOUSENUMBERS_FIELD", ":", "continue", "print", "(", "'{} {}'", ".", "format", "(", "white", "(", "key", ")", ",", "magenta", "(", "value", ")", ")", ")", "if", "doc", ".", "get", "(", "'housenumbers'", ")", ":", "def", "sorter", "(", "v", ")", ":", "try", ":", "return", "int", "(", "re", ".", "match", "(", "r'^\\d+'", ",", "v", "[", "'raw'", "]", ")", ".", "group", "(", ")", ")", "except", "AttributeError", ":", "return", "-", "1", "housenumbers", "=", "sorted", "(", "doc", "[", "'housenumbers'", "]", ".", "values", "(", ")", ",", "key", "=", "sorter", ")", "print", "(", "white", "(", "'housenumbers'", ")", ",", "magenta", "(", "', '", ".", "join", "(", "v", "[", "'raw'", "]", "for", "v", "in", "housenumbers", ")", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
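The housenumber ordering in the do_GET record above sorts raw strings by their leading integer, pushing values without a leading digit to the front. The sketch below isolates that sorter idea; it is not addok code, and the sample housenumber strings are made up for illustration.

# Minimal sketch (not addok code): sort strings by their leading integer,
# falling back to -1 when the string does not start with digits.
import re


def leading_number(raw):
    """Return the leading integer of `raw`, or -1 when there is none."""
    match = re.match(r'^\d+', raw)
    return int(match.group()) if match else -1


print(sorted(['12 bis', '2', 'ter', '101'], key=leading_number))
# -> ['ter', '2', '12 bis', '101']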
test
Cmd.do_INDEX
Get index details for a document by its id. INDEX 772210180J
addok/shell.py
def do_INDEX(self, _id):
    """Get index details for a document by its id.
    INDEX 772210180J"""
    doc = doc_by_id(_id)
    if not doc:
        return self.error('id "{}" not found'.format(_id))
    for field in config.FIELDS:
        key = field['key']
        if key in doc:
            self._print_field_index_details(doc[key], _id)
def do_INDEX(self, _id):
    """Get index details for a document by its id.
    INDEX 772210180J"""
    doc = doc_by_id(_id)
    if not doc:
        return self.error('id "{}" not found'.format(_id))
    for field in config.FIELDS:
        key = field['key']
        if key in doc:
            self._print_field_index_details(doc[key], _id)
[ "Get", "index", "details", "for", "a", "document", "by", "its", "id", ".", "INDEX", "772210180J" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L375-L384
[ "def", "do_INDEX", "(", "self", ",", "_id", ")", ":", "doc", "=", "doc_by_id", "(", "_id", ")", "if", "not", "doc", ":", "return", "self", ".", "error", "(", "'id \"{}\" not found'", ".", "format", "(", "_id", ")", ")", "for", "field", "in", "config", ".", "FIELDS", ":", "key", "=", "field", "[", "'key'", "]", "if", "key", "in", "doc", ":", "self", ".", "_print_field_index_details", "(", "doc", "[", "key", "]", ",", "_id", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
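The do_INDEX record above walks the configured fields and only inspects the ones present in the document. The sketch below shows that filtering pattern with stand-in data; it is not addok code, and the FIELDS list and doc dict are invented placeholders, since addok's real config.FIELDS entries carry more options than a bare 'key'.

# Minimal sketch (not addok code): iterate configured fields and act only on
# the ones a document actually contains; FIELDS and doc are stand-ins.
FIELDS = [{'key': 'name'}, {'key': 'city'}, {'key': 'postcode'}]
doc = {'name': 'rue des Lilas', 'city': 'Paris'}

for field in FIELDS:
    key = field['key']
    if key in doc:
        print(key, '->', doc[key])  # addok prints index details here instead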
test
Cmd.do_BESTSCORE
Return document linked to word with higher score. BESTSCORE lilas
addok/shell.py
def do_BESTSCORE(self, word):
    """Return document linked to word with higher score.
    BESTSCORE lilas"""
    key = keys.token_key(indexed_string(word)[0])
    for _id, score in DB.zrevrange(key, 0, 20, withscores=True):
        result = Result(_id)
        print(white(result), blue(score), green(result._id))
def do_BESTSCORE(self, word):
    """Return document linked to word with higher score.
    BESTSCORE lilas"""
    key = keys.token_key(indexed_string(word)[0])
    for _id, score in DB.zrevrange(key, 0, 20, withscores=True):
        result = Result(_id)
        print(white(result), blue(score), green(result._id))
[ "Return", "document", "linked", "to", "word", "with", "higher", "score", ".", "BESTSCORE", "lilas" ]
addok/addok
python
https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/shell.py#L386-L392
[ "def", "do_BESTSCORE", "(", "self", ",", "word", ")", ":", "key", "=", "keys", ".", "token_key", "(", "indexed_string", "(", "word", ")", "[", "0", "]", ")", "for", "_id", ",", "score", "in", "DB", ".", "zrevrange", "(", "key", ",", "0", ",", "20", ",", "withscores", "=", "True", ")", ":", "result", "=", "Result", "(", "_id", ")", "print", "(", "white", "(", "result", ")", ",", "blue", "(", "score", ")", ",", "green", "(", "result", ".", "_id", ")", ")" ]
46a270d76ec778d2b445c2be753e5c6ba070a9b2
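The do_BESTSCORE record above reads the highest-scored entries of a Redis sorted set. The sketch below shows the equivalent redis-py call on its own; it is not addok code, and the key 'w|lilas' is hypothetical, since addok derives the real key via keys.token_key(indexed_string(word)[0]).

# Minimal sketch (not addok code): read the top-scored members of a Redis
# sorted set; 'w|lilas' is a hypothetical key for illustration.
import redis

db = redis.Redis()
for member, score in db.zrevrange('w|lilas', 0, 20, withscores=True):
    print(member.decode(), score)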