Dataset schema (column, type, value range):
  partition         stringclasses   3 values
  func_name         stringlengths   1 - 134
  docstring         stringlengths   1 - 46.9k
  path              stringlengths   4 - 223
  original_string   stringlengths   75 - 104k
  code              stringlengths   75 - 104k
  docstring_tokens  listlengths     1 - 1.97k
  repo              stringlengths   7 - 55
  language          stringclasses   1 value
  url               stringlengths   87 - 315
  code_tokens       listlengths     19 - 28.4k
  sha               stringlengths   40 - 40
valid
EventHeap.add_event
Add an event to the heap/priority queue Parameters ---------- event : Event
gtfspy/spreading/heap.py
def add_event(self, event):
    """
    Add an event to the heap/priority queue

    Parameters
    ----------
    event : Event
    """
    assert event.dep_time_ut <= event.arr_time_ut
    heappush(self.heap, event)
[ "Add", "an", "event", "to", "the", "heap", "/", "priority", "queue" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/spreading/heap.py#L38-L47
[ "def", "add_event", "(", "self", ",", "event", ")", ":", "assert", "event", ".", "dep_time_ut", "<=", "event", ".", "arr_time_ut", "heappush", "(", "self", ".", "heap", ",", "event", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
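add_event pushes onto a plain heapq list and relies on the Event tuple's natural ordering so that the earliest event is always popped first. Below is a minimal standalone sketch of that pattern; the Event namedtuple here is a simplified stand-in with an invented field order, not gtfspy's actual Event definition.

from heapq import heappush, heappop
from collections import namedtuple

# Simplified stand-in for gtfspy's Event; field names/order chosen for illustration only.
Event = namedtuple("Event", ["arr_time_ut", "dep_time_ut", "from_stop_I", "to_stop_I", "trip_I"])

heap = []
heappush(heap, Event(arr_time_ut=120, dep_time_ut=100, from_stop_I=1, to_stop_I=2, trip_I=7))
heappush(heap, Event(arr_time_ut=90, dep_time_ut=80, from_stop_I=3, to_stop_I=1, trip_I=5))

# Namedtuples compare field by field, so the event with the smallest arrival
# time is popped first -- the property the heap-based spreading relies on.
earliest = heappop(heap)
assert earliest.arr_time_ut == 90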
valid
EventHeap.add_walk_events_to_heap
Parameters ---------- transfer_distances: e : Event start_time_ut : int walk_speed : float uninfected_stops : list max_duration_ut : int
gtfspy/spreading/heap.py
def add_walk_events_to_heap(self, transfer_distances, e, start_time_ut, walk_speed,
                            uninfected_stops, max_duration_ut):
    """
    Parameters
    ----------
    transfer_distances:
    e : Event
    start_time_ut : int
    walk_speed : float
    uninfected_stops : list
    max_duration_ut : int
    """
    n = len(transfer_distances)
    dists_values = transfer_distances.values
    to_stop_I_index = np.nonzero(transfer_distances.columns == 'to_stop_I')[0][0]
    d_index = np.nonzero(transfer_distances.columns == 'd')[0][0]
    for i in range(n):
        transfer_to_stop_I = dists_values[i, to_stop_I_index]
        if transfer_to_stop_I in uninfected_stops:
            d = dists_values[i, d_index]
            transfer_arr_time = e.arr_time_ut + int(d/float(walk_speed))
            if transfer_arr_time > start_time_ut + max_duration_ut:
                continue
            te = Event(transfer_arr_time, e.arr_time_ut, e.to_stop_I, transfer_to_stop_I, WALK)
            self.add_event(te)
[ "Parameters", "----------", "transfer_distances", ":", "e", ":", "Event", "start_time_ut", ":", "int", "walk_speed", ":", "float", "uninfected_stops", ":", "list", "max_duration_ut", ":", "int" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/spreading/heap.py#L58-L81
[ "def", "add_walk_events_to_heap", "(", "self", ",", "transfer_distances", ",", "e", ",", "start_time_ut", ",", "walk_speed", ",", "uninfected_stops", ",", "max_duration_ut", ")", ":", "n", "=", "len", "(", "transfer_distances", ")", "dists_values", "=", "transfer_distances", ".", "values", "to_stop_I_index", "=", "np", ".", "nonzero", "(", "transfer_distances", ".", "columns", "==", "'to_stop_I'", ")", "[", "0", "]", "[", "0", "]", "d_index", "=", "np", ".", "nonzero", "(", "transfer_distances", ".", "columns", "==", "'d'", ")", "[", "0", "]", "[", "0", "]", "for", "i", "in", "range", "(", "n", ")", ":", "transfer_to_stop_I", "=", "dists_values", "[", "i", ",", "to_stop_I_index", "]", "if", "transfer_to_stop_I", "in", "uninfected_stops", ":", "d", "=", "dists_values", "[", "i", ",", "d_index", "]", "transfer_arr_time", "=", "e", ".", "arr_time_ut", "+", "int", "(", "d", "/", "float", "(", "walk_speed", ")", ")", "if", "transfer_arr_time", ">", "start_time_ut", "+", "max_duration_ut", ":", "continue", "te", "=", "Event", "(", "transfer_arr_time", ",", "e", ".", "arr_time_ut", ",", "e", ".", "to_stop_I", ",", "transfer_to_stop_I", ",", "WALK", ")", "self", ".", "add_event", "(", "te", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
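The method resolves column positions once with np.nonzero and then walks the DataFrame's raw .values array. The following toy example (invented distances, arbitrary walk speed) illustrates just that lookup-and-iterate pattern.

import numpy as np
import pandas as pd

transfer_distances = pd.DataFrame({"to_stop_I": [11, 12, 13], "d": [100.0, 250.0, 400.0]})
walk_speed = 1.3  # metres per second, an arbitrary example value

# Resolve column positions once, then iterate over the raw ndarray.
to_stop_I_index = np.nonzero(transfer_distances.columns == "to_stop_I")[0][0]
d_index = np.nonzero(transfer_distances.columns == "d")[0][0]

dists_values = transfer_distances.values
for i in range(len(transfer_distances)):
    stop = dists_values[i, to_stop_I_index]
    walk_time = int(dists_values[i, d_index] / float(walk_speed))
    print(int(stop), walk_time)  # -> 11 76 / 12 192 / 13 307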
valid
NodeProfileMultiObjective._check_dep_time_is_valid
A simple checker, that connections are coming in descending order of departure time and that no departure time has been "skipped". Parameters ---------- dep_time Returns ------- None
gtfspy/routing/node_profile_multiobjective.py
def _check_dep_time_is_valid(self, dep_time):
    """
    A simple checker, that connections are coming in descending order of departure time
    and that no departure time has been "skipped".

    Parameters
    ----------
    dep_time

    Returns
    -------
    None
    """
    assert dep_time <= self._min_dep_time, "Labels should be entered in decreasing order of departure time."
    dep_time_index = self.dep_times_to_index[dep_time]
    if self._min_dep_time < float('inf'):
        min_dep_index = self.dep_times_to_index[self._min_dep_time]
        assert min_dep_index == dep_time_index or (min_dep_index == dep_time_index - 1), \
            "dep times should be ordered sequentially"
    else:
        assert dep_time_index is 0, "first dep_time index should be zero (ensuring that all connections are properly handled)"
    self._min_dep_time = dep_time
[ "A", "simple", "checker", "that", "connections", "are", "coming", "in", "descending", "order", "of", "departure", "time", "and", "that", "no", "departure", "time", "has", "been", "skipped", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/node_profile_multiobjective.py#L58-L79
[ "def", "_check_dep_time_is_valid", "(", "self", ",", "dep_time", ")", ":", "assert", "dep_time", "<=", "self", ".", "_min_dep_time", ",", "\"Labels should be entered in decreasing order of departure time.\"", "dep_time_index", "=", "self", ".", "dep_times_to_index", "[", "dep_time", "]", "if", "self", ".", "_min_dep_time", "<", "float", "(", "'inf'", ")", ":", "min_dep_index", "=", "self", ".", "dep_times_to_index", "[", "self", ".", "_min_dep_time", "]", "assert", "min_dep_index", "==", "dep_time_index", "or", "(", "min_dep_index", "==", "dep_time_index", "-", "1", ")", ",", "\"dep times should be ordered sequentially\"", "else", ":", "assert", "dep_time_index", "is", "0", ",", "\"first dep_time index should be zero (ensuring that all connections are properly handled)\"", "self", ".", "_min_dep_time", "=", "dep_time" ]
bddba4b74faae6c1b91202f19184811e326547e5
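The checker enforces that updates arrive in non-increasing departure time and that no departure-time index is skipped. Below is a stripped-down, free-standing version of the same invariant check, assuming the departure times are known up front and sorted in decreasing order; it mirrors the method's logic but is not the class itself.

def make_checker(sorted_dep_times):
    """sorted_dep_times: departure times sorted in decreasing order, as in the profile."""
    dep_times_to_index = {t: i for i, t in enumerate(sorted_dep_times)}
    state = {"min_dep_time": float("inf")}

    def check(dep_time):
        assert dep_time <= state["min_dep_time"], "labels must arrive in decreasing departure time"
        idx = dep_times_to_index[dep_time]
        if state["min_dep_time"] < float("inf"):
            prev_idx = dep_times_to_index[state["min_dep_time"]]
            # same departure time again, or exactly the next one in the ordering
            assert idx in (prev_idx, prev_idx + 1), "no departure time may be skipped"
        else:
            assert idx == 0, "the first update must use the largest departure time"
        state["min_dep_time"] = dep_time

    return check

check = make_checker([300, 240, 180, 120])
for t in (300, 240, 240, 180):  # valid sequence: non-increasing, no index skipped
    check(t)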
valid
NodeProfileMultiObjective.update
Update the profile with the new labels. Each new label should have the same departure_time. Parameters ---------- new_labels: list[LabelTime] Returns ------- added: bool whether new_pareto_tuple was added to the set of pareto-optimal tuples
gtfspy/routing/node_profile_multiobjective.py
def update(self, new_labels, departure_time_backup=None):
    """
    Update the profile with the new labels.
    Each new label should have the same departure_time.

    Parameters
    ----------
    new_labels: list[LabelTime]

    Returns
    -------
    added: bool
        whether new_pareto_tuple was added to the set of pareto-optimal tuples
    """
    if self._closed:
        raise RuntimeError("Profile is closed, no updates can be made")
    try:
        departure_time = next(iter(new_labels)).departure_time
    except StopIteration:
        departure_time = departure_time_backup
    self._check_dep_time_is_valid(departure_time)

    for new_label in new_labels:
        assert (new_label.departure_time == departure_time)
    dep_time_index = self.dep_times_to_index[departure_time]

    if dep_time_index > 0:
        # Departure time is modified in order to not pass on labels which are not Pareto-optimal when departure time is ignored.
        mod_prev_labels = [label.get_copy_with_specified_departure_time(departure_time) for label
                           in self._label_bags[dep_time_index - 1]]
    else:
        mod_prev_labels = list()
    mod_prev_labels += self._label_bags[dep_time_index]

    walk_label = self._get_label_to_target(departure_time)
    if walk_label:
        new_labels = new_labels + [walk_label]
    new_frontier = merge_pareto_frontiers(new_labels, mod_prev_labels)

    self._label_bags[dep_time_index] = new_frontier
    return True
[ "Update", "the", "profile", "with", "the", "new", "labels", ".", "Each", "new", "label", "should", "have", "the", "same", "departure_time", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/node_profile_multiobjective.py#L91-L131
[ "def", "update", "(", "self", ",", "new_labels", ",", "departure_time_backup", "=", "None", ")", ":", "if", "self", ".", "_closed", ":", "raise", "RuntimeError", "(", "\"Profile is closed, no updates can be made\"", ")", "try", ":", "departure_time", "=", "next", "(", "iter", "(", "new_labels", ")", ")", ".", "departure_time", "except", "StopIteration", ":", "departure_time", "=", "departure_time_backup", "self", ".", "_check_dep_time_is_valid", "(", "departure_time", ")", "for", "new_label", "in", "new_labels", ":", "assert", "(", "new_label", ".", "departure_time", "==", "departure_time", ")", "dep_time_index", "=", "self", ".", "dep_times_to_index", "[", "departure_time", "]", "if", "dep_time_index", ">", "0", ":", "# Departure time is modified in order to not pass on labels which are not Pareto-optimal when departure time is ignored.", "mod_prev_labels", "=", "[", "label", ".", "get_copy_with_specified_departure_time", "(", "departure_time", ")", "for", "label", "in", "self", ".", "_label_bags", "[", "dep_time_index", "-", "1", "]", "]", "else", ":", "mod_prev_labels", "=", "list", "(", ")", "mod_prev_labels", "+=", "self", ".", "_label_bags", "[", "dep_time_index", "]", "walk_label", "=", "self", ".", "_get_label_to_target", "(", "departure_time", ")", "if", "walk_label", ":", "new_labels", "=", "new_labels", "+", "[", "walk_label", "]", "new_frontier", "=", "merge_pareto_frontiers", "(", "new_labels", ",", "mod_prev_labels", ")", "self", ".", "_label_bags", "[", "dep_time_index", "]", "=", "new_frontier", "return", "True" ]
bddba4b74faae6c1b91202f19184811e326547e5
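update folds the incoming labels, the previous bag, and an optional walk label through merge_pareto_frontiers. The sketch below shows the core of such a merge for simple two-criteria labels (arrival time, number of boardings); it is an illustrative reimplementation, not gtfspy's own merge_pareto_frontiers.

def dominates(a, b):
    """a, b: (arrival_time, n_boardings). a dominates b if it is no worse on both criteria and differs."""
    return a[0] <= b[0] and a[1] <= b[1] and a != b

def merge_pareto_frontiers(*label_lists):
    # Keep every label that is not dominated by any other candidate.
    candidates = [l for labels in label_lists for l in labels]
    return [l for l in candidates
            if not any(dominates(other, l) for other in candidates)]

previous_bag = [(1000, 2), (1100, 1)]
new_labels = [(990, 2), (1200, 0)]
print(merge_pareto_frontiers(new_labels, previous_bag))
# -> [(990, 2), (1200, 0), (1100, 1)]   ((1000, 2) is dominated by (990, 2))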
valid
NodeProfileMultiObjective.evaluate
Get the pareto_optimal set of Labels, given a departure time. Parameters ---------- dep_time : float, int time in unix seconds first_leg_can_be_walk : bool, optional whether to allow walking to target to be included into the profile (I.e. whether this function is called when scanning a pseudo-connection: "double" walks are not allowed.) connection_arrival_time: float, int, optional used for computing the walking label if dep_time, i.e., connection.arrival_stop_next_departure_time, is infinity) connection: connection object Returns ------- pareto_optimal_labels : set Set of Labels
gtfspy/routing/node_profile_multiobjective.py
def evaluate(self, dep_time, first_leg_can_be_walk=True, connection_arrival_time=None):
    """
    Get the pareto_optimal set of Labels, given a departure time.

    Parameters
    ----------
    dep_time : float, int
        time in unix seconds
    first_leg_can_be_walk : bool, optional
        whether to allow walking to target to be included into the profile
        (I.e. whether this function is called when scanning a pseudo-connection:
        "double" walks are not allowed.)
    connection_arrival_time: float, int, optional
        used for computing the walking label if dep_time, i.e.,
        connection.arrival_stop_next_departure_time, is infinity)
    connection: connection object

    Returns
    -------
    pareto_optimal_labels : set
        Set of Labels
    """
    walk_labels = list()
    # walk label towards target
    if first_leg_can_be_walk and self._walk_to_target_duration != float('inf'):
        # add walk_label
        if connection_arrival_time is not None:
            walk_labels.append(self._get_label_to_target(connection_arrival_time))
        else:
            walk_labels.append(self._get_label_to_target(dep_time))

    # if dep time is larger than the largest dep time -> only walk labels are possible
    if dep_time in self.dep_times_to_index:
        assert (dep_time != float('inf'))
        index = self.dep_times_to_index[dep_time]
        labels = self._label_bags[index]
        pareto_optimal_labels = merge_pareto_frontiers(labels, walk_labels)
    else:
        pareto_optimal_labels = walk_labels

    if not first_leg_can_be_walk:
        pareto_optimal_labels = [label for label in pareto_optimal_labels if not label.first_leg_is_walk]
    return pareto_optimal_labels
[ "Get", "the", "pareto_optimal", "set", "of", "Labels", "given", "a", "departure", "time", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/node_profile_multiobjective.py#L133-L175
[ "def", "evaluate", "(", "self", ",", "dep_time", ",", "first_leg_can_be_walk", "=", "True", ",", "connection_arrival_time", "=", "None", ")", ":", "walk_labels", "=", "list", "(", ")", "# walk label towards target", "if", "first_leg_can_be_walk", "and", "self", ".", "_walk_to_target_duration", "!=", "float", "(", "'inf'", ")", ":", "# add walk_label", "if", "connection_arrival_time", "is", "not", "None", ":", "walk_labels", ".", "append", "(", "self", ".", "_get_label_to_target", "(", "connection_arrival_time", ")", ")", "else", ":", "walk_labels", ".", "append", "(", "self", ".", "_get_label_to_target", "(", "dep_time", ")", ")", "# if dep time is larger than the largest dep time -> only walk labels are possible", "if", "dep_time", "in", "self", ".", "dep_times_to_index", ":", "assert", "(", "dep_time", "!=", "float", "(", "'inf'", ")", ")", "index", "=", "self", ".", "dep_times_to_index", "[", "dep_time", "]", "labels", "=", "self", ".", "_label_bags", "[", "index", "]", "pareto_optimal_labels", "=", "merge_pareto_frontiers", "(", "labels", ",", "walk_labels", ")", "else", ":", "pareto_optimal_labels", "=", "walk_labels", "if", "not", "first_leg_can_be_walk", ":", "pareto_optimal_labels", "=", "[", "label", "for", "label", "in", "pareto_optimal_labels", "if", "not", "label", ".", "first_leg_is_walk", "]", "return", "pareto_optimal_labels" ]
bddba4b74faae6c1b91202f19184811e326547e5
valid
NodeProfileMultiObjective.finalize
Parameters ---------- neighbor_label_bags: list each list element is a list of labels corresponding to a neighboring node (note: only labels with first connection being a departure should be included) walk_durations: list departure_arrival_stop_pairs: list of tuples Returns ------- None
gtfspy/routing/node_profile_multiobjective.py
def finalize(self, neighbor_label_bags=None, walk_durations=None, departure_arrival_stop_pairs=None):
    """
    Parameters
    ----------
    neighbor_label_bags: list
        each list element is a list of labels corresponding to a neighboring node
        (note: only labels with first connection being a departure should be included)
    walk_durations: list
    departure_arrival_stop_pairs: list of tuples

    Returns
    -------
    None
    """
    assert (not self._finalized)
    if self._final_pareto_optimal_labels is None:
        self._compute_real_connection_labels()
    if neighbor_label_bags is not None:
        assert (len(walk_durations) == len(neighbor_label_bags))
        self._compute_final_pareto_optimal_labels(neighbor_label_bags,
                                                  walk_durations,
                                                  departure_arrival_stop_pairs)
    else:
        self._final_pareto_optimal_labels = self._real_connection_labels
    self._finalized = True
    self._closed = True
[ "Parameters", "----------", "neighbor_label_bags", ":", "list", "each", "list", "element", "is", "a", "list", "of", "labels", "corresponding", "to", "a", "neighboring", "node", "(", "note", ":", "only", "labels", "with", "first", "connection", "being", "a", "departure", "should", "be", "included", ")", "walk_durations", ":", "list", "departure_arrival_stop_pairs", ":", "list", "of", "tuples", "Returns", "-------", "None" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/node_profile_multiobjective.py#L234-L258
[ "def", "finalize", "(", "self", ",", "neighbor_label_bags", "=", "None", ",", "walk_durations", "=", "None", ",", "departure_arrival_stop_pairs", "=", "None", ")", ":", "assert", "(", "not", "self", ".", "_finalized", ")", "if", "self", ".", "_final_pareto_optimal_labels", "is", "None", ":", "self", ".", "_compute_real_connection_labels", "(", ")", "if", "neighbor_label_bags", "is", "not", "None", ":", "assert", "(", "len", "(", "walk_durations", ")", "==", "len", "(", "neighbor_label_bags", ")", ")", "self", ".", "_compute_final_pareto_optimal_labels", "(", "neighbor_label_bags", ",", "walk_durations", ",", "departure_arrival_stop_pairs", ")", "else", ":", "self", ".", "_final_pareto_optimal_labels", "=", "self", ".", "_real_connection_labels", "self", ".", "_finalized", "=", "True", "self", ".", "_closed", "=", "True" ]
bddba4b74faae6c1b91202f19184811e326547e5
valid
TableLoader.exists_by_source
Does this GTFS contain this file? (file specified by the class)
gtfspy/import_loaders/table_loader.py
def exists_by_source(self):
    """Does this GTFS contain this file? (file specified by the class)"""
    exists_list = []
    for source in self.gtfs_sources:
        if isinstance(source, dict):
            # source can now be either a dict or a zipfile
            if self.fname in source:
                if source[self.fname]:
                    exists_list.append(True)
                    continue
            # Handle zipfiles specially
            if "zipfile" in source:
                try:
                    Z = zipfile.ZipFile(source['zipfile'], mode='r')
                    Z.getinfo(os.path.join(source['zip_commonprefix'], self.fname))
                    exists_list.append(True)
                    continue
                # File does not exist in the zip archive
                except KeyError:
                    print(self.fname, ' missing in ', source)
                    exists_list.append(False)
                    continue
        # Normal filename
        elif isinstance(source, string_types):
            if os.path.exists(os.path.join(source, self.fname)):
                exists_list.append(True)
                continue
        exists_list.append(False)  # the "file" was not found in any of the sources, return false
    return exists_list
[ "Does", "this", "GTFS", "contain", "this", "file?", "(", "file", "specified", "by", "the", "class", ")" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/table_loader.py#L112-L141
[ "def", "exists_by_source", "(", "self", ")", ":", "exists_list", "=", "[", "]", "for", "source", "in", "self", ".", "gtfs_sources", ":", "if", "isinstance", "(", "source", ",", "dict", ")", ":", "# source can now be either a dict or a zipfile", "if", "self", ".", "fname", "in", "source", ":", "if", "source", "[", "self", ".", "fname", "]", ":", "exists_list", ".", "append", "(", "True", ")", "continue", "# Handle zipfiles specially", "if", "\"zipfile\"", "in", "source", ":", "try", ":", "Z", "=", "zipfile", ".", "ZipFile", "(", "source", "[", "'zipfile'", "]", ",", "mode", "=", "'r'", ")", "Z", ".", "getinfo", "(", "os", ".", "path", ".", "join", "(", "source", "[", "'zip_commonprefix'", "]", ",", "self", ".", "fname", ")", ")", "exists_list", ".", "append", "(", "True", ")", "continue", "# File does not exist in the zip archive", "except", "KeyError", ":", "print", "(", "self", ".", "fname", ",", "' missing in '", ",", "source", ")", "exists_list", ".", "append", "(", "False", ")", "continue", "# Normal filename", "elif", "isinstance", "(", "source", ",", "string_types", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "source", ",", "self", ".", "fname", ")", ")", ":", "exists_list", ".", "append", "(", "True", ")", "continue", "exists_list", ".", "append", "(", "False", ")", "# the \"file\" was not found in any of the sources, return false", "return", "exists_list" ]
bddba4b74faae6c1b91202f19184811e326547e5
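The zip branch depends on ZipFile.getinfo raising KeyError for a missing member. A minimal standalone version of that existence check; the archive and member names below are placeholders.

import zipfile

def file_in_zip(zip_path, member):
    """Return True if `member` exists inside the archive at `zip_path`."""
    with zipfile.ZipFile(zip_path, mode="r") as z:
        try:
            z.getinfo(member)   # raises KeyError when the member is absent
            return True
        except KeyError:
            return False

# Example call (placeholder paths):
# file_in_zip("some_feed.zip", "gtfs/stops.txt")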
valid
TableLoader.create_table
Make table definitions
gtfspy/import_loaders/table_loader.py
def create_table(self, conn):
    """Make table definitions"""
    # Make cursor
    cur = conn.cursor()
    # Drop table if it already exists, to be recreated. This
    # could in the future abort if table already exists, and not
    # recreate it from scratch.
    #cur.execute('''DROP TABLE IF EXISTS %s'''%self.table)
    #conn.commit()
    if self.tabledef is None:
        return
    if not self.tabledef.startswith('CREATE'):
        # "normal" table creation.
        cur.execute('CREATE TABLE IF NOT EXISTS %s %s'
                    % (self.table, self.tabledef)
                    )
    else:
        # When tabledef contains the full CREATE statement (for
        # virtual tables).
        cur.execute(self.tabledef)
    conn.commit()
[ "Make", "table", "definitions" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/table_loader.py#L239-L259
[ "def", "create_table", "(", "self", ",", "conn", ")", ":", "# Make cursor", "cur", "=", "conn", ".", "cursor", "(", ")", "# Drop table if it already exists, to be recreated. This", "# could in the future abort if table already exists, and not", "# recreate it from scratch.", "#cur.execute('''DROP TABLE IF EXISTS %s'''%self.table)", "#conn.commit()", "if", "self", ".", "tabledef", "is", "None", ":", "return", "if", "not", "self", ".", "tabledef", ".", "startswith", "(", "'CREATE'", ")", ":", "# \"normal\" table creation.", "cur", ".", "execute", "(", "'CREATE TABLE IF NOT EXISTS %s %s'", "%", "(", "self", ".", "table", ",", "self", ".", "tabledef", ")", ")", "else", ":", "# When tabledef contains the full CREATE statement (for", "# virtual tables).", "cur", ".", "execute", "(", "self", ".", "tabledef", ")", "conn", ".", "commit", "(", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
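create_table distinguishes a bare column definition from a complete CREATE statement. A small sqlite3 sketch of the same branching, using an in-memory database and a made-up table definition:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()

table, tabledef = "stops", "(stop_I INTEGER PRIMARY KEY, lat REAL, lon REAL)"
if not tabledef.startswith("CREATE"):
    # bare column spec: wrap it in CREATE TABLE IF NOT EXISTS
    cur.execute("CREATE TABLE IF NOT EXISTS %s %s" % (table, tabledef))
else:
    # full statement (e.g. a virtual table definition): execute as-is
    cur.execute(tabledef)
conn.commit()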
valid
TableLoader.insert_data
Load data from GTFS file into database
gtfspy/import_loaders/table_loader.py
def insert_data(self, conn):
    """Load data from GTFS file into database"""
    cur = conn.cursor()
    # This is a bit hackish. It is annoying to have to write the
    # INSERT statement yourself and keep it up to date with the
    # table rows. This gets the first row, figures out the field
    # names from that, and then makes an INSERT statement like
    # "INSERT INTO table (col1, col2, ...) VALUES (:col1, :col2,
    # ...)". The ":col1" is sqlite syntax for named value.
    csv_reader_generators, prefixes = self._get_csv_reader_generators()
    for csv_reader, prefix in zip(csv_reader_generators, prefixes):
        try:
            row = next(iter(self.gen_rows([csv_reader], [prefix])))
            fields = row.keys()
        except StopIteration:
            # The file has *only* a header and no data.
            # next(iter()) yields StopIteration and we can't
            # proceed. Since there is nothing to import, just continue the loop
            print("Not importing %s into %s for %s" % (self.fname, self.table, prefix))
            continue
        stmt = '''INSERT INTO %s (%s) VALUES (%s)''' % (
            self.table,
            (', '.join([x for x in fields if x[0] != '_'] + self.extra_keys)),
            (', '.join([":" + x for x in fields if x[0] != '_'] + self.extra_values))
        )

        # This does the actual insertions. Passed the INSERT
        # statement and then an iterator over dictionaries. Each
        # dictionary is inserted.
        if self.print_progress:
            print('Importing %s into %s for %s' % (self.fname, self.table, prefix))
        # the first row was consumed by fetching the fields
        # (this could be optimized)
        from itertools import chain
        rows = chain([row], self.gen_rows([csv_reader], [prefix]))
        cur.executemany(stmt, rows)
        conn.commit()
[ "Load", "data", "from", "GTFS", "file", "into", "database" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/table_loader.py#L261-L298
[ "def", "insert_data", "(", "self", ",", "conn", ")", ":", "cur", "=", "conn", ".", "cursor", "(", ")", "# This is a bit hackish. It is annoying to have to write the", "# INSERT statement yourself and keep it up to date with the", "# table rows. This gets the first row, figures out the field", "# names from that, and then makes an INSERT statement like", "# \"INSERT INTO table (col1, col2, ...) VALUES (:col1, :col2,", "# ...)\". The \":col1\" is sqlite syntax for named value.", "csv_reader_generators", ",", "prefixes", "=", "self", ".", "_get_csv_reader_generators", "(", ")", "for", "csv_reader", ",", "prefix", "in", "zip", "(", "csv_reader_generators", ",", "prefixes", ")", ":", "try", ":", "row", "=", "next", "(", "iter", "(", "self", ".", "gen_rows", "(", "[", "csv_reader", "]", ",", "[", "prefix", "]", ")", ")", ")", "fields", "=", "row", ".", "keys", "(", ")", "except", "StopIteration", ":", "# The file has *only* a header and no data.", "# next(iter()) yields StopIteration and we can't", "# proceed. Since there is nothing to import, just continue the loop", "print", "(", "\"Not importing %s into %s for %s\"", "%", "(", "self", ".", "fname", ",", "self", ".", "table", ",", "prefix", ")", ")", "continue", "stmt", "=", "'''INSERT INTO %s (%s) VALUES (%s)'''", "%", "(", "self", ".", "table", ",", "(", "', '", ".", "join", "(", "[", "x", "for", "x", "in", "fields", "if", "x", "[", "0", "]", "!=", "'_'", "]", "+", "self", ".", "extra_keys", ")", ")", ",", "(", "', '", ".", "join", "(", "[", "\":\"", "+", "x", "for", "x", "in", "fields", "if", "x", "[", "0", "]", "!=", "'_'", "]", "+", "self", ".", "extra_values", ")", ")", ")", "# This does the actual insertions. Passed the INSERT", "# statement and then an iterator over dictionaries. Each", "# dictionary is inserted.", "if", "self", ".", "print_progress", ":", "print", "(", "'Importing %s into %s for %s'", "%", "(", "self", ".", "fname", ",", "self", ".", "table", ",", "prefix", ")", ")", "# the first row was consumed by fetching the fields", "# (this could be optimized)", "from", "itertools", "import", "chain", "rows", "=", "chain", "(", "[", "row", "]", ",", "self", ".", "gen_rows", "(", "[", "csv_reader", "]", ",", "[", "prefix", "]", ")", ")", "cur", ".", "executemany", "(", "stmt", ",", "rows", ")", "conn", ".", "commit", "(", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
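The INSERT statement is assembled from the first row's keys and executed with executemany over an iterator of dicts, letting sqlite3 bind the :name placeholders from each dict. A compact standalone version of that pattern with a toy table and invented rows:

import sqlite3
from itertools import chain

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE stops (stop_id TEXT, lat REAL, lon REAL)")

rows = iter([{"stop_id": "A", "lat": 60.17, "lon": 24.94},
             {"stop_id": "B", "lat": 60.18, "lon": 24.95}])
first = next(rows)                        # peek at the first row to learn the field names
fields = list(first.keys())

stmt = "INSERT INTO stops (%s) VALUES (%s)" % (
    ", ".join(fields),
    ", ".join(":" + f for f in fields))   # named-parameter syntax, e.g. :stop_id

conn.executemany(stmt, chain([first], rows))  # put the peeked row back in front
conn.commit()
print(conn.execute("SELECT count(*) FROM stops").fetchone()[0])  # -> 2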
valid
TableLoader.import_
Do the actual import. Copy data and store in connection object. This function: - Creates the tables - Imports data (using self.gen_rows) - Run any post_import hooks. - Creates any indexs - Does *not* run self.make_views - those must be done after all tables are loaded.
gtfspy/import_loaders/table_loader.py
def import_(self, conn):
    """Do the actual import. Copy data and store in connection object.

    This function:
    - Creates the tables
    - Imports data (using self.gen_rows)
    - Run any post_import hooks.
    - Creates any indexs
    - Does *not* run self.make_views - those must be done after all tables are loaded.
    """
    if self.print_progress:
        print('Beginning', self.__class__.__name__)
    # what is this mystical self._conn ?
    self._conn = conn
    self.create_table(conn)
    # This does insertions
    if self.mode in ('all', 'import') and self.fname and self.exists() and self.table not in ignore_tables:
        self.insert_data(conn)
    # This makes indexes in the DB.
    if self.mode in ('all', 'index') and hasattr(self, 'index'):
        self.create_index(conn)
    # Any post-processing to be done after the full import.
    if self.mode in ('all', 'import') and hasattr(self, 'post_import'):
        self.run_post_import(conn)
    # Commit it all
    conn.commit()
[ "Do", "the", "actual", "import", ".", "Copy", "data", "and", "store", "in", "connection", "object", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/table_loader.py#L338-L365
[ "def", "import_", "(", "self", ",", "conn", ")", ":", "if", "self", ".", "print_progress", ":", "print", "(", "'Beginning'", ",", "self", ".", "__class__", ".", "__name__", ")", "# what is this mystical self._conn ?", "self", ".", "_conn", "=", "conn", "self", ".", "create_table", "(", "conn", ")", "# This does insertions", "if", "self", ".", "mode", "in", "(", "'all'", ",", "'import'", ")", "and", "self", ".", "fname", "and", "self", ".", "exists", "(", ")", "and", "self", ".", "table", "not", "in", "ignore_tables", ":", "self", ".", "insert_data", "(", "conn", ")", "# This makes indexes in the DB.", "if", "self", ".", "mode", "in", "(", "'all'", ",", "'index'", ")", "and", "hasattr", "(", "self", ",", "'index'", ")", ":", "self", ".", "create_index", "(", "conn", ")", "# Any post-processing to be done after the full import.", "if", "self", ".", "mode", "in", "(", "'all'", ",", "'import'", ")", "and", "hasattr", "(", "self", ",", "'post_import'", ")", ":", "self", ".", "run_post_import", "(", "conn", ")", "# Commit it all", "conn", ".", "commit", "(", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
valid
TableLoader.copy
Copy data from one table to another while filtering data at the same time Parameters ---------- conn: sqlite3 DB connection. It must have a second database attached as "other". **where : keyword arguments specifying (start_ut and end_ut for filtering, see the copy_where clause in the subclasses)
gtfspy/import_loaders/table_loader.py
def copy(cls, conn, **where):
    """Copy data from one table to another while filtering data at the same time

    Parameters
    ----------
    conn: sqlite3 DB connection.  It must have a second database
        attached as "other".
    **where : keyword arguments
        specifying (start_ut and end_ut for filtering, see the copy_where clause in the subclasses)
    """
    cur = conn.cursor()
    if where and cls.copy_where:
        copy_where = cls.copy_where.format(**where)
        # print(copy_where)
    else:
        copy_where = ''
    cur.execute('INSERT INTO %s '
                'SELECT * FROM source.%s %s' % (cls.table, cls.table, copy_where))
[ "Copy", "data", "from", "one", "table", "to", "another", "while", "filtering", "data", "at", "the", "same", "time" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/table_loader.py#L375-L392
[ "def", "copy", "(", "cls", ",", "conn", ",", "*", "*", "where", ")", ":", "cur", "=", "conn", ".", "cursor", "(", ")", "if", "where", "and", "cls", ".", "copy_where", ":", "copy_where", "=", "cls", ".", "copy_where", ".", "format", "(", "*", "*", "where", ")", "# print(copy_where)", "else", ":", "copy_where", "=", "''", "cur", ".", "execute", "(", "'INSERT INTO %s '", "'SELECT * FROM source.%s %s'", "%", "(", "cls", ".", "table", ",", "cls", ".", "table", ",", "copy_where", ")", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
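copy expects a second database to be attached (the SQL reads from the source. schema, even though the docstring above says it is attached as "other") and appends rows with INSERT INTO ... SELECT plus an optionally formatted WHERE clause. A minimal sketch of the same mechanics with two throwaway in-memory databases and invented filter bounds:

import sqlite3

conn = sqlite3.connect(":memory:")                      # destination database
conn.execute("CREATE TABLE trips (trip_I INTEGER, start_time_ut INTEGER)")

conn.execute("ATTACH DATABASE ':memory:' AS source")    # stand-in for the attached source db
conn.execute("CREATE TABLE source.trips (trip_I INTEGER, start_time_ut INTEGER)")
conn.executemany("INSERT INTO source.trips VALUES (?, ?)",
                 [(1, 100), (2, 900), (3, 2000)])

# Mirrors cls.copy_where.format(**where) with example bounds.
copy_where = "WHERE start_time_ut >= {start_ut} AND start_time_ut < {end_ut}".format(
    start_ut=0, end_ut=1000)
conn.execute("INSERT INTO trips SELECT * FROM source.trips " + copy_where)
print(conn.execute("SELECT count(*) FROM trips").fetchone()[0])  # -> 2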
valid
JourneyDataAnalyzer.get_journey_legs_to_target
Returns a dataframe of aggregated sections from source nodes to target. The returned sections are either transfer point to transfer point or stop to stop. In a before after setting, the results can be filtered based on values in a difference db. :param target: :param fastest_path: :param min_boardings: :param all_leg_sections: :param ignore_walk: :param diff_threshold: :param diff_path: :return:
gtfspy/routing/journey_data_analyzer.py
def get_journey_legs_to_target(self, target, fastest_path=True, min_boardings=False, all_leg_sections=True,
                               ignore_walk=False, diff_threshold=None, diff_path=None):
    """
    Returns a dataframe of aggregated sections from source nodes to target. The returned sections are either
    transfer point to transfer point or stop to stop. In a before after setting, the results can be filtered
    based on values in a difference db.
    :param target:
    :param fastest_path:
    :param min_boardings:
    :param all_leg_sections:
    :param ignore_walk:
    :param diff_threshold:
    :param diff_path:
    :return:
    """
    assert not (fastest_path and min_boardings)
    if min_boardings:
        raise NotImplementedError
    if all_leg_sections and diff_threshold:
        raise NotImplementedError

    added_constraints = ""
    add_diff = ""
    if fastest_path:
        added_constraints += " AND journeys.pre_journey_wait_fp>=0"
    if ignore_walk:
        added_constraints += " AND legs.trip_I >= 0"
    if diff_path and diff_threshold:
        self.conn = attach_database(self.conn, diff_path, name="diff")
        add_diff = ", diff.diff_temporal_distance"
        added_constraints += " AND abs(diff_temporal_distance.diff_mean) >= %s " \
                             "AND diff_temporal_distance.from_stop_I = journeys.from_stop_I " \
                             "AND diff_temporal_distance.to_stop_I = journeys.to_stop_I" % (diff_threshold,)

    if all_leg_sections:
        df = self._get_journey_legs_to_target_with_all_sections(target, added_constraints)
    else:
        query = """SELECT from_stop_I, to_stop_I, coalesce(type, -1) AS type, count(*) AS n_trips
                   FROM
                   (SELECT legs.* FROM legs, journeys %s
                    WHERE journeys.journey_id = legs.journey_id AND journeys.to_stop_I = %s %s) q1
                   LEFT JOIN (SELECT * FROM other.trips, other.routes WHERE trips.route_I = routes.route_I) q2
                   ON q1.trip_I = q2.trip_I
                   GROUP BY from_stop_I, to_stop_I, type""" % (add_diff, str(target), added_constraints)
        df = read_sql_query(query, self.conn)
    return df
[ "Returns", "a", "dataframe", "of", "aggregated", "sections", "from", "source", "nodes", "to", "target", ".", "The", "returned", "sections", "are", "either", "transfer", "point", "to", "transfer", "point", "or", "stop", "to", "stop", ".", "In", "a", "before", "after", "setting", "the", "results", "can", "be", "filtered", "based", "on", "values", "in", "a", "difference", "db", ".", ":", "param", "target", ":", ":", "param", "fastest_path", ":", ":", "param", "min_boardings", ":", ":", "param", "all_leg_sections", ":", ":", "param", "ignore_walk", ":", ":", "param", "diff_threshold", ":", ":", "param", "diff_path", ":", ":", "return", ":" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/journey_data_analyzer.py#L26-L73
[ "def", "get_journey_legs_to_target", "(", "self", ",", "target", ",", "fastest_path", "=", "True", ",", "min_boardings", "=", "False", ",", "all_leg_sections", "=", "True", ",", "ignore_walk", "=", "False", ",", "diff_threshold", "=", "None", ",", "diff_path", "=", "None", ")", ":", "assert", "not", "(", "fastest_path", "and", "min_boardings", ")", "if", "min_boardings", ":", "raise", "NotImplementedError", "if", "all_leg_sections", "and", "diff_threshold", ":", "raise", "NotImplementedError", "added_constraints", "=", "\"\"", "add_diff", "=", "\"\"", "if", "fastest_path", ":", "added_constraints", "+=", "\" AND journeys.pre_journey_wait_fp>=0\"", "if", "ignore_walk", ":", "added_constraints", "+=", "\" AND legs.trip_I >= 0\"", "if", "diff_path", "and", "diff_threshold", ":", "self", ".", "conn", "=", "attach_database", "(", "self", ".", "conn", ",", "diff_path", ",", "name", "=", "\"diff\"", ")", "add_diff", "=", "\", diff.diff_temporal_distance\"", "added_constraints", "+=", "\" AND abs(diff_temporal_distance.diff_mean) >= %s \"", "\"AND diff_temporal_distance.from_stop_I = journeys.from_stop_I \"", "\"AND diff_temporal_distance.to_stop_I = journeys.to_stop_I\"", "%", "(", "diff_threshold", ",", ")", "if", "all_leg_sections", ":", "df", "=", "self", ".", "_get_journey_legs_to_target_with_all_sections", "(", "target", ",", "added_constraints", ")", "else", ":", "query", "=", "\"\"\"SELECT from_stop_I, to_stop_I, coalesce(type, -1) AS type,\n count(*) AS n_trips\n FROM\n (SELECT legs.* FROM legs, journeys %s\n WHERE journeys.journey_id = legs.journey_id AND journeys.to_stop_I = %s %s) q1\n LEFT JOIN (SELECT * FROM other.trips, other.routes WHERE trips.route_I = routes.route_I) q2\n ON q1.trip_I = q2.trip_I\n GROUP BY from_stop_I, to_stop_I, type\"\"\"", "%", "(", "add_diff", ",", "str", "(", "target", ")", ",", "added_constraints", ")", "df", "=", "read_sql_query", "(", "query", ",", "self", ".", "conn", ")", "return", "df" ]
bddba4b74faae6c1b91202f19184811e326547e5
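When all_leg_sections is False, the method reduces to a GROUP BY aggregation read into a DataFrame via pandas' read_sql_query. The toy example below reproduces that shape on an invented legs table; column names mirror the ones used above.

import sqlite3
import pandas as pd

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE legs (journey_id INTEGER, from_stop_I INTEGER, to_stop_I INTEGER, trip_I INTEGER)")
conn.executemany("INSERT INTO legs VALUES (?, ?, ?, ?)",
                 [(1, 10, 20, 5), (2, 10, 20, 6), (3, 20, 30, -1)])

query = """SELECT from_stop_I, to_stop_I, count(*) AS n_trips
           FROM legs
           GROUP BY from_stop_I, to_stop_I"""
df = pd.read_sql_query(query, conn)
print(df)
# columns: from_stop_I, to_stop_I, n_trips
# rows: (10, 20, 2) and (20, 30, 1); row order may vary without an ORDER BY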
valid
JourneyDataAnalyzer.get_upstream_stops_ratio
Selects the stops for which the ratio or higher proportion of trips to the target passes trough a set of trough stops :param target: target of trips :param trough_stops: stops where the selected trips are passing trough :param ratio: threshold for inclusion :return:
gtfspy/routing/journey_data_analyzer.py
def get_upstream_stops_ratio(self, target, trough_stops, ratio):
    """
    Selects the stops for which the ratio or higher proportion of trips to the target passes trough a set of
    trough stops
    :param target: target of trips
    :param trough_stops: stops where the selected trips are passing trough
    :param ratio: threshold for inclusion
    :return:
    """
    if isinstance(trough_stops, list):
        trough_stops = ",".join(trough_stops)
    query = """SELECT stops.* FROM other.stops,
                (SELECT q2.from_stop_I AS stop_I FROM
                 (SELECT journeys.from_stop_I, count(*) AS n_total FROM journeys
                  WHERE journeys.to_stop_I = {target}
                  GROUP BY from_stop_I) q1,
                 (SELECT journeys.from_stop_I, count(*) AS n_trough FROM journeys, legs
                  WHERE journeys.journey_id=legs.journey_id AND legs.from_stop_I IN ({trough_stops}) AND journeys.to_stop_I = {target}
                  GROUP BY journeys.from_stop_I) q2
                 WHERE q1.from_stop_I = q2.from_stop_I AND n_trough/(n_total*1.0) >= {ratio}) q1
                WHERE stops.stop_I = q1.stop_I""".format(target=target, trough_stops=trough_stops, ratio=ratio)
    df = read_sql_query(query, self.conn)
    return df
[ "Selects", "the", "stops", "for", "which", "the", "ratio", "or", "higher", "proportion", "of", "trips", "to", "the", "target", "passes", "trough", "a", "set", "of", "trough", "stops", ":", "param", "target", ":", "target", "of", "trips", ":", "param", "trough_stops", ":", "stops", "where", "the", "selected", "trips", "are", "passing", "trough", ":", "param", "ratio", ":", "threshold", "for", "inclusion", ":", "return", ":" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/journey_data_analyzer.py#L222-L243
[ "def", "get_upstream_stops_ratio", "(", "self", ",", "target", ",", "trough_stops", ",", "ratio", ")", ":", "if", "isinstance", "(", "trough_stops", ",", "list", ")", ":", "trough_stops", "=", "\",\"", ".", "join", "(", "trough_stops", ")", "query", "=", "\"\"\"SELECT stops.* FROM other.stops, \n (SELECT q2.from_stop_I AS stop_I FROM \n (SELECT journeys.from_stop_I, count(*) AS n_total FROM journeys\n WHERE journeys.to_stop_I = {target} \n GROUP BY from_stop_I) q1,\n (SELECT journeys.from_stop_I, count(*) AS n_trough FROM journeys, legs \n WHERE journeys.journey_id=legs.journey_id AND legs.from_stop_I IN ({trough_stops}) AND journeys.to_stop_I = {target}\n GROUP BY journeys.from_stop_I) q2\n WHERE q1.from_stop_I = q2.from_stop_I AND n_trough/(n_total*1.0) >= {ratio}) q1\n WHERE stops.stop_I = q1.stop_I\"\"\"", ".", "format", "(", "target", "=", "target", ",", "trough_stops", "=", "trough_stops", ",", "ratio", "=", "ratio", ")", "df", "=", "read_sql_query", "(", "query", ",", "self", ".", "conn", ")", "return", "df" ]
bddba4b74faae6c1b91202f19184811e326547e5
valid
get_spatial_bounds
Parameters ---------- gtfs Returns ------- min_lon: float max_lon: float min_lat: float max_lat: float
gtfspy/stats.py
def get_spatial_bounds(gtfs, as_dict=False):
    """
    Parameters
    ----------
    gtfs

    Returns
    -------
    min_lon: float
    max_lon: float
    min_lat: float
    max_lat: float
    """
    stats = get_stats(gtfs)
    lon_min = stats['lon_min']
    lon_max = stats['lon_max']
    lat_min = stats['lat_min']
    lat_max = stats['lat_max']
    if as_dict:
        return {'lon_min': lon_min, 'lon_max': lon_max, 'lat_min': lat_min, 'lat_max': lat_max}
    else:
        return lon_min, lon_max, lat_min, lat_max
[ "Parameters", "----------", "gtfs" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L14-L35
[ "def", "get_spatial_bounds", "(", "gtfs", ",", "as_dict", "=", "False", ")", ":", "stats", "=", "get_stats", "(", "gtfs", ")", "lon_min", "=", "stats", "[", "'lon_min'", "]", "lon_max", "=", "stats", "[", "'lon_max'", "]", "lat_min", "=", "stats", "[", "'lat_min'", "]", "lat_max", "=", "stats", "[", "'lat_max'", "]", "if", "as_dict", ":", "return", "{", "'lon_min'", ":", "lon_min", ",", "'lon_max'", ":", "lon_max", ",", "'lat_min'", ":", "lat_min", ",", "'lat_max'", ":", "lat_max", "}", "else", ":", "return", "lon_min", ",", "lon_max", ",", "lat_min", ",", "lat_max" ]
bddba4b74faae6c1b91202f19184811e326547e5
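get_spatial_bounds merely unpacks the lon_min/lon_max/lat_min/lat_max entries that get_stats derives from stop coordinates with numpy.percentile. An illustrative, standalone computation of such bounds from invented coordinates:

import numpy

lats = numpy.array([60.15, 60.17, 60.21, 60.29])
lons = numpy.array([24.90, 24.94, 25.01, 25.10])

# Same percentile levels get_stats uses: min, 10th, median, 90th, max.
lat_min, lat_10, lat_median, lat_90, lat_max = numpy.percentile(lats, [0, 10, 50, 90, 100])
lon_min, lon_10, lon_median, lon_90, lon_max = numpy.percentile(lons, [0, 10, 50, 90, 100])

bounds = (lon_min, lon_max, lat_min, lat_max)   # same order as get_spatial_bounds returns
print(bounds)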
valid
get_median_lat_lon_of_stops
Get median latitude AND longitude of stops Parameters ---------- gtfs: GTFS Returns ------- median_lat : float median_lon : float
gtfspy/stats.py
def get_median_lat_lon_of_stops(gtfs):
    """
    Get median latitude AND longitude of stops

    Parameters
    ----------
    gtfs: GTFS

    Returns
    -------
    median_lat : float
    median_lon : float
    """
    stops = gtfs.get_table("stops")
    median_lat = numpy.percentile(stops['lat'].values, 50)
    median_lon = numpy.percentile(stops['lon'].values, 50)
    return median_lat, median_lon
[ "Get", "median", "latitude", "AND", "longitude", "of", "stops" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L48-L64
[ "def", "get_median_lat_lon_of_stops", "(", "gtfs", ")", ":", "stops", "=", "gtfs", ".", "get_table", "(", "\"stops\"", ")", "median_lat", "=", "numpy", ".", "percentile", "(", "stops", "[", "'lat'", "]", ".", "values", ",", "50", ")", "median_lon", "=", "numpy", ".", "percentile", "(", "stops", "[", "'lon'", "]", ".", "values", ",", "50", ")", "return", "median_lat", ",", "median_lon" ]
bddba4b74faae6c1b91202f19184811e326547e5
valid
get_centroid_of_stops
Get mean latitude AND longitude of stops Parameters ---------- gtfs: GTFS Returns ------- mean_lat : float mean_lon : float
gtfspy/stats.py
def get_centroid_of_stops(gtfs):
    """
    Get mean latitude AND longitude of stops

    Parameters
    ----------
    gtfs: GTFS

    Returns
    -------
    mean_lat : float
    mean_lon : float
    """
    stops = gtfs.get_table("stops")
    mean_lat = numpy.mean(stops['lat'].values)
    mean_lon = numpy.mean(stops['lon'].values)
    return mean_lat, mean_lon
[ "Get", "mean", "latitude", "AND", "longitude", "of", "stops" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L66-L82
[ "def", "get_centroid_of_stops", "(", "gtfs", ")", ":", "stops", "=", "gtfs", ".", "get_table", "(", "\"stops\"", ")", "mean_lat", "=", "numpy", ".", "mean", "(", "stops", "[", "'lat'", "]", ".", "values", ")", "mean_lon", "=", "numpy", ".", "mean", "(", "stops", "[", "'lon'", "]", ".", "values", ")", "return", "mean_lat", ",", "mean_lon" ]
bddba4b74faae6c1b91202f19184811e326547e5
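get_centroid_of_stops and get_median_lat_lon_of_stops differ only in the aggregate: mean versus 50th percentile. The difference matters when a feed contains outlying stops, as the invented coordinates below illustrate.

import numpy

lats = numpy.array([60.16, 60.17, 60.18, 61.90])   # one outlying stop far to the north
lons = numpy.array([24.93, 24.94, 24.95, 25.80])

print(numpy.mean(lats), numpy.mean(lons))                       # centroid, pulled towards the outlier
print(numpy.percentile(lats, 50), numpy.percentile(lons, 50))   # median, robust to it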
valid
write_stats_as_csv
Writes data from get_stats to csv file Parameters ---------- gtfs: GTFS path_to_csv: str filepath to the csv file to be generated re_write: insted of appending, create a new one.
gtfspy/stats.py
def write_stats_as_csv(gtfs, path_to_csv, re_write=False):
    """
    Writes data from get_stats to csv file

    Parameters
    ----------
    gtfs: GTFS
    path_to_csv: str
        filepath to the csv file to be generated
    re_write:
        insted of appending, create a new one.
    """
    stats_dict = get_stats(gtfs)
    # check if file exist
    if re_write:
        os.remove(path_to_csv)

    #if not os.path.isfile(path_to_csv):
    #    is_new = True
    #else:
    #    is_new = False

    is_new = True
    mode = 'r' if os.path.exists(path_to_csv) else 'w+'
    with open(path_to_csv, mode) as csvfile:
        for line in csvfile:
            if line:
                is_new = False
            else:
                is_new = True

    with open(path_to_csv, 'a') as csvfile:
        if (sys.version_info > (3, 0)):
            delimiter = u","
        else:
            delimiter = b","
        statswriter = csv.writer(csvfile, delimiter=delimiter)
        # write column names if
        if is_new:
            statswriter.writerow([key for key in sorted(stats_dict.keys())])

        row_to_write = []
        # write stats row sorted by column name
        for key in sorted(stats_dict.keys()):
            row_to_write.append(stats_dict[key])
        statswriter.writerow(row_to_write)
[ "Writes", "data", "from", "get_stats", "to", "csv", "file" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L85-L130
[ "def", "write_stats_as_csv", "(", "gtfs", ",", "path_to_csv", ",", "re_write", "=", "False", ")", ":", "stats_dict", "=", "get_stats", "(", "gtfs", ")", "# check if file exist", "if", "re_write", ":", "os", ".", "remove", "(", "path_to_csv", ")", "#if not os.path.isfile(path_to_csv):", "# is_new = True", "#else:", "# is_new = False", "is_new", "=", "True", "mode", "=", "'r'", "if", "os", ".", "path", ".", "exists", "(", "path_to_csv", ")", "else", "'w+'", "with", "open", "(", "path_to_csv", ",", "mode", ")", "as", "csvfile", ":", "for", "line", "in", "csvfile", ":", "if", "line", ":", "is_new", "=", "False", "else", ":", "is_new", "=", "True", "with", "open", "(", "path_to_csv", ",", "'a'", ")", "as", "csvfile", ":", "if", "(", "sys", ".", "version_info", ">", "(", "3", ",", "0", ")", ")", ":", "delimiter", "=", "u\",\"", "else", ":", "delimiter", "=", "b\",\"", "statswriter", "=", "csv", ".", "writer", "(", "csvfile", ",", "delimiter", "=", "delimiter", ")", "# write column names if", "if", "is_new", ":", "statswriter", ".", "writerow", "(", "[", "key", "for", "key", "in", "sorted", "(", "stats_dict", ".", "keys", "(", ")", ")", "]", ")", "row_to_write", "=", "[", "]", "# write stats row sorted by column name", "for", "key", "in", "sorted", "(", "stats_dict", ".", "keys", "(", ")", ")", ":", "row_to_write", ".", "append", "(", "stats_dict", "[", "key", "]", ")", "statswriter", ".", "writerow", "(", "row_to_write", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
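A hedged usage sketch for write_stats_as_csv; the database and csv paths are hypothetical. Note that, as written above, re_write=True calls os.remove before checking whether the file exists, so it raises FileNotFoundError on a first run; treat the sketch as illustrating intent rather than guaranteed behaviour.

from gtfspy.gtfs import GTFS
from gtfspy.stats import write_stats_as_csv

g = GTFS("city.sqlite")                        # hypothetical database
write_stats_as_csv(g, "stats.csv")             # appends one row, writing the header only if the file is empty
write_stats_as_csv(g, "stats.csv", re_write=True)  # removes the old file first, then writes header + row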
valid
get_stats
Get basic statistics of the GTFS data. Parameters ---------- gtfs: GTFS Returns ------- stats: dict A dictionary of various statistics. Keys should be strings, values should be inputtable to a database (int, date, str, ...) (but not a list)
gtfspy/stats.py
def get_stats(gtfs): """ Get basic statistics of the GTFS data. Parameters ---------- gtfs: GTFS Returns ------- stats: dict A dictionary of various statistics. Keys should be strings, values should be inputtable to a database (int, date, str, ...) (but not a list) """ stats = {} # Basic table counts for table in ['agencies', 'routes', 'stops', 'stop_times', 'trips', 'calendar', 'shapes', 'calendar_dates', 'days', 'stop_distances', 'frequencies', 'feed_info', 'transfers']: stats["n_" + table] = gtfs.get_row_count(table) # Agency names agencies = gtfs.get_table("agencies") stats["agencies"] = "_".join(agencies['name'].values) # Stop lat/lon range stops = gtfs.get_table("stops") lats = stops['lat'].values lons = stops['lon'].values percentiles = [0, 10, 50, 90, 100] try: lat_percentiles = numpy.percentile(lats, percentiles) except IndexError: lat_percentiles = [None] * 5 lat_min, lat_10, lat_median, lat_90, lat_max = lat_percentiles stats["lat_min"] = lat_min stats["lat_10"] = lat_10 stats["lat_median"] = lat_median stats["lat_90"] = lat_90 stats["lat_max"] = lat_max try: lon_percentiles = numpy.percentile(lons, percentiles) except IndexError: lon_percentiles = [None] * 5 lon_min, lon_10, lon_median, lon_90, lon_max = lon_percentiles stats["lon_min"] = lon_min stats["lon_10"] = lon_10 stats["lon_median"] = lon_median stats["lon_90"] = lon_90 stats["lon_max"] = lon_max if len(lats) > 0: stats["height_km"] = wgs84_distance(lat_min, lon_median, lat_max, lon_median) / 1000. stats["width_km"] = wgs84_distance(lon_min, lat_median, lon_max, lat_median) / 1000. else: stats["height_km"] = None stats["width_km"] = None first_day_start_ut, last_day_start_ut = gtfs.get_day_start_ut_span() stats["start_time_ut"] = first_day_start_ut if last_day_start_ut is None: stats["end_time_ut"] = None else: # 28 (instead of 24) comes from the GTFS stANDard stats["end_time_ut"] = last_day_start_ut + 28 * 3600 stats["start_date"] = gtfs.get_min_date() stats["end_date"] = gtfs.get_max_date() # Maximum activity day max_activity_date = gtfs.execute_custom_query( 'SELECT count(*), date ' 'FROM days ' 'GROUP BY date ' 'ORDER BY count(*) DESC, date ' 'LIMIT 1;').fetchone() if max_activity_date: stats["max_activity_date"] = max_activity_date[1] max_activity_hour = gtfs.get_cursor().execute( 'SELECT count(*), arr_time_hour FROM day_stop_times ' 'WHERE date=? GROUP BY arr_time_hour ' 'ORDER BY count(*) DESC;', (stats["max_activity_date"],)).fetchone() if max_activity_hour: stats["max_activity_hour"] = max_activity_hour[1] else: stats["max_activity_hour"] = None # Fleet size estimate: considering each line separately if max_activity_date and max_activity_hour: fleet_size_estimates = _fleet_size_estimate(gtfs, stats['max_activity_hour'], stats['max_activity_date']) stats.update(fleet_size_estimates) # Compute simple distributions of various columns that have a finite range of values. # Commented lines refer to values that are not imported yet, ? 
stats['routes__type__dist'] = _distribution(gtfs, 'routes', 'type') # stats['stop_times__pickup_type__dist'] = _distribution(gtfs, 'stop_times', 'pickup_type') # stats['stop_times__drop_off_type__dist'] = _distribution(gtfs, 'stop_times', 'drop_off_type') # stats['stop_times__timepoint__dist'] = _distribution(gtfs, 'stop_times', 'timepoint') stats['calendar_dates__exception_type__dist'] = _distribution(gtfs, 'calendar_dates', 'exception_type') stats['frequencies__exact_times__dist'] = _distribution(gtfs, 'frequencies', 'exact_times') stats['transfers__transfer_type__dist'] = _distribution(gtfs, 'transfers', 'transfer_type') stats['agencies__lang__dist'] = _distribution(gtfs, 'agencies', 'lang') stats['stops__location_type__dist'] = _distribution(gtfs, 'stops', 'location_type') # stats['stops__wheelchair_boarding__dist'] = _distribution(gtfs, 'stops', 'wheelchair_boarding') # stats['trips__wheelchair_accessible__dist'] = _distribution(gtfs, 'trips', 'wheelchair_accessible') # stats['trips__bikes_allowed__dist'] = _distribution(gtfs, 'trips', 'bikes_allowed') # stats[''] = _distribution(gtfs, '', '') stats = _feed_calendar_span(gtfs, stats) return stats
def get_stats(gtfs): """ Get basic statistics of the GTFS data. Parameters ---------- gtfs: GTFS Returns ------- stats: dict A dictionary of various statistics. Keys should be strings, values should be inputtable to a database (int, date, str, ...) (but not a list) """ stats = {} # Basic table counts for table in ['agencies', 'routes', 'stops', 'stop_times', 'trips', 'calendar', 'shapes', 'calendar_dates', 'days', 'stop_distances', 'frequencies', 'feed_info', 'transfers']: stats["n_" + table] = gtfs.get_row_count(table) # Agency names agencies = gtfs.get_table("agencies") stats["agencies"] = "_".join(agencies['name'].values) # Stop lat/lon range stops = gtfs.get_table("stops") lats = stops['lat'].values lons = stops['lon'].values percentiles = [0, 10, 50, 90, 100] try: lat_percentiles = numpy.percentile(lats, percentiles) except IndexError: lat_percentiles = [None] * 5 lat_min, lat_10, lat_median, lat_90, lat_max = lat_percentiles stats["lat_min"] = lat_min stats["lat_10"] = lat_10 stats["lat_median"] = lat_median stats["lat_90"] = lat_90 stats["lat_max"] = lat_max try: lon_percentiles = numpy.percentile(lons, percentiles) except IndexError: lon_percentiles = [None] * 5 lon_min, lon_10, lon_median, lon_90, lon_max = lon_percentiles stats["lon_min"] = lon_min stats["lon_10"] = lon_10 stats["lon_median"] = lon_median stats["lon_90"] = lon_90 stats["lon_max"] = lon_max if len(lats) > 0: stats["height_km"] = wgs84_distance(lat_min, lon_median, lat_max, lon_median) / 1000. stats["width_km"] = wgs84_distance(lon_min, lat_median, lon_max, lat_median) / 1000. else: stats["height_km"] = None stats["width_km"] = None first_day_start_ut, last_day_start_ut = gtfs.get_day_start_ut_span() stats["start_time_ut"] = first_day_start_ut if last_day_start_ut is None: stats["end_time_ut"] = None else: # 28 (instead of 24) comes from the GTFS stANDard stats["end_time_ut"] = last_day_start_ut + 28 * 3600 stats["start_date"] = gtfs.get_min_date() stats["end_date"] = gtfs.get_max_date() # Maximum activity day max_activity_date = gtfs.execute_custom_query( 'SELECT count(*), date ' 'FROM days ' 'GROUP BY date ' 'ORDER BY count(*) DESC, date ' 'LIMIT 1;').fetchone() if max_activity_date: stats["max_activity_date"] = max_activity_date[1] max_activity_hour = gtfs.get_cursor().execute( 'SELECT count(*), arr_time_hour FROM day_stop_times ' 'WHERE date=? GROUP BY arr_time_hour ' 'ORDER BY count(*) DESC;', (stats["max_activity_date"],)).fetchone() if max_activity_hour: stats["max_activity_hour"] = max_activity_hour[1] else: stats["max_activity_hour"] = None # Fleet size estimate: considering each line separately if max_activity_date and max_activity_hour: fleet_size_estimates = _fleet_size_estimate(gtfs, stats['max_activity_hour'], stats['max_activity_date']) stats.update(fleet_size_estimates) # Compute simple distributions of various columns that have a finite range of values. # Commented lines refer to values that are not imported yet, ? 
stats['routes__type__dist'] = _distribution(gtfs, 'routes', 'type') # stats['stop_times__pickup_type__dist'] = _distribution(gtfs, 'stop_times', 'pickup_type') # stats['stop_times__drop_off_type__dist'] = _distribution(gtfs, 'stop_times', 'drop_off_type') # stats['stop_times__timepoint__dist'] = _distribution(gtfs, 'stop_times', 'timepoint') stats['calendar_dates__exception_type__dist'] = _distribution(gtfs, 'calendar_dates', 'exception_type') stats['frequencies__exact_times__dist'] = _distribution(gtfs, 'frequencies', 'exact_times') stats['transfers__transfer_type__dist'] = _distribution(gtfs, 'transfers', 'transfer_type') stats['agencies__lang__dist'] = _distribution(gtfs, 'agencies', 'lang') stats['stops__location_type__dist'] = _distribution(gtfs, 'stops', 'location_type') # stats['stops__wheelchair_boarding__dist'] = _distribution(gtfs, 'stops', 'wheelchair_boarding') # stats['trips__wheelchair_accessible__dist'] = _distribution(gtfs, 'trips', 'wheelchair_accessible') # stats['trips__bikes_allowed__dist'] = _distribution(gtfs, 'trips', 'bikes_allowed') # stats[''] = _distribution(gtfs, '', '') stats = _feed_calendar_span(gtfs, stats) return stats
[ "Get", "basic", "statistics", "of", "the", "GTFS", "data", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L133-L245
[ "def", "get_stats", "(", "gtfs", ")", ":", "stats", "=", "{", "}", "# Basic table counts", "for", "table", "in", "[", "'agencies'", ",", "'routes'", ",", "'stops'", ",", "'stop_times'", ",", "'trips'", ",", "'calendar'", ",", "'shapes'", ",", "'calendar_dates'", ",", "'days'", ",", "'stop_distances'", ",", "'frequencies'", ",", "'feed_info'", ",", "'transfers'", "]", ":", "stats", "[", "\"n_\"", "+", "table", "]", "=", "gtfs", ".", "get_row_count", "(", "table", ")", "# Agency names", "agencies", "=", "gtfs", ".", "get_table", "(", "\"agencies\"", ")", "stats", "[", "\"agencies\"", "]", "=", "\"_\"", ".", "join", "(", "agencies", "[", "'name'", "]", ".", "values", ")", "# Stop lat/lon range", "stops", "=", "gtfs", ".", "get_table", "(", "\"stops\"", ")", "lats", "=", "stops", "[", "'lat'", "]", ".", "values", "lons", "=", "stops", "[", "'lon'", "]", ".", "values", "percentiles", "=", "[", "0", ",", "10", ",", "50", ",", "90", ",", "100", "]", "try", ":", "lat_percentiles", "=", "numpy", ".", "percentile", "(", "lats", ",", "percentiles", ")", "except", "IndexError", ":", "lat_percentiles", "=", "[", "None", "]", "*", "5", "lat_min", ",", "lat_10", ",", "lat_median", ",", "lat_90", ",", "lat_max", "=", "lat_percentiles", "stats", "[", "\"lat_min\"", "]", "=", "lat_min", "stats", "[", "\"lat_10\"", "]", "=", "lat_10", "stats", "[", "\"lat_median\"", "]", "=", "lat_median", "stats", "[", "\"lat_90\"", "]", "=", "lat_90", "stats", "[", "\"lat_max\"", "]", "=", "lat_max", "try", ":", "lon_percentiles", "=", "numpy", ".", "percentile", "(", "lons", ",", "percentiles", ")", "except", "IndexError", ":", "lon_percentiles", "=", "[", "None", "]", "*", "5", "lon_min", ",", "lon_10", ",", "lon_median", ",", "lon_90", ",", "lon_max", "=", "lon_percentiles", "stats", "[", "\"lon_min\"", "]", "=", "lon_min", "stats", "[", "\"lon_10\"", "]", "=", "lon_10", "stats", "[", "\"lon_median\"", "]", "=", "lon_median", "stats", "[", "\"lon_90\"", "]", "=", "lon_90", "stats", "[", "\"lon_max\"", "]", "=", "lon_max", "if", "len", "(", "lats", ")", ">", "0", ":", "stats", "[", "\"height_km\"", "]", "=", "wgs84_distance", "(", "lat_min", ",", "lon_median", ",", "lat_max", ",", "lon_median", ")", "/", "1000.", "stats", "[", "\"width_km\"", "]", "=", "wgs84_distance", "(", "lon_min", ",", "lat_median", ",", "lon_max", ",", "lat_median", ")", "/", "1000.", "else", ":", "stats", "[", "\"height_km\"", "]", "=", "None", "stats", "[", "\"width_km\"", "]", "=", "None", "first_day_start_ut", ",", "last_day_start_ut", "=", "gtfs", ".", "get_day_start_ut_span", "(", ")", "stats", "[", "\"start_time_ut\"", "]", "=", "first_day_start_ut", "if", "last_day_start_ut", "is", "None", ":", "stats", "[", "\"end_time_ut\"", "]", "=", "None", "else", ":", "# 28 (instead of 24) comes from the GTFS stANDard", "stats", "[", "\"end_time_ut\"", "]", "=", "last_day_start_ut", "+", "28", "*", "3600", "stats", "[", "\"start_date\"", "]", "=", "gtfs", ".", "get_min_date", "(", ")", "stats", "[", "\"end_date\"", "]", "=", "gtfs", ".", "get_max_date", "(", ")", "# Maximum activity day", "max_activity_date", "=", "gtfs", ".", "execute_custom_query", "(", "'SELECT count(*), date '", "'FROM days '", "'GROUP BY date '", "'ORDER BY count(*) DESC, date '", "'LIMIT 1;'", ")", ".", "fetchone", "(", ")", "if", "max_activity_date", ":", "stats", "[", "\"max_activity_date\"", "]", "=", "max_activity_date", "[", "1", "]", "max_activity_hour", "=", "gtfs", ".", "get_cursor", "(", ")", ".", "execute", "(", "'SELECT count(*), arr_time_hour FROM day_stop_times 
'", "'WHERE date=? GROUP BY arr_time_hour '", "'ORDER BY count(*) DESC;'", ",", "(", "stats", "[", "\"max_activity_date\"", "]", ",", ")", ")", ".", "fetchone", "(", ")", "if", "max_activity_hour", ":", "stats", "[", "\"max_activity_hour\"", "]", "=", "max_activity_hour", "[", "1", "]", "else", ":", "stats", "[", "\"max_activity_hour\"", "]", "=", "None", "# Fleet size estimate: considering each line separately", "if", "max_activity_date", "and", "max_activity_hour", ":", "fleet_size_estimates", "=", "_fleet_size_estimate", "(", "gtfs", ",", "stats", "[", "'max_activity_hour'", "]", ",", "stats", "[", "'max_activity_date'", "]", ")", "stats", ".", "update", "(", "fleet_size_estimates", ")", "# Compute simple distributions of various columns that have a finite range of values.", "# Commented lines refer to values that are not imported yet, ?", "stats", "[", "'routes__type__dist'", "]", "=", "_distribution", "(", "gtfs", ",", "'routes'", ",", "'type'", ")", "# stats['stop_times__pickup_type__dist'] = _distribution(gtfs, 'stop_times', 'pickup_type')", "# stats['stop_times__drop_off_type__dist'] = _distribution(gtfs, 'stop_times', 'drop_off_type')", "# stats['stop_times__timepoint__dist'] = _distribution(gtfs, 'stop_times', 'timepoint')", "stats", "[", "'calendar_dates__exception_type__dist'", "]", "=", "_distribution", "(", "gtfs", ",", "'calendar_dates'", ",", "'exception_type'", ")", "stats", "[", "'frequencies__exact_times__dist'", "]", "=", "_distribution", "(", "gtfs", ",", "'frequencies'", ",", "'exact_times'", ")", "stats", "[", "'transfers__transfer_type__dist'", "]", "=", "_distribution", "(", "gtfs", ",", "'transfers'", ",", "'transfer_type'", ")", "stats", "[", "'agencies__lang__dist'", "]", "=", "_distribution", "(", "gtfs", ",", "'agencies'", ",", "'lang'", ")", "stats", "[", "'stops__location_type__dist'", "]", "=", "_distribution", "(", "gtfs", ",", "'stops'", ",", "'location_type'", ")", "# stats['stops__wheelchair_boarding__dist'] = _distribution(gtfs, 'stops', 'wheelchair_boarding')", "# stats['trips__wheelchair_accessible__dist'] = _distribution(gtfs, 'trips', 'wheelchair_accessible')", "# stats['trips__bikes_allowed__dist'] = _distribution(gtfs, 'trips', 'bikes_allowed')", "# stats[''] = _distribution(gtfs, '', '')", "stats", "=", "_feed_calendar_span", "(", "gtfs", ",", "stats", ")", "return", "stats" ]
bddba4b74faae6c1b91202f19184811e326547e5
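An illustrative call for get_stats; the database path and the printed values are assumptions, but the keys are the ones assigned in the code above:

from gtfspy.gtfs import GTFS
from gtfspy.stats import get_stats

g = GTFS("city.sqlite")                        # hypothetical database
stats = get_stats(g)
print(stats["n_stops"], stats["n_trips"])      # plain table counts
print(stats["start_date"], stats["end_date"])  # feed date span
print(stats["routes__type__dist"])             # e.g. "0:12 1:2 3:540", a route-type histogram string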
valid
_distribution
Count occurrences of values AND return it as a string. Example return value: '1:5 2:15'
gtfspy/stats.py
def _distribution(gtfs, table, column): """Count occurrences of values AND return it as a string. Example return value: '1:5 2:15'""" cur = gtfs.conn.cursor() cur.execute('SELECT {column}, count(*) ' 'FROM {table} GROUP BY {column} ' 'ORDER BY {column}'.format(column=column, table=table)) return ' '.join('%s:%s' % (t, c) for t, c in cur)
def _distribution(gtfs, table, column): """Count occurrences of values AND return it as a string. Example return value: '1:5 2:15'""" cur = gtfs.conn.cursor() cur.execute('SELECT {column}, count(*) ' 'FROM {table} GROUP BY {column} ' 'ORDER BY {column}'.format(column=column, table=table)) return ' '.join('%s:%s' % (t, c) for t, c in cur)
[ "Count", "occurrences", "of", "values", "AND", "return", "it", "as", "a", "string", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L248-L256
[ "def", "_distribution", "(", "gtfs", ",", "table", ",", "column", ")", ":", "cur", "=", "gtfs", ".", "conn", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "'SELECT {column}, count(*) '", "'FROM {table} GROUP BY {column} '", "'ORDER BY {column}'", ".", "format", "(", "column", "=", "column", ",", "table", "=", "table", ")", ")", "return", "' '", ".", "join", "(", "'%s:%s'", "%", "(", "t", ",", "c", ")", "for", "t", ",", "c", "in", "cur", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
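To make the '1:5 2:15' output format concrete, here is an equivalent pure-Python computation over a list of values; it is only a sketch of the format, not the SQL the helper actually runs:

from collections import Counter

values = [1, 1, 2, 1, 2, 2, 2]                 # stand-in for a column such as routes.type
counts = Counter(values)
dist = " ".join("%s:%s" % (v, c) for v, c in sorted(counts.items()))
print(dist)                                     # -> "1:3 2:4"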
valid
_fleet_size_estimate
Calculates fleet size estimates by two separate formulas: 1. Considering all routes separately with no interlining and doing a deficit calculation at every terminal 2. By looking at the maximum number of vehicles in simultaneous movement Parameters ---------- gtfs: GTFS hour: int date: ? Returns ------- results: dict a dict with keys: fleet_size_route_based fleet_size_max_movement
gtfspy/stats.py
def _fleet_size_estimate(gtfs, hour, date): """ Calculates fleet size estimates by two separate formula: 1. Considering all routes separately with no interlining and doing a deficit calculation at every terminal 2. By looking at the maximum number of vehicles in simultaneous movement Parameters ---------- gtfs: GTFS hour: int date: ? Returns ------- results: dict a dict with keys: fleet_size_route_based fleet_size_max_movement """ results = {} fleet_size_list = [] cur = gtfs.conn.cursor() rows = cur.execute( 'SELECT type, max(vehicles) ' 'FROM (' 'SELECT type, direction_id, sum(vehicles) as vehicles ' 'FROM ' '(' 'SELECT trips.route_I, trips.direction_id, routes.route_id, name, type, count(*) as vehicles, cycle_time_min ' 'FROM trips, routes, days, ' '(' 'SELECT first_trip.route_I, first_trip.direction_id, first_trip_start_time, first_trip_end_time, ' 'MIN(start_time_ds) as return_trip_start_time, end_time_ds as return_trip_end_time, ' '(end_time_ds - first_trip_start_time)/60 as cycle_time_min ' 'FROM ' 'trips, ' '(SELECT route_I, direction_id, MIN(start_time_ds) as first_trip_start_time, ' 'end_time_ds as first_trip_end_time ' 'FROM trips, days ' 'WHERE trips.trip_I=days.trip_I AND start_time_ds >= ? * 3600 ' 'AND start_time_ds <= (? + 1) * 3600 AND date = ? ' 'GROUP BY route_I, direction_id) first_trip ' 'WHERE first_trip.route_I = trips.route_I ' 'AND first_trip.direction_id != trips.direction_id ' 'AND start_time_ds >= first_trip_end_time ' 'GROUP BY trips.route_I, trips.direction_id' ') return_trip ' 'WHERE trips.trip_I=days.trip_I AND trips.route_I= routes.route_I ' 'AND date = ? AND trips.route_I = return_trip.route_I ' 'AND trips.direction_id = return_trip.direction_id ' 'AND start_time_ds >= first_trip_start_time ' 'AND start_time_ds < return_trip_end_time ' 'GROUP BY trips.route_I, trips.direction_id ' 'ORDER BY type, name, vehicles desc' ') cycle_times ' 'GROUP BY direction_id, type' ') vehicles_type ' 'GROUP BY type;', (hour, hour, date, date)) for row in rows: fleet_size_list.append(str(row[0]) + ':' + str(row[1])) results['fleet_size_route_based'] = " ".join(fleet_size_list) # Fleet size estimate: maximum number of vehicles in movement fleet_size_list = [] fleet_size_dict = {} if hour: for minute in range(hour * 3600, (hour + 1) * 3600, 60): rows = gtfs.conn.cursor().execute( 'SELECT type, count(*) ' 'FROM trips, routes, days ' 'WHERE trips.route_I = routes.route_I ' 'AND trips.trip_I=days.trip_I ' 'AND start_time_ds <= ? ' 'AND end_time_ds > ? + 60 ' 'AND date = ? ' 'GROUP BY type;', (minute, minute, date)) for row in rows: if fleet_size_dict.get(row[0], 0) < row[1]: fleet_size_dict[row[0]] = row[1] for key in fleet_size_dict.keys(): fleet_size_list.append(str(key) + ':' + str(fleet_size_dict[key])) results["fleet_size_max_movement"] = ' '.join(fleet_size_list) return results
def _fleet_size_estimate(gtfs, hour, date): """ Calculates fleet size estimates by two separate formula: 1. Considering all routes separately with no interlining and doing a deficit calculation at every terminal 2. By looking at the maximum number of vehicles in simultaneous movement Parameters ---------- gtfs: GTFS hour: int date: ? Returns ------- results: dict a dict with keys: fleet_size_route_based fleet_size_max_movement """ results = {} fleet_size_list = [] cur = gtfs.conn.cursor() rows = cur.execute( 'SELECT type, max(vehicles) ' 'FROM (' 'SELECT type, direction_id, sum(vehicles) as vehicles ' 'FROM ' '(' 'SELECT trips.route_I, trips.direction_id, routes.route_id, name, type, count(*) as vehicles, cycle_time_min ' 'FROM trips, routes, days, ' '(' 'SELECT first_trip.route_I, first_trip.direction_id, first_trip_start_time, first_trip_end_time, ' 'MIN(start_time_ds) as return_trip_start_time, end_time_ds as return_trip_end_time, ' '(end_time_ds - first_trip_start_time)/60 as cycle_time_min ' 'FROM ' 'trips, ' '(SELECT route_I, direction_id, MIN(start_time_ds) as first_trip_start_time, ' 'end_time_ds as first_trip_end_time ' 'FROM trips, days ' 'WHERE trips.trip_I=days.trip_I AND start_time_ds >= ? * 3600 ' 'AND start_time_ds <= (? + 1) * 3600 AND date = ? ' 'GROUP BY route_I, direction_id) first_trip ' 'WHERE first_trip.route_I = trips.route_I ' 'AND first_trip.direction_id != trips.direction_id ' 'AND start_time_ds >= first_trip_end_time ' 'GROUP BY trips.route_I, trips.direction_id' ') return_trip ' 'WHERE trips.trip_I=days.trip_I AND trips.route_I= routes.route_I ' 'AND date = ? AND trips.route_I = return_trip.route_I ' 'AND trips.direction_id = return_trip.direction_id ' 'AND start_time_ds >= first_trip_start_time ' 'AND start_time_ds < return_trip_end_time ' 'GROUP BY trips.route_I, trips.direction_id ' 'ORDER BY type, name, vehicles desc' ') cycle_times ' 'GROUP BY direction_id, type' ') vehicles_type ' 'GROUP BY type;', (hour, hour, date, date)) for row in rows: fleet_size_list.append(str(row[0]) + ':' + str(row[1])) results['fleet_size_route_based'] = " ".join(fleet_size_list) # Fleet size estimate: maximum number of vehicles in movement fleet_size_list = [] fleet_size_dict = {} if hour: for minute in range(hour * 3600, (hour + 1) * 3600, 60): rows = gtfs.conn.cursor().execute( 'SELECT type, count(*) ' 'FROM trips, routes, days ' 'WHERE trips.route_I = routes.route_I ' 'AND trips.trip_I=days.trip_I ' 'AND start_time_ds <= ? ' 'AND end_time_ds > ? + 60 ' 'AND date = ? ' 'GROUP BY type;', (minute, minute, date)) for row in rows: if fleet_size_dict.get(row[0], 0) < row[1]: fleet_size_dict[row[0]] = row[1] for key in fleet_size_dict.keys(): fleet_size_list.append(str(key) + ':' + str(fleet_size_dict[key])) results["fleet_size_max_movement"] = ' '.join(fleet_size_list) return results
[ "Calculates", "fleet", "size", "estimates", "by", "two", "separate", "formula", ":", "1", ".", "Considering", "all", "routes", "separately", "with", "no", "interlining", "and", "doing", "a", "deficit", "calculation", "at", "every", "terminal", "2", ".", "By", "looking", "at", "the", "maximum", "number", "of", "vehicles", "in", "simultaneous", "movement" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L259-L346
[ "def", "_fleet_size_estimate", "(", "gtfs", ",", "hour", ",", "date", ")", ":", "results", "=", "{", "}", "fleet_size_list", "=", "[", "]", "cur", "=", "gtfs", ".", "conn", ".", "cursor", "(", ")", "rows", "=", "cur", ".", "execute", "(", "'SELECT type, max(vehicles) '", "'FROM ('", "'SELECT type, direction_id, sum(vehicles) as vehicles '", "'FROM '", "'('", "'SELECT trips.route_I, trips.direction_id, routes.route_id, name, type, count(*) as vehicles, cycle_time_min '", "'FROM trips, routes, days, '", "'('", "'SELECT first_trip.route_I, first_trip.direction_id, first_trip_start_time, first_trip_end_time, '", "'MIN(start_time_ds) as return_trip_start_time, end_time_ds as return_trip_end_time, '", "'(end_time_ds - first_trip_start_time)/60 as cycle_time_min '", "'FROM '", "'trips, '", "'(SELECT route_I, direction_id, MIN(start_time_ds) as first_trip_start_time, '", "'end_time_ds as first_trip_end_time '", "'FROM trips, days '", "'WHERE trips.trip_I=days.trip_I AND start_time_ds >= ? * 3600 '", "'AND start_time_ds <= (? + 1) * 3600 AND date = ? '", "'GROUP BY route_I, direction_id) first_trip '", "'WHERE first_trip.route_I = trips.route_I '", "'AND first_trip.direction_id != trips.direction_id '", "'AND start_time_ds >= first_trip_end_time '", "'GROUP BY trips.route_I, trips.direction_id'", "') return_trip '", "'WHERE trips.trip_I=days.trip_I AND trips.route_I= routes.route_I '", "'AND date = ? AND trips.route_I = return_trip.route_I '", "'AND trips.direction_id = return_trip.direction_id '", "'AND start_time_ds >= first_trip_start_time '", "'AND start_time_ds < return_trip_end_time '", "'GROUP BY trips.route_I, trips.direction_id '", "'ORDER BY type, name, vehicles desc'", "') cycle_times '", "'GROUP BY direction_id, type'", "') vehicles_type '", "'GROUP BY type;'", ",", "(", "hour", ",", "hour", ",", "date", ",", "date", ")", ")", "for", "row", "in", "rows", ":", "fleet_size_list", ".", "append", "(", "str", "(", "row", "[", "0", "]", ")", "+", "':'", "+", "str", "(", "row", "[", "1", "]", ")", ")", "results", "[", "'fleet_size_route_based'", "]", "=", "\" \"", ".", "join", "(", "fleet_size_list", ")", "# Fleet size estimate: maximum number of vehicles in movement", "fleet_size_list", "=", "[", "]", "fleet_size_dict", "=", "{", "}", "if", "hour", ":", "for", "minute", "in", "range", "(", "hour", "*", "3600", ",", "(", "hour", "+", "1", ")", "*", "3600", ",", "60", ")", ":", "rows", "=", "gtfs", ".", "conn", ".", "cursor", "(", ")", ".", "execute", "(", "'SELECT type, count(*) '", "'FROM trips, routes, days '", "'WHERE trips.route_I = routes.route_I '", "'AND trips.trip_I=days.trip_I '", "'AND start_time_ds <= ? '", "'AND end_time_ds > ? + 60 '", "'AND date = ? '", "'GROUP BY type;'", ",", "(", "minute", ",", "minute", ",", "date", ")", ")", "for", "row", "in", "rows", ":", "if", "fleet_size_dict", ".", "get", "(", "row", "[", "0", "]", ",", "0", ")", "<", "row", "[", "1", "]", ":", "fleet_size_dict", "[", "row", "[", "0", "]", "]", "=", "row", "[", "1", "]", "for", "key", "in", "fleet_size_dict", ".", "keys", "(", ")", ":", "fleet_size_list", ".", "append", "(", "str", "(", "key", ")", "+", "':'", "+", "str", "(", "fleet_size_dict", "[", "key", "]", ")", ")", "results", "[", "\"fleet_size_max_movement\"", "]", "=", "' '", ".", "join", "(", "fleet_size_list", ")", "return", "results" ]
bddba4b74faae6c1b91202f19184811e326547e5
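Both estimates are returned as space-separated 'route_type:count' strings; a small parsing sketch (the sample string is invented):

def parse_fleet_size(estimate):
    # "3:113 1:30" -> {3: 113, 1: 30}
    if not estimate:
        return {}
    return {int(t): int(c) for t, c in (pair.split(":") for pair in estimate.split())}

print(parse_fleet_size("3:113 1:30"))           # {3: 113, 1: 30}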
valid
_feed_calendar_span
Computes the temporal coverage of each source feed Parameters ---------- gtfs: gtfspy.GTFS object stats: dict where to append the stats Returns ------- stats: dict
gtfspy/stats.py
def _feed_calendar_span(gtfs, stats): """ Computes the temporal coverage of each source feed Parameters ---------- gtfs: gtfspy.GTFS object stats: dict where to append the stats Returns ------- stats: dict """ n_feeds = _n_gtfs_sources(gtfs)[0] max_start = None min_end = None if n_feeds > 1: for i in range(n_feeds): feed_key = "feed_" + str(i) + "_" start_key = feed_key + "calendar_start" end_key = feed_key + "calendar_end" calendar_span = gtfs.conn.cursor().execute( 'SELECT min(date), max(date) FROM trips, days ' 'WHERE trips.trip_I = days.trip_I AND trip_id LIKE ?;', (feed_key + '%',)).fetchone() stats[start_key] = calendar_span[0] stats[end_key] = calendar_span[1] if calendar_span[0] is not None and calendar_span[1] is not None: if not max_start and not min_end: max_start = calendar_span[0] min_end = calendar_span[1] else: if gtfs.get_day_start_ut(calendar_span[0]) > gtfs.get_day_start_ut(max_start): max_start = calendar_span[0] if gtfs.get_day_start_ut(calendar_span[1]) < gtfs.get_day_start_ut(min_end): min_end = calendar_span[1] stats["latest_feed_start_date"] = max_start stats["earliest_feed_end_date"] = min_end else: stats["latest_feed_start_date"] = stats["start_date"] stats["earliest_feed_end_date"] = stats["end_date"] return stats
def _feed_calendar_span(gtfs, stats): """ Computes the temporal coverage of each source feed Parameters ---------- gtfs: gtfspy.GTFS object stats: dict where to append the stats Returns ------- stats: dict """ n_feeds = _n_gtfs_sources(gtfs)[0] max_start = None min_end = None if n_feeds > 1: for i in range(n_feeds): feed_key = "feed_" + str(i) + "_" start_key = feed_key + "calendar_start" end_key = feed_key + "calendar_end" calendar_span = gtfs.conn.cursor().execute( 'SELECT min(date), max(date) FROM trips, days ' 'WHERE trips.trip_I = days.trip_I AND trip_id LIKE ?;', (feed_key + '%',)).fetchone() stats[start_key] = calendar_span[0] stats[end_key] = calendar_span[1] if calendar_span[0] is not None and calendar_span[1] is not None: if not max_start and not min_end: max_start = calendar_span[0] min_end = calendar_span[1] else: if gtfs.get_day_start_ut(calendar_span[0]) > gtfs.get_day_start_ut(max_start): max_start = calendar_span[0] if gtfs.get_day_start_ut(calendar_span[1]) < gtfs.get_day_start_ut(min_end): min_end = calendar_span[1] stats["latest_feed_start_date"] = max_start stats["earliest_feed_end_date"] = min_end else: stats["latest_feed_start_date"] = stats["start_date"] stats["earliest_feed_end_date"] = stats["end_date"] return stats
[ "Computes", "the", "temporal", "coverage", "of", "each", "source", "feed" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L357-L399
[ "def", "_feed_calendar_span", "(", "gtfs", ",", "stats", ")", ":", "n_feeds", "=", "_n_gtfs_sources", "(", "gtfs", ")", "[", "0", "]", "max_start", "=", "None", "min_end", "=", "None", "if", "n_feeds", ">", "1", ":", "for", "i", "in", "range", "(", "n_feeds", ")", ":", "feed_key", "=", "\"feed_\"", "+", "str", "(", "i", ")", "+", "\"_\"", "start_key", "=", "feed_key", "+", "\"calendar_start\"", "end_key", "=", "feed_key", "+", "\"calendar_end\"", "calendar_span", "=", "gtfs", ".", "conn", ".", "cursor", "(", ")", ".", "execute", "(", "'SELECT min(date), max(date) FROM trips, days '", "'WHERE trips.trip_I = days.trip_I AND trip_id LIKE ?;'", ",", "(", "feed_key", "+", "'%'", ",", ")", ")", ".", "fetchone", "(", ")", "stats", "[", "start_key", "]", "=", "calendar_span", "[", "0", "]", "stats", "[", "end_key", "]", "=", "calendar_span", "[", "1", "]", "if", "calendar_span", "[", "0", "]", "is", "not", "None", "and", "calendar_span", "[", "1", "]", "is", "not", "None", ":", "if", "not", "max_start", "and", "not", "min_end", ":", "max_start", "=", "calendar_span", "[", "0", "]", "min_end", "=", "calendar_span", "[", "1", "]", "else", ":", "if", "gtfs", ".", "get_day_start_ut", "(", "calendar_span", "[", "0", "]", ")", ">", "gtfs", ".", "get_day_start_ut", "(", "max_start", ")", ":", "max_start", "=", "calendar_span", "[", "0", "]", "if", "gtfs", ".", "get_day_start_ut", "(", "calendar_span", "[", "1", "]", ")", "<", "gtfs", ".", "get_day_start_ut", "(", "min_end", ")", ":", "min_end", "=", "calendar_span", "[", "1", "]", "stats", "[", "\"latest_feed_start_date\"", "]", "=", "max_start", "stats", "[", "\"earliest_feed_end_date\"", "]", "=", "min_end", "else", ":", "stats", "[", "\"latest_feed_start_date\"", "]", "=", "stats", "[", "\"start_date\"", "]", "stats", "[", "\"earliest_feed_end_date\"", "]", "=", "stats", "[", "\"end_date\"", "]", "return", "stats" ]
bddba4b74faae6c1b91202f19184811e326547e5
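A sketch of the keys this helper adds for a database merged from two source feeds; the dates are invented and only illustrate that the "latest start" is the maximum of the feed starts and the "earliest end" the minimum of the feed ends:

stats = {
    "feed_0_calendar_start": "2016-01-01", "feed_0_calendar_end": "2016-06-30",
    "feed_1_calendar_start": "2016-02-15", "feed_1_calendar_end": "2016-05-31",
    "latest_feed_start_date": "2016-02-15",     # max over per-feed start dates
    "earliest_feed_end_date": "2016-05-31",     # min over per-feed end dates
}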
valid
route_frequencies
Return the frequency of all types of routes per day. Parameters ----------- gtfs: GTFS Returns ------- pandas.DataFrame with columns route_I, type, frequency
gtfspy/stats.py
def route_frequencies(gtfs, results_by_mode=False): """ Return the frequency of all types of routes per day. Parameters ----------- gtfs: GTFS Returns ------- pandas.DataFrame with columns route_I, type, frequency """ day = gtfs.get_suitable_date_for_daily_extract() query = ( " SELECT f.route_I, type, frequency FROM routes as r" " JOIN" " (SELECT route_I, COUNT(route_I) as frequency" " FROM" " (SELECT date, route_I, trip_I" " FROM day_stop_times" " WHERE date = '{day}'" " GROUP by route_I, trip_I)" " GROUP BY route_I) as f" " ON f.route_I = r.route_I" " ORDER BY frequency DESC".format(day=day)) return pd.DataFrame(gtfs.execute_custom_query_pandas(query))
def route_frequencies(gtfs, results_by_mode=False): """ Return the frequency of all types of routes per day. Parameters ----------- gtfs: GTFS Returns ------- pandas.DataFrame with columns route_I, type, frequency """ day = gtfs.get_suitable_date_for_daily_extract() query = ( " SELECT f.route_I, type, frequency FROM routes as r" " JOIN" " (SELECT route_I, COUNT(route_I) as frequency" " FROM" " (SELECT date, route_I, trip_I" " FROM day_stop_times" " WHERE date = '{day}'" " GROUP by route_I, trip_I)" " GROUP BY route_I) as f" " ON f.route_I = r.route_I" " ORDER BY frequency DESC".format(day=day)) return pd.DataFrame(gtfs.execute_custom_query_pandas(query))
[ "Return", "the", "frequency", "of", "all", "types", "of", "routes", "per", "day", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L506-L533
[ "def", "route_frequencies", "(", "gtfs", ",", "results_by_mode", "=", "False", ")", ":", "day", "=", "gtfs", ".", "get_suitable_date_for_daily_extract", "(", ")", "query", "=", "(", "\" SELECT f.route_I, type, frequency FROM routes as r\"", "\" JOIN\"", "\" (SELECT route_I, COUNT(route_I) as frequency\"", "\" FROM\"", "\" (SELECT date, route_I, trip_I\"", "\" FROM day_stop_times\"", "\" WHERE date = '{day}'\"", "\" GROUP by route_I, trip_I)\"", "\" GROUP BY route_I) as f\"", "\" ON f.route_I = r.route_I\"", "\" ORDER BY frequency DESC\"", ".", "format", "(", "day", "=", "day", ")", ")", "return", "pd", ".", "DataFrame", "(", "gtfs", ".", "execute_custom_query_pandas", "(", "query", ")", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
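A hedged usage sketch; the database path is hypothetical. Note that the results_by_mode argument is accepted but unused in the body shown above.

from gtfspy.gtfs import GTFS
from gtfspy.stats import route_frequencies

g = GTFS("city.sqlite")                         # hypothetical database
df = route_frequencies(g)                       # columns: route_I, type, frequency
print(df.head())                                # routes with the most trips on the extract day come first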
valid
hourly_frequencies
Return the number of vehicles (e.g. buses, trams, etc.) that pass through a stop each hour within a time frame. Parameters ---------- gtfs: GTFS st : int start time of the time frame in unix time et : int end time of the time frame in unix time route_type: int Returns ------- numeric pandas.DataFrame with columns stop_I, lat, lon, frequency
gtfspy/stats.py
def hourly_frequencies(gtfs, st, et, route_type): """ Return all the number of vehicles (i.e. busses,trams,etc) that pass hourly through a stop in a time frame. Parameters ---------- gtfs: GTFS st : int start time of the time framein unix time et : int end time of the time frame in unix time route_type: int Returns ------- numeric pandas.DataFrame with columns stop_I, lat, lon, frequency """ timeframe = et-st hours = timeframe/ 3600 day = gtfs.get_suitable_date_for_daily_extract() stops = gtfs.get_stops_for_route_type(route_type).T.drop_duplicates().T query = ("SELECT * FROM stops as x" " JOIN" " (SELECT * , COUNT(*)/{h} as frequency" " FROM stop_times, days" " WHERE stop_times.trip_I = days.trip_I" " AND dep_time_ds > {st}" " AND dep_time_ds < {et}" " AND date = '{day}'" " GROUP BY stop_I) as y" " ON y.stop_I = x.stop_I".format(h=hours, st=st, et=et, day=day)) try: trips_frequency = gtfs.execute_custom_query_pandas(query).T.drop_duplicates().T df = pd.merge(stops[['stop_I', 'lat', 'lon']], trips_frequency[['stop_I', 'frequency']], on='stop_I', how='inner') return df.apply(pd.to_numeric) except: raise ValueError("Maybe too short time frame!")
def hourly_frequencies(gtfs, st, et, route_type): """ Return all the number of vehicles (i.e. busses,trams,etc) that pass hourly through a stop in a time frame. Parameters ---------- gtfs: GTFS st : int start time of the time framein unix time et : int end time of the time frame in unix time route_type: int Returns ------- numeric pandas.DataFrame with columns stop_I, lat, lon, frequency """ timeframe = et-st hours = timeframe/ 3600 day = gtfs.get_suitable_date_for_daily_extract() stops = gtfs.get_stops_for_route_type(route_type).T.drop_duplicates().T query = ("SELECT * FROM stops as x" " JOIN" " (SELECT * , COUNT(*)/{h} as frequency" " FROM stop_times, days" " WHERE stop_times.trip_I = days.trip_I" " AND dep_time_ds > {st}" " AND dep_time_ds < {et}" " AND date = '{day}'" " GROUP BY stop_I) as y" " ON y.stop_I = x.stop_I".format(h=hours, st=st, et=et, day=day)) try: trips_frequency = gtfs.execute_custom_query_pandas(query).T.drop_duplicates().T df = pd.merge(stops[['stop_I', 'lat', 'lon']], trips_frequency[['stop_I', 'frequency']], on='stop_I', how='inner') return df.apply(pd.to_numeric) except: raise ValueError("Maybe too short time frame!")
[ "Return", "all", "the", "number", "of", "vehicles", "(", "i", ".", "e", ".", "busses", "trams", "etc", ")", "that", "pass", "hourly", "through", "a", "stop", "in", "a", "time", "frame", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L536-L574
[ "def", "hourly_frequencies", "(", "gtfs", ",", "st", ",", "et", ",", "route_type", ")", ":", "timeframe", "=", "et", "-", "st", "hours", "=", "timeframe", "/", "3600", "day", "=", "gtfs", ".", "get_suitable_date_for_daily_extract", "(", ")", "stops", "=", "gtfs", ".", "get_stops_for_route_type", "(", "route_type", ")", ".", "T", ".", "drop_duplicates", "(", ")", ".", "T", "query", "=", "(", "\"SELECT * FROM stops as x\"", "\" JOIN\"", "\" (SELECT * , COUNT(*)/{h} as frequency\"", "\" FROM stop_times, days\"", "\" WHERE stop_times.trip_I = days.trip_I\"", "\" AND dep_time_ds > {st}\"", "\" AND dep_time_ds < {et}\"", "\" AND date = '{day}'\"", "\" GROUP BY stop_I) as y\"", "\" ON y.stop_I = x.stop_I\"", ".", "format", "(", "h", "=", "hours", ",", "st", "=", "st", ",", "et", "=", "et", ",", "day", "=", "day", ")", ")", "try", ":", "trips_frequency", "=", "gtfs", ".", "execute_custom_query_pandas", "(", "query", ")", ".", "T", ".", "drop_duplicates", "(", ")", ".", "T", "df", "=", "pd", ".", "merge", "(", "stops", "[", "[", "'stop_I'", ",", "'lat'", ",", "'lon'", "]", "]", ",", "trips_frequency", "[", "[", "'stop_I'", ",", "'frequency'", "]", "]", ",", "on", "=", "'stop_I'", ",", "how", "=", "'inner'", ")", "return", "df", ".", "apply", "(", "pd", ".", "to_numeric", ")", "except", ":", "raise", "ValueError", "(", "\"Maybe too short time frame!\"", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
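A usage sketch with a hypothetical database path. One caveat worth flagging: the docstring says st and et are unix times, but the query compares them against dep_time_ds, which in this schema normally holds seconds since the start of the service day, so the intended unit is worth verifying before relying on the numbers.

from gtfspy.gtfs import GTFS
from gtfspy.stats import hourly_frequencies

g = GTFS("city.sqlite")                         # hypothetical database
# 07:00-09:00 expressed as seconds since the start of the day (see the caveat above); 3 = bus in GTFS
df = hourly_frequencies(g, st=7 * 3600, et=9 * 3600, route_type=3)
print(df.head())                                # stop_I, lat, lon, frequency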
valid
get_vehicle_hours_by_type
Return the sum of vehicle hours in a particular day by route type.
gtfspy/stats.py
def get_vehicle_hours_by_type(gtfs, route_type): """ Return the sum of vehicle hours in a particular day by route type. """ day = gtfs.get_suitable_date_for_daily_extract() query = (" SELECT * , SUM(end_time_ds - start_time_ds)/3600 as vehicle_hours_type" " FROM" " (SELECT * FROM day_trips as q1" " INNER JOIN" " (SELECT route_I, type FROM routes) as q2" " ON q1.route_I = q2.route_I" " WHERE type = {route_type}" " AND date = '{day}')".format(day=day, route_type=route_type)) df = gtfs.execute_custom_query_pandas(query) return df['vehicle_hours_type'].item()
def get_vehicle_hours_by_type(gtfs, route_type): """ Return the sum of vehicle hours in a particular day by route type. """ day = gtfs.get_suitable_date_for_daily_extract() query = (" SELECT * , SUM(end_time_ds - start_time_ds)/3600 as vehicle_hours_type" " FROM" " (SELECT * FROM day_trips as q1" " INNER JOIN" " (SELECT route_I, type FROM routes) as q2" " ON q1.route_I = q2.route_I" " WHERE type = {route_type}" " AND date = '{day}')".format(day=day, route_type=route_type)) df = gtfs.execute_custom_query_pandas(query) return df['vehicle_hours_type'].item()
[ "Return", "the", "sum", "of", "vehicle", "hours", "in", "a", "particular", "day", "by", "route", "type", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/stats.py#L607-L622
[ "def", "get_vehicle_hours_by_type", "(", "gtfs", ",", "route_type", ")", ":", "day", "=", "gtfs", ".", "get_suitable_date_for_daily_extract", "(", ")", "query", "=", "(", "\" SELECT * , SUM(end_time_ds - start_time_ds)/3600 as vehicle_hours_type\"", "\" FROM\"", "\" (SELECT * FROM day_trips as q1\"", "\" INNER JOIN\"", "\" (SELECT route_I, type FROM routes) as q2\"", "\" ON q1.route_I = q2.route_I\"", "\" WHERE type = {route_type}\"", "\" AND date = '{day}')\"", ".", "format", "(", "day", "=", "day", ",", "route_type", "=", "route_type", ")", ")", "df", "=", "gtfs", ".", "execute_custom_query_pandas", "(", "query", ")", "return", "df", "[", "'vehicle_hours_type'", "]", ".", "item", "(", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
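A usage sketch (hypothetical database path); the route_type codes 3 and 0 are the standard GTFS codes for bus and tram:

from gtfspy.gtfs import GTFS
from gtfspy.stats import get_vehicle_hours_by_type

g = GTFS("city.sqlite")                         # hypothetical database
bus_hours = get_vehicle_hours_by_type(g, route_type=3)    # 3 = bus
tram_hours = get_vehicle_hours_by_type(g, route_type=0)   # 0 = tram / light rail
print(bus_hours, tram_hours)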
valid
ConnectionScan._scan_footpaths
Scan the footpaths originating from stop_id Parameters ---------- stop_id: int
gtfspy/routing/connection_scan.py
def _scan_footpaths(self, stop_id, walk_departure_time): """ Scan the footpaths originating from stop_id Parameters ---------- stop_id: int """ for _, neighbor, data in self._walk_network.edges_iter(nbunch=[stop_id], data=True): d_walk = data["d_walk"] arrival_time = walk_departure_time + d_walk / self._walk_speed self._update_stop_label(neighbor, arrival_time)
def _scan_footpaths(self, stop_id, walk_departure_time): """ Scan the footpaths originating from stop_id Parameters ---------- stop_id: int """ for _, neighbor, data in self._walk_network.edges_iter(nbunch=[stop_id], data=True): d_walk = data["d_walk"] arrival_time = walk_departure_time + d_walk / self._walk_speed self._update_stop_label(neighbor, arrival_time)
[ "Scan", "the", "footpaths", "originating", "from", "stop_id" ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/connection_scan.py#L92-L103
[ "def", "_scan_footpaths", "(", "self", ",", "stop_id", ",", "walk_departure_time", ")", ":", "for", "_", ",", "neighbor", ",", "data", "in", "self", ".", "_walk_network", ".", "edges_iter", "(", "nbunch", "=", "[", "stop_id", "]", ",", "data", "=", "True", ")", ":", "d_walk", "=", "data", "[", "\"d_walk\"", "]", "arrival_time", "=", "walk_departure_time", "+", "d_walk", "/", "self", ".", "_walk_speed", "self", ".", "_update_stop_label", "(", "neighbor", ",", "arrival_time", ")" ]
bddba4b74faae6c1b91202f19184811e326547e5
valid
timeit
A Python decorator for printing out the execution time for a function. Adapted from: www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
gtfspy/routing/util.py
def timeit(method): """ A Python decorator for printing out the execution time for a function. Adapted from: www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods """ def timed(*args, **kw): time_start = time.time() result = method(*args, **kw) time_end = time.time() print('timeit: %r %2.2f sec (%r, %r) ' % (method.__name__, time_end-time_start, str(args)[:20], kw)) return result return timed
def timeit(method): """ A Python decorator for printing out the execution time for a function. Adapted from: www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods """ def timed(*args, **kw): time_start = time.time() result = method(*args, **kw) time_end = time.time() print('timeit: %r %2.2f sec (%r, %r) ' % (method.__name__, time_end-time_start, str(args)[:20], kw)) return result return timed
[ "A", "Python", "decorator", "for", "printing", "out", "the", "execution", "time", "for", "a", "function", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/util.py#L3-L17
[ "def", "timeit", "(", "method", ")", ":", "def", "timed", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "time_start", "=", "time", ".", "time", "(", ")", "result", "=", "method", "(", "*", "args", ",", "*", "*", "kw", ")", "time_end", "=", "time", ".", "time", "(", ")", "print", "(", "'timeit: %r %2.2f sec (%r, %r) '", "%", "(", "method", ".", "__name__", ",", "time_end", "-", "time_start", ",", "str", "(", "args", ")", "[", ":", "20", "]", ",", "kw", ")", ")", "return", "result", "return", "timed" ]
bddba4b74faae6c1b91202f19184811e326547e5
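A usage sketch for the decorator; it assumes timeit is importable from gtfspy.routing.util, as the path field suggests, and the printed line is only indicative:

import time
from gtfspy.routing.util import timeit

@timeit
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(2, 3)   # prints roughly: timeit: 'slow_add' 0.10 sec ('(2, 3)', {})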
valid
TimetableValidator.validate_and_get_warnings
Validates/checks a given GTFS feed with respect to a number of different issues. The set of warnings that are checked for, can be found in the gtfs_validator.ALL_WARNINGS Returns ------- warnings: WarningsContainer
gtfspy/timetable_validator.py
def validate_and_get_warnings(self): """ Validates/checks a given GTFS feed with respect to a number of different issues. The set of warnings that are checked for, can be found in the gtfs_validator.ALL_WARNINGS Returns ------- warnings: WarningsContainer """ self.warnings_container.clear() self._validate_stops_with_same_stop_time() self._validate_speeds_and_trip_times() self._validate_stop_spacings() self._validate_stop_sequence() self._validate_misplaced_stops() return self.warnings_container
def validate_and_get_warnings(self): """ Validates/checks a given GTFS feed with respect to a number of different issues. The set of warnings that are checked for, can be found in the gtfs_validator.ALL_WARNINGS Returns ------- warnings: WarningsContainer """ self.warnings_container.clear() self._validate_stops_with_same_stop_time() self._validate_speeds_and_trip_times() self._validate_stop_spacings() self._validate_stop_sequence() self._validate_misplaced_stops() return self.warnings_container
[ "Validates", "/", "checks", "a", "given", "GTFS", "feed", "with", "respect", "to", "a", "number", "of", "different", "issues", "." ]
CxAalto/gtfspy
python
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/timetable_validator.py#L70-L86
[ "def", "validate_and_get_warnings", "(", "self", ")", ":", "self", ".", "warnings_container", ".", "clear", "(", ")", "self", ".", "_validate_stops_with_same_stop_time", "(", ")", "self", ".", "_validate_speeds_and_trip_times", "(", ")", "self", ".", "_validate_stop_spacings", "(", ")", "self", ".", "_validate_stop_sequence", "(", ")", "self", ".", "_validate_misplaced_stops", "(", ")", "return", "self", ".", "warnings_container" ]
bddba4b74faae6c1b91202f19184811e326547e5
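A hedged usage sketch; the TimetableValidator constructor signature is an assumption, since only this method appears in the record, and the database path is hypothetical:

from gtfspy.gtfs import GTFS
from gtfspy.timetable_validator import TimetableValidator

g = GTFS("city.sqlite")                         # hypothetical database
validator = TimetableValidator(g)               # constructor arguments assumed
warnings = validator.validate_and_get_warnings()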
valid
LockdownForm.clean_password
Check that the password is valid.
lockdown/forms.py
def clean_password(self): """Check that the password is valid.""" value = self.cleaned_data.get('password') if value not in self.valid_passwords: raise forms.ValidationError('Incorrect password.') return value
def clean_password(self): """Check that the password is valid.""" value = self.cleaned_data.get('password') if value not in self.valid_passwords: raise forms.ValidationError('Incorrect password.') return value
[ "Check", "that", "the", "password", "is", "valid", "." ]
Dunedan/django-lockdown
python
https://github.com/Dunedan/django-lockdown/blob/f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec/lockdown/forms.py#L22-L27
[ "def", "clean_password", "(", "self", ")", ":", "value", "=", "self", ".", "cleaned_data", ".", "get", "(", "'password'", ")", "if", "value", "not", "in", "self", ".", "valid_passwords", ":", "raise", "forms", ".", "ValidationError", "(", "'Incorrect password.'", ")", "return", "value" ]
f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec
valid
AuthForm.clean
When receiving the filled out form, check for valid access.
lockdown/forms.py
def clean(self): """When receiving the filled out form, check for valid access.""" cleaned_data = super(AuthForm, self).clean() user = self.get_user() if self.staff_only and (not user or not user.is_staff): raise forms.ValidationError('Sorry, only staff are allowed.') if self.superusers_only and (not user or not user.is_superuser): raise forms.ValidationError('Sorry, only superusers are allowed.') return cleaned_data
def clean(self): """When receiving the filled out form, check for valid access.""" cleaned_data = super(AuthForm, self).clean() user = self.get_user() if self.staff_only and (not user or not user.is_staff): raise forms.ValidationError('Sorry, only staff are allowed.') if self.superusers_only and (not user or not user.is_superuser): raise forms.ValidationError('Sorry, only superusers are allowed.') return cleaned_data
[ "When", "receiving", "the", "filled", "out", "form", "check", "for", "valid", "access", "." ]
Dunedan/django-lockdown
python
https://github.com/Dunedan/django-lockdown/blob/f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec/lockdown/forms.py#L72-L80
[ "def", "clean", "(", "self", ")", ":", "cleaned_data", "=", "super", "(", "AuthForm", ",", "self", ")", ".", "clean", "(", ")", "user", "=", "self", ".", "get_user", "(", ")", "if", "self", ".", "staff_only", "and", "(", "not", "user", "or", "not", "user", ".", "is_staff", ")", ":", "raise", "forms", ".", "ValidationError", "(", "'Sorry, only staff are allowed.'", ")", "if", "self", ".", "superusers_only", "and", "(", "not", "user", "or", "not", "user", ".", "is_superuser", ")", ":", "raise", "forms", ".", "ValidationError", "(", "'Sorry, only superusers are allowed.'", ")", "return", "cleaned_data" ]
f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec
valid
AuthForm.authenticate
Check that the password is valid. This allows for revoking of a user's preview rights by changing the valid passwords.
lockdown/forms.py
def authenticate(self, token_value): """Check that the password is valid. This allows for revoking of a user's preview rights by changing the valid passwords. """ try: backend_path, user_id = token_value.split(':', 1) except (ValueError, AttributeError): return False backend = auth.load_backend(backend_path) return bool(backend.get_user(user_id))
def authenticate(self, token_value): """Check that the password is valid. This allows for revoking of a user's preview rights by changing the valid passwords. """ try: backend_path, user_id = token_value.split(':', 1) except (ValueError, AttributeError): return False backend = auth.load_backend(backend_path) return bool(backend.get_user(user_id))
[ "Check", "that", "the", "password", "is", "valid", "." ]
Dunedan/django-lockdown
python
https://github.com/Dunedan/django-lockdown/blob/f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec/lockdown/forms.py#L91-L102
[ "def", "authenticate", "(", "self", ",", "token_value", ")", ":", "try", ":", "backend_path", ",", "user_id", "=", "token_value", ".", "split", "(", "':'", ",", "1", ")", "except", "(", "ValueError", ",", "AttributeError", ")", ":", "return", "False", "backend", "=", "auth", ".", "load_backend", "(", "backend_path", ")", "return", "bool", "(", "backend", ".", "get_user", "(", "user_id", ")", ")" ]
f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec
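The token this method checks is a '<backend dotted path>:<user id>' string; a small illustration of the format (the backend path and user id are made up):

token = "django.contrib.auth.backends.ModelBackend:42"    # "<backend dotted path>:<user id>"
backend_path, user_id = token.split(":", 1)
# authenticate() loads that backend and returns bool(backend.get_user(user_id)),
# so deleting the user (or changing backends) revokes the stored preview access.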
valid
get_lockdown_form
Return a form class for a given string pointing to a lockdown form.
lockdown/middleware.py
def get_lockdown_form(form_path): """Return a form class for a given string pointing to a lockdown form.""" if not form_path: raise ImproperlyConfigured('No LOCKDOWN_FORM specified.') form_path_list = form_path.split(".") new_module = ".".join(form_path_list[:-1]) attr = form_path_list[-1] try: mod = import_module(new_module) except (ImportError, ValueError): raise ImproperlyConfigured('Module configured in LOCKDOWN_FORM (%s) to' ' contain the form class couldn\'t be ' 'found.' % new_module) try: form = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured('The module configured in LOCKDOWN_FORM ' ' (%s) doesn\'t define a "%s" form.' % (new_module, attr)) return form
def get_lockdown_form(form_path): """Return a form class for a given string pointing to a lockdown form.""" if not form_path: raise ImproperlyConfigured('No LOCKDOWN_FORM specified.') form_path_list = form_path.split(".") new_module = ".".join(form_path_list[:-1]) attr = form_path_list[-1] try: mod = import_module(new_module) except (ImportError, ValueError): raise ImproperlyConfigured('Module configured in LOCKDOWN_FORM (%s) to' ' contain the form class couldn\'t be ' 'found.' % new_module) try: form = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured('The module configured in LOCKDOWN_FORM ' ' (%s) doesn\'t define a "%s" form.' % (new_module, attr)) return form
[ "Return", "a", "form", "class", "for", "a", "given", "string", "pointing", "to", "a", "lockdown", "form", "." ]
Dunedan/django-lockdown
python
https://github.com/Dunedan/django-lockdown/blob/f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec/lockdown/middleware.py#L21-L40
[ "def", "get_lockdown_form", "(", "form_path", ")", ":", "if", "not", "form_path", ":", "raise", "ImproperlyConfigured", "(", "'No LOCKDOWN_FORM specified.'", ")", "form_path_list", "=", "form_path", ".", "split", "(", "\".\"", ")", "new_module", "=", "\".\"", ".", "join", "(", "form_path_list", "[", ":", "-", "1", "]", ")", "attr", "=", "form_path_list", "[", "-", "1", "]", "try", ":", "mod", "=", "import_module", "(", "new_module", ")", "except", "(", "ImportError", ",", "ValueError", ")", ":", "raise", "ImproperlyConfigured", "(", "'Module configured in LOCKDOWN_FORM (%s) to'", "' contain the form class couldn\\'t be '", "'found.'", "%", "new_module", ")", "try", ":", "form", "=", "getattr", "(", "mod", ",", "attr", ")", "except", "AttributeError", ":", "raise", "ImproperlyConfigured", "(", "'The module configured in LOCKDOWN_FORM '", "' (%s) doesn\\'t define a \"%s\" form.'", "%", "(", "new_module", ",", "attr", ")", ")", "return", "form" ]
f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec
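A usage sketch; it assumes a configured Django settings module and that 'lockdown.forms.LockdownForm' (the form shown earlier in this section) is the class being resolved. The password value is hypothetical.

from lockdown.middleware import get_lockdown_form

form_class = get_lockdown_form("lockdown.forms.LockdownForm")
form = form_class(data={"password": "letmein"})             # keyword arguments assumed
print(form.is_valid())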
valid
LockdownMiddleware.process_request
Check if each request is allowed to access the current resource.
lockdown/middleware.py
def process_request(self, request): """Check if each request is allowed to access the current resource.""" try: session = request.session except AttributeError: raise ImproperlyConfigured('django-lockdown requires the Django ' 'sessions framework') # Don't lock down if django-lockdown is disabled altogether. if settings.ENABLED is False: return None # Don't lock down if the client REMOTE_ADDR matched and is part of the # exception list. if self.remote_addr_exceptions: remote_addr_exceptions = self.remote_addr_exceptions else: remote_addr_exceptions = settings.REMOTE_ADDR_EXCEPTIONS if remote_addr_exceptions: # If forwarding proxies are used they must be listed as trusted trusted_proxies = self.trusted_proxies or settings.TRUSTED_PROXIES remote_addr = request.META.get('REMOTE_ADDR') if remote_addr in remote_addr_exceptions: return None if remote_addr in trusted_proxies: # If REMOTE_ADDR is a trusted proxy check x-forwarded-for x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') if x_forwarded_for: remote_addr = x_forwarded_for.split(',')[-1].strip() if remote_addr in remote_addr_exceptions: return None # Don't lock down if the URL matches an exception pattern. if self.url_exceptions: url_exceptions = compile_url_exceptions(self.url_exceptions) else: url_exceptions = compile_url_exceptions(settings.URL_EXCEPTIONS) for pattern in url_exceptions: if pattern.search(request.path): return None # Don't lock down if the URL resolves to a whitelisted view. try: resolved_path = resolve(request.path) except Resolver404: pass else: if resolved_path.func in settings.VIEW_EXCEPTIONS: return None # Don't lock down if outside of the lockdown dates. if self.until_date: until_date = self.until_date else: until_date = settings.UNTIL_DATE if self.after_date: after_date = self.after_date else: after_date = settings.AFTER_DATE if until_date or after_date: locked_date = False if until_date and datetime.datetime.now() < until_date: locked_date = True if after_date and datetime.datetime.now() > after_date: locked_date = True if not locked_date: return None form_data = request.POST if request.method == 'POST' else None if self.form: form_class = self.form else: form_class = get_lockdown_form(settings.FORM) form = form_class(data=form_data, **self.form_kwargs) authorized = False token = session.get(self.session_key) if hasattr(form, 'authenticate'): if form.authenticate(token): authorized = True elif token is True: authorized = True if authorized and self.logout_key and self.logout_key in request.GET: if self.session_key in session: del session[self.session_key] querystring = request.GET.copy() del querystring[self.logout_key] return self.redirect(request) # Don't lock down if the user is already authorized for previewing. if authorized: return None if form.is_valid(): if hasattr(form, 'generate_token'): token = form.generate_token() else: token = True session[self.session_key] = token return self.redirect(request) page_data = {'until_date': until_date, 'after_date': after_date} if not hasattr(form, 'show_form') or form.show_form(): page_data['form'] = form if self.extra_context: page_data.update(self.extra_context) return render(request, 'lockdown/form.html', page_data)
def process_request(self, request): """Check if each request is allowed to access the current resource.""" try: session = request.session except AttributeError: raise ImproperlyConfigured('django-lockdown requires the Django ' 'sessions framework') # Don't lock down if django-lockdown is disabled altogether. if settings.ENABLED is False: return None # Don't lock down if the client REMOTE_ADDR matched and is part of the # exception list. if self.remote_addr_exceptions: remote_addr_exceptions = self.remote_addr_exceptions else: remote_addr_exceptions = settings.REMOTE_ADDR_EXCEPTIONS if remote_addr_exceptions: # If forwarding proxies are used they must be listed as trusted trusted_proxies = self.trusted_proxies or settings.TRUSTED_PROXIES remote_addr = request.META.get('REMOTE_ADDR') if remote_addr in remote_addr_exceptions: return None if remote_addr in trusted_proxies: # If REMOTE_ADDR is a trusted proxy check x-forwarded-for x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') if x_forwarded_for: remote_addr = x_forwarded_for.split(',')[-1].strip() if remote_addr in remote_addr_exceptions: return None # Don't lock down if the URL matches an exception pattern. if self.url_exceptions: url_exceptions = compile_url_exceptions(self.url_exceptions) else: url_exceptions = compile_url_exceptions(settings.URL_EXCEPTIONS) for pattern in url_exceptions: if pattern.search(request.path): return None # Don't lock down if the URL resolves to a whitelisted view. try: resolved_path = resolve(request.path) except Resolver404: pass else: if resolved_path.func in settings.VIEW_EXCEPTIONS: return None # Don't lock down if outside of the lockdown dates. if self.until_date: until_date = self.until_date else: until_date = settings.UNTIL_DATE if self.after_date: after_date = self.after_date else: after_date = settings.AFTER_DATE if until_date or after_date: locked_date = False if until_date and datetime.datetime.now() < until_date: locked_date = True if after_date and datetime.datetime.now() > after_date: locked_date = True if not locked_date: return None form_data = request.POST if request.method == 'POST' else None if self.form: form_class = self.form else: form_class = get_lockdown_form(settings.FORM) form = form_class(data=form_data, **self.form_kwargs) authorized = False token = session.get(self.session_key) if hasattr(form, 'authenticate'): if form.authenticate(token): authorized = True elif token is True: authorized = True if authorized and self.logout_key and self.logout_key in request.GET: if self.session_key in session: del session[self.session_key] querystring = request.GET.copy() del querystring[self.logout_key] return self.redirect(request) # Don't lock down if the user is already authorized for previewing. if authorized: return None if form.is_valid(): if hasattr(form, 'generate_token'): token = form.generate_token() else: token = True session[self.session_key] = token return self.redirect(request) page_data = {'until_date': until_date, 'after_date': after_date} if not hasattr(form, 'show_form') or form.show_form(): page_data['form'] = form if self.extra_context: page_data.update(self.extra_context) return render(request, 'lockdown/form.html', page_data)
[ "Check", "if", "each", "request", "is", "allowed", "to", "access", "the", "current", "resource", "." ]
Dunedan/django-lockdown
python
https://github.com/Dunedan/django-lockdown/blob/f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec/lockdown/middleware.py#L81-L194
[ "def", "process_request", "(", "self", ",", "request", ")", ":", "try", ":", "session", "=", "request", ".", "session", "except", "AttributeError", ":", "raise", "ImproperlyConfigured", "(", "'django-lockdown requires the Django '", "'sessions framework'", ")", "# Don't lock down if django-lockdown is disabled altogether.", "if", "settings", ".", "ENABLED", "is", "False", ":", "return", "None", "# Don't lock down if the client REMOTE_ADDR matched and is part of the", "# exception list.", "if", "self", ".", "remote_addr_exceptions", ":", "remote_addr_exceptions", "=", "self", ".", "remote_addr_exceptions", "else", ":", "remote_addr_exceptions", "=", "settings", ".", "REMOTE_ADDR_EXCEPTIONS", "if", "remote_addr_exceptions", ":", "# If forwarding proxies are used they must be listed as trusted", "trusted_proxies", "=", "self", ".", "trusted_proxies", "or", "settings", ".", "TRUSTED_PROXIES", "remote_addr", "=", "request", ".", "META", ".", "get", "(", "'REMOTE_ADDR'", ")", "if", "remote_addr", "in", "remote_addr_exceptions", ":", "return", "None", "if", "remote_addr", "in", "trusted_proxies", ":", "# If REMOTE_ADDR is a trusted proxy check x-forwarded-for", "x_forwarded_for", "=", "request", ".", "META", ".", "get", "(", "'HTTP_X_FORWARDED_FOR'", ")", "if", "x_forwarded_for", ":", "remote_addr", "=", "x_forwarded_for", ".", "split", "(", "','", ")", "[", "-", "1", "]", ".", "strip", "(", ")", "if", "remote_addr", "in", "remote_addr_exceptions", ":", "return", "None", "# Don't lock down if the URL matches an exception pattern.", "if", "self", ".", "url_exceptions", ":", "url_exceptions", "=", "compile_url_exceptions", "(", "self", ".", "url_exceptions", ")", "else", ":", "url_exceptions", "=", "compile_url_exceptions", "(", "settings", ".", "URL_EXCEPTIONS", ")", "for", "pattern", "in", "url_exceptions", ":", "if", "pattern", ".", "search", "(", "request", ".", "path", ")", ":", "return", "None", "# Don't lock down if the URL resolves to a whitelisted view.", "try", ":", "resolved_path", "=", "resolve", "(", "request", ".", "path", ")", "except", "Resolver404", ":", "pass", "else", ":", "if", "resolved_path", ".", "func", "in", "settings", ".", "VIEW_EXCEPTIONS", ":", "return", "None", "# Don't lock down if outside of the lockdown dates.", "if", "self", ".", "until_date", ":", "until_date", "=", "self", ".", "until_date", "else", ":", "until_date", "=", "settings", ".", "UNTIL_DATE", "if", "self", ".", "after_date", ":", "after_date", "=", "self", ".", "after_date", "else", ":", "after_date", "=", "settings", ".", "AFTER_DATE", "if", "until_date", "or", "after_date", ":", "locked_date", "=", "False", "if", "until_date", "and", "datetime", ".", "datetime", ".", "now", "(", ")", "<", "until_date", ":", "locked_date", "=", "True", "if", "after_date", "and", "datetime", ".", "datetime", ".", "now", "(", ")", ">", "after_date", ":", "locked_date", "=", "True", "if", "not", "locked_date", ":", "return", "None", "form_data", "=", "request", ".", "POST", "if", "request", ".", "method", "==", "'POST'", "else", "None", "if", "self", ".", "form", ":", "form_class", "=", "self", ".", "form", "else", ":", "form_class", "=", "get_lockdown_form", "(", "settings", ".", "FORM", ")", "form", "=", "form_class", "(", "data", "=", "form_data", ",", "*", "*", "self", ".", "form_kwargs", ")", "authorized", "=", "False", "token", "=", "session", ".", "get", "(", "self", ".", "session_key", ")", "if", "hasattr", "(", "form", ",", "'authenticate'", ")", ":", "if", "form", ".", "authenticate", "(", "token", ")", ":", 
"authorized", "=", "True", "elif", "token", "is", "True", ":", "authorized", "=", "True", "if", "authorized", "and", "self", ".", "logout_key", "and", "self", ".", "logout_key", "in", "request", ".", "GET", ":", "if", "self", ".", "session_key", "in", "session", ":", "del", "session", "[", "self", ".", "session_key", "]", "querystring", "=", "request", ".", "GET", ".", "copy", "(", ")", "del", "querystring", "[", "self", ".", "logout_key", "]", "return", "self", ".", "redirect", "(", "request", ")", "# Don't lock down if the user is already authorized for previewing.", "if", "authorized", ":", "return", "None", "if", "form", ".", "is_valid", "(", ")", ":", "if", "hasattr", "(", "form", ",", "'generate_token'", ")", ":", "token", "=", "form", ".", "generate_token", "(", ")", "else", ":", "token", "=", "True", "session", "[", "self", ".", "session_key", "]", "=", "token", "return", "self", ".", "redirect", "(", "request", ")", "page_data", "=", "{", "'until_date'", ":", "until_date", ",", "'after_date'", ":", "after_date", "}", "if", "not", "hasattr", "(", "form", ",", "'show_form'", ")", "or", "form", ".", "show_form", "(", ")", ":", "page_data", "[", "'form'", "]", "=", "form", "if", "self", ".", "extra_context", ":", "page_data", ".", "update", "(", "self", ".", "extra_context", ")", "return", "render", "(", "request", ",", "'lockdown/form.html'", ",", "page_data", ")" ]
f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec
valid
LockdownMiddleware.redirect
Handle redirects properly.
lockdown/middleware.py
def redirect(self, request): """Handle redirects properly.""" url = request.path querystring = request.GET.copy() if self.logout_key and self.logout_key in request.GET: del querystring[self.logout_key] if querystring: url = '%s?%s' % (url, querystring.urlencode()) return HttpResponseRedirect(url)
def redirect(self, request): """Handle redirects properly.""" url = request.path querystring = request.GET.copy() if self.logout_key and self.logout_key in request.GET: del querystring[self.logout_key] if querystring: url = '%s?%s' % (url, querystring.urlencode()) return HttpResponseRedirect(url)
[ "Handle", "redirects", "properly", "." ]
Dunedan/django-lockdown
python
https://github.com/Dunedan/django-lockdown/blob/f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec/lockdown/middleware.py#L196-L204
[ "def", "redirect", "(", "self", ",", "request", ")", ":", "url", "=", "request", ".", "path", "querystring", "=", "request", ".", "GET", ".", "copy", "(", ")", "if", "self", ".", "logout_key", "and", "self", ".", "logout_key", "in", "request", ".", "GET", ":", "del", "querystring", "[", "self", ".", "logout_key", "]", "if", "querystring", ":", "url", "=", "'%s?%s'", "%", "(", "url", ",", "querystring", ".", "urlencode", "(", ")", ")", "return", "HttpResponseRedirect", "(", "url", ")" ]
f2f3fee174e14e2da32c6d4f8528ba6b8c2106ec
valid
infer
https://github.com/frictionlessdata/datapackage-py#infer
datapackage/infer.py
def infer(pattern, base_path=None): """https://github.com/frictionlessdata/datapackage-py#infer """ package = Package({}, base_path=base_path) descriptor = package.infer(pattern) return descriptor
def infer(pattern, base_path=None): """https://github.com/frictionlessdata/datapackage-py#infer """ package = Package({}, base_path=base_path) descriptor = package.infer(pattern) return descriptor
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#infer" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/infer.py#L13-L18
[ "def", "infer", "(", "pattern", ",", "base_path", "=", "None", ")", ":", "package", "=", "Package", "(", "{", "}", ",", "base_path", "=", "base_path", ")", "descriptor", "=", "package", ".", "infer", "(", "pattern", ")", "return", "descriptor" ]
aca085ea54541b087140b58a81332f8728baeeb2
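A minimal usage sketch for the infer helper documented in the record above, assuming it is re-exported at the datapackage package top level (as the README anchor in the docstring suggests); the glob pattern and base path are illustrative, not taken from the dataset.

from datapackage import infer

# Glob for CSV files under the base path and build a package descriptor from them.
descriptor = infer('**/*.csv', base_path='data')
print(descriptor.get('resources', []))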
valid
Registry.get
Returns the profile with the received ID as a dict If a local copy of the profile exists, it'll be returned. If not, it'll be downloaded from the web. The results are cached, so any subsequent calls won't hit the filesystem or the web. Args: profile_id (str): The ID of the profile you want. Raises: RegistryError: If there was some problem opening the profile file or its format was incorrect.
datapackage/registry.py
def get(self, profile_id): '''Returns the profile with the received ID as a dict If a local copy of the profile exists, it'll be returned. If not, it'll be downloaded from the web. The results are cached, so any subsequent calls won't hit the filesystem or the web. Args: profile_id (str): The ID of the profile you want. Raises: RegistryError: If there was some problem opening the profile file or its format was incorrect. ''' if profile_id not in self._profiles: try: self._profiles[profile_id] = self._get_profile(profile_id) except (ValueError, IOError) as e: six.raise_from(RegistryError(e), e) return self._profiles[profile_id]
def get(self, profile_id): '''Returns the profile with the received ID as a dict If a local copy of the profile exists, it'll be returned. If not, it'll be downloaded from the web. The results are cached, so any subsequent calls won't hit the filesystem or the web. Args: profile_id (str): The ID of the profile you want. Raises: RegistryError: If there was some problem opening the profile file or its format was incorrect. ''' if profile_id not in self._profiles: try: self._profiles[profile_id] = self._get_profile(profile_id) except (ValueError, IOError) as e: six.raise_from(RegistryError(e), e) return self._profiles[profile_id]
[ "Returns", "the", "profile", "with", "the", "received", "ID", "as", "a", "dict" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/registry.py#L61-L81
[ "def", "get", "(", "self", ",", "profile_id", ")", ":", "if", "profile_id", "not", "in", "self", ".", "_profiles", ":", "try", ":", "self", ".", "_profiles", "[", "profile_id", "]", "=", "self", ".", "_get_profile", "(", "profile_id", ")", "except", "(", "ValueError", ",", "IOError", ")", "as", "e", ":", "six", ".", "raise_from", "(", "RegistryError", "(", "e", ")", ",", "e", ")", "return", "self", ".", "_profiles", "[", "profile_id", "]" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Registry._get_profile
dict: Return the profile with the received ID as a dict (None if it doesn't exist).
datapackage/registry.py
def _get_profile(self, profile_id): '''dict: Return the profile with the received ID as a dict (None if it doesn't exist).''' profile_metadata = self._registry.get(profile_id) if not profile_metadata: return path = self._get_absolute_path(profile_metadata.get('schema_path')) url = profile_metadata.get('schema') if path: try: return self._load_json_file(path) except IOError as local_exc: if not url: raise local_exc try: return self._load_json_url(url) except IOError: msg = ( 'Error loading profile locally at "{path}" ' 'and remotely at "{url}".' ).format(path=path, url=url) six.raise_from(IOError(msg), local_exc) elif url: return self._load_json_url(url)
def _get_profile(self, profile_id): '''dict: Return the profile with the received ID as a dict (None if it doesn't exist).''' profile_metadata = self._registry.get(profile_id) if not profile_metadata: return path = self._get_absolute_path(profile_metadata.get('schema_path')) url = profile_metadata.get('schema') if path: try: return self._load_json_file(path) except IOError as local_exc: if not url: raise local_exc try: return self._load_json_url(url) except IOError: msg = ( 'Error loading profile locally at "{path}" ' 'and remotely at "{url}".' ).format(path=path, url=url) six.raise_from(IOError(msg), local_exc) elif url: return self._load_json_url(url)
[ "dict", ":", "Return", "the", "profile", "with", "the", "received", "ID", "as", "a", "dict", "(", "None", "if", "it", "doesn", "t", "exist", ")", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/registry.py#L85-L110
[ "def", "_get_profile", "(", "self", ",", "profile_id", ")", ":", "profile_metadata", "=", "self", ".", "_registry", ".", "get", "(", "profile_id", ")", "if", "not", "profile_metadata", ":", "return", "path", "=", "self", ".", "_get_absolute_path", "(", "profile_metadata", ".", "get", "(", "'schema_path'", ")", ")", "url", "=", "profile_metadata", ".", "get", "(", "'schema'", ")", "if", "path", ":", "try", ":", "return", "self", ".", "_load_json_file", "(", "path", ")", "except", "IOError", "as", "local_exc", ":", "if", "not", "url", ":", "raise", "local_exc", "try", ":", "return", "self", ".", "_load_json_url", "(", "url", ")", "except", "IOError", ":", "msg", "=", "(", "'Error loading profile locally at \"{path}\" '", "'and remotely at \"{url}\".'", ")", ".", "format", "(", "path", "=", "path", ",", "url", "=", "url", ")", "six", ".", "raise_from", "(", "IOError", "(", "msg", ")", ",", "local_exc", ")", "elif", "url", ":", "return", "self", ".", "_load_json_url", "(", "url", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Registry._get_registry
dict: Return the registry as dict with profiles keyed by id.
datapackage/registry.py
def _get_registry(self, registry_path_or_url): '''dict: Return the registry as dict with profiles keyed by id.''' if registry_path_or_url.startswith('http'): profiles = self._load_json_url(registry_path_or_url) else: profiles = self._load_json_file(registry_path_or_url) try: registry = {} for profile in profiles: registry[profile['id']] = profile return registry except KeyError as e: msg = ( 'Registry at "{path}" has no "id" column.' ).format(path=registry_path_or_url) six.raise_from(ValueError(msg), e)
def _get_registry(self, registry_path_or_url): '''dict: Return the registry as dict with profiles keyed by id.''' if registry_path_or_url.startswith('http'): profiles = self._load_json_url(registry_path_or_url) else: profiles = self._load_json_file(registry_path_or_url) try: registry = {} for profile in profiles: registry[profile['id']] = profile return registry except KeyError as e: msg = ( 'Registry at "{path}" has no "id" column.' ).format(path=registry_path_or_url) six.raise_from(ValueError(msg), e)
[ "dict", ":", "Return", "the", "registry", "as", "dict", "with", "profiles", "keyed", "by", "id", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/registry.py#L112-L127
[ "def", "_get_registry", "(", "self", ",", "registry_path_or_url", ")", ":", "if", "registry_path_or_url", ".", "startswith", "(", "'http'", ")", ":", "profiles", "=", "self", ".", "_load_json_url", "(", "registry_path_or_url", ")", "else", ":", "profiles", "=", "self", ".", "_load_json_file", "(", "registry_path_or_url", ")", "try", ":", "registry", "=", "{", "}", "for", "profile", "in", "profiles", ":", "registry", "[", "profile", "[", "'id'", "]", "]", "=", "profile", "return", "registry", "except", "KeyError", "as", "e", ":", "msg", "=", "(", "'Registry at \"{path}\" has no \"id\" column.'", ")", ".", "format", "(", "path", "=", "registry_path_or_url", ")", "six", ".", "raise_from", "(", "ValueError", "(", "msg", ")", ",", "e", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Registry._get_absolute_path
str: Return the received relative_path joined with the base path (None if there were some error).
datapackage/registry.py
def _get_absolute_path(self, relative_path): '''str: Return the received relative_path joined with the base path (None if there were some error).''' try: return os.path.join(self.base_path, relative_path) except (AttributeError, TypeError): pass
def _get_absolute_path(self, relative_path): '''str: Return the received relative_path joined with the base path (None if there were some error).''' try: return os.path.join(self.base_path, relative_path) except (AttributeError, TypeError): pass
[ "str", ":", "Return", "the", "received", "relative_path", "joined", "with", "the", "base", "path", "(", "None", "if", "there", "were", "some", "error", ")", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/registry.py#L129-L135
[ "def", "_get_absolute_path", "(", "self", ",", "relative_path", ")", ":", "try", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "base_path", ",", "relative_path", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "pass" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Registry._load_json_url
dict: Return the JSON at the local path or URL as a dict.
datapackage/registry.py
def _load_json_url(self, url): '''dict: Return the JSON at the local path or URL as a dict.''' res = requests.get(url) res.raise_for_status() return res.json()
def _load_json_url(self, url): '''dict: Return the JSON at the local path or URL as a dict.''' res = requests.get(url) res.raise_for_status() return res.json()
[ "dict", ":", "Return", "the", "JSON", "at", "the", "local", "path", "or", "URL", "as", "a", "dict", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/registry.py#L141-L146
[ "def", "_load_json_url", "(", "self", ",", "url", ")", ":", "res", "=", "requests", ".", "get", "(", "url", ")", "res", ".", "raise_for_status", "(", ")", "return", "res", ".", "json", "(", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
get_descriptor_base_path
Get descriptor base path if string or return None.
datapackage/helpers.py
def get_descriptor_base_path(descriptor): """Get descriptor base path if string or return None. """ # Infer from path/url if isinstance(descriptor, six.string_types): if os.path.exists(descriptor): base_path = os.path.dirname(os.path.abspath(descriptor)) else: # suppose descriptor is a URL base_path = os.path.dirname(descriptor) # Current dir by default else: base_path = '.' return base_path
def get_descriptor_base_path(descriptor): """Get descriptor base path if string or return None. """ # Infer from path/url if isinstance(descriptor, six.string_types): if os.path.exists(descriptor): base_path = os.path.dirname(os.path.abspath(descriptor)) else: # suppose descriptor is a URL base_path = os.path.dirname(descriptor) # Current dir by default else: base_path = '.' return base_path
[ "Get", "descriptor", "base", "path", "if", "string", "or", "return", "None", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/helpers.py#L20-L36
[ "def", "get_descriptor_base_path", "(", "descriptor", ")", ":", "# Infer from path/url", "if", "isinstance", "(", "descriptor", ",", "six", ".", "string_types", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "descriptor", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "descriptor", ")", ")", "else", ":", "# suppose descriptor is a URL", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "descriptor", ")", "# Current dir by default", "else", ":", "base_path", "=", "'.'", "return", "base_path" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
retrieve_descriptor
Retrieve descriptor.
datapackage/helpers.py
def retrieve_descriptor(descriptor): """Retrieve descriptor. """ the_descriptor = descriptor if the_descriptor is None: the_descriptor = {} if isinstance(the_descriptor, six.string_types): try: if os.path.isfile(the_descriptor): with open(the_descriptor, 'r') as f: the_descriptor = json.load(f) else: req = requests.get(the_descriptor) req.raise_for_status() # Force UTF8 encoding for 'text/plain' sources req.encoding = 'utf8' the_descriptor = req.json() except (IOError, requests.exceptions.RequestException) as error: message = 'Unable to load JSON at "%s"' % descriptor six.raise_from(exceptions.DataPackageException(message), error) except ValueError as error: # Python2 doesn't have json.JSONDecodeError (use ValueErorr) message = 'Unable to parse JSON at "%s". %s' % (descriptor, error) six.raise_from(exceptions.DataPackageException(message), error) if hasattr(the_descriptor, 'read'): try: the_descriptor = json.load(the_descriptor) except ValueError as e: six.raise_from(exceptions.DataPackageException(str(e)), e) if not isinstance(the_descriptor, dict): msg = 'Data must be a \'dict\', but was a \'{0}\'' raise exceptions.DataPackageException(msg.format(type(the_descriptor).__name__)) return the_descriptor
def retrieve_descriptor(descriptor): """Retrieve descriptor. """ the_descriptor = descriptor if the_descriptor is None: the_descriptor = {} if isinstance(the_descriptor, six.string_types): try: if os.path.isfile(the_descriptor): with open(the_descriptor, 'r') as f: the_descriptor = json.load(f) else: req = requests.get(the_descriptor) req.raise_for_status() # Force UTF8 encoding for 'text/plain' sources req.encoding = 'utf8' the_descriptor = req.json() except (IOError, requests.exceptions.RequestException) as error: message = 'Unable to load JSON at "%s"' % descriptor six.raise_from(exceptions.DataPackageException(message), error) except ValueError as error: # Python2 doesn't have json.JSONDecodeError (use ValueErorr) message = 'Unable to parse JSON at "%s". %s' % (descriptor, error) six.raise_from(exceptions.DataPackageException(message), error) if hasattr(the_descriptor, 'read'): try: the_descriptor = json.load(the_descriptor) except ValueError as e: six.raise_from(exceptions.DataPackageException(str(e)), e) if not isinstance(the_descriptor, dict): msg = 'Data must be a \'dict\', but was a \'{0}\'' raise exceptions.DataPackageException(msg.format(type(the_descriptor).__name__)) return the_descriptor
[ "Retrieve", "descriptor", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/helpers.py#L41-L78
[ "def", "retrieve_descriptor", "(", "descriptor", ")", ":", "the_descriptor", "=", "descriptor", "if", "the_descriptor", "is", "None", ":", "the_descriptor", "=", "{", "}", "if", "isinstance", "(", "the_descriptor", ",", "six", ".", "string_types", ")", ":", "try", ":", "if", "os", ".", "path", ".", "isfile", "(", "the_descriptor", ")", ":", "with", "open", "(", "the_descriptor", ",", "'r'", ")", "as", "f", ":", "the_descriptor", "=", "json", ".", "load", "(", "f", ")", "else", ":", "req", "=", "requests", ".", "get", "(", "the_descriptor", ")", "req", ".", "raise_for_status", "(", ")", "# Force UTF8 encoding for 'text/plain' sources", "req", ".", "encoding", "=", "'utf8'", "the_descriptor", "=", "req", ".", "json", "(", ")", "except", "(", "IOError", ",", "requests", ".", "exceptions", ".", "RequestException", ")", "as", "error", ":", "message", "=", "'Unable to load JSON at \"%s\"'", "%", "descriptor", "six", ".", "raise_from", "(", "exceptions", ".", "DataPackageException", "(", "message", ")", ",", "error", ")", "except", "ValueError", "as", "error", ":", "# Python2 doesn't have json.JSONDecodeError (use ValueErorr)", "message", "=", "'Unable to parse JSON at \"%s\". %s'", "%", "(", "descriptor", ",", "error", ")", "six", ".", "raise_from", "(", "exceptions", ".", "DataPackageException", "(", "message", ")", ",", "error", ")", "if", "hasattr", "(", "the_descriptor", ",", "'read'", ")", ":", "try", ":", "the_descriptor", "=", "json", ".", "load", "(", "the_descriptor", ")", "except", "ValueError", "as", "e", ":", "six", ".", "raise_from", "(", "exceptions", ".", "DataPackageException", "(", "str", "(", "e", ")", ")", ",", "e", ")", "if", "not", "isinstance", "(", "the_descriptor", ",", "dict", ")", ":", "msg", "=", "'Data must be a \\'dict\\', but was a \\'{0}\\''", "raise", "exceptions", ".", "DataPackageException", "(", "msg", ".", "format", "(", "type", "(", "the_descriptor", ")", ".", "__name__", ")", ")", "return", "the_descriptor" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
dereference_package_descriptor
Dereference data package descriptor (IN-PLACE FOR NOW).
datapackage/helpers.py
def dereference_package_descriptor(descriptor, base_path): """Dereference data package descriptor (IN-PLACE FOR NOW). """ for resource in descriptor.get('resources', []): dereference_resource_descriptor(resource, base_path, descriptor) return descriptor
def dereference_package_descriptor(descriptor, base_path): """Dereference data package descriptor (IN-PLACE FOR NOW). """ for resource in descriptor.get('resources', []): dereference_resource_descriptor(resource, base_path, descriptor) return descriptor
[ "Dereference", "data", "package", "descriptor", "(", "IN", "-", "PLACE", "FOR", "NOW", ")", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/helpers.py#L83-L88
[ "def", "dereference_package_descriptor", "(", "descriptor", ",", "base_path", ")", ":", "for", "resource", "in", "descriptor", ".", "get", "(", "'resources'", ",", "[", "]", ")", ":", "dereference_resource_descriptor", "(", "resource", ",", "base_path", ",", "descriptor", ")", "return", "descriptor" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
dereference_resource_descriptor
Dereference resource descriptor (IN-PLACE FOR NOW).
datapackage/helpers.py
def dereference_resource_descriptor(descriptor, base_path, base_descriptor=None): """Dereference resource descriptor (IN-PLACE FOR NOW). """ PROPERTIES = ['schema', 'dialect'] if base_descriptor is None: base_descriptor = descriptor for property in PROPERTIES: value = descriptor.get(property) # URI -> No if not isinstance(value, six.string_types): continue # URI -> Pointer if value.startswith('#'): try: pointer = jsonpointer.JsonPointer(value[1:]) descriptor[property] = pointer.resolve(base_descriptor) except Exception as error: message = 'Not resolved Pointer URI "%s" for resource.%s' % (value, property) six.raise_from( exceptions.DataPackageException(message), error ) # URI -> Remote elif value.startswith('http'): try: response = requests.get(value) response.raise_for_status() descriptor[property] = response.json() except Exception as error: message = 'Not resolved Remote URI "%s" for resource.%s' % (value, property) six.raise_from( exceptions.DataPackageException(message), error ) # URI -> Local else: if not is_safe_path(value): raise exceptions.DataPackageException( 'Not safe path in Local URI "%s" ' 'for resource.%s' % (value, property)) if not base_path: raise exceptions.DataPackageException( 'Local URI "%s" requires base path ' 'for resource.%s' % (value, property)) fullpath = os.path.join(base_path, value) try: with io.open(fullpath, encoding='utf-8') as file: descriptor[property] = json.load(file) except Exception as error: message = 'Not resolved Local URI "%s" for resource.%s' % (value, property) six.raise_from( exceptions.DataPackageException(message), error ) return descriptor
def dereference_resource_descriptor(descriptor, base_path, base_descriptor=None): """Dereference resource descriptor (IN-PLACE FOR NOW). """ PROPERTIES = ['schema', 'dialect'] if base_descriptor is None: base_descriptor = descriptor for property in PROPERTIES: value = descriptor.get(property) # URI -> No if not isinstance(value, six.string_types): continue # URI -> Pointer if value.startswith('#'): try: pointer = jsonpointer.JsonPointer(value[1:]) descriptor[property] = pointer.resolve(base_descriptor) except Exception as error: message = 'Not resolved Pointer URI "%s" for resource.%s' % (value, property) six.raise_from( exceptions.DataPackageException(message), error ) # URI -> Remote elif value.startswith('http'): try: response = requests.get(value) response.raise_for_status() descriptor[property] = response.json() except Exception as error: message = 'Not resolved Remote URI "%s" for resource.%s' % (value, property) six.raise_from( exceptions.DataPackageException(message), error ) # URI -> Local else: if not is_safe_path(value): raise exceptions.DataPackageException( 'Not safe path in Local URI "%s" ' 'for resource.%s' % (value, property)) if not base_path: raise exceptions.DataPackageException( 'Local URI "%s" requires base path ' 'for resource.%s' % (value, property)) fullpath = os.path.join(base_path, value) try: with io.open(fullpath, encoding='utf-8') as file: descriptor[property] = json.load(file) except Exception as error: message = 'Not resolved Local URI "%s" for resource.%s' % (value, property) six.raise_from( exceptions.DataPackageException(message), error ) return descriptor
[ "Dereference", "resource", "descriptor", "(", "IN", "-", "PLACE", "FOR", "NOW", ")", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/helpers.py#L91-L150
[ "def", "dereference_resource_descriptor", "(", "descriptor", ",", "base_path", ",", "base_descriptor", "=", "None", ")", ":", "PROPERTIES", "=", "[", "'schema'", ",", "'dialect'", "]", "if", "base_descriptor", "is", "None", ":", "base_descriptor", "=", "descriptor", "for", "property", "in", "PROPERTIES", ":", "value", "=", "descriptor", ".", "get", "(", "property", ")", "# URI -> No", "if", "not", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "continue", "# URI -> Pointer", "if", "value", ".", "startswith", "(", "'#'", ")", ":", "try", ":", "pointer", "=", "jsonpointer", ".", "JsonPointer", "(", "value", "[", "1", ":", "]", ")", "descriptor", "[", "property", "]", "=", "pointer", ".", "resolve", "(", "base_descriptor", ")", "except", "Exception", "as", "error", ":", "message", "=", "'Not resolved Pointer URI \"%s\" for resource.%s'", "%", "(", "value", ",", "property", ")", "six", ".", "raise_from", "(", "exceptions", ".", "DataPackageException", "(", "message", ")", ",", "error", ")", "# URI -> Remote", "elif", "value", ".", "startswith", "(", "'http'", ")", ":", "try", ":", "response", "=", "requests", ".", "get", "(", "value", ")", "response", ".", "raise_for_status", "(", ")", "descriptor", "[", "property", "]", "=", "response", ".", "json", "(", ")", "except", "Exception", "as", "error", ":", "message", "=", "'Not resolved Remote URI \"%s\" for resource.%s'", "%", "(", "value", ",", "property", ")", "six", ".", "raise_from", "(", "exceptions", ".", "DataPackageException", "(", "message", ")", ",", "error", ")", "# URI -> Local", "else", ":", "if", "not", "is_safe_path", "(", "value", ")", ":", "raise", "exceptions", ".", "DataPackageException", "(", "'Not safe path in Local URI \"%s\" '", "'for resource.%s'", "%", "(", "value", ",", "property", ")", ")", "if", "not", "base_path", ":", "raise", "exceptions", ".", "DataPackageException", "(", "'Local URI \"%s\" requires base path '", "'for resource.%s'", "%", "(", "value", ",", "property", ")", ")", "fullpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "value", ")", "try", ":", "with", "io", ".", "open", "(", "fullpath", ",", "encoding", "=", "'utf-8'", ")", "as", "file", ":", "descriptor", "[", "property", "]", "=", "json", ".", "load", "(", "file", ")", "except", "Exception", "as", "error", ":", "message", "=", "'Not resolved Local URI \"%s\" for resource.%s'", "%", "(", "value", ",", "property", ")", "six", ".", "raise_from", "(", "exceptions", ".", "DataPackageException", "(", "message", ")", ",", "error", ")", "return", "descriptor" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
expand_package_descriptor
Apply defaults to data package descriptor (IN-PLACE FOR NOW).
datapackage/helpers.py
def expand_package_descriptor(descriptor): """Apply defaults to data package descriptor (IN-PLACE FOR NOW). """ descriptor.setdefault('profile', config.DEFAULT_DATA_PACKAGE_PROFILE) for resource in descriptor.get('resources', []): expand_resource_descriptor(resource) return descriptor
def expand_package_descriptor(descriptor): """Apply defaults to data package descriptor (IN-PLACE FOR NOW). """ descriptor.setdefault('profile', config.DEFAULT_DATA_PACKAGE_PROFILE) for resource in descriptor.get('resources', []): expand_resource_descriptor(resource) return descriptor
[ "Apply", "defaults", "to", "data", "package", "descriptor", "(", "IN", "-", "PLACE", "FOR", "NOW", ")", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/helpers.py#L155-L161
[ "def", "expand_package_descriptor", "(", "descriptor", ")", ":", "descriptor", ".", "setdefault", "(", "'profile'", ",", "config", ".", "DEFAULT_DATA_PACKAGE_PROFILE", ")", "for", "resource", "in", "descriptor", ".", "get", "(", "'resources'", ",", "[", "]", ")", ":", "expand_resource_descriptor", "(", "resource", ")", "return", "descriptor" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
expand_resource_descriptor
Apply defaults to resource descriptor (IN-PLACE FOR NOW).
datapackage/helpers.py
def expand_resource_descriptor(descriptor): """Apply defaults to resource descriptor (IN-PLACE FOR NOW). """ descriptor.setdefault('profile', config.DEFAULT_RESOURCE_PROFILE) if descriptor['profile'] == 'tabular-data-resource': # Schema schema = descriptor.get('schema') if schema is not None: for field in schema.get('fields', []): field.setdefault('type', config.DEFAULT_FIELD_TYPE) field.setdefault('format', config.DEFAULT_FIELD_FORMAT) schema.setdefault('missingValues', config.DEFAULT_MISSING_VALUES) # Dialect dialect = descriptor.get('dialect') if dialect is not None: for key, value in config.DEFAULT_DIALECT.items(): dialect.setdefault(key, value) return descriptor
def expand_resource_descriptor(descriptor): """Apply defaults to resource descriptor (IN-PLACE FOR NOW). """ descriptor.setdefault('profile', config.DEFAULT_RESOURCE_PROFILE) if descriptor['profile'] == 'tabular-data-resource': # Schema schema = descriptor.get('schema') if schema is not None: for field in schema.get('fields', []): field.setdefault('type', config.DEFAULT_FIELD_TYPE) field.setdefault('format', config.DEFAULT_FIELD_FORMAT) schema.setdefault('missingValues', config.DEFAULT_MISSING_VALUES) # Dialect dialect = descriptor.get('dialect') if dialect is not None: for key, value in config.DEFAULT_DIALECT.items(): dialect.setdefault(key, value) return descriptor
[ "Apply", "defaults", "to", "resource", "descriptor", "(", "IN", "-", "PLACE", "FOR", "NOW", ")", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/helpers.py#L164-L184
[ "def", "expand_resource_descriptor", "(", "descriptor", ")", ":", "descriptor", ".", "setdefault", "(", "'profile'", ",", "config", ".", "DEFAULT_RESOURCE_PROFILE", ")", "if", "descriptor", "[", "'profile'", "]", "==", "'tabular-data-resource'", ":", "# Schema", "schema", "=", "descriptor", ".", "get", "(", "'schema'", ")", "if", "schema", "is", "not", "None", ":", "for", "field", "in", "schema", ".", "get", "(", "'fields'", ",", "[", "]", ")", ":", "field", ".", "setdefault", "(", "'type'", ",", "config", ".", "DEFAULT_FIELD_TYPE", ")", "field", ".", "setdefault", "(", "'format'", ",", "config", ".", "DEFAULT_FIELD_FORMAT", ")", "schema", ".", "setdefault", "(", "'missingValues'", ",", "config", ".", "DEFAULT_MISSING_VALUES", ")", "# Dialect", "dialect", "=", "descriptor", ".", "get", "(", "'dialect'", ")", "if", "dialect", "is", "not", "None", ":", "for", "key", ",", "value", "in", "config", ".", "DEFAULT_DIALECT", ".", "items", "(", ")", ":", "dialect", ".", "setdefault", "(", "key", ",", "value", ")", "return", "descriptor" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
is_safe_path
Check if path is safe and allowed.
datapackage/helpers.py
def is_safe_path(path): """Check if path is safe and allowed. """ contains_windows_var = lambda val: re.match(r'%.+%', val) contains_posix_var = lambda val: re.match(r'\$.+', val) unsafeness_conditions = [ os.path.isabs(path), ('..%s' % os.path.sep) in path, path.startswith('~'), os.path.expandvars(path) != path, contains_windows_var(path), contains_posix_var(path), ] return not any(unsafeness_conditions)
def is_safe_path(path): """Check if path is safe and allowed. """ contains_windows_var = lambda val: re.match(r'%.+%', val) contains_posix_var = lambda val: re.match(r'\$.+', val) unsafeness_conditions = [ os.path.isabs(path), ('..%s' % os.path.sep) in path, path.startswith('~'), os.path.expandvars(path) != path, contains_windows_var(path), contains_posix_var(path), ] return not any(unsafeness_conditions)
[ "Check", "if", "path", "is", "safe", "and", "allowed", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/helpers.py#L197-L212
[ "def", "is_safe_path", "(", "path", ")", ":", "contains_windows_var", "=", "lambda", "val", ":", "re", ".", "match", "(", "r'%.+%'", ",", "val", ")", "contains_posix_var", "=", "lambda", "val", ":", "re", ".", "match", "(", "r'\\$.+'", ",", "val", ")", "unsafeness_conditions", "=", "[", "os", ".", "path", ".", "isabs", "(", "path", ")", ",", "(", "'..%s'", "%", "os", ".", "path", ".", "sep", ")", "in", "path", ",", "path", ".", "startswith", "(", "'~'", ")", ",", "os", ".", "path", ".", "expandvars", "(", "path", ")", "!=", "path", ",", "contains_windows_var", "(", "path", ")", ",", "contains_posix_var", "(", "path", ")", ",", "]", "return", "not", "any", "(", "unsafeness_conditions", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
_extract_zip_if_possible
If descriptor is a path to zip file extract and return (tempdir, descriptor)
datapackage/package.py
def _extract_zip_if_possible(descriptor): """If descriptor is a path to zip file extract and return (tempdir, descriptor) """ tempdir = None result = descriptor try: if isinstance(descriptor, six.string_types): res = requests.get(descriptor) res.raise_for_status() result = res.content except (IOError, ValueError, requests.exceptions.RequestException): pass try: the_zip = result if isinstance(the_zip, bytes): try: os.path.isfile(the_zip) except (TypeError, ValueError): # the_zip contains the zip file contents the_zip = io.BytesIO(the_zip) if zipfile.is_zipfile(the_zip): with zipfile.ZipFile(the_zip, 'r') as z: _validate_zip(z) descriptor_path = [ f for f in z.namelist() if f.endswith('datapackage.json')][0] tempdir = tempfile.mkdtemp('-datapackage') z.extractall(tempdir) result = os.path.join(tempdir, descriptor_path) else: result = descriptor except (TypeError, zipfile.BadZipfile): pass if hasattr(descriptor, 'seek'): # Rewind descriptor if it's a file, as we read it for testing if it's # a zip file descriptor.seek(0) return (tempdir, result)
def _extract_zip_if_possible(descriptor): """If descriptor is a path to zip file extract and return (tempdir, descriptor) """ tempdir = None result = descriptor try: if isinstance(descriptor, six.string_types): res = requests.get(descriptor) res.raise_for_status() result = res.content except (IOError, ValueError, requests.exceptions.RequestException): pass try: the_zip = result if isinstance(the_zip, bytes): try: os.path.isfile(the_zip) except (TypeError, ValueError): # the_zip contains the zip file contents the_zip = io.BytesIO(the_zip) if zipfile.is_zipfile(the_zip): with zipfile.ZipFile(the_zip, 'r') as z: _validate_zip(z) descriptor_path = [ f for f in z.namelist() if f.endswith('datapackage.json')][0] tempdir = tempfile.mkdtemp('-datapackage') z.extractall(tempdir) result = os.path.join(tempdir, descriptor_path) else: result = descriptor except (TypeError, zipfile.BadZipfile): pass if hasattr(descriptor, 'seek'): # Rewind descriptor if it's a file, as we read it for testing if it's # a zip file descriptor.seek(0) return (tempdir, result)
[ "If", "descriptor", "is", "a", "path", "to", "zip", "file", "extract", "and", "return", "(", "tempdir", "descriptor", ")" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L432-L471
[ "def", "_extract_zip_if_possible", "(", "descriptor", ")", ":", "tempdir", "=", "None", "result", "=", "descriptor", "try", ":", "if", "isinstance", "(", "descriptor", ",", "six", ".", "string_types", ")", ":", "res", "=", "requests", ".", "get", "(", "descriptor", ")", "res", ".", "raise_for_status", "(", ")", "result", "=", "res", ".", "content", "except", "(", "IOError", ",", "ValueError", ",", "requests", ".", "exceptions", ".", "RequestException", ")", ":", "pass", "try", ":", "the_zip", "=", "result", "if", "isinstance", "(", "the_zip", ",", "bytes", ")", ":", "try", ":", "os", ".", "path", ".", "isfile", "(", "the_zip", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "# the_zip contains the zip file contents", "the_zip", "=", "io", ".", "BytesIO", "(", "the_zip", ")", "if", "zipfile", ".", "is_zipfile", "(", "the_zip", ")", ":", "with", "zipfile", ".", "ZipFile", "(", "the_zip", ",", "'r'", ")", "as", "z", ":", "_validate_zip", "(", "z", ")", "descriptor_path", "=", "[", "f", "for", "f", "in", "z", ".", "namelist", "(", ")", "if", "f", ".", "endswith", "(", "'datapackage.json'", ")", "]", "[", "0", "]", "tempdir", "=", "tempfile", ".", "mkdtemp", "(", "'-datapackage'", ")", "z", ".", "extractall", "(", "tempdir", ")", "result", "=", "os", ".", "path", ".", "join", "(", "tempdir", ",", "descriptor_path", ")", "else", ":", "result", "=", "descriptor", "except", "(", "TypeError", ",", "zipfile", ".", "BadZipfile", ")", ":", "pass", "if", "hasattr", "(", "descriptor", ",", "'seek'", ")", ":", "# Rewind descriptor if it's a file, as we read it for testing if it's", "# a zip file", "descriptor", ".", "seek", "(", "0", ")", "return", "(", "tempdir", ",", "result", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
_validate_zip
Validate zipped data package
datapackage/package.py
def _validate_zip(the_zip): """Validate zipped data package """ datapackage_jsons = [f for f in the_zip.namelist() if f.endswith('datapackage.json')] if len(datapackage_jsons) != 1: msg = 'DataPackage must have only one "datapackage.json" (had {n})' raise exceptions.DataPackageException(msg.format(n=len(datapackage_jsons)))
def _validate_zip(the_zip): """Validate zipped data package """ datapackage_jsons = [f for f in the_zip.namelist() if f.endswith('datapackage.json')] if len(datapackage_jsons) != 1: msg = 'DataPackage must have only one "datapackage.json" (had {n})' raise exceptions.DataPackageException(msg.format(n=len(datapackage_jsons)))
[ "Validate", "zipped", "data", "package" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L474-L480
[ "def", "_validate_zip", "(", "the_zip", ")", ":", "datapackage_jsons", "=", "[", "f", "for", "f", "in", "the_zip", ".", "namelist", "(", ")", "if", "f", ".", "endswith", "(", "'datapackage.json'", ")", "]", "if", "len", "(", "datapackage_jsons", ")", "!=", "1", ":", "msg", "=", "'DataPackage must have only one \"datapackage.json\" (had {n})'", "raise", "exceptions", ".", "DataPackageException", "(", "msg", ".", "format", "(", "n", "=", "len", "(", "datapackage_jsons", ")", ")", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
_slugify_foreign_key
Slugify foreign key
datapackage/package.py
def _slugify_foreign_key(schema): """Slugify foreign key """ for foreign_key in schema.get('foreignKeys', []): foreign_key['reference']['resource'] = _slugify_resource_name( foreign_key['reference'].get('resource', '')) return schema
def _slugify_foreign_key(schema): """Slugify foreign key """ for foreign_key in schema.get('foreignKeys', []): foreign_key['reference']['resource'] = _slugify_resource_name( foreign_key['reference'].get('resource', '')) return schema
[ "Slugify", "foreign", "key" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L489-L495
[ "def", "_slugify_foreign_key", "(", "schema", ")", ":", "for", "foreign_key", "in", "schema", ".", "get", "(", "'foreignKeys'", ",", "[", "]", ")", ":", "foreign_key", "[", "'reference'", "]", "[", "'resource'", "]", "=", "_slugify_resource_name", "(", "foreign_key", "[", "'reference'", "]", ".", "get", "(", "'resource'", ",", "''", ")", ")", "return", "schema" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Package.get_resource
https://github.com/frictionlessdata/datapackage-py#package
datapackage/package.py
def get_resource(self, name): """https://github.com/frictionlessdata/datapackage-py#package """ for resource in self.resources: if resource.name == name: return resource return None
def get_resource(self, name): """https://github.com/frictionlessdata/datapackage-py#package """ for resource in self.resources: if resource.name == name: return resource return None
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#package" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L156-L162
[ "def", "get_resource", "(", "self", ",", "name", ")", ":", "for", "resource", "in", "self", ".", "resources", ":", "if", "resource", ".", "name", "==", "name", ":", "return", "resource", "return", "None" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Package.add_resource
https://github.com/frictionlessdata/datapackage-py#package
datapackage/package.py
def add_resource(self, descriptor): """https://github.com/frictionlessdata/datapackage-py#package """ self.__current_descriptor.setdefault('resources', []) self.__current_descriptor['resources'].append(descriptor) self.__build() return self.__resources[-1]
def add_resource(self, descriptor): """https://github.com/frictionlessdata/datapackage-py#package """ self.__current_descriptor.setdefault('resources', []) self.__current_descriptor['resources'].append(descriptor) self.__build() return self.__resources[-1]
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#package" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L164-L170
[ "def", "add_resource", "(", "self", ",", "descriptor", ")", ":", "self", ".", "__current_descriptor", ".", "setdefault", "(", "'resources'", ",", "[", "]", ")", "self", ".", "__current_descriptor", "[", "'resources'", "]", ".", "append", "(", "descriptor", ")", "self", ".", "__build", "(", ")", "return", "self", ".", "__resources", "[", "-", "1", "]" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Package.remove_resource
https://github.com/frictionlessdata/datapackage-py#package
datapackage/package.py
def remove_resource(self, name): """https://github.com/frictionlessdata/datapackage-py#package """ resource = self.get_resource(name) if resource: predicat = lambda resource: resource.get('name') != name self.__current_descriptor['resources'] = list(filter( predicat, self.__current_descriptor['resources'])) self.__build() return resource
def remove_resource(self, name): """https://github.com/frictionlessdata/datapackage-py#package """ resource = self.get_resource(name) if resource: predicat = lambda resource: resource.get('name') != name self.__current_descriptor['resources'] = list(filter( predicat, self.__current_descriptor['resources'])) self.__build() return resource
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#package" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L172-L181
[ "def", "remove_resource", "(", "self", ",", "name", ")", ":", "resource", "=", "self", ".", "get_resource", "(", "name", ")", "if", "resource", ":", "predicat", "=", "lambda", "resource", ":", "resource", ".", "get", "(", "'name'", ")", "!=", "name", "self", ".", "__current_descriptor", "[", "'resources'", "]", "=", "list", "(", "filter", "(", "predicat", ",", "self", ".", "__current_descriptor", "[", "'resources'", "]", ")", ")", "self", ".", "__build", "(", ")", "return", "resource" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Package.infer
https://github.com/frictionlessdata/datapackage-py#package
datapackage/package.py
def infer(self, pattern=False): """https://github.com/frictionlessdata/datapackage-py#package """ # Files if pattern: # No base path if not self.__base_path: message = 'Base path is required for pattern infer' raise exceptions.DataPackageException(message) # Add resources options = {'recursive': True} if '**' in pattern else {} for path in glob.glob(os.path.join(self.__base_path, pattern), **options): self.add_resource({'path': os.path.relpath(path, self.__base_path)}) # Resources for index, resource in enumerate(self.resources): descriptor = resource.infer() self.__current_descriptor['resources'][index] = descriptor self.__build() # Profile if self.__next_descriptor['profile'] == config.DEFAULT_DATA_PACKAGE_PROFILE: if self.resources and all(map(lambda resource: resource.tabular, self.resources)): self.__current_descriptor['profile'] = 'tabular-data-package' self.__build() return self.__current_descriptor
def infer(self, pattern=False): """https://github.com/frictionlessdata/datapackage-py#package """ # Files if pattern: # No base path if not self.__base_path: message = 'Base path is required for pattern infer' raise exceptions.DataPackageException(message) # Add resources options = {'recursive': True} if '**' in pattern else {} for path in glob.glob(os.path.join(self.__base_path, pattern), **options): self.add_resource({'path': os.path.relpath(path, self.__base_path)}) # Resources for index, resource in enumerate(self.resources): descriptor = resource.infer() self.__current_descriptor['resources'][index] = descriptor self.__build() # Profile if self.__next_descriptor['profile'] == config.DEFAULT_DATA_PACKAGE_PROFILE: if self.resources and all(map(lambda resource: resource.tabular, self.resources)): self.__current_descriptor['profile'] = 'tabular-data-package' self.__build() return self.__current_descriptor
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#package" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L183-L212
[ "def", "infer", "(", "self", ",", "pattern", "=", "False", ")", ":", "# Files", "if", "pattern", ":", "# No base path", "if", "not", "self", ".", "__base_path", ":", "message", "=", "'Base path is required for pattern infer'", "raise", "exceptions", ".", "DataPackageException", "(", "message", ")", "# Add resources", "options", "=", "{", "'recursive'", ":", "True", "}", "if", "'**'", "in", "pattern", "else", "{", "}", "for", "path", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "__base_path", ",", "pattern", ")", ",", "*", "*", "options", ")", ":", "self", ".", "add_resource", "(", "{", "'path'", ":", "os", ".", "path", ".", "relpath", "(", "path", ",", "self", ".", "__base_path", ")", "}", ")", "# Resources", "for", "index", ",", "resource", "in", "enumerate", "(", "self", ".", "resources", ")", ":", "descriptor", "=", "resource", ".", "infer", "(", ")", "self", ".", "__current_descriptor", "[", "'resources'", "]", "[", "index", "]", "=", "descriptor", "self", ".", "__build", "(", ")", "# Profile", "if", "self", ".", "__next_descriptor", "[", "'profile'", "]", "==", "config", ".", "DEFAULT_DATA_PACKAGE_PROFILE", ":", "if", "self", ".", "resources", "and", "all", "(", "map", "(", "lambda", "resource", ":", "resource", ".", "tabular", ",", "self", ".", "resources", ")", ")", ":", "self", ".", "__current_descriptor", "[", "'profile'", "]", "=", "'tabular-data-package'", "self", ".", "__build", "(", ")", "return", "self", ".", "__current_descriptor" ]
aca085ea54541b087140b58a81332f8728baeeb2
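A hedged sketch combining the Package methods documented in the records above (infer, get_resource, add_resource); the base path and resource names are illustrative only.

from datapackage import Package

package = Package({}, base_path='data')
package.infer('*.csv')                       # add every CSV under base_path and infer its schema
resource = package.get_resource('cities')    # returns None if no resource has that name
if resource is None:
    package.add_resource({'path': 'cities.csv'})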
valid
Package.save
https://github.com/frictionlessdata/datapackage-py#package
datapackage/package.py
def save(self, target=None, storage=None, **options): """https://github.com/frictionlessdata/datapackage-py#package """ # Save package to storage if storage is not None: if not isinstance(storage, Storage): storage = Storage.connect(storage, **options) buckets = [] schemas = [] for resource in self.resources: if resource.tabular: resource.infer() buckets.append(_slugify_resource_name(resource.name)) schemas.append(resource.schema.descriptor) schemas = list(map(_slugify_foreign_key, schemas)) storage.create(buckets, schemas, force=True) for bucket in storage.buckets: resource = self.resources[buckets.index(bucket)] storage.write(bucket, resource.iter()) # Save descriptor to json elif str(target).endswith('.json'): mode = 'w' encoding = 'utf-8' if six.PY2: mode = 'wb' encoding = None helpers.ensure_dir(target) with io.open(target, mode=mode, encoding=encoding) as file: json.dump(self.__current_descriptor, file, indent=4) # Save package to zip else: try: with zipfile.ZipFile(target, 'w') as z: descriptor = json.loads(json.dumps(self.__current_descriptor)) for index, resource in enumerate(self.resources): if not resource.name: continue if not resource.local: continue path = os.path.abspath(resource.source) basename = resource.descriptor.get('name') resource_format = resource.descriptor.get('format') if resource_format: basename = '.'.join([basename, resource_format.lower()]) path_inside_dp = os.path.join('data', basename) z.write(path, path_inside_dp) descriptor['resources'][index]['path'] = path_inside_dp z.writestr('datapackage.json', json.dumps(descriptor)) except (IOError, zipfile.BadZipfile, zipfile.LargeZipFile) as exception: six.raise_from(exceptions.DataPackageException(exception), exception) return True
def save(self, target=None, storage=None, **options): """https://github.com/frictionlessdata/datapackage-py#package """ # Save package to storage if storage is not None: if not isinstance(storage, Storage): storage = Storage.connect(storage, **options) buckets = [] schemas = [] for resource in self.resources: if resource.tabular: resource.infer() buckets.append(_slugify_resource_name(resource.name)) schemas.append(resource.schema.descriptor) schemas = list(map(_slugify_foreign_key, schemas)) storage.create(buckets, schemas, force=True) for bucket in storage.buckets: resource = self.resources[buckets.index(bucket)] storage.write(bucket, resource.iter()) # Save descriptor to json elif str(target).endswith('.json'): mode = 'w' encoding = 'utf-8' if six.PY2: mode = 'wb' encoding = None helpers.ensure_dir(target) with io.open(target, mode=mode, encoding=encoding) as file: json.dump(self.__current_descriptor, file, indent=4) # Save package to zip else: try: with zipfile.ZipFile(target, 'w') as z: descriptor = json.loads(json.dumps(self.__current_descriptor)) for index, resource in enumerate(self.resources): if not resource.name: continue if not resource.local: continue path = os.path.abspath(resource.source) basename = resource.descriptor.get('name') resource_format = resource.descriptor.get('format') if resource_format: basename = '.'.join([basename, resource_format.lower()]) path_inside_dp = os.path.join('data', basename) z.write(path, path_inside_dp) descriptor['resources'][index]['path'] = path_inside_dp z.writestr('datapackage.json', json.dumps(descriptor)) except (IOError, zipfile.BadZipfile, zipfile.LargeZipFile) as exception: six.raise_from(exceptions.DataPackageException(exception), exception) return True
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#package" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L225-L279
[ "def", "save", "(", "self", ",", "target", "=", "None", ",", "storage", "=", "None", ",", "*", "*", "options", ")", ":", "# Save package to storage", "if", "storage", "is", "not", "None", ":", "if", "not", "isinstance", "(", "storage", ",", "Storage", ")", ":", "storage", "=", "Storage", ".", "connect", "(", "storage", ",", "*", "*", "options", ")", "buckets", "=", "[", "]", "schemas", "=", "[", "]", "for", "resource", "in", "self", ".", "resources", ":", "if", "resource", ".", "tabular", ":", "resource", ".", "infer", "(", ")", "buckets", ".", "append", "(", "_slugify_resource_name", "(", "resource", ".", "name", ")", ")", "schemas", ".", "append", "(", "resource", ".", "schema", ".", "descriptor", ")", "schemas", "=", "list", "(", "map", "(", "_slugify_foreign_key", ",", "schemas", ")", ")", "storage", ".", "create", "(", "buckets", ",", "schemas", ",", "force", "=", "True", ")", "for", "bucket", "in", "storage", ".", "buckets", ":", "resource", "=", "self", ".", "resources", "[", "buckets", ".", "index", "(", "bucket", ")", "]", "storage", ".", "write", "(", "bucket", ",", "resource", ".", "iter", "(", ")", ")", "# Save descriptor to json", "elif", "str", "(", "target", ")", ".", "endswith", "(", "'.json'", ")", ":", "mode", "=", "'w'", "encoding", "=", "'utf-8'", "if", "six", ".", "PY2", ":", "mode", "=", "'wb'", "encoding", "=", "None", "helpers", ".", "ensure_dir", "(", "target", ")", "with", "io", ".", "open", "(", "target", ",", "mode", "=", "mode", ",", "encoding", "=", "encoding", ")", "as", "file", ":", "json", ".", "dump", "(", "self", ".", "__current_descriptor", ",", "file", ",", "indent", "=", "4", ")", "# Save package to zip", "else", ":", "try", ":", "with", "zipfile", ".", "ZipFile", "(", "target", ",", "'w'", ")", "as", "z", ":", "descriptor", "=", "json", ".", "loads", "(", "json", ".", "dumps", "(", "self", ".", "__current_descriptor", ")", ")", "for", "index", ",", "resource", "in", "enumerate", "(", "self", ".", "resources", ")", ":", "if", "not", "resource", ".", "name", ":", "continue", "if", "not", "resource", ".", "local", ":", "continue", "path", "=", "os", ".", "path", ".", "abspath", "(", "resource", ".", "source", ")", "basename", "=", "resource", ".", "descriptor", ".", "get", "(", "'name'", ")", "resource_format", "=", "resource", ".", "descriptor", ".", "get", "(", "'format'", ")", "if", "resource_format", ":", "basename", "=", "'.'", ".", "join", "(", "[", "basename", ",", "resource_format", ".", "lower", "(", ")", "]", ")", "path_inside_dp", "=", "os", ".", "path", ".", "join", "(", "'data'", ",", "basename", ")", "z", ".", "write", "(", "path", ",", "path_inside_dp", ")", "descriptor", "[", "'resources'", "]", "[", "index", "]", "[", "'path'", "]", "=", "path_inside_dp", "z", ".", "writestr", "(", "'datapackage.json'", ",", "json", ".", "dumps", "(", "descriptor", ")", ")", "except", "(", "IOError", ",", "zipfile", ".", "BadZipfile", ",", "zipfile", ".", "LargeZipFile", ")", "as", "exception", ":", "six", ".", "raise_from", "(", "exceptions", ".", "DataPackageException", "(", "exception", ")", ",", "exception", ")", "return", "True" ]
aca085ea54541b087140b58a81332f8728baeeb2
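A minimal usage sketch for the Package.save record above. The top-level import and the descriptor/target file names are assumptions (not part of the record); per the code shown, a '.json' target writes only the descriptor, any other target is treated as a zip archive, and a storage argument routes tabular resources to that backend.

from datapackage import Package  # assumed top-level import for the library documented above

package = Package('datapackage.json')   # hypothetical descriptor path
package.save('descriptor.json')         # JSON branch: dump only the descriptor
package.save('datapackage.zip')         # zip branch: bundle local resources under data/ plus datapackage.json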
valid
Package.attributes
tuple: Attributes defined in the schema and the data package.
datapackage/package.py
def attributes(self): """tuple: Attributes defined in the schema and the data package. """ # Deprecate warnings.warn( 'Property "package.attributes" is deprecated.', UserWarning) # Get attributes attributes = set(self.to_dict().keys()) try: attributes.update(self.profile.properties.keys()) except AttributeError: pass return tuple(attributes)
def attributes(self): """tuple: Attributes defined in the schema and the data package. """ # Deprecate warnings.warn( 'Property "package.attributes" is deprecated.', UserWarning) # Get attributes attributes = set(self.to_dict().keys()) try: attributes.update(self.profile.properties.keys()) except AttributeError: pass return tuple(attributes)
[ "tuple", ":", "Attributes", "defined", "in", "the", "schema", "and", "the", "data", "package", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L346-L362
[ "def", "attributes", "(", "self", ")", ":", "# Deprecate", "warnings", ".", "warn", "(", "'Property \"package.attributes\" is deprecated.'", ",", "UserWarning", ")", "# Get attributes", "attributes", "=", "set", "(", "self", ".", "to_dict", "(", ")", ".", "keys", "(", ")", ")", "try", ":", "attributes", ".", "update", "(", "self", ".", "profile", ".", "properties", ".", "keys", "(", ")", ")", "except", "AttributeError", ":", "pass", "return", "tuple", "(", "attributes", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Package.required_attributes
tuple: The schema's required attributes.
tuple: The schema's required attributes.
datapackage/package.py
def required_attributes(self): """tuple: The schema's required attributed. """ # Deprecate warnings.warn( 'Property "package.required_attributes" is deprecated.', UserWarning) required = () # Get required try: if self.profile.required is not None: required = tuple(self.profile.required) except AttributeError: pass return required
def required_attributes(self): """tuple: The schema's required attributed. """ # Deprecate warnings.warn( 'Property "package.required_attributes" is deprecated.', UserWarning) required = () # Get required try: if self.profile.required is not None: required = tuple(self.profile.required) except AttributeError: pass return required
[ "tuple", ":", "The", "schema", "s", "required", "attributed", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L365-L382
[ "def", "required_attributes", "(", "self", ")", ":", "# Deprecate", "warnings", ".", "warn", "(", "'Property \"package.required_attributes\" is deprecated.'", ",", "UserWarning", ")", "required", "=", "(", ")", "# Get required", "try", ":", "if", "self", ".", "profile", ".", "required", "is", "not", "None", ":", "required", "=", "tuple", "(", "self", ".", "profile", ".", "required", ")", "except", "AttributeError", ":", "pass", "return", "required" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Package.validate
Validate this Data Package.
datapackage/package.py
def validate(self): """"Validate this Data Package. """ # Deprecate warnings.warn( 'Property "package.validate" is deprecated.', UserWarning) descriptor = self.to_dict() self.profile.validate(descriptor)
def validate(self): """"Validate this Data Package. """ # Deprecate warnings.warn( 'Property "package.validate" is deprecated.', UserWarning) descriptor = self.to_dict() self.profile.validate(descriptor)
[ "Validate", "this", "Data", "Package", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L384-L394
[ "def", "validate", "(", "self", ")", ":", "# Deprecate", "warnings", ".", "warn", "(", "'Property \"package.validate\" is deprecated.'", ",", "UserWarning", ")", "descriptor", "=", "self", ".", "to_dict", "(", ")", "self", ".", "profile", ".", "validate", "(", "descriptor", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Package.iter_errors
Lazily yields each ValidationError for the received data dict.
datapackage/package.py
def iter_errors(self): """"Lazily yields each ValidationError for the received data dict. """ # Deprecate warnings.warn( 'Property "package.iter_errors" is deprecated.', UserWarning) return self.profile.iter_errors(self.to_dict())
def iter_errors(self): """"Lazily yields each ValidationError for the received data dict. """ # Deprecate warnings.warn( 'Property "package.iter_errors" is deprecated.', UserWarning) return self.profile.iter_errors(self.to_dict())
[ "Lazily", "yields", "each", "ValidationError", "for", "the", "received", "data", "dict", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/package.py#L396-L405
[ "def", "iter_errors", "(", "self", ")", ":", "# Deprecate", "warnings", ".", "warn", "(", "'Property \"package.iter_errors\" is deprecated.'", ",", "UserWarning", ")", "return", "self", ".", "profile", ".", "iter_errors", "(", "self", ".", "to_dict", "(", ")", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Profile.validate
https://github.com/frictionlessdata/datapackage-py#schema
datapackage/profile.py
def validate(self, descriptor): """https://github.com/frictionlessdata/datapackage-py#schema """ # Collect errors errors = [] for error in self._validator.iter_errors(descriptor): if isinstance(error, jsonschema.exceptions.ValidationError): message = str(error.message) if six.PY2: message = message.replace('u\'', '\'') descriptor_path = '/'.join(map(str, error.path)) profile_path = '/'.join(map(str, error.schema_path)) error = exceptions.ValidationError( 'Descriptor validation error: %s ' 'at "%s" in descriptor and ' 'at "%s" in profile' % (message, descriptor_path, profile_path)) errors.append(error) # Raise error if errors: message = 'There are %s validation errors (see exception.errors)' % len(errors) raise exceptions.ValidationError(message, errors=errors) return True
def validate(self, descriptor): """https://github.com/frictionlessdata/datapackage-py#schema """ # Collect errors errors = [] for error in self._validator.iter_errors(descriptor): if isinstance(error, jsonschema.exceptions.ValidationError): message = str(error.message) if six.PY2: message = message.replace('u\'', '\'') descriptor_path = '/'.join(map(str, error.path)) profile_path = '/'.join(map(str, error.schema_path)) error = exceptions.ValidationError( 'Descriptor validation error: %s ' 'at "%s" in descriptor and ' 'at "%s" in profile' % (message, descriptor_path, profile_path)) errors.append(error) # Raise error if errors: message = 'There are %s validation errors (see exception.errors)' % len(errors) raise exceptions.ValidationError(message, errors=errors) return True
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#schema" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/profile.py#L44-L69
[ "def", "validate", "(", "self", ",", "descriptor", ")", ":", "# Collect errors", "errors", "=", "[", "]", "for", "error", "in", "self", ".", "_validator", ".", "iter_errors", "(", "descriptor", ")", ":", "if", "isinstance", "(", "error", ",", "jsonschema", ".", "exceptions", ".", "ValidationError", ")", ":", "message", "=", "str", "(", "error", ".", "message", ")", "if", "six", ".", "PY2", ":", "message", "=", "message", ".", "replace", "(", "'u\\''", ",", "'\\''", ")", "descriptor_path", "=", "'/'", ".", "join", "(", "map", "(", "str", ",", "error", ".", "path", ")", ")", "profile_path", "=", "'/'", ".", "join", "(", "map", "(", "str", ",", "error", ".", "schema_path", ")", ")", "error", "=", "exceptions", ".", "ValidationError", "(", "'Descriptor validation error: %s '", "'at \"%s\" in descriptor and '", "'at \"%s\" in profile'", "%", "(", "message", ",", "descriptor_path", ",", "profile_path", ")", ")", "errors", ".", "append", "(", "error", ")", "# Raise error", "if", "errors", ":", "message", "=", "'There are %s validation errors (see exception.errors)'", "%", "len", "(", "errors", ")", "raise", "exceptions", ".", "ValidationError", "(", "message", ",", "errors", "=", "errors", ")", "return", "True" ]
aca085ea54541b087140b58a81332f8728baeeb2
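A hedged sketch of how the validate method above surfaces errors. Constructing a Profile from the registered id 'data-package' and the top-level imports are assumptions; the exceptions module and the errors attribute come directly from the code in the record.

from datapackage import Profile, exceptions  # assumed top-level imports

profile = Profile('data-package')            # hypothetical: load the registered data-package profile
try:
    profile.validate({'resources': []})      # descriptor that violates the profile
except exceptions.ValidationError as error:
    print(error)                             # summary: "There are N validation errors ..."
    for nested in error.errors:              # per-error descriptor/profile path messages
        print(nested)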
valid
Profile.iter_errors
Lazily yields each ValidationError for the received data dict.
datapackage/profile.py
def iter_errors(self, data): """Lazily yields each ValidationError for the received data dict. """ # Deprecate warnings.warn( 'Property "profile.iter_errors" is deprecated.', UserWarning) for error in self._validator.iter_errors(data): yield error
def iter_errors(self, data): """Lazily yields each ValidationError for the received data dict. """ # Deprecate warnings.warn( 'Property "profile.iter_errors" is deprecated.', UserWarning) for error in self._validator.iter_errors(data): yield error
[ "Lazily", "yields", "each", "ValidationError", "for", "the", "received", "data", "dict", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/profile.py#L135-L145
[ "def", "iter_errors", "(", "self", ",", "data", ")", ":", "# Deprecate", "warnings", ".", "warn", "(", "'Property \"profile.iter_errors\" is deprecated.'", ",", "UserWarning", ")", "for", "error", "in", "self", ".", "_validator", ".", "iter_errors", "(", "data", ")", ":", "yield", "error" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Resource.tabular
https://github.com/frictionlessdata/datapackage-py#resource
datapackage/resource.py
def tabular(self): """https://github.com/frictionlessdata/datapackage-py#resource """ if self.__current_descriptor.get('profile') == 'tabular-data-resource': return True if not self.__strict: if self.__current_descriptor.get('format') in config.TABULAR_FORMATS: return True if self.__source_inspection.get('tabular', False): return True return False
def tabular(self): """https://github.com/frictionlessdata/datapackage-py#resource """ if self.__current_descriptor.get('profile') == 'tabular-data-resource': return True if not self.__strict: if self.__current_descriptor.get('format') in config.TABULAR_FORMATS: return True if self.__source_inspection.get('tabular', False): return True return False
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#resource" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/resource.py#L127-L137
[ "def", "tabular", "(", "self", ")", ":", "if", "self", ".", "__current_descriptor", ".", "get", "(", "'profile'", ")", "==", "'tabular-data-resource'", ":", "return", "True", "if", "not", "self", ".", "__strict", ":", "if", "self", ".", "__current_descriptor", ".", "get", "(", "'format'", ")", "in", "config", ".", "TABULAR_FORMATS", ":", "return", "True", "if", "self", ".", "__source_inspection", ".", "get", "(", "'tabular'", ",", "False", ")", ":", "return", "True", "return", "False" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Resource.iter
https://github.com/frictionlessdata/datapackage-py#resource
datapackage/resource.py
def iter(self, relations=False, **options): """https://github.com/frictionlessdata/datapackage-py#resource """ # Error for non tabular if not self.tabular: message = 'Methods iter/read are not supported for non tabular data' raise exceptions.DataPackageException(message) # Get relations if relations: relations = self.__get_relations() return self.__get_table().iter(relations=relations, **options)
def iter(self, relations=False, **options): """https://github.com/frictionlessdata/datapackage-py#resource """ # Error for non tabular if not self.tabular: message = 'Methods iter/read are not supported for non tabular data' raise exceptions.DataPackageException(message) # Get relations if relations: relations = self.__get_relations() return self.__get_table().iter(relations=relations, **options)
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#resource" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/resource.py#L161-L174
[ "def", "iter", "(", "self", ",", "relations", "=", "False", ",", "*", "*", "options", ")", ":", "# Error for non tabular", "if", "not", "self", ".", "tabular", ":", "message", "=", "'Methods iter/read are not supported for non tabular data'", "raise", "exceptions", ".", "DataPackageException", "(", "message", ")", "# Get relations", "if", "relations", ":", "relations", "=", "self", ".", "__get_relations", "(", ")", "return", "self", ".", "__get_table", "(", ")", ".", "iter", "(", "relations", "=", "relations", ",", "*", "*", "options", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
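A hedged usage sketch for the iter method above; the CSV path is hypothetical and keyed=True is assumed to be one of the tableschema options forwarded through **options.

from datapackage import Resource  # assumed top-level import

resource = Resource({'path': 'data/cities.csv'})  # hypothetical tabular resource
for row in resource.iter(keyed=True):             # raises DataPackageException for non tabular data
    print(row)                                    # e.g. {'name': 'london', 'population': 8787892}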
valid
Resource.raw_iter
https://github.com/frictionlessdata/datapackage-py#resource
datapackage/resource.py
def raw_iter(self, stream=False): """https://github.com/frictionlessdata/datapackage-py#resource """ # Error for inline if self.inline: message = 'Methods raw_iter/raw_read are not supported for inline data' raise exceptions.DataPackageException(message) # Get filelike if self.multipart: filelike = _MultipartSource(self.source, remote=self.remote) elif self.remote: if self.__table_options.get('http_session'): http_session = self.__table_options['http_session'] else: http_session = requests.Session() http_session.headers = config.HTTP_HEADERS res = http_session.get(self.source, stream=True) filelike = res.raw else: filelike = io.open(self.source, 'rb') return filelike
def raw_iter(self, stream=False): """https://github.com/frictionlessdata/datapackage-py#resource """ # Error for inline if self.inline: message = 'Methods raw_iter/raw_read are not supported for inline data' raise exceptions.DataPackageException(message) # Get filelike if self.multipart: filelike = _MultipartSource(self.source, remote=self.remote) elif self.remote: if self.__table_options.get('http_session'): http_session = self.__table_options['http_session'] else: http_session = requests.Session() http_session.headers = config.HTTP_HEADERS res = http_session.get(self.source, stream=True) filelike = res.raw else: filelike = io.open(self.source, 'rb') return filelike
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#resource" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/resource.py#L197-L220
[ "def", "raw_iter", "(", "self", ",", "stream", "=", "False", ")", ":", "# Error for inline", "if", "self", ".", "inline", ":", "message", "=", "'Methods raw_iter/raw_read are not supported for inline data'", "raise", "exceptions", ".", "DataPackageException", "(", "message", ")", "# Get filelike", "if", "self", ".", "multipart", ":", "filelike", "=", "_MultipartSource", "(", "self", ".", "source", ",", "remote", "=", "self", ".", "remote", ")", "elif", "self", ".", "remote", ":", "if", "self", ".", "__table_options", ".", "get", "(", "'http_session'", ")", ":", "http_session", "=", "self", ".", "__table_options", "[", "'http_session'", "]", "else", ":", "http_session", "=", "requests", ".", "Session", "(", ")", "http_session", ".", "headers", "=", "config", ".", "HTTP_HEADERS", "res", "=", "http_session", ".", "get", "(", "self", ".", "source", ",", "stream", "=", "True", ")", "filelike", "=", "res", ".", "raw", "else", ":", "filelike", "=", "io", ".", "open", "(", "self", ".", "source", ",", "'rb'", ")", "return", "filelike" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Resource.raw_read
https://github.com/frictionlessdata/datapackage-py#resource
datapackage/resource.py
def raw_read(self): """https://github.com/frictionlessdata/datapackage-py#resource """ contents = b'' with self.raw_iter() as filelike: for chunk in filelike: contents += chunk return contents
def raw_read(self): """https://github.com/frictionlessdata/datapackage-py#resource """ contents = b'' with self.raw_iter() as filelike: for chunk in filelike: contents += chunk return contents
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#resource" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/resource.py#L222-L229
[ "def", "raw_read", "(", "self", ")", ":", "contents", "=", "b''", "with", "self", ".", "raw_iter", "(", ")", "as", "filelike", ":", "for", "chunk", "in", "filelike", ":", "contents", "+=", "chunk", "return", "contents" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Resource.infer
https://github.com/frictionlessdata/datapackage-py#resource
datapackage/resource.py
def infer(self, **options): """https://github.com/frictionlessdata/datapackage-py#resource """ descriptor = deepcopy(self.__current_descriptor) # Blank -> Stop if self.__source_inspection.get('blank'): return descriptor # Name if not descriptor.get('name'): descriptor['name'] = self.__source_inspection['name'] # Only for non inline/storage if not self.inline and not self.__storage: # Format if not descriptor.get('format'): descriptor['format'] = self.__source_inspection['format'] # Mediatype if not descriptor.get('mediatype'): descriptor['mediatype'] = 'text/%s' % descriptor['format'] # Encoding if not descriptor.get('encoding'): contents = b'' with self.raw_iter(stream=True) as stream: for chunk in stream: contents += chunk if len(contents) > 1000: break encoding = cchardet.detect(contents)['encoding'] if encoding is not None: encoding = encoding.lower() descriptor['encoding'] = 'utf-8' if encoding == 'ascii' else encoding # Schema if not descriptor.get('schema'): if self.tabular: descriptor['schema'] = self.__get_table().infer(**options) # Profile if descriptor.get('profile') == config.DEFAULT_RESOURCE_PROFILE: if self.tabular: descriptor['profile'] = 'tabular-data-resource' # Save descriptor self.__current_descriptor = descriptor self.__build() return descriptor
def infer(self, **options): """https://github.com/frictionlessdata/datapackage-py#resource """ descriptor = deepcopy(self.__current_descriptor) # Blank -> Stop if self.__source_inspection.get('blank'): return descriptor # Name if not descriptor.get('name'): descriptor['name'] = self.__source_inspection['name'] # Only for non inline/storage if not self.inline and not self.__storage: # Format if not descriptor.get('format'): descriptor['format'] = self.__source_inspection['format'] # Mediatype if not descriptor.get('mediatype'): descriptor['mediatype'] = 'text/%s' % descriptor['format'] # Encoding if not descriptor.get('encoding'): contents = b'' with self.raw_iter(stream=True) as stream: for chunk in stream: contents += chunk if len(contents) > 1000: break encoding = cchardet.detect(contents)['encoding'] if encoding is not None: encoding = encoding.lower() descriptor['encoding'] = 'utf-8' if encoding == 'ascii' else encoding # Schema if not descriptor.get('schema'): if self.tabular: descriptor['schema'] = self.__get_table().infer(**options) # Profile if descriptor.get('profile') == config.DEFAULT_RESOURCE_PROFILE: if self.tabular: descriptor['profile'] = 'tabular-data-resource' # Save descriptor self.__current_descriptor = descriptor self.__build() return descriptor
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#resource" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/resource.py#L231-L281
[ "def", "infer", "(", "self", ",", "*", "*", "options", ")", ":", "descriptor", "=", "deepcopy", "(", "self", ".", "__current_descriptor", ")", "# Blank -> Stop", "if", "self", ".", "__source_inspection", ".", "get", "(", "'blank'", ")", ":", "return", "descriptor", "# Name", "if", "not", "descriptor", ".", "get", "(", "'name'", ")", ":", "descriptor", "[", "'name'", "]", "=", "self", ".", "__source_inspection", "[", "'name'", "]", "# Only for non inline/storage", "if", "not", "self", ".", "inline", "and", "not", "self", ".", "__storage", ":", "# Format", "if", "not", "descriptor", ".", "get", "(", "'format'", ")", ":", "descriptor", "[", "'format'", "]", "=", "self", ".", "__source_inspection", "[", "'format'", "]", "# Mediatype", "if", "not", "descriptor", ".", "get", "(", "'mediatype'", ")", ":", "descriptor", "[", "'mediatype'", "]", "=", "'text/%s'", "%", "descriptor", "[", "'format'", "]", "# Encoding", "if", "not", "descriptor", ".", "get", "(", "'encoding'", ")", ":", "contents", "=", "b''", "with", "self", ".", "raw_iter", "(", "stream", "=", "True", ")", "as", "stream", ":", "for", "chunk", "in", "stream", ":", "contents", "+=", "chunk", "if", "len", "(", "contents", ")", ">", "1000", ":", "break", "encoding", "=", "cchardet", ".", "detect", "(", "contents", ")", "[", "'encoding'", "]", "if", "encoding", "is", "not", "None", ":", "encoding", "=", "encoding", ".", "lower", "(", ")", "descriptor", "[", "'encoding'", "]", "=", "'utf-8'", "if", "encoding", "==", "'ascii'", "else", "encoding", "# Schema", "if", "not", "descriptor", ".", "get", "(", "'schema'", ")", ":", "if", "self", ".", "tabular", ":", "descriptor", "[", "'schema'", "]", "=", "self", ".", "__get_table", "(", ")", ".", "infer", "(", "*", "*", "options", ")", "# Profile", "if", "descriptor", ".", "get", "(", "'profile'", ")", "==", "config", ".", "DEFAULT_RESOURCE_PROFILE", ":", "if", "self", ".", "tabular", ":", "descriptor", "[", "'profile'", "]", "=", "'tabular-data-resource'", "# Save descriptor", "self", ".", "__current_descriptor", "=", "descriptor", "self", ".", "__build", "(", ")", "return", "descriptor" ]
aca085ea54541b087140b58a81332f8728baeeb2
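A short sketch of the infer method above filling in a bare descriptor; the import path and file name are assumptions.

from datapackage import Resource  # assumed top-level import

resource = Resource({'path': 'data/cities.csv'})  # hypothetical: only a path is given
descriptor = resource.infer()
# per the code above, infer() adds name, format, mediatype, a detected encoding,
# a table schema for tabular sources, and upgrades profile to 'tabular-data-resource'
print(descriptor['profile'])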
valid
Resource.commit
https://github.com/frictionlessdata/datapackage-py#resource
datapackage/resource.py
def commit(self, strict=None): """https://github.com/frictionlessdata/datapackage-py#resource """ if strict is not None: self.__strict = strict elif self.__current_descriptor == self.__next_descriptor: return False self.__current_descriptor = deepcopy(self.__next_descriptor) self.__table = None self.__build() return True
def commit(self, strict=None): """https://github.com/frictionlessdata/datapackage-py#resource """ if strict is not None: self.__strict = strict elif self.__current_descriptor == self.__next_descriptor: return False self.__current_descriptor = deepcopy(self.__next_descriptor) self.__table = None self.__build() return True
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#resource" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/resource.py#L283-L293
[ "def", "commit", "(", "self", ",", "strict", "=", "None", ")", ":", "if", "strict", "is", "not", "None", ":", "self", ".", "__strict", "=", "strict", "elif", "self", ".", "__current_descriptor", "==", "self", ".", "__next_descriptor", ":", "return", "False", "self", ".", "__current_descriptor", "=", "deepcopy", "(", "self", ".", "__next_descriptor", ")", "self", ".", "__table", "=", "None", "self", ".", "__build", "(", ")", "return", "True" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
Resource.save
https://github.com/frictionlessdata/datapackage-py#resource
datapackage/resource.py
def save(self, target, storage=None, **options): """https://github.com/frictionlessdata/datapackage-py#resource """ # Save resource to storage if storage is not None: if self.tabular: self.infer() storage.create(target, self.schema.descriptor, force=True) storage.write(target, self.iter()) # Save descriptor to json else: mode = 'w' encoding = 'utf-8' if six.PY2: mode = 'wb' encoding = None helpers.ensure_dir(target) with io.open(target, mode=mode, encoding=encoding) as file: json.dump(self.__current_descriptor, file, indent=4)
def save(self, target, storage=None, **options): """https://github.com/frictionlessdata/datapackage-py#resource """ # Save resource to storage if storage is not None: if self.tabular: self.infer() storage.create(target, self.schema.descriptor, force=True) storage.write(target, self.iter()) # Save descriptor to json else: mode = 'w' encoding = 'utf-8' if six.PY2: mode = 'wb' encoding = None helpers.ensure_dir(target) with io.open(target, mode=mode, encoding=encoding) as file: json.dump(self.__current_descriptor, file, indent=4)
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "datapackage", "-", "py#resource" ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/resource.py#L295-L315
[ "def", "save", "(", "self", ",", "target", ",", "storage", "=", "None", ",", "*", "*", "options", ")", ":", "# Save resource to storage", "if", "storage", "is", "not", "None", ":", "if", "self", ".", "tabular", ":", "self", ".", "infer", "(", ")", "storage", ".", "create", "(", "target", ",", "self", ".", "schema", ".", "descriptor", ",", "force", "=", "True", ")", "storage", ".", "write", "(", "target", ",", "self", ".", "iter", "(", ")", ")", "# Save descriptor to json", "else", ":", "mode", "=", "'w'", "encoding", "=", "'utf-8'", "if", "six", ".", "PY2", ":", "mode", "=", "'wb'", "encoding", "=", "None", "helpers", ".", "ensure_dir", "(", "target", ")", "with", "io", ".", "open", "(", "target", ",", "mode", "=", "mode", ",", "encoding", "=", "encoding", ")", "as", "file", ":", "json", ".", "dump", "(", "self", ".", "__current_descriptor", ",", "file", ",", "indent", "=", "4", ")" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
push_datapackage
Push Data Package to storage. All parameters should be used as keyword arguments. Args: descriptor (str): path to descriptor backend (str): backend name like `sql` or `bigquery` backend_options (dict): backend options mentioned in backend docs
datapackage/pushpull.py
def push_datapackage(descriptor, backend, **backend_options): """Push Data Package to storage. All parameters should be used as keyword arguments. Args: descriptor (str): path to descriptor backend (str): backend name like `sql` or `bigquery` backend_options (dict): backend options mentioned in backend docs """ # Deprecated warnings.warn( 'Functions "push/pull_datapackage" are deprecated. ' 'Please use "Package" class', UserWarning) # Init maps tables = [] schemas = [] datamap = {} mapping = {} # Init model model = Package(descriptor) # Get storage plugin = import_module('jsontableschema.plugins.%s' % backend) storage = plugin.Storage(**backend_options) # Collect tables/schemas/data for resource in model.resources: if not resource.tabular: continue name = resource.descriptor.get('name', None) table = _convert_path(resource.descriptor['path'], name) schema = resource.descriptor['schema'] data = resource.table.iter(keyed=True) # TODO: review def values(schema, data): for item in data: row = [] for field in schema['fields']: row.append(item.get(field['name'], None)) yield tuple(row) tables.append(table) schemas.append(schema) datamap[table] = values(schema, data) if name is not None: mapping[name] = table schemas = _convert_schemas(mapping, schemas) # Create tables for table in tables: if table in storage.buckets: storage.delete(table) storage.create(tables, schemas) # Write data to tables for table in storage.buckets: if table in datamap: storage.write(table, datamap[table]) return storage
def push_datapackage(descriptor, backend, **backend_options): """Push Data Package to storage. All parameters should be used as keyword arguments. Args: descriptor (str): path to descriptor backend (str): backend name like `sql` or `bigquery` backend_options (dict): backend options mentioned in backend docs """ # Deprecated warnings.warn( 'Functions "push/pull_datapackage" are deprecated. ' 'Please use "Package" class', UserWarning) # Init maps tables = [] schemas = [] datamap = {} mapping = {} # Init model model = Package(descriptor) # Get storage plugin = import_module('jsontableschema.plugins.%s' % backend) storage = plugin.Storage(**backend_options) # Collect tables/schemas/data for resource in model.resources: if not resource.tabular: continue name = resource.descriptor.get('name', None) table = _convert_path(resource.descriptor['path'], name) schema = resource.descriptor['schema'] data = resource.table.iter(keyed=True) # TODO: review def values(schema, data): for item in data: row = [] for field in schema['fields']: row.append(item.get(field['name'], None)) yield tuple(row) tables.append(table) schemas.append(schema) datamap[table] = values(schema, data) if name is not None: mapping[name] = table schemas = _convert_schemas(mapping, schemas) # Create tables for table in tables: if table in storage.buckets: storage.delete(table) storage.create(tables, schemas) # Write data to tables for table in storage.buckets: if table in datamap: storage.write(table, datamap[table]) return storage
[ "Push", "Data", "Package", "to", "storage", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/pushpull.py#L23-L86
[ "def", "push_datapackage", "(", "descriptor", ",", "backend", ",", "*", "*", "backend_options", ")", ":", "# Deprecated", "warnings", ".", "warn", "(", "'Functions \"push/pull_datapackage\" are deprecated. '", "'Please use \"Package\" class'", ",", "UserWarning", ")", "# Init maps", "tables", "=", "[", "]", "schemas", "=", "[", "]", "datamap", "=", "{", "}", "mapping", "=", "{", "}", "# Init model", "model", "=", "Package", "(", "descriptor", ")", "# Get storage", "plugin", "=", "import_module", "(", "'jsontableschema.plugins.%s'", "%", "backend", ")", "storage", "=", "plugin", ".", "Storage", "(", "*", "*", "backend_options", ")", "# Collect tables/schemas/data", "for", "resource", "in", "model", ".", "resources", ":", "if", "not", "resource", ".", "tabular", ":", "continue", "name", "=", "resource", ".", "descriptor", ".", "get", "(", "'name'", ",", "None", ")", "table", "=", "_convert_path", "(", "resource", ".", "descriptor", "[", "'path'", "]", ",", "name", ")", "schema", "=", "resource", ".", "descriptor", "[", "'schema'", "]", "data", "=", "resource", ".", "table", ".", "iter", "(", "keyed", "=", "True", ")", "# TODO: review", "def", "values", "(", "schema", ",", "data", ")", ":", "for", "item", "in", "data", ":", "row", "=", "[", "]", "for", "field", "in", "schema", "[", "'fields'", "]", ":", "row", ".", "append", "(", "item", ".", "get", "(", "field", "[", "'name'", "]", ",", "None", ")", ")", "yield", "tuple", "(", "row", ")", "tables", ".", "append", "(", "table", ")", "schemas", ".", "append", "(", "schema", ")", "datamap", "[", "table", "]", "=", "values", "(", "schema", ",", "data", ")", "if", "name", "is", "not", "None", ":", "mapping", "[", "name", "]", "=", "table", "schemas", "=", "_convert_schemas", "(", "mapping", ",", "schemas", ")", "# Create tables", "for", "table", "in", "tables", ":", "if", "table", "in", "storage", ".", "buckets", ":", "storage", ".", "delete", "(", "table", ")", "storage", ".", "create", "(", "tables", ",", "schemas", ")", "# Write data to tables", "for", "table", "in", "storage", ".", "buckets", ":", "if", "table", "in", "datamap", ":", "storage", ".", "write", "(", "table", ",", "datamap", "[", "table", "]", ")", "return", "storage" ]
aca085ea54541b087140b58a81332f8728baeeb2
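A heavily hedged example of the deprecated push helper above. The 'sql' backend name matches the jsontableschema plugin lookup in the code; the engine keyword and the SQLite URL are assumptions about that plugin's Storage options, and the descriptor path is hypothetical.

from sqlalchemy import create_engine
from datapackage.pushpull import push_datapackage  # import path taken from this record's module

storage = push_datapackage(
    descriptor='datapackage.json',        # hypothetical descriptor path
    backend='sql',                        # resolved as jsontableschema.plugins.sql
    engine=create_engine('sqlite://'),    # assumed backend option for the sql plugin
)
print(storage.buckets)                    # one bucket per tabular resource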
valid
pull_datapackage
Pull Data Package from storage. All parameters should be used as keyword arguments. Args: descriptor (str): path where to store descriptor name (str): name of the pulled datapackage backend (str): backend name like `sql` or `bigquery` backend_options (dict): backend options mentioned in backend docs
datapackage/pushpull.py
def pull_datapackage(descriptor, name, backend, **backend_options): """Pull Data Package from storage. All parameters should be used as keyword arguments. Args: descriptor (str): path where to store descriptor name (str): name of the pulled datapackage backend (str): backend name like `sql` or `bigquery` backend_options (dict): backend options mentioned in backend docs """ # Deprecated warnings.warn( 'Functions "push/pull_datapackage" are deprecated. ' 'Please use "Package" class', UserWarning) # Save datapackage name datapackage_name = name # Get storage plugin = import_module('jsontableschema.plugins.%s' % backend) storage = plugin.Storage(**backend_options) # Iterate over tables resources = [] for table in storage.buckets: # Prepare schema = storage.describe(table) base = os.path.dirname(descriptor) path, name = _restore_path(table) fullpath = os.path.join(base, path) # Write data helpers.ensure_dir(fullpath) with io.open(fullpath, 'wb') as file: model = Schema(deepcopy(schema)) data = storage.iter(table) writer = csv.writer(file, encoding='utf-8') writer.writerow(model.headers) for row in data: writer.writerow(row) # Add resource resource = {'schema': schema, 'path': path} if name is not None: resource['name'] = name resources.append(resource) # Write descriptor mode = 'w' encoding = 'utf-8' if six.PY2: mode = 'wb' encoding = None resources = _restore_resources(resources) helpers.ensure_dir(descriptor) with io.open(descriptor, mode=mode, encoding=encoding) as file: descriptor = { 'name': datapackage_name, 'resources': resources, } json.dump(descriptor, file, indent=4) return storage
def pull_datapackage(descriptor, name, backend, **backend_options): """Pull Data Package from storage. All parameters should be used as keyword arguments. Args: descriptor (str): path where to store descriptor name (str): name of the pulled datapackage backend (str): backend name like `sql` or `bigquery` backend_options (dict): backend options mentioned in backend docs """ # Deprecated warnings.warn( 'Functions "push/pull_datapackage" are deprecated. ' 'Please use "Package" class', UserWarning) # Save datapackage name datapackage_name = name # Get storage plugin = import_module('jsontableschema.plugins.%s' % backend) storage = plugin.Storage(**backend_options) # Iterate over tables resources = [] for table in storage.buckets: # Prepare schema = storage.describe(table) base = os.path.dirname(descriptor) path, name = _restore_path(table) fullpath = os.path.join(base, path) # Write data helpers.ensure_dir(fullpath) with io.open(fullpath, 'wb') as file: model = Schema(deepcopy(schema)) data = storage.iter(table) writer = csv.writer(file, encoding='utf-8') writer.writerow(model.headers) for row in data: writer.writerow(row) # Add resource resource = {'schema': schema, 'path': path} if name is not None: resource['name'] = name resources.append(resource) # Write descriptor mode = 'w' encoding = 'utf-8' if six.PY2: mode = 'wb' encoding = None resources = _restore_resources(resources) helpers.ensure_dir(descriptor) with io.open(descriptor, mode=mode, encoding=encoding) as file: descriptor = { 'name': datapackage_name, 'resources': resources, } json.dump(descriptor, file, indent=4) return storage
[ "Pull", "Data", "Package", "from", "storage", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/pushpull.py#L89-L157
[ "def", "pull_datapackage", "(", "descriptor", ",", "name", ",", "backend", ",", "*", "*", "backend_options", ")", ":", "# Deprecated", "warnings", ".", "warn", "(", "'Functions \"push/pull_datapackage\" are deprecated. '", "'Please use \"Package\" class'", ",", "UserWarning", ")", "# Save datapackage name", "datapackage_name", "=", "name", "# Get storage", "plugin", "=", "import_module", "(", "'jsontableschema.plugins.%s'", "%", "backend", ")", "storage", "=", "plugin", ".", "Storage", "(", "*", "*", "backend_options", ")", "# Iterate over tables", "resources", "=", "[", "]", "for", "table", "in", "storage", ".", "buckets", ":", "# Prepare", "schema", "=", "storage", ".", "describe", "(", "table", ")", "base", "=", "os", ".", "path", ".", "dirname", "(", "descriptor", ")", "path", ",", "name", "=", "_restore_path", "(", "table", ")", "fullpath", "=", "os", ".", "path", ".", "join", "(", "base", ",", "path", ")", "# Write data", "helpers", ".", "ensure_dir", "(", "fullpath", ")", "with", "io", ".", "open", "(", "fullpath", ",", "'wb'", ")", "as", "file", ":", "model", "=", "Schema", "(", "deepcopy", "(", "schema", ")", ")", "data", "=", "storage", ".", "iter", "(", "table", ")", "writer", "=", "csv", ".", "writer", "(", "file", ",", "encoding", "=", "'utf-8'", ")", "writer", ".", "writerow", "(", "model", ".", "headers", ")", "for", "row", "in", "data", ":", "writer", ".", "writerow", "(", "row", ")", "# Add resource", "resource", "=", "{", "'schema'", ":", "schema", ",", "'path'", ":", "path", "}", "if", "name", "is", "not", "None", ":", "resource", "[", "'name'", "]", "=", "name", "resources", ".", "append", "(", "resource", ")", "# Write descriptor", "mode", "=", "'w'", "encoding", "=", "'utf-8'", "if", "six", ".", "PY2", ":", "mode", "=", "'wb'", "encoding", "=", "None", "resources", "=", "_restore_resources", "(", "resources", ")", "helpers", ".", "ensure_dir", "(", "descriptor", ")", "with", "io", ".", "open", "(", "descriptor", ",", "mode", "=", "mode", ",", "encoding", "=", "encoding", ")", "as", "file", ":", "descriptor", "=", "{", "'name'", ":", "datapackage_name", ",", "'resources'", ":", "resources", ",", "}", "json", ".", "dump", "(", "descriptor", ",", "file", ",", "indent", "=", "4", ")", "return", "storage" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
_convert_path
Convert resource's path and name to storage's table name. Args: path (str): resource path name (str): resource name Returns: str: table name
datapackage/pushpull.py
def _convert_path(path, name): """Convert resource's path and name to storage's table name. Args: path (str): resource path name (str): resource name Returns: str: table name """ table = os.path.splitext(path)[0] table = table.replace(os.path.sep, '__') if name is not None: table = '___'.join([table, name]) table = re.sub('[^0-9a-zA-Z_]+', '_', table) table = table.lower() return table
def _convert_path(path, name): """Convert resource's path and name to storage's table name. Args: path (str): resource path name (str): resource name Returns: str: table name """ table = os.path.splitext(path)[0] table = table.replace(os.path.sep, '__') if name is not None: table = '___'.join([table, name]) table = re.sub('[^0-9a-zA-Z_]+', '_', table) table = table.lower() return table
[ "Convert", "resource", "s", "path", "and", "name", "to", "storage", "s", "table", "name", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/pushpull.py#L162-L179
[ "def", "_convert_path", "(", "path", ",", "name", ")", ":", "table", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "[", "0", "]", "table", "=", "table", ".", "replace", "(", "os", ".", "path", ".", "sep", ",", "'__'", ")", "if", "name", "is", "not", "None", ":", "table", "=", "'___'", ".", "join", "(", "[", "table", ",", "name", "]", ")", "table", "=", "re", ".", "sub", "(", "'[^0-9a-zA-Z_]+'", ",", "'_'", ",", "table", ")", "table", "=", "table", ".", "lower", "(", ")", "return", "table" ]
aca085ea54541b087140b58a81332f8728baeeb2
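A worked example tracing _convert_path above on a POSIX path separator; the inputs are hypothetical.

from datapackage.pushpull import _convert_path  # private helper, imported here only for illustration

_convert_path('data/my-file.csv', 'My Resource')
# splitext drops '.csv'              -> 'data/my-file'
# os.sep becomes '__'                -> 'data__my-file'
# name joined with '___'             -> 'data__my-file___My Resource'
# runs of non [0-9a-zA-Z_] become _  -> 'data__my_file___My_Resource'
# lowercased                         -> returns 'data__my_file___my_resource'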
valid
_restore_path
Restore resource's path and name from storage's table. Args: table (str): table name Returns: (str, str): resource path and name
datapackage/pushpull.py
def _restore_path(table): """Restore resource's path and name from storage's table. Args: table (str): table name Returns: (str, str): resource path and name """ name = None splited = table.split('___') path = splited[0] if len(splited) == 2: name = splited[1] path = path.replace('__', os.path.sep) path += '.csv' return path, name
def _restore_path(table): """Restore resource's path and name from storage's table. Args: table (str): table name Returns: (str, str): resource path and name """ name = None splited = table.split('___') path = splited[0] if len(splited) == 2: name = splited[1] path = path.replace('__', os.path.sep) path += '.csv' return path, name
[ "Restore", "resource", "s", "path", "and", "name", "from", "storage", "s", "table", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/pushpull.py#L182-L199
[ "def", "_restore_path", "(", "table", ")", ":", "name", "=", "None", "splited", "=", "table", ".", "split", "(", "'___'", ")", "path", "=", "splited", "[", "0", "]", "if", "len", "(", "splited", ")", "==", "2", ":", "name", "=", "splited", "[", "1", "]", "path", "=", "path", ".", "replace", "(", "'__'", ",", "os", ".", "path", ".", "sep", ")", "path", "+=", "'.csv'", "return", "path", ",", "name" ]
aca085ea54541b087140b58a81332f8728baeeb2
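Continuing the example above, _restore_path inverts the table name only approximately: the hyphen and capitalisation from the original path are not recovered and '.csv' is always appended.

from datapackage.pushpull import _restore_path  # private helper, imported here only for illustration

_restore_path('data__my_file___my_resource')
# split on '___'   -> ['data__my_file', 'my_resource']
# '__' -> os.sep   -> 'data/my_file'
# append '.csv'    -> returns ('data/my_file.csv', 'my_resource')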
valid
_convert_schemas
Convert schemas to be compatible with storage schemas. Foreign keys related operations. Args: mapping (dict): mapping between resource name and table name schemas (list): schemas Raises: ValueError: if there is no resource for some foreign key in given mapping Returns: list: converted schemas
datapackage/pushpull.py
def _convert_schemas(mapping, schemas): """Convert schemas to be compatible with storage schemas. Foreign keys related operations. Args: mapping (dict): mapping between resource name and table name schemas (list): schemas Raises: ValueError: if there is no resource for some foreign key in given mapping Returns: list: converted schemas """ schemas = deepcopy(schemas) for schema in schemas: for fk in schema.get('foreignKeys', []): resource = fk['reference']['resource'] if resource != 'self': if resource not in mapping: message = 'Not resource "%s" for foreign key "%s"' message = message % (resource, fk) raise ValueError(message) fk['reference']['resource'] = mapping[resource] return schemas
def _convert_schemas(mapping, schemas): """Convert schemas to be compatible with storage schemas. Foreign keys related operations. Args: mapping (dict): mapping between resource name and table name schemas (list): schemas Raises: ValueError: if there is no resource for some foreign key in given mapping Returns: list: converted schemas """ schemas = deepcopy(schemas) for schema in schemas: for fk in schema.get('foreignKeys', []): resource = fk['reference']['resource'] if resource != 'self': if resource not in mapping: message = 'Not resource "%s" for foreign key "%s"' message = message % (resource, fk) raise ValueError(message) fk['reference']['resource'] = mapping[resource] return schemas
[ "Convert", "schemas", "to", "be", "compatible", "with", "storage", "schemas", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/pushpull.py#L202-L229
[ "def", "_convert_schemas", "(", "mapping", ",", "schemas", ")", ":", "schemas", "=", "deepcopy", "(", "schemas", ")", "for", "schema", "in", "schemas", ":", "for", "fk", "in", "schema", ".", "get", "(", "'foreignKeys'", ",", "[", "]", ")", ":", "resource", "=", "fk", "[", "'reference'", "]", "[", "'resource'", "]", "if", "resource", "!=", "'self'", ":", "if", "resource", "not", "in", "mapping", ":", "message", "=", "'Not resource \"%s\" for foreign key \"%s\"'", "message", "=", "message", "%", "(", "resource", ",", "fk", ")", "raise", "ValueError", "(", "message", ")", "fk", "[", "'reference'", "]", "[", "'resource'", "]", "=", "mapping", "[", "resource", "]", "return", "schemas" ]
aca085ea54541b087140b58a81332f8728baeeb2
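A small worked example of the foreign-key rewrite in _convert_schemas above; the resource and table names are made up.

from datapackage.pushpull import _convert_schemas  # private helper, imported here only for illustration

mapping = {'cities': 'data__cities___cities'}
schemas = [{
    'fields': [{'name': 'city'}],
    'foreignKeys': [
        {'fields': 'city', 'reference': {'resource': 'cities', 'fields': 'name'}},
    ],
}]

converted = _convert_schemas(mapping, schemas)
assert converted[0]['foreignKeys'][0]['reference']['resource'] == 'data__cities___cities'
# a 'self' reference is left untouched; an unmapped resource name raises ValueError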
valid
_restore_resources
Restore schemas from being compatible with storage schemas. Foreign keys related operations. Args: list: resources from storage Returns: list: restored resources
datapackage/pushpull.py
def _restore_resources(resources): """Restore schemas from being compatible with storage schemas. Foreign keys related operations. Args: list: resources from storage Returns: list: restored resources """ resources = deepcopy(resources) for resource in resources: schema = resource['schema'] for fk in schema.get('foreignKeys', []): _, name = _restore_path(fk['reference']['resource']) fk['reference']['resource'] = name return resources
def _restore_resources(resources): """Restore schemas from being compatible with storage schemas. Foreign keys related operations. Args: list: resources from storage Returns: list: restored resources """ resources = deepcopy(resources) for resource in resources: schema = resource['schema'] for fk in schema.get('foreignKeys', []): _, name = _restore_path(fk['reference']['resource']) fk['reference']['resource'] = name return resources
[ "Restore", "schemas", "from", "being", "compatible", "with", "storage", "schemas", "." ]
frictionlessdata/datapackage-py
python
https://github.com/frictionlessdata/datapackage-py/blob/aca085ea54541b087140b58a81332f8728baeeb2/datapackage/pushpull.py#L232-L250
[ "def", "_restore_resources", "(", "resources", ")", ":", "resources", "=", "deepcopy", "(", "resources", ")", "for", "resource", "in", "resources", ":", "schema", "=", "resource", "[", "'schema'", "]", "for", "fk", "in", "schema", ".", "get", "(", "'foreignKeys'", ",", "[", "]", ")", ":", "_", ",", "name", "=", "_restore_path", "(", "fk", "[", "'reference'", "]", "[", "'resource'", "]", ")", "fk", "[", "'reference'", "]", "[", "'resource'", "]", "=", "name", "return", "resources" ]
aca085ea54541b087140b58a81332f8728baeeb2
valid
_buffer_incomplete_responses
It is possible for some of gdb's output to be read before it completely finished its response. In that case, a partial mi response was read, which cannot be parsed into structured data. We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's output if the output did not end in a newline. Args: raw_output: Contents of the gdb mi output buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to gdb's next output. Returns: (raw_output, buf)
pygdbmi/gdbcontroller.py
def _buffer_incomplete_responses(raw_output, buf): """It is possible for some of gdb's output to be read before it completely finished its response. In that case, a partial mi response was read, which cannot be parsed into structured data. We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's output if the output did not end in a newline. Args: raw_output: Contents of the gdb mi output buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to gdb's next output. Returns: (raw_output, buf) """ if raw_output: if buf: # concatenate buffer and new output raw_output = b"".join([buf, raw_output]) buf = None if b"\n" not in raw_output: # newline was not found, so assume output is incomplete and store in buffer buf = raw_output raw_output = None elif not raw_output.endswith(b"\n"): # raw output doesn't end in a newline, so store everything after the last newline (if anything) # in the buffer, and parse everything before it remainder_offset = raw_output.rindex(b"\n") + 1 buf = raw_output[remainder_offset:] raw_output = raw_output[:remainder_offset] return (raw_output, buf)
def _buffer_incomplete_responses(raw_output, buf): """It is possible for some of gdb's output to be read before it completely finished its response. In that case, a partial mi response was read, which cannot be parsed into structured data. We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's output if the output did not end in a newline. Args: raw_output: Contents of the gdb mi output buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to gdb's next output. Returns: (raw_output, buf) """ if raw_output: if buf: # concatenate buffer and new output raw_output = b"".join([buf, raw_output]) buf = None if b"\n" not in raw_output: # newline was not found, so assume output is incomplete and store in buffer buf = raw_output raw_output = None elif not raw_output.endswith(b"\n"): # raw output doesn't end in a newline, so store everything after the last newline (if anything) # in the buffer, and parse everything before it remainder_offset = raw_output.rindex(b"\n") + 1 buf = raw_output[remainder_offset:] raw_output = raw_output[:remainder_offset] return (raw_output, buf)
[ "It", "is", "possible", "for", "some", "of", "gdb", "s", "output", "to", "be", "read", "before", "it", "completely", "finished", "its", "response", ".", "In", "that", "case", "a", "partial", "mi", "response", "was", "read", "which", "cannot", "be", "parsed", "into", "structured", "data", ".", "We", "want", "to", "ALWAYS", "parse", "complete", "mi", "records", ".", "To", "do", "this", "we", "store", "a", "buffer", "of", "gdb", "s", "output", "if", "the", "output", "did", "not", "end", "in", "a", "newline", "." ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L444-L477
[ "def", "_buffer_incomplete_responses", "(", "raw_output", ",", "buf", ")", ":", "if", "raw_output", ":", "if", "buf", ":", "# concatenate buffer and new output", "raw_output", "=", "b\"\"", ".", "join", "(", "[", "buf", ",", "raw_output", "]", ")", "buf", "=", "None", "if", "b\"\\n\"", "not", "in", "raw_output", ":", "# newline was not found, so assume output is incomplete and store in buffer", "buf", "=", "raw_output", "raw_output", "=", "None", "elif", "not", "raw_output", ".", "endswith", "(", "b\"\\n\"", ")", ":", "# raw output doesn't end in a newline, so store everything after the last newline (if anything)", "# in the buffer, and parse everything before it", "remainder_offset", "=", "raw_output", ".", "rindex", "(", "b\"\\n\"", ")", "+", "1", "buf", "=", "raw_output", "[", "remainder_offset", ":", "]", "raw_output", "=", "raw_output", "[", ":", "remainder_offset", "]", "return", "(", "raw_output", ",", "buf", ")" ]
709c781794d3c3b903891f83da011d2d995895d1
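A worked example of the buffering logic above, with made-up gdb/MI fragments split across two reads.

from pygdbmi.gdbcontroller import _buffer_incomplete_responses  # private helper, for illustration

raw, buf = _buffer_incomplete_responses(b'^done\n*stop', None)
# raw == b'^done\n'   complete record, safe to parse
# buf == b'*stop'     incomplete tail, carried over to the next read

raw, buf = _buffer_incomplete_responses(b'ped,reason="exited-normally"\n', buf)
# raw == b'*stopped,reason="exited-normally"\n'
# buf is None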
valid
_make_non_blocking
make file object non-blocking Windows doesn't have the fcntl module, but someone on stack overflow supplied this code as an answer, and it works http://stackoverflow.com/a/34504971/2893090
pygdbmi/gdbcontroller.py
def _make_non_blocking(file_obj): """make file object non-blocking Windows doesn't have the fcntl module, but someone on stack overflow supplied this code as an answer, and it works http://stackoverflow.com/a/34504971/2893090""" if USING_WINDOWS: LPDWORD = POINTER(DWORD) PIPE_NOWAIT = wintypes.DWORD(0x00000001) SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState SetNamedPipeHandleState.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD] SetNamedPipeHandleState.restype = BOOL h = msvcrt.get_osfhandle(file_obj.fileno()) res = windll.kernel32.SetNamedPipeHandleState(h, byref(PIPE_NOWAIT), None, None) if res == 0: raise ValueError(WinError()) else: # Set the file status flag (F_SETFL) on the pipes to be non-blocking # so we can attempt to read from a pipe with no new data without locking # the program up fcntl.fcntl(file_obj, fcntl.F_SETFL, os.O_NONBLOCK)
def _make_non_blocking(file_obj): """make file object non-blocking Windows doesn't have the fcntl module, but someone on stack overflow supplied this code as an answer, and it works http://stackoverflow.com/a/34504971/2893090""" if USING_WINDOWS: LPDWORD = POINTER(DWORD) PIPE_NOWAIT = wintypes.DWORD(0x00000001) SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState SetNamedPipeHandleState.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD] SetNamedPipeHandleState.restype = BOOL h = msvcrt.get_osfhandle(file_obj.fileno()) res = windll.kernel32.SetNamedPipeHandleState(h, byref(PIPE_NOWAIT), None, None) if res == 0: raise ValueError(WinError()) else: # Set the file status flag (F_SETFL) on the pipes to be non-blocking # so we can attempt to read from a pipe with no new data without locking # the program up fcntl.fcntl(file_obj, fcntl.F_SETFL, os.O_NONBLOCK)
[ "make", "file", "object", "non", "-", "blocking", "Windows", "doesn", "t", "have", "the", "fcntl", "module", "but", "someone", "on", "stack", "overflow", "supplied", "this", "code", "as", "an", "answer", "and", "it", "works", "http", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "34504971", "/", "2893090" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L480-L504
[ "def", "_make_non_blocking", "(", "file_obj", ")", ":", "if", "USING_WINDOWS", ":", "LPDWORD", "=", "POINTER", "(", "DWORD", ")", "PIPE_NOWAIT", "=", "wintypes", ".", "DWORD", "(", "0x00000001", ")", "SetNamedPipeHandleState", "=", "windll", ".", "kernel32", ".", "SetNamedPipeHandleState", "SetNamedPipeHandleState", ".", "argtypes", "=", "[", "HANDLE", ",", "LPDWORD", ",", "LPDWORD", ",", "LPDWORD", "]", "SetNamedPipeHandleState", ".", "restype", "=", "BOOL", "h", "=", "msvcrt", ".", "get_osfhandle", "(", "file_obj", ".", "fileno", "(", ")", ")", "res", "=", "windll", ".", "kernel32", ".", "SetNamedPipeHandleState", "(", "h", ",", "byref", "(", "PIPE_NOWAIT", ")", ",", "None", ",", "None", ")", "if", "res", "==", "0", ":", "raise", "ValueError", "(", "WinError", "(", ")", ")", "else", ":", "# Set the file status flag (F_SETFL) on the pipes to be non-blocking", "# so we can attempt to read from a pipe with no new data without locking", "# the program up", "fcntl", ".", "fcntl", "(", "file_obj", ",", "fcntl", ".", "F_SETFL", ",", "os", ".", "O_NONBLOCK", ")" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
GdbController.spawn_new_gdb_subprocess
Spawn a new gdb subprocess with the arguments supplied to the object during initialization. If gdb subprocess already exists, terminate it before spawning a new one. Return int: gdb process id
pygdbmi/gdbcontroller.py
def spawn_new_gdb_subprocess(self): """Spawn a new gdb subprocess with the arguments supplied to the object during initialization. If gdb subprocess already exists, terminate it before spanwing a new one. Return int: gdb process id """ if self.gdb_process: self.logger.debug( "Killing current gdb subprocess (pid %d)" % self.gdb_process.pid ) self.exit() self.logger.debug('Launching gdb: "%s"' % " ".join(self.cmd)) # Use pipes to the standard streams self.gdb_process = subprocess.Popen( self.cmd, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, ) _make_non_blocking(self.gdb_process.stdout) _make_non_blocking(self.gdb_process.stderr) # save file numbers for use later self.stdout_fileno = self.gdb_process.stdout.fileno() self.stderr_fileno = self.gdb_process.stderr.fileno() self.stdin_fileno = self.gdb_process.stdin.fileno() self.read_list = [self.stdout_fileno, self.stderr_fileno] self.write_list = [self.stdin_fileno] # string buffers for unifinished gdb output self._incomplete_output = {"stdout": None, "stderr": None} return self.gdb_process.pid
def spawn_new_gdb_subprocess(self): """Spawn a new gdb subprocess with the arguments supplied to the object during initialization. If gdb subprocess already exists, terminate it before spanwing a new one. Return int: gdb process id """ if self.gdb_process: self.logger.debug( "Killing current gdb subprocess (pid %d)" % self.gdb_process.pid ) self.exit() self.logger.debug('Launching gdb: "%s"' % " ".join(self.cmd)) # Use pipes to the standard streams self.gdb_process = subprocess.Popen( self.cmd, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, ) _make_non_blocking(self.gdb_process.stdout) _make_non_blocking(self.gdb_process.stderr) # save file numbers for use later self.stdout_fileno = self.gdb_process.stdout.fileno() self.stderr_fileno = self.gdb_process.stderr.fileno() self.stdin_fileno = self.gdb_process.stdin.fileno() self.read_list = [self.stdout_fileno, self.stderr_fileno] self.write_list = [self.stdin_fileno] # string buffers for unifinished gdb output self._incomplete_output = {"stdout": None, "stderr": None} return self.gdb_process.pid
[ "Spawn", "a", "new", "gdb", "subprocess", "with", "the", "arguments", "supplied", "to", "the", "object", "during", "initialization", ".", "If", "gdb", "subprocess", "already", "exists", "terminate", "it", "before", "spanwing", "a", "new", "one", ".", "Return", "int", ":", "gdb", "process", "id" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L129-L166
[ "def", "spawn_new_gdb_subprocess", "(", "self", ")", ":", "if", "self", ".", "gdb_process", ":", "self", ".", "logger", ".", "debug", "(", "\"Killing current gdb subprocess (pid %d)\"", "%", "self", ".", "gdb_process", ".", "pid", ")", "self", ".", "exit", "(", ")", "self", ".", "logger", ".", "debug", "(", "'Launching gdb: \"%s\"'", "%", "\" \"", ".", "join", "(", "self", ".", "cmd", ")", ")", "# Use pipes to the standard streams", "self", ".", "gdb_process", "=", "subprocess", ".", "Popen", "(", "self", ".", "cmd", ",", "shell", "=", "False", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "bufsize", "=", "0", ",", ")", "_make_non_blocking", "(", "self", ".", "gdb_process", ".", "stdout", ")", "_make_non_blocking", "(", "self", ".", "gdb_process", ".", "stderr", ")", "# save file numbers for use later", "self", ".", "stdout_fileno", "=", "self", ".", "gdb_process", ".", "stdout", ".", "fileno", "(", ")", "self", ".", "stderr_fileno", "=", "self", ".", "gdb_process", ".", "stderr", ".", "fileno", "(", ")", "self", ".", "stdin_fileno", "=", "self", ".", "gdb_process", ".", "stdin", ".", "fileno", "(", ")", "self", ".", "read_list", "=", "[", "self", ".", "stdout_fileno", ",", "self", ".", "stderr_fileno", "]", "self", ".", "write_list", "=", "[", "self", ".", "stdin_fileno", "]", "# string buffers for unifinished gdb output", "self", ".", "_incomplete_output", "=", "{", "\"stdout\"", ":", "None", ",", "\"stderr\"", ":", "None", "}", "return", "self", ".", "gdb_process", ".", "pid" ]
709c781794d3c3b903891f83da011d2d995895d1
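A short usage sketch, assuming the GdbController constructor already starts a gdb subprocess (as the example.py record later in this file suggests) and that gdb is on the PATH:

from pygdbmi.gdbcontroller import GdbController

gdbmi = GdbController()                      # spawns the initial gdb subprocess
old_pid = gdbmi.gdb_process.pid
new_pid = gdbmi.spawn_new_gdb_subprocess()   # terminates the old gdb, starts a fresh one
# old_pid and new_pid normally differ; the controller object is reused as-is
gdbmi.exit()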
valid
GdbController.verify_valid_gdb_subprocess
Verify there is a process object, and that it is still running. Raise NoGdbProcessError if either of the above are not true.
pygdbmi/gdbcontroller.py
def verify_valid_gdb_subprocess(self): """Verify there is a process object, and that it is still running. Raise NoGdbProcessError if either of the above are not true.""" if not self.gdb_process: raise NoGdbProcessError("gdb process is not attached") elif self.gdb_process.poll() is not None: raise NoGdbProcessError( "gdb process has already finished with return code: %s" % str(self.gdb_process.poll()) )
def verify_valid_gdb_subprocess(self): """Verify there is a process object, and that it is still running. Raise NoGdbProcessError if either of the above are not true.""" if not self.gdb_process: raise NoGdbProcessError("gdb process is not attached") elif self.gdb_process.poll() is not None: raise NoGdbProcessError( "gdb process has already finished with return code: %s" % str(self.gdb_process.poll()) )
[ "Verify", "there", "is", "a", "process", "object", "and", "that", "it", "is", "still", "running", ".", "Raise", "NoGdbProcessError", "if", "either", "of", "the", "above", "are", "not", "true", "." ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L168-L178
[ "def", "verify_valid_gdb_subprocess", "(", "self", ")", ":", "if", "not", "self", ".", "gdb_process", ":", "raise", "NoGdbProcessError", "(", "\"gdb process is not attached\"", ")", "elif", "self", ".", "gdb_process", ".", "poll", "(", ")", "is", "not", "None", ":", "raise", "NoGdbProcessError", "(", "\"gdb process has already finished with return code: %s\"", "%", "str", "(", "self", ".", "gdb_process", ".", "poll", "(", ")", ")", ")" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
GdbController.write
Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec. Args: mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines. timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0. raise_error_on_timeout (bool): If read_response is True, raise an error if no response is received read_response (bool): Block and read response. If there is a separate thread running, this can be false, and the reading thread can read the output. Returns: List of parsed gdb responses if read_response is True, otherwise [] Raises: NoGdbProcessError if there is no gdb subprocess running TypeError if mi_cmd_to_write is not valid
pygdbmi/gdbcontroller.py
def write( self, mi_cmd_to_write, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True, read_response=True, ): """Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec. Args: mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines. timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0. raise_error_on_timeout (bool): If read_response is True, raise error if no response is received read_response (bool): Block and read response. If there is a separate thread running, this can be false, and the reading thread read the output. Returns: List of parsed gdb responses if read_response is True, otherwise [] Raises: NoGdbProcessError if there is no gdb subprocess running TypeError if mi_cmd_to_write is not valid """ self.verify_valid_gdb_subprocess() if timeout_sec < 0: self.logger.warning("timeout_sec was negative, replacing with 0") timeout_sec = 0 # Ensure proper type of the mi command if type(mi_cmd_to_write) in [str, unicode]: pass elif type(mi_cmd_to_write) == list: mi_cmd_to_write = "\n".join(mi_cmd_to_write) else: raise TypeError( "The gdb mi command must a be str or list. Got " + str(type(mi_cmd_to_write)) ) self.logger.debug("writing: %s", mi_cmd_to_write) if not mi_cmd_to_write.endswith("\n"): mi_cmd_to_write_nl = mi_cmd_to_write + "\n" else: mi_cmd_to_write_nl = mi_cmd_to_write if USING_WINDOWS: # select not implemented in windows for pipes # assume it's always ready outputready = [self.stdin_fileno] else: _, outputready, _ = select.select([], self.write_list, [], timeout_sec) for fileno in outputready: if fileno == self.stdin_fileno: # ready to write self.gdb_process.stdin.write(mi_cmd_to_write_nl.encode()) # don't forget to flush for Python3, otherwise gdb won't realize there is data # to evaluate, and we won't get a response self.gdb_process.stdin.flush() else: self.logger.error("got unexpected fileno %d" % fileno) if read_response is True: return self.get_gdb_response( timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout ) else: return []
def write( self, mi_cmd_to_write, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True, read_response=True, ): """Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec. Args: mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines. timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0. raise_error_on_timeout (bool): If read_response is True, raise error if no response is received read_response (bool): Block and read response. If there is a separate thread running, this can be false, and the reading thread read the output. Returns: List of parsed gdb responses if read_response is True, otherwise [] Raises: NoGdbProcessError if there is no gdb subprocess running TypeError if mi_cmd_to_write is not valid """ self.verify_valid_gdb_subprocess() if timeout_sec < 0: self.logger.warning("timeout_sec was negative, replacing with 0") timeout_sec = 0 # Ensure proper type of the mi command if type(mi_cmd_to_write) in [str, unicode]: pass elif type(mi_cmd_to_write) == list: mi_cmd_to_write = "\n".join(mi_cmd_to_write) else: raise TypeError( "The gdb mi command must a be str or list. Got " + str(type(mi_cmd_to_write)) ) self.logger.debug("writing: %s", mi_cmd_to_write) if not mi_cmd_to_write.endswith("\n"): mi_cmd_to_write_nl = mi_cmd_to_write + "\n" else: mi_cmd_to_write_nl = mi_cmd_to_write if USING_WINDOWS: # select not implemented in windows for pipes # assume it's always ready outputready = [self.stdin_fileno] else: _, outputready, _ = select.select([], self.write_list, [], timeout_sec) for fileno in outputready: if fileno == self.stdin_fileno: # ready to write self.gdb_process.stdin.write(mi_cmd_to_write_nl.encode()) # don't forget to flush for Python3, otherwise gdb won't realize there is data # to evaluate, and we won't get a response self.gdb_process.stdin.flush() else: self.logger.error("got unexpected fileno %d" % fileno) if read_response is True: return self.get_gdb_response( timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout ) else: return []
[ "Write", "to", "gdb", "process", ".", "Block", "while", "parsing", "responses", "from", "gdb", "for", "a", "maximum", "of", "timeout_sec", "." ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L180-L246
[ "def", "write", "(", "self", ",", "mi_cmd_to_write", ",", "timeout_sec", "=", "DEFAULT_GDB_TIMEOUT_SEC", ",", "raise_error_on_timeout", "=", "True", ",", "read_response", "=", "True", ",", ")", ":", "self", ".", "verify_valid_gdb_subprocess", "(", ")", "if", "timeout_sec", "<", "0", ":", "self", ".", "logger", ".", "warning", "(", "\"timeout_sec was negative, replacing with 0\"", ")", "timeout_sec", "=", "0", "# Ensure proper type of the mi command", "if", "type", "(", "mi_cmd_to_write", ")", "in", "[", "str", ",", "unicode", "]", ":", "pass", "elif", "type", "(", "mi_cmd_to_write", ")", "==", "list", ":", "mi_cmd_to_write", "=", "\"\\n\"", ".", "join", "(", "mi_cmd_to_write", ")", "else", ":", "raise", "TypeError", "(", "\"The gdb mi command must a be str or list. Got \"", "+", "str", "(", "type", "(", "mi_cmd_to_write", ")", ")", ")", "self", ".", "logger", ".", "debug", "(", "\"writing: %s\"", ",", "mi_cmd_to_write", ")", "if", "not", "mi_cmd_to_write", ".", "endswith", "(", "\"\\n\"", ")", ":", "mi_cmd_to_write_nl", "=", "mi_cmd_to_write", "+", "\"\\n\"", "else", ":", "mi_cmd_to_write_nl", "=", "mi_cmd_to_write", "if", "USING_WINDOWS", ":", "# select not implemented in windows for pipes", "# assume it's always ready", "outputready", "=", "[", "self", ".", "stdin_fileno", "]", "else", ":", "_", ",", "outputready", ",", "_", "=", "select", ".", "select", "(", "[", "]", ",", "self", ".", "write_list", ",", "[", "]", ",", "timeout_sec", ")", "for", "fileno", "in", "outputready", ":", "if", "fileno", "==", "self", ".", "stdin_fileno", ":", "# ready to write", "self", ".", "gdb_process", ".", "stdin", ".", "write", "(", "mi_cmd_to_write_nl", ".", "encode", "(", ")", ")", "# don't forget to flush for Python3, otherwise gdb won't realize there is data", "# to evaluate, and we won't get a response", "self", ".", "gdb_process", ".", "stdin", ".", "flush", "(", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "\"got unexpected fileno %d\"", "%", "fileno", ")", "if", "read_response", "is", "True", ":", "return", "self", ".", "get_gdb_response", "(", "timeout_sec", "=", "timeout_sec", ",", "raise_error_on_timeout", "=", "raise_error_on_timeout", ")", "else", ":", "return", "[", "]" ]
709c781794d3c3b903891f83da011d2d995895d1
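A usage sketch of write(), assuming gdb is installed and on the PATH; the MI commands are illustrative only and need a loaded program to do anything useful:

from pygdbmi.gdbcontroller import GdbController

gdbmi = GdbController()
# Single command; the parsed responses are returned directly.
responses = gdbmi.write("-gdb-version", timeout_sec=1)
# A list of commands is joined by newlines. read_response=False skips the
# blocking read, so the output can be drained later with get_gdb_response().
gdbmi.write(["-break-insert main", "-exec-run"], read_response=False)
later = gdbmi.get_gdb_response(timeout_sec=1, raise_error_on_timeout=False)
gdbmi.exit()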
valid
GdbController.get_gdb_response
Get response from GDB, and block while doing so. If GDB does not have any response ready to be read by timeout_sec, an exception is raised. Args: timeout_sec (float): Maximum time to wait for response. Must be >= 0. Will return after raise_error_on_timeout (bool): Whether an exception should be raised if no response was found after timeout_sec Returns: List of parsed GDB responses, returned from gdbmiparser.parse_response, with the additional key 'stream' which is either 'stdout' or 'stderr' Raises: GdbTimeoutError if response is not received within timeout_sec ValueError if select returned unexpected file number NoGdbProcessError if there is no gdb subprocess running
pygdbmi/gdbcontroller.py
def get_gdb_response( self, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True ): """Get response from GDB, and block while doing so. If GDB does not have any response ready to be read by timeout_sec, an exception is raised. Args: timeout_sec (float): Maximum time to wait for reponse. Must be >= 0. Will return after raise_error_on_timeout (bool): Whether an exception should be raised if no response was found after timeout_sec Returns: List of parsed GDB responses, returned from gdbmiparser.parse_response, with the additional key 'stream' which is either 'stdout' or 'stderr' Raises: GdbTimeoutError if response is not received within timeout_sec ValueError if select returned unexpected file number NoGdbProcessError if there is no gdb subprocess running """ self.verify_valid_gdb_subprocess() if timeout_sec < 0: self.logger.warning("timeout_sec was negative, replacing with 0") timeout_sec = 0 if USING_WINDOWS: retval = self._get_responses_windows(timeout_sec) else: retval = self._get_responses_unix(timeout_sec) if not retval and raise_error_on_timeout: raise GdbTimeoutError( "Did not get response from gdb after %s seconds" % timeout_sec ) else: return retval
def get_gdb_response( self, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True ): """Get response from GDB, and block while doing so. If GDB does not have any response ready to be read by timeout_sec, an exception is raised. Args: timeout_sec (float): Maximum time to wait for reponse. Must be >= 0. Will return after raise_error_on_timeout (bool): Whether an exception should be raised if no response was found after timeout_sec Returns: List of parsed GDB responses, returned from gdbmiparser.parse_response, with the additional key 'stream' which is either 'stdout' or 'stderr' Raises: GdbTimeoutError if response is not received within timeout_sec ValueError if select returned unexpected file number NoGdbProcessError if there is no gdb subprocess running """ self.verify_valid_gdb_subprocess() if timeout_sec < 0: self.logger.warning("timeout_sec was negative, replacing with 0") timeout_sec = 0 if USING_WINDOWS: retval = self._get_responses_windows(timeout_sec) else: retval = self._get_responses_unix(timeout_sec) if not retval and raise_error_on_timeout: raise GdbTimeoutError( "Did not get response from gdb after %s seconds" % timeout_sec ) else: return retval
[ "Get", "response", "from", "GDB", "and", "block", "while", "doing", "so", ".", "If", "GDB", "does", "not", "have", "any", "response", "ready", "to", "be", "read", "by", "timeout_sec", "an", "exception", "is", "raised", "." ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L248-L285
[ "def", "get_gdb_response", "(", "self", ",", "timeout_sec", "=", "DEFAULT_GDB_TIMEOUT_SEC", ",", "raise_error_on_timeout", "=", "True", ")", ":", "self", ".", "verify_valid_gdb_subprocess", "(", ")", "if", "timeout_sec", "<", "0", ":", "self", ".", "logger", ".", "warning", "(", "\"timeout_sec was negative, replacing with 0\"", ")", "timeout_sec", "=", "0", "if", "USING_WINDOWS", ":", "retval", "=", "self", ".", "_get_responses_windows", "(", "timeout_sec", ")", "else", ":", "retval", "=", "self", ".", "_get_responses_unix", "(", "timeout_sec", ")", "if", "not", "retval", "and", "raise_error_on_timeout", ":", "raise", "GdbTimeoutError", "(", "\"Did not get response from gdb after %s seconds\"", "%", "timeout_sec", ")", "else", ":", "return", "retval" ]
709c781794d3c3b903891f83da011d2d995895d1
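A sketch of the timeout behaviour, assuming GdbTimeoutError is importable from pygdbmi.gdbcontroller, where the code above raises it:

from pygdbmi.gdbcontroller import GdbController, GdbTimeoutError

gdbmi = GdbController()
gdbmi.write("-gdb-version", read_response=False)
try:
    responses = gdbmi.get_gdb_response(timeout_sec=2)
except GdbTimeoutError:
    responses = []   # nothing was read within timeout_sec
gdbmi.exit()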
valid
GdbController._get_responses_windows
Get responses on windows. Assume no support for select and use a while loop.
pygdbmi/gdbcontroller.py
def _get_responses_windows(self, timeout_sec): """Get responses on windows. Assume no support for select and use a while loop.""" timeout_time_sec = time.time() + timeout_sec responses = [] while True: try: self.gdb_process.stdout.flush() if PYTHON3: raw_output = self.gdb_process.stdout.readline().replace( b"\r", b"\n" ) else: raw_output = self.gdb_process.stdout.read().replace(b"\r", b"\n") responses += self._get_responses_list(raw_output, "stdout") except IOError: pass try: self.gdb_process.stderr.flush() if PYTHON3: raw_output = self.gdb_process.stderr.readline().replace( b"\r", b"\n" ) else: raw_output = self.gdb_process.stderr.read().replace(b"\r", b"\n") responses += self._get_responses_list(raw_output, "stderr") except IOError: pass if time.time() > timeout_time_sec: break return responses
def _get_responses_windows(self, timeout_sec): """Get responses on windows. Assume no support for select and use a while loop.""" timeout_time_sec = time.time() + timeout_sec responses = [] while True: try: self.gdb_process.stdout.flush() if PYTHON3: raw_output = self.gdb_process.stdout.readline().replace( b"\r", b"\n" ) else: raw_output = self.gdb_process.stdout.read().replace(b"\r", b"\n") responses += self._get_responses_list(raw_output, "stdout") except IOError: pass try: self.gdb_process.stderr.flush() if PYTHON3: raw_output = self.gdb_process.stderr.readline().replace( b"\r", b"\n" ) else: raw_output = self.gdb_process.stderr.read().replace(b"\r", b"\n") responses += self._get_responses_list(raw_output, "stderr") except IOError: pass if time.time() > timeout_time_sec: break return responses
[ "Get", "responses", "on", "windows", ".", "Assume", "no", "support", "for", "select", "and", "use", "a", "while", "loop", "." ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L287-L319
[ "def", "_get_responses_windows", "(", "self", ",", "timeout_sec", ")", ":", "timeout_time_sec", "=", "time", ".", "time", "(", ")", "+", "timeout_sec", "responses", "=", "[", "]", "while", "True", ":", "try", ":", "self", ".", "gdb_process", ".", "stdout", ".", "flush", "(", ")", "if", "PYTHON3", ":", "raw_output", "=", "self", ".", "gdb_process", ".", "stdout", ".", "readline", "(", ")", ".", "replace", "(", "b\"\\r\"", ",", "b\"\\n\"", ")", "else", ":", "raw_output", "=", "self", ".", "gdb_process", ".", "stdout", ".", "read", "(", ")", ".", "replace", "(", "b\"\\r\"", ",", "b\"\\n\"", ")", "responses", "+=", "self", ".", "_get_responses_list", "(", "raw_output", ",", "\"stdout\"", ")", "except", "IOError", ":", "pass", "try", ":", "self", ".", "gdb_process", ".", "stderr", ".", "flush", "(", ")", "if", "PYTHON3", ":", "raw_output", "=", "self", ".", "gdb_process", ".", "stderr", ".", "readline", "(", ")", ".", "replace", "(", "b\"\\r\"", ",", "b\"\\n\"", ")", "else", ":", "raw_output", "=", "self", ".", "gdb_process", ".", "stderr", ".", "read", "(", ")", ".", "replace", "(", "b\"\\r\"", ",", "b\"\\n\"", ")", "responses", "+=", "self", ".", "_get_responses_list", "(", "raw_output", ",", "\"stderr\"", ")", "except", "IOError", ":", "pass", "if", "time", ".", "time", "(", ")", ">", "timeout_time_sec", ":", "break", "return", "responses" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
GdbController._get_responses_unix
Get responses on unix-like system. Use select to wait for output.
pygdbmi/gdbcontroller.py
def _get_responses_unix(self, timeout_sec): """Get responses on unix-like system. Use select to wait for output.""" timeout_time_sec = time.time() + timeout_sec responses = [] while True: select_timeout = timeout_time_sec - time.time() # I prefer to not pass a negative value to select if select_timeout <= 0: select_timeout = 0 events, _, _ = select.select(self.read_list, [], [], select_timeout) responses_list = None # to avoid infinite loop if using Python 2 try: for fileno in events: # new data is ready to read if fileno == self.stdout_fileno: self.gdb_process.stdout.flush() raw_output = self.gdb_process.stdout.read() stream = "stdout" elif fileno == self.stderr_fileno: self.gdb_process.stderr.flush() raw_output = self.gdb_process.stderr.read() stream = "stderr" else: raise ValueError( "Developer error. Got unexpected file number %d" % fileno ) responses_list = self._get_responses_list(raw_output, stream) responses += responses_list except IOError: # only occurs in python 2.7 pass if timeout_sec == 0: # just exit immediately break elif responses_list and self._allow_overwrite_timeout_times: # update timeout time to potentially be closer to now to avoid lengthy wait times when nothing is being output by gdb timeout_time_sec = min( time.time() + self.time_to_check_for_additional_output_sec, timeout_time_sec, ) elif time.time() > timeout_time_sec: break return responses
def _get_responses_unix(self, timeout_sec): """Get responses on unix-like system. Use select to wait for output.""" timeout_time_sec = time.time() + timeout_sec responses = [] while True: select_timeout = timeout_time_sec - time.time() # I prefer to not pass a negative value to select if select_timeout <= 0: select_timeout = 0 events, _, _ = select.select(self.read_list, [], [], select_timeout) responses_list = None # to avoid infinite loop if using Python 2 try: for fileno in events: # new data is ready to read if fileno == self.stdout_fileno: self.gdb_process.stdout.flush() raw_output = self.gdb_process.stdout.read() stream = "stdout" elif fileno == self.stderr_fileno: self.gdb_process.stderr.flush() raw_output = self.gdb_process.stderr.read() stream = "stderr" else: raise ValueError( "Developer error. Got unexpected file number %d" % fileno ) responses_list = self._get_responses_list(raw_output, stream) responses += responses_list except IOError: # only occurs in python 2.7 pass if timeout_sec == 0: # just exit immediately break elif responses_list and self._allow_overwrite_timeout_times: # update timeout time to potentially be closer to now to avoid lengthy wait times when nothing is being output by gdb timeout_time_sec = min( time.time() + self.time_to_check_for_additional_output_sec, timeout_time_sec, ) elif time.time() > timeout_time_sec: break return responses
[ "Get", "responses", "on", "unix", "-", "like", "system", ".", "Use", "select", "to", "wait", "for", "output", "." ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L321-L369
[ "def", "_get_responses_unix", "(", "self", ",", "timeout_sec", ")", ":", "timeout_time_sec", "=", "time", ".", "time", "(", ")", "+", "timeout_sec", "responses", "=", "[", "]", "while", "True", ":", "select_timeout", "=", "timeout_time_sec", "-", "time", ".", "time", "(", ")", "# I prefer to not pass a negative value to select", "if", "select_timeout", "<=", "0", ":", "select_timeout", "=", "0", "events", ",", "_", ",", "_", "=", "select", ".", "select", "(", "self", ".", "read_list", ",", "[", "]", ",", "[", "]", ",", "select_timeout", ")", "responses_list", "=", "None", "# to avoid infinite loop if using Python 2", "try", ":", "for", "fileno", "in", "events", ":", "# new data is ready to read", "if", "fileno", "==", "self", ".", "stdout_fileno", ":", "self", ".", "gdb_process", ".", "stdout", ".", "flush", "(", ")", "raw_output", "=", "self", ".", "gdb_process", ".", "stdout", ".", "read", "(", ")", "stream", "=", "\"stdout\"", "elif", "fileno", "==", "self", ".", "stderr_fileno", ":", "self", ".", "gdb_process", ".", "stderr", ".", "flush", "(", ")", "raw_output", "=", "self", ".", "gdb_process", ".", "stderr", ".", "read", "(", ")", "stream", "=", "\"stderr\"", "else", ":", "raise", "ValueError", "(", "\"Developer error. Got unexpected file number %d\"", "%", "fileno", ")", "responses_list", "=", "self", ".", "_get_responses_list", "(", "raw_output", ",", "stream", ")", "responses", "+=", "responses_list", "except", "IOError", ":", "# only occurs in python 2.7", "pass", "if", "timeout_sec", "==", "0", ":", "# just exit immediately", "break", "elif", "responses_list", "and", "self", ".", "_allow_overwrite_timeout_times", ":", "# update timeout time to potentially be closer to now to avoid lengthy wait times when nothing is being output by gdb", "timeout_time_sec", "=", "min", "(", "time", ".", "time", "(", ")", "+", "self", ".", "time_to_check_for_additional_output_sec", ",", "timeout_time_sec", ",", ")", "elif", "time", ".", "time", "(", ")", ">", "timeout_time_sec", ":", "break", "return", "responses" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
GdbController._get_responses_list
Get parsed response list from string output Args: raw_output (unicode): gdb output to parse stream (str): either stdout or stderr
pygdbmi/gdbcontroller.py
def _get_responses_list(self, raw_output, stream): """Get parsed response list from string output Args: raw_output (unicode): gdb output to parse stream (str): either stdout or stderr """ responses = [] raw_output, self._incomplete_output[stream] = _buffer_incomplete_responses( raw_output, self._incomplete_output.get(stream) ) if not raw_output: return responses response_list = list( filter(lambda x: x, raw_output.decode(errors="replace").split("\n")) ) # remove blank lines # parse each response from gdb into a dict, and store in a list for response in response_list: if gdbmiparser.response_is_finished(response): pass else: parsed_response = gdbmiparser.parse_response(response) parsed_response["stream"] = stream self.logger.debug("%s", pformat(parsed_response)) responses.append(parsed_response) return responses
def _get_responses_list(self, raw_output, stream): """Get parsed response list from string output Args: raw_output (unicode): gdb output to parse stream (str): either stdout or stderr """ responses = [] raw_output, self._incomplete_output[stream] = _buffer_incomplete_responses( raw_output, self._incomplete_output.get(stream) ) if not raw_output: return responses response_list = list( filter(lambda x: x, raw_output.decode(errors="replace").split("\n")) ) # remove blank lines # parse each response from gdb into a dict, and store in a list for response in response_list: if gdbmiparser.response_is_finished(response): pass else: parsed_response = gdbmiparser.parse_response(response) parsed_response["stream"] = stream self.logger.debug("%s", pformat(parsed_response)) responses.append(parsed_response) return responses
[ "Get", "parsed", "response", "list", "from", "string", "output", "Args", ":", "raw_output", "(", "unicode", ")", ":", "gdb", "output", "to", "parse", "stream", "(", "str", ")", ":", "either", "stdout", "or", "stderr" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L371-L402
[ "def", "_get_responses_list", "(", "self", ",", "raw_output", ",", "stream", ")", ":", "responses", "=", "[", "]", "raw_output", ",", "self", ".", "_incomplete_output", "[", "stream", "]", "=", "_buffer_incomplete_responses", "(", "raw_output", ",", "self", ".", "_incomplete_output", ".", "get", "(", "stream", ")", ")", "if", "not", "raw_output", ":", "return", "responses", "response_list", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", ",", "raw_output", ".", "decode", "(", "errors", "=", "\"replace\"", ")", ".", "split", "(", "\"\\n\"", ")", ")", ")", "# remove blank lines", "# parse each response from gdb into a dict, and store in a list", "for", "response", "in", "response_list", ":", "if", "gdbmiparser", ".", "response_is_finished", "(", "response", ")", ":", "pass", "else", ":", "parsed_response", "=", "gdbmiparser", ".", "parse_response", "(", "response", ")", "parsed_response", "[", "\"stream\"", "]", "=", "stream", "self", ".", "logger", ".", "debug", "(", "\"%s\"", ",", "pformat", "(", "parsed_response", ")", ")", "responses", ".", "append", "(", "parsed_response", ")", "return", "responses" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
GdbController.send_signal_to_gdb
Send signal name (case insensitive) or number to gdb subprocess gdbmi.send_signal_to_gdb(2) # valid gdbmi.send_signal_to_gdb('sigint') # also valid gdbmi.send_signal_to_gdb('SIGINT') # also valid raises ValueError if signal_input is invalid raises NoGdbProcessError if there is no gdb process to send a signal to
pygdbmi/gdbcontroller.py
def send_signal_to_gdb(self, signal_input): """Send signal name (case insensitive) or number to gdb subprocess gdbmi.send_signal_to_gdb(2) # valid gdbmi.send_signal_to_gdb('sigint') # also valid gdbmi.send_signal_to_gdb('SIGINT') # also valid raises ValueError if signal_input is invalie raises NoGdbProcessError if there is no gdb process to send a signal to """ try: signal = int(signal_input) except Exception: signal = SIGNAL_NAME_TO_NUM.get(signal_input.upper()) if not signal: raise ValueError( 'Could not find signal corresponding to "%s"' % str(signal) ) if self.gdb_process: os.kill(self.gdb_process.pid, signal) else: raise NoGdbProcessError( "Cannot send signal to gdb process because no process exists." )
def send_signal_to_gdb(self, signal_input): """Send signal name (case insensitive) or number to gdb subprocess gdbmi.send_signal_to_gdb(2) # valid gdbmi.send_signal_to_gdb('sigint') # also valid gdbmi.send_signal_to_gdb('SIGINT') # also valid raises ValueError if signal_input is invalie raises NoGdbProcessError if there is no gdb process to send a signal to """ try: signal = int(signal_input) except Exception: signal = SIGNAL_NAME_TO_NUM.get(signal_input.upper()) if not signal: raise ValueError( 'Could not find signal corresponding to "%s"' % str(signal) ) if self.gdb_process: os.kill(self.gdb_process.pid, signal) else: raise NoGdbProcessError( "Cannot send signal to gdb process because no process exists." )
[ "Send", "signal", "name", "(", "case", "insensitive", ")", "or", "number", "to", "gdb", "subprocess", "gdbmi", ".", "send_signal_to_gdb", "(", "2", ")", "#", "valid", "gdbmi", ".", "send_signal_to_gdb", "(", "sigint", ")", "#", "also", "valid", "gdbmi", ".", "send_signal_to_gdb", "(", "SIGINT", ")", "#", "also", "valid" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L404-L428
[ "def", "send_signal_to_gdb", "(", "self", ",", "signal_input", ")", ":", "try", ":", "signal", "=", "int", "(", "signal_input", ")", "except", "Exception", ":", "signal", "=", "SIGNAL_NAME_TO_NUM", ".", "get", "(", "signal_input", ".", "upper", "(", ")", ")", "if", "not", "signal", ":", "raise", "ValueError", "(", "'Could not find signal corresponding to \"%s\"'", "%", "str", "(", "signal", ")", ")", "if", "self", ".", "gdb_process", ":", "os", ".", "kill", "(", "self", ".", "gdb_process", ".", "pid", ",", "signal", ")", "else", ":", "raise", "NoGdbProcessError", "(", "\"Cannot send signal to gdb process because no process exists.\"", ")" ]
709c781794d3c3b903891f83da011d2d995895d1
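A brief sketch mirroring the docstring examples; sending SIGINT to gdb is a common way to interrupt a running inferior. It assumes gdb is on the PATH and that './a.out' is a placeholder for some long-running binary:

from pygdbmi.gdbcontroller import GdbController

gdbmi = GdbController()
gdbmi.write("-file-exec-and-symbols ./a.out")   # './a.out' is a placeholder binary
gdbmi.write("-exec-run", read_response=False)
gdbmi.send_signal_to_gdb("SIGINT")   # same effect as send_signal_to_gdb(2) on most platforms
gdbmi.exit()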
valid
GdbController.exit
Terminate gdb process Returns: None
pygdbmi/gdbcontroller.py
def exit(self): """Terminate gdb process Returns: None""" if self.gdb_process: self.gdb_process.terminate() self.gdb_process.communicate() self.gdb_process = None return None
def exit(self): """Terminate gdb process Returns: None""" if self.gdb_process: self.gdb_process.terminate() self.gdb_process.communicate() self.gdb_process = None return None
[ "Terminate", "gdb", "process", "Returns", ":", "None" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbcontroller.py#L434-L441
[ "def", "exit", "(", "self", ")", ":", "if", "self", ".", "gdb_process", ":", "self", ".", "gdb_process", ".", "terminate", "(", ")", "self", ".", "gdb_process", ".", "communicate", "(", ")", "self", ".", "gdb_process", "=", "None", "return", "None" ]
709c781794d3c3b903891f83da011d2d995895d1
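A small sketch of the shutdown behaviour; it assumes NoGdbProcessError is defined in pygdbmi.gdbcontroller, as the exceptions raised by the methods above suggest:

from pygdbmi.gdbcontroller import GdbController, NoGdbProcessError

gdbmi = GdbController()
gdbmi.exit()                       # gdb_process is terminated and set to None
try:
    gdbmi.write("-gdb-version")    # further use now fails fast
except NoGdbProcessError:
    pass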
valid
main
Build and debug an application programmatically For a list of GDB MI commands, see https://www.sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI.html
example.py
def main(verbose=True): """Build and debug an application programatically For a list of GDB MI commands, see https://www.sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI.html """ # Build C program find_executable(MAKE_CMD) if not find_executable(MAKE_CMD): print( 'Could not find executable "%s". Ensure it is installed and on your $PATH.' % MAKE_CMD ) exit(1) subprocess.check_output([MAKE_CMD, "-C", SAMPLE_C_CODE_DIR, "--quiet"]) # Initialize object that manages gdb subprocess gdbmi = GdbController(verbose=verbose) # Send gdb commands. Gdb machine interface commands are easier to script around, # hence the name "machine interface". # Responses are automatically printed as they are received if verbose is True. # Responses are returned after writing, by default. # Load the file responses = gdbmi.write("-file-exec-and-symbols %s" % SAMPLE_C_BINARY) # Get list of source files used to compile the binary responses = gdbmi.write("-file-list-exec-source-files") # Add breakpoint responses = gdbmi.write("-break-insert main") # Run responses = gdbmi.write("-exec-run") responses = gdbmi.write("-exec-next") responses = gdbmi.write("-exec-next") responses = gdbmi.write("-exec-continue") # noqa: F841 # gdbmi.gdb_process will be None because the gdb subprocess (and its inferior # program) will be terminated gdbmi.exit()
def main(verbose=True): """Build and debug an application programatically For a list of GDB MI commands, see https://www.sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI.html """ # Build C program find_executable(MAKE_CMD) if not find_executable(MAKE_CMD): print( 'Could not find executable "%s". Ensure it is installed and on your $PATH.' % MAKE_CMD ) exit(1) subprocess.check_output([MAKE_CMD, "-C", SAMPLE_C_CODE_DIR, "--quiet"]) # Initialize object that manages gdb subprocess gdbmi = GdbController(verbose=verbose) # Send gdb commands. Gdb machine interface commands are easier to script around, # hence the name "machine interface". # Responses are automatically printed as they are received if verbose is True. # Responses are returned after writing, by default. # Load the file responses = gdbmi.write("-file-exec-and-symbols %s" % SAMPLE_C_BINARY) # Get list of source files used to compile the binary responses = gdbmi.write("-file-list-exec-source-files") # Add breakpoint responses = gdbmi.write("-break-insert main") # Run responses = gdbmi.write("-exec-run") responses = gdbmi.write("-exec-next") responses = gdbmi.write("-exec-next") responses = gdbmi.write("-exec-continue") # noqa: F841 # gdbmi.gdb_process will be None because the gdb subprocess (and its inferior # program) will be terminated gdbmi.exit()
[ "Build", "and", "debug", "an", "application", "programatically" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/example.py#L26-L64
[ "def", "main", "(", "verbose", "=", "True", ")", ":", "# Build C program", "find_executable", "(", "MAKE_CMD", ")", "if", "not", "find_executable", "(", "MAKE_CMD", ")", ":", "print", "(", "'Could not find executable \"%s\". Ensure it is installed and on your $PATH.'", "%", "MAKE_CMD", ")", "exit", "(", "1", ")", "subprocess", ".", "check_output", "(", "[", "MAKE_CMD", ",", "\"-C\"", ",", "SAMPLE_C_CODE_DIR", ",", "\"--quiet\"", "]", ")", "# Initialize object that manages gdb subprocess", "gdbmi", "=", "GdbController", "(", "verbose", "=", "verbose", ")", "# Send gdb commands. Gdb machine interface commands are easier to script around,", "# hence the name \"machine interface\".", "# Responses are automatically printed as they are received if verbose is True.", "# Responses are returned after writing, by default.", "# Load the file", "responses", "=", "gdbmi", ".", "write", "(", "\"-file-exec-and-symbols %s\"", "%", "SAMPLE_C_BINARY", ")", "# Get list of source files used to compile the binary", "responses", "=", "gdbmi", ".", "write", "(", "\"-file-list-exec-source-files\"", ")", "# Add breakpoint", "responses", "=", "gdbmi", ".", "write", "(", "\"-break-insert main\"", ")", "# Run", "responses", "=", "gdbmi", ".", "write", "(", "\"-exec-run\"", ")", "responses", "=", "gdbmi", ".", "write", "(", "\"-exec-next\"", ")", "responses", "=", "gdbmi", ".", "write", "(", "\"-exec-next\"", ")", "responses", "=", "gdbmi", ".", "write", "(", "\"-exec-continue\"", ")", "# noqa: F841", "# gdbmi.gdb_process will be None because the gdb subprocess (and its inferior", "# program) will be terminated", "gdbmi", ".", "exit", "(", ")" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
StringStream.read
Read count characters starting at self.index, and return those characters as a string
pygdbmi/StringStream.py
def read(self, count): """Read count characters starting at self.index, and return those characters as a string """ new_index = self.index + count if new_index > self.len: buf = self.raw_text[self.index :] # return to the end, don't fail else: buf = self.raw_text[self.index : new_index] self.index = new_index return buf
def read(self, count): """Read count characters starting at self.index, and return those characters as a string """ new_index = self.index + count if new_index > self.len: buf = self.raw_text[self.index :] # return to the end, don't fail else: buf = self.raw_text[self.index : new_index] self.index = new_index return buf
[ "Read", "count", "characters", "starting", "at", "self", ".", "index", "and", "return", "those", "characters", "as", "a", "string" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/StringStream.py#L25-L36
[ "def", "read", "(", "self", ",", "count", ")", ":", "new_index", "=", "self", ".", "index", "+", "count", "if", "new_index", ">", "self", ".", "len", ":", "buf", "=", "self", ".", "raw_text", "[", "self", ".", "index", ":", "]", "# return to the end, don't fail", "else", ":", "buf", "=", "self", ".", "raw_text", "[", "self", ".", "index", ":", "new_index", "]", "self", ".", "index", "=", "new_index", "return", "buf" ]
709c781794d3c3b903891f83da011d2d995895d1
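A small sketch of read(); StringStream is constructed the same way parse_response constructs it later in this file:

from pygdbmi.StringStream import StringStream

stream = StringStream("abcdef", debug=False)
stream.read(2)    # 'ab'
stream.read(10)   # 'cdef' -- reading past the end returns whatever is left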
valid
StringStream.advance_past_chars
Advance the index past specific chars Args chars (list): list of characters to advance past Return substring that was advanced past
pygdbmi/StringStream.py
def advance_past_chars(self, chars): """Advance the index past specific chars Args chars (list): list of characters to advance past Return substring that was advanced past """ start_index = self.index while True: current_char = self.raw_text[self.index] self.index += 1 if current_char in chars: break elif self.index == self.len: break return self.raw_text[start_index : self.index - 1]
def advance_past_chars(self, chars): """Advance the index past specific chars Args chars (list): list of characters to advance past Return substring that was advanced past """ start_index = self.index while True: current_char = self.raw_text[self.index] self.index += 1 if current_char in chars: break elif self.index == self.len: break return self.raw_text[start_index : self.index - 1]
[ "Advance", "the", "index", "past", "specific", "chars", "Args", "chars", "(", "list", ")", ":", "list", "of", "characters", "to", "advance", "past" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/StringStream.py#L42-L58
[ "def", "advance_past_chars", "(", "self", ",", "chars", ")", ":", "start_index", "=", "self", ".", "index", "while", "True", ":", "current_char", "=", "self", ".", "raw_text", "[", "self", ".", "index", "]", "self", ".", "index", "+=", "1", "if", "current_char", "in", "chars", ":", "break", "elif", "self", ".", "index", "==", "self", ".", "len", ":", "break", "return", "self", ".", "raw_text", "[", "start_index", ":", "self", ".", "index", "-", "1", "]" ]
709c781794d3c3b903891f83da011d2d995895d1
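A small sketch of advance_past_chars() on a gdb-mi style fragment:

from pygdbmi.StringStream import StringStream

stream = StringStream('key="value",rest', debug=False)
stream.advance_past_chars(["="])   # returns 'key'; the index now sits just after '='
stream.read(1)                     # '"'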
valid
StringStream.advance_past_string_with_gdb_escapes
Advance past a gdb-escaped string and return its unescaped contents; chars_to_remove_gdb_escape are characters that gdb escapes that should not be escaped by this parser
pygdbmi/StringStream.py
def advance_past_string_with_gdb_escapes(self, chars_to_remove_gdb_escape=None): """characters that gdb escapes that should not be escaped by this parser """ if chars_to_remove_gdb_escape is None: chars_to_remove_gdb_escape = ['"'] buf = "" while True: c = self.raw_text[self.index] self.index += 1 logging.debug("%s", fmt_cyan(c)) if c == "\\": # We are on a backslash and there is another character after the backslash # to parse. Handle this case specially since gdb escaped it for us # Get the next char that is being escaped c2 = self.raw_text[self.index] self.index += 1 # only store the escaped character in the buffer; don't store the backslash # (don't leave it escaped) buf += c2 elif c == '"': # Quote is closed. Exit (and don't include the end quote). break else: # capture this character, and keep capturing buf += c return buf
def advance_past_string_with_gdb_escapes(self, chars_to_remove_gdb_escape=None): """characters that gdb escapes that should not be escaped by this parser """ if chars_to_remove_gdb_escape is None: chars_to_remove_gdb_escape = ['"'] buf = "" while True: c = self.raw_text[self.index] self.index += 1 logging.debug("%s", fmt_cyan(c)) if c == "\\": # We are on a backslash and there is another character after the backslash # to parse. Handle this case specially since gdb escaped it for us # Get the next char that is being escaped c2 = self.raw_text[self.index] self.index += 1 # only store the escaped character in the buffer; don't store the backslash # (don't leave it escaped) buf += c2 elif c == '"': # Quote is closed. Exit (and don't include the end quote). break else: # capture this character, and keep capturing buf += c return buf
[ "characters", "that", "gdb", "escapes", "that", "should", "not", "be", "escaped", "by", "this", "parser" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/StringStream.py#L60-L92
[ "def", "advance_past_string_with_gdb_escapes", "(", "self", ",", "chars_to_remove_gdb_escape", "=", "None", ")", ":", "if", "chars_to_remove_gdb_escape", "is", "None", ":", "chars_to_remove_gdb_escape", "=", "[", "'\"'", "]", "buf", "=", "\"\"", "while", "True", ":", "c", "=", "self", ".", "raw_text", "[", "self", ".", "index", "]", "self", ".", "index", "+=", "1", "logging", ".", "debug", "(", "\"%s\"", ",", "fmt_cyan", "(", "c", ")", ")", "if", "c", "==", "\"\\\\\"", ":", "# We are on a backslash and there is another character after the backslash", "# to parse. Handle this case specially since gdb escaped it for us", "# Get the next char that is being escaped", "c2", "=", "self", ".", "raw_text", "[", "self", ".", "index", "]", "self", ".", "index", "+=", "1", "# only store the escaped character in the buffer; don't store the backslash", "# (don't leave it escaped)", "buf", "+=", "c2", "elif", "c", "==", "'\"'", ":", "# Quote is closed. Exit (and don't include the end quote).", "break", "else", ":", "# capture this character, and keep capturing", "buf", "+=", "c", "return", "buf" ]
709c781794d3c3b903891f83da011d2d995895d1
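A small sketch of advance_past_string_with_gdb_escapes(); the stream starts just after an opening quote, and the sample text is a made-up gdb-escaped string:

from pygdbmi.StringStream import StringStream

# actual raw text: a \"quoted\" word",tail
stream = StringStream('a \\"quoted\\" word",tail', debug=False)
stream.advance_past_string_with_gdb_escapes()   # 'a "quoted" word' (backslashes removed)
stream.read(1)                                  # ',' -- the closing quote was consumed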
valid
parse_response
Parse gdb mi text and turn it into a dictionary. See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records for details on types of gdb mi output. Args: gdb_mi_text (str): String output from gdb Returns: dict with the following keys: type (either 'notify', 'result', 'console', 'log', 'target', 'done'), message (str or None), payload (str, list, dict, or None)
pygdbmi/gdbmiparser.py
def parse_response(gdb_mi_text): """Parse gdb mi text and turn it into a dictionary. See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records for details on types of gdb mi output. Args: gdb_mi_text (str): String output from gdb Returns: dict with the following keys: type (either 'notify', 'result', 'console', 'log', 'target', 'done'), message (str or None), payload (str, list, dict, or None) """ stream = StringStream(gdb_mi_text, debug=_DEBUG) if _GDB_MI_NOTIFY_RE.match(gdb_mi_text): token, message, payload = _get_notify_msg_and_payload(gdb_mi_text, stream) return { "type": "notify", "message": message, "payload": payload, "token": token, } elif _GDB_MI_RESULT_RE.match(gdb_mi_text): token, message, payload = _get_result_msg_and_payload(gdb_mi_text, stream) return { "type": "result", "message": message, "payload": payload, "token": token, } elif _GDB_MI_CONSOLE_RE.match(gdb_mi_text): return { "type": "console", "message": None, "payload": _GDB_MI_CONSOLE_RE.match(gdb_mi_text).groups()[0], } elif _GDB_MI_LOG_RE.match(gdb_mi_text): return { "type": "log", "message": None, "payload": _GDB_MI_LOG_RE.match(gdb_mi_text).groups()[0], } elif _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text): return { "type": "target", "message": None, "payload": _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text).groups()[0], } elif response_is_finished(gdb_mi_text): return {"type": "done", "message": None, "payload": None} else: # This was not gdb mi output, so it must have just been printed by # the inferior program that's being debugged return {"type": "output", "message": None, "payload": gdb_mi_text}
def parse_response(gdb_mi_text): """Parse gdb mi text and turn it into a dictionary. See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records for details on types of gdb mi output. Args: gdb_mi_text (str): String output from gdb Returns: dict with the following keys: type (either 'notify', 'result', 'console', 'log', 'target', 'done'), message (str or None), payload (str, list, dict, or None) """ stream = StringStream(gdb_mi_text, debug=_DEBUG) if _GDB_MI_NOTIFY_RE.match(gdb_mi_text): token, message, payload = _get_notify_msg_and_payload(gdb_mi_text, stream) return { "type": "notify", "message": message, "payload": payload, "token": token, } elif _GDB_MI_RESULT_RE.match(gdb_mi_text): token, message, payload = _get_result_msg_and_payload(gdb_mi_text, stream) return { "type": "result", "message": message, "payload": payload, "token": token, } elif _GDB_MI_CONSOLE_RE.match(gdb_mi_text): return { "type": "console", "message": None, "payload": _GDB_MI_CONSOLE_RE.match(gdb_mi_text).groups()[0], } elif _GDB_MI_LOG_RE.match(gdb_mi_text): return { "type": "log", "message": None, "payload": _GDB_MI_LOG_RE.match(gdb_mi_text).groups()[0], } elif _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text): return { "type": "target", "message": None, "payload": _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text).groups()[0], } elif response_is_finished(gdb_mi_text): return {"type": "done", "message": None, "payload": None} else: # This was not gdb mi output, so it must have just been printed by # the inferior program that's being debugged return {"type": "output", "message": None, "payload": gdb_mi_text}
[ "Parse", "gdb", "mi", "text", "and", "turn", "it", "into", "a", "dictionary", "." ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L40-L102
[ "def", "parse_response", "(", "gdb_mi_text", ")", ":", "stream", "=", "StringStream", "(", "gdb_mi_text", ",", "debug", "=", "_DEBUG", ")", "if", "_GDB_MI_NOTIFY_RE", ".", "match", "(", "gdb_mi_text", ")", ":", "token", ",", "message", ",", "payload", "=", "_get_notify_msg_and_payload", "(", "gdb_mi_text", ",", "stream", ")", "return", "{", "\"type\"", ":", "\"notify\"", ",", "\"message\"", ":", "message", ",", "\"payload\"", ":", "payload", ",", "\"token\"", ":", "token", ",", "}", "elif", "_GDB_MI_RESULT_RE", ".", "match", "(", "gdb_mi_text", ")", ":", "token", ",", "message", ",", "payload", "=", "_get_result_msg_and_payload", "(", "gdb_mi_text", ",", "stream", ")", "return", "{", "\"type\"", ":", "\"result\"", ",", "\"message\"", ":", "message", ",", "\"payload\"", ":", "payload", ",", "\"token\"", ":", "token", ",", "}", "elif", "_GDB_MI_CONSOLE_RE", ".", "match", "(", "gdb_mi_text", ")", ":", "return", "{", "\"type\"", ":", "\"console\"", ",", "\"message\"", ":", "None", ",", "\"payload\"", ":", "_GDB_MI_CONSOLE_RE", ".", "match", "(", "gdb_mi_text", ")", ".", "groups", "(", ")", "[", "0", "]", ",", "}", "elif", "_GDB_MI_LOG_RE", ".", "match", "(", "gdb_mi_text", ")", ":", "return", "{", "\"type\"", ":", "\"log\"", ",", "\"message\"", ":", "None", ",", "\"payload\"", ":", "_GDB_MI_LOG_RE", ".", "match", "(", "gdb_mi_text", ")", ".", "groups", "(", ")", "[", "0", "]", ",", "}", "elif", "_GDB_MI_TARGET_OUTPUT_RE", ".", "match", "(", "gdb_mi_text", ")", ":", "return", "{", "\"type\"", ":", "\"target\"", ",", "\"message\"", ":", "None", ",", "\"payload\"", ":", "_GDB_MI_TARGET_OUTPUT_RE", ".", "match", "(", "gdb_mi_text", ")", ".", "groups", "(", ")", "[", "0", "]", ",", "}", "elif", "response_is_finished", "(", "gdb_mi_text", ")", ":", "return", "{", "\"type\"", ":", "\"done\"", ",", "\"message\"", ":", "None", ",", "\"payload\"", ":", "None", "}", "else", ":", "# This was not gdb mi output, so it must have just been printed by", "# the inferior program that's being debugged", "return", "{", "\"type\"", ":", "\"output\"", ",", "\"message\"", ":", "None", ",", "\"payload\"", ":", "gdb_mi_text", "}" ]
709c781794d3c3b903891f83da011d2d995895d1
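A few representative calls to parse_response(), following the record types listed in the docstring; the results shown in comments assume the parsing rules in the code above:

from pygdbmi import gdbmiparser

gdbmiparser.parse_response('*stopped,reason="breakpoint-hit"')
# {'type': 'notify', 'message': 'stopped',
#  'payload': {'reason': 'breakpoint-hit'}, 'token': None}

gdbmiparser.parse_response('^done')
# {'type': 'result', 'message': 'done', 'payload': None, 'token': None}

gdbmiparser.parse_response('~"hi there"')
# {'type': 'console', 'message': None, 'payload': 'hi there'}

gdbmiparser.parse_response('just some program output')
# {'type': 'output', 'message': None, 'payload': 'just some program output'}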
valid
assert_match
If values don't match, print them and raise a ValueError, otherwise, continue Raises: ValueError if arguments do not match
pygdbmi/gdbmiparser.py
def assert_match(actual_char_or_str, expected_char_or_str): """If values don't match, print them and raise a ValueError, otherwise, continue Raises: ValueError if argumetns do not match""" if expected_char_or_str != actual_char_or_str: print("Expected") pprint(expected_char_or_str) print("") print("Got") pprint(actual_char_or_str) raise ValueError()
def assert_match(actual_char_or_str, expected_char_or_str): """If values don't match, print them and raise a ValueError, otherwise, continue Raises: ValueError if argumetns do not match""" if expected_char_or_str != actual_char_or_str: print("Expected") pprint(expected_char_or_str) print("") print("Got") pprint(actual_char_or_str) raise ValueError()
[ "If", "values", "don", "t", "match", "print", "them", "and", "raise", "a", "ValueError", "otherwise", "continue", "Raises", ":", "ValueError", "if", "argumetns", "do", "not", "match" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L115-L125
[ "def", "assert_match", "(", "actual_char_or_str", ",", "expected_char_or_str", ")", ":", "if", "expected_char_or_str", "!=", "actual_char_or_str", ":", "print", "(", "\"Expected\"", ")", "pprint", "(", "expected_char_or_str", ")", "print", "(", "\"\"", ")", "print", "(", "\"Got\"", ")", "pprint", "(", "actual_char_or_str", ")", "raise", "ValueError", "(", ")" ]
709c781794d3c3b903891f83da011d2d995895d1
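A brief sketch combining assert_match with parse_response, in the spirit of the library's test suite; both names come from pygdbmi.gdbmiparser as shown in these records:

from pygdbmi.gdbmiparser import assert_match, parse_response

assert_match(
    parse_response("^done"),
    {"type": "result", "message": "done", "payload": None, "token": None},
)
# A mismatch would print both values and raise ValueError.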
valid
_get_notify_msg_and_payload
Get notify message and payload dict
pygdbmi/gdbmiparser.py
def _get_notify_msg_and_payload(result, stream): """Get notify message and payload dict""" token = stream.advance_past_chars(["=", "*"]) token = int(token) if token != "" else None logger.debug("%s", fmt_green("parsing message")) message = stream.advance_past_chars([","]) logger.debug("parsed message") logger.debug("%s", fmt_green(message)) payload = _parse_dict(stream) return token, message.strip(), payload
def _get_notify_msg_and_payload(result, stream): """Get notify message and payload dict""" token = stream.advance_past_chars(["=", "*"]) token = int(token) if token != "" else None logger.debug("%s", fmt_green("parsing message")) message = stream.advance_past_chars([","]) logger.debug("parsed message") logger.debug("%s", fmt_green(message)) payload = _parse_dict(stream) return token, message.strip(), payload
[ "Get", "notify", "message", "and", "payload", "dict" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L182-L193
[ "def", "_get_notify_msg_and_payload", "(", "result", ",", "stream", ")", ":", "token", "=", "stream", ".", "advance_past_chars", "(", "[", "\"=\"", ",", "\"*\"", "]", ")", "token", "=", "int", "(", "token", ")", "if", "token", "!=", "\"\"", "else", "None", "logger", ".", "debug", "(", "\"%s\"", ",", "fmt_green", "(", "\"parsing message\"", ")", ")", "message", "=", "stream", ".", "advance_past_chars", "(", "[", "\",\"", "]", ")", "logger", ".", "debug", "(", "\"parsed message\"", ")", "logger", ".", "debug", "(", "\"%s\"", ",", "fmt_green", "(", "message", ")", ")", "payload", "=", "_parse_dict", "(", "stream", ")", "return", "token", ",", "message", ".", "strip", "(", ")", ",", "payload" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
_get_result_msg_and_payload
Get result message and payload dict
pygdbmi/gdbmiparser.py
def _get_result_msg_and_payload(result, stream): """Get result message and payload dict""" groups = _GDB_MI_RESULT_RE.match(result).groups() token = int(groups[0]) if groups[0] != "" else None message = groups[1] if groups[2] is None: payload = None else: stream.advance_past_chars([","]) payload = _parse_dict(stream) return token, message, payload
def _get_result_msg_and_payload(result, stream):
    """Get result message and payload dict"""
    groups = _GDB_MI_RESULT_RE.match(result).groups()
    token = int(groups[0]) if groups[0] != "" else None
    message = groups[1]

    if groups[2] is None:
        payload = None
    else:
        stream.advance_past_chars([","])
        payload = _parse_dict(stream)

    return token, message, payload
[ "Get", "result", "message", "and", "payload", "dict" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L196-L209
[ "def", "_get_result_msg_and_payload", "(", "result", ",", "stream", ")", ":", "groups", "=", "_GDB_MI_RESULT_RE", ".", "match", "(", "result", ")", ".", "groups", "(", ")", "token", "=", "int", "(", "groups", "[", "0", "]", ")", "if", "groups", "[", "0", "]", "!=", "\"\"", "else", "None", "message", "=", "groups", "[", "1", "]", "if", "groups", "[", "2", "]", "is", "None", ":", "payload", "=", "None", "else", ":", "stream", ".", "advance_past_chars", "(", "[", "\",\"", "]", ")", "payload", "=", "_parse_dict", "(", "stream", ")", "return", "token", ",", "message", ",", "payload" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
_parse_dict
Parse dictionary, with optional starting character '{'

return (tuple):
    Number of characters parsed from to_parse
    Parsed dictionary
pygdbmi/gdbmiparser.py
def _parse_dict(stream):
    """Parse dictionary, with optional starting character '{'
    return (tuple):
        Number of characters parsed from to_parse
        Parsed dictionary
    """
    obj = {}

    logger.debug("%s", fmt_green("parsing dict"))

    while True:
        c = stream.read(1)
        if c in _WHITESPACE:
            pass
        elif c in ["{", ","]:
            pass
        elif c in ["}", ""]:
            # end of object, exit loop
            break
        else:
            stream.seek(-1)
            key, val = _parse_key_val(stream)
            if key in obj:
                # This is a gdb bug. We should never get repeated keys in a dict!
                # See https://sourceware.org/bugzilla/show_bug.cgi?id=22217
                # and https://github.com/cs01/pygdbmi/issues/19
                # Example:
                #   thread-ids={thread-id="1",thread-id="2"}
                # Results in:
                #   thread-ids: {{'thread-id': ['1', '2']}}
                # Rather than the lossy
                #   thread-ids: {'thread-id': 2}  # '1' got overwritten!
                if isinstance(obj[key], list):
                    obj[key].append(val)
                else:
                    obj[key] = [obj[key], val]
            else:
                obj[key] = val

            look_ahead_for_garbage = True
            c = stream.read(1)
            while look_ahead_for_garbage:
                if c in ["}", ",", ""]:
                    look_ahead_for_garbage = False
                else:
                    # got some garbage text, skip it. for example:
                    # name="gdb"garbage  # skip over 'garbage'
                    # name="gdb"\n  # skip over '\n'
                    logger.debug("skipping unexpected character: " + c)
                    c = stream.read(1)
            stream.seek(-1)

    logger.debug("parsed dict")
    logger.debug("%s", fmt_green(obj))
    return obj
def _parse_dict(stream):
    """Parse dictionary, with optional starting character '{'
    return (tuple):
        Number of characters parsed from to_parse
        Parsed dictionary
    """
    obj = {}

    logger.debug("%s", fmt_green("parsing dict"))

    while True:
        c = stream.read(1)
        if c in _WHITESPACE:
            pass
        elif c in ["{", ","]:
            pass
        elif c in ["}", ""]:
            # end of object, exit loop
            break
        else:
            stream.seek(-1)
            key, val = _parse_key_val(stream)
            if key in obj:
                # This is a gdb bug. We should never get repeated keys in a dict!
                # See https://sourceware.org/bugzilla/show_bug.cgi?id=22217
                # and https://github.com/cs01/pygdbmi/issues/19
                # Example:
                #   thread-ids={thread-id="1",thread-id="2"}
                # Results in:
                #   thread-ids: {{'thread-id': ['1', '2']}}
                # Rather than the lossy
                #   thread-ids: {'thread-id': 2}  # '1' got overwritten!
                if isinstance(obj[key], list):
                    obj[key].append(val)
                else:
                    obj[key] = [obj[key], val]
            else:
                obj[key] = val

            look_ahead_for_garbage = True
            c = stream.read(1)
            while look_ahead_for_garbage:
                if c in ["}", ",", ""]:
                    look_ahead_for_garbage = False
                else:
                    # got some garbage text, skip it. for example:
                    # name="gdb"garbage  # skip over 'garbage'
                    # name="gdb"\n  # skip over '\n'
                    logger.debug("skipping unexpected character: " + c)
                    c = stream.read(1)
            stream.seek(-1)

    logger.debug("parsed dict")
    logger.debug("%s", fmt_green(obj))
    return obj
[ "Parse", "dictionary", "with", "optional", "starting", "character", "{", "return", "(", "tuple", ")", ":", "Number", "of", "characters", "parsed", "from", "to_parse", "Parsed", "dictionary" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L212-L267
[ "def", "_parse_dict", "(", "stream", ")", ":", "obj", "=", "{", "}", "logger", ".", "debug", "(", "\"%s\"", ",", "fmt_green", "(", "\"parsing dict\"", ")", ")", "while", "True", ":", "c", "=", "stream", ".", "read", "(", "1", ")", "if", "c", "in", "_WHITESPACE", ":", "pass", "elif", "c", "in", "[", "\"{\"", ",", "\",\"", "]", ":", "pass", "elif", "c", "in", "[", "\"}\"", ",", "\"\"", "]", ":", "# end of object, exit loop", "break", "else", ":", "stream", ".", "seek", "(", "-", "1", ")", "key", ",", "val", "=", "_parse_key_val", "(", "stream", ")", "if", "key", "in", "obj", ":", "# This is a gdb bug. We should never get repeated keys in a dict!", "# See https://sourceware.org/bugzilla/show_bug.cgi?id=22217", "# and https://github.com/cs01/pygdbmi/issues/19", "# Example:", "# thread-ids={thread-id=\"1\",thread-id=\"2\"}", "# Results in:", "# thread-ids: {{'thread-id': ['1', '2']}}", "# Rather than the lossy", "# thread-ids: {'thread-id': 2} # '1' got overwritten!", "if", "isinstance", "(", "obj", "[", "key", "]", ",", "list", ")", ":", "obj", "[", "key", "]", ".", "append", "(", "val", ")", "else", ":", "obj", "[", "key", "]", "=", "[", "obj", "[", "key", "]", ",", "val", "]", "else", ":", "obj", "[", "key", "]", "=", "val", "look_ahead_for_garbage", "=", "True", "c", "=", "stream", ".", "read", "(", "1", ")", "while", "look_ahead_for_garbage", ":", "if", "c", "in", "[", "\"}\"", ",", "\",\"", ",", "\"\"", "]", ":", "look_ahead_for_garbage", "=", "False", "else", ":", "# got some garbage text, skip it. for example:", "# name=\"gdb\"gargage # skip over 'garbage'", "# name=\"gdb\"\\n # skip over '\\n'", "logger", ".", "debug", "(", "\"skipping unexpected charcter: \"", "+", "c", ")", "c", "=", "stream", ".", "read", "(", "1", ")", "stream", ".", "seek", "(", "-", "1", ")", "logger", ".", "debug", "(", "\"parsed dict\"", ")", "logger", ".", "debug", "(", "\"%s\"", ",", "fmt_green", "(", "obj", ")", ")", "return", "obj" ]
709c781794d3c3b903891f83da011d2d995895d1
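The helpers above are internal; the sketch below shows how the repeated-key handling in _parse_dict surfaces through pygdbmi's public parse_response function. The expected payload shape is inferred from the comment in the code, not verified here:

from pygdbmi import gdbmiparser

# Reproduces the thread-ids example from the comment in _parse_dict
response = gdbmiparser.parse_response('^done,thread-ids={thread-id="1",thread-id="2"}')

# The repeated key should be collected into a list instead of being overwritten,
# i.e. response['payload']['thread-ids'] == {'thread-id': ['1', '2']}
print(response['payload'])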
valid
_parse_key_val
Parse key, value combination

return (tuple):
    Parsed key (string)
    Parsed value (either a string, array, or dict)
pygdbmi/gdbmiparser.py
def _parse_key_val(stream):
    """Parse key, value combination
    return (tuple):
        Parsed key (string)
        Parsed value (either a string, array, or dict)
    """
    logger.debug("parsing key/val")
    key = _parse_key(stream)
    val = _parse_val(stream)

    logger.debug("parsed key/val")
    logger.debug("%s", fmt_green(key))
    logger.debug("%s", fmt_green(val))

    return key, val
def _parse_key_val(stream):
    """Parse key, value combination
    return (tuple):
        Parsed key (string)
        Parsed value (either a string, array, or dict)
    """
    logger.debug("parsing key/val")
    key = _parse_key(stream)
    val = _parse_val(stream)

    logger.debug("parsed key/val")
    logger.debug("%s", fmt_green(key))
    logger.debug("%s", fmt_green(val))

    return key, val
[ "Parse", "key", "value", "combination", "return", "(", "tuple", ")", ":", "Parsed", "key", "(", "string", ")", "Parsed", "value", "(", "either", "a", "string", "array", "or", "dict", ")" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L270-L285
[ "def", "_parse_key_val", "(", "stream", ")", ":", "logger", ".", "debug", "(", "\"parsing key/val\"", ")", "key", "=", "_parse_key", "(", "stream", ")", "val", "=", "_parse_val", "(", "stream", ")", "logger", ".", "debug", "(", "\"parsed key/val\"", ")", "logger", ".", "debug", "(", "\"%s\"", ",", "fmt_green", "(", "key", ")", ")", "logger", ".", "debug", "(", "\"%s\"", ",", "fmt_green", "(", "val", ")", ")", "return", "key", ",", "val" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
_parse_key
Parse key, value combination

returns :
    Parsed key (string)
pygdbmi/gdbmiparser.py
def _parse_key(stream):
    """Parse key, value combination
    returns :
        Parsed key (string)
    """
    logger.debug("parsing key")

    key = stream.advance_past_chars(["="])

    logger.debug("parsed key:")
    logger.debug("%s", fmt_green(key))
    return key
def _parse_key(stream):
    """Parse key, value combination
    returns :
        Parsed key (string)
    """
    logger.debug("parsing key")

    key = stream.advance_past_chars(["="])

    logger.debug("parsed key:")
    logger.debug("%s", fmt_green(key))
    return key
[ "Parse", "key", "value", "combination", "returns", ":", "Parsed", "key", "(", "string", ")" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L288-L299
[ "def", "_parse_key", "(", "stream", ")", ":", "logger", ".", "debug", "(", "\"parsing key\"", ")", "key", "=", "stream", ".", "advance_past_chars", "(", "[", "\"=\"", "]", ")", "logger", ".", "debug", "(", "\"parsed key:\"", ")", "logger", ".", "debug", "(", "\"%s\"", ",", "fmt_green", "(", "key", ")", ")", "return", "key" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
_parse_val
Parse value from string

returns:
    Parsed value (either a string, array, or dict)
pygdbmi/gdbmiparser.py
def _parse_val(stream):
    """Parse value from string
    returns:
        Parsed value (either a string, array, or dict)
    """
    logger.debug("parsing value")

    while True:
        c = stream.read(1)

        if c == "{":
            # Start object
            val = _parse_dict(stream)
            break

        elif c == "[":
            # Start of an array
            val = _parse_array(stream)
            break

        elif c == '"':
            # Start of a string
            val = stream.advance_past_string_with_gdb_escapes()
            break

        elif _DEBUG:
            raise ValueError("unexpected character: %s" % c)

        else:
            print(
                'pygdbmi warning: encountered unexpected character: "%s". Continuing.'
                % c
            )
            val = ""  # this will be overwritten if there are more characters to be read

    logger.debug("parsed value:")
    logger.debug("%s", fmt_green(val))

    return val
def _parse_val(stream):
    """Parse value from string
    returns:
        Parsed value (either a string, array, or dict)
    """
    logger.debug("parsing value")

    while True:
        c = stream.read(1)

        if c == "{":
            # Start object
            val = _parse_dict(stream)
            break

        elif c == "[":
            # Start of an array
            val = _parse_array(stream)
            break

        elif c == '"':
            # Start of a string
            val = stream.advance_past_string_with_gdb_escapes()
            break

        elif _DEBUG:
            raise ValueError("unexpected character: %s" % c)

        else:
            print(
                'pygdbmi warning: encountered unexpected character: "%s". Continuing.'
                % c
            )
            val = ""  # this will be overwritten if there are more characters to be read

    logger.debug("parsed value:")
    logger.debug("%s", fmt_green(val))

    return val
[ "Parse", "value", "from", "string", "returns", ":", "Parsed", "value", "(", "either", "a", "string", "array", "or", "dict", ")" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L302-L341
[ "def", "_parse_val", "(", "stream", ")", ":", "logger", ".", "debug", "(", "\"parsing value\"", ")", "while", "True", ":", "c", "=", "stream", ".", "read", "(", "1", ")", "if", "c", "==", "\"{\"", ":", "# Start object", "val", "=", "_parse_dict", "(", "stream", ")", "break", "elif", "c", "==", "\"[\"", ":", "# Start of an array", "val", "=", "_parse_array", "(", "stream", ")", "break", "elif", "c", "==", "'\"'", ":", "# Start of a string", "val", "=", "stream", ".", "advance_past_string_with_gdb_escapes", "(", ")", "break", "elif", "_DEBUG", ":", "raise", "ValueError", "(", "\"unexpected character: %s\"", "%", "c", ")", "else", ":", "print", "(", "'pygdbmi warning: encountered unexpected character: \"%s\". Continuing.'", "%", "c", ")", "val", "=", "\"\"", "# this will be overwritten if there are more characters to be read", "logger", ".", "debug", "(", "\"parsed value:\"", ")", "logger", ".", "debug", "(", "\"%s\"", ",", "fmt_green", "(", "val", ")", ")", "return", "val" ]
709c781794d3c3b903891f83da011d2d995895d1
valid
_parse_array
Parse an array, stream should be passed the initial [

returns:
    Parsed array
pygdbmi/gdbmiparser.py
def _parse_array(stream):
    """Parse an array, stream should be passed the initial [
    returns:
        Parsed array
    """
    logger.debug("parsing array")
    arr = []
    while True:
        c = stream.read(1)

        if c in _GDB_MI_VALUE_START_CHARS:
            stream.seek(-1)
            val = _parse_val(stream)
            arr.append(val)
        elif c in _WHITESPACE:
            pass
        elif c == ",":
            pass
        elif c == "]":
            # Stop when this array has finished. Note
            # that elements of this array can also be arrays.
            break

    logger.debug("parsed array:")
    logger.debug("%s", fmt_green(arr))
    return arr
def _parse_array(stream):
    """Parse an array, stream should be passed the initial [
    returns:
        Parsed array
    """
    logger.debug("parsing array")
    arr = []
    while True:
        c = stream.read(1)

        if c in _GDB_MI_VALUE_START_CHARS:
            stream.seek(-1)
            val = _parse_val(stream)
            arr.append(val)
        elif c in _WHITESPACE:
            pass
        elif c == ",":
            pass
        elif c == "]":
            # Stop when this array has finished. Note
            # that elements of this array can also be arrays.
            break

    logger.debug("parsed array:")
    logger.debug("%s", fmt_green(arr))
    return arr
[ "Parse", "an", "array", "stream", "should", "be", "passed", "the", "initial", "[", "returns", ":", "Parsed", "array" ]
cs01/pygdbmi
python
https://github.com/cs01/pygdbmi/blob/709c781794d3c3b903891f83da011d2d995895d1/pygdbmi/gdbmiparser.py#L344-L370
[ "def", "_parse_array", "(", "stream", ")", ":", "logger", ".", "debug", "(", "\"parsing array\"", ")", "arr", "=", "[", "]", "while", "True", ":", "c", "=", "stream", ".", "read", "(", "1", ")", "if", "c", "in", "_GDB_MI_VALUE_START_CHARS", ":", "stream", ".", "seek", "(", "-", "1", ")", "val", "=", "_parse_val", "(", "stream", ")", "arr", ".", "append", "(", "val", ")", "elif", "c", "in", "_WHITESPACE", ":", "pass", "elif", "c", "==", "\",\"", ":", "pass", "elif", "c", "==", "\"]\"", ":", "# Stop when this array has finished. Note", "# that elements of this array can be also be arrays.", "break", "logger", ".", "debug", "(", "\"parsed array:\"", ")", "logger", ".", "debug", "(", "\"%s\"", ",", "fmt_green", "(", "arr", ")", ")", "return", "arr" ]
709c781794d3c3b903891f83da011d2d995895d1
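A hedged sketch showing _parse_array and _parse_dict working together on a nested payload, again through the public parse_response entry point; the MI record below is a plausible notify line, not one captured from a real gdb session:

from pygdbmi import gdbmiparser

response = gdbmiparser.parse_response(
    '=breakpoint-modified,bkpt={number="1",locations=[{addr="0x1000"},{addr="0x2000"}]}'
)
# Expected shape: response['payload']['bkpt']['locations'] is a list of two dicts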
valid
JsDumper.as_parameters
Dump python list as the parameter of javascript function

:param parameters:
:param variables:
:return:
django_echarts/utils/interfaces.py
def as_parameters(*parameters, variables=None):
    """
    Dump python list as the parameter of javascript function
    :param parameters:
    :param variables:
    :return:
    """
    s = json.dumps(parameters)
    s = s[1:-1]
    if variables:
        for v in variables:
            if v in parameters:
                s = s.replace('"' + v + '"', v)
    return s
def as_parameters(*parameters, variables=None):
    """
    Dump python list as the parameter of javascript function
    :param parameters:
    :param variables:
    :return:
    """
    s = json.dumps(parameters)
    s = s[1:-1]
    if variables:
        for v in variables:
            if v in parameters:
                s = s.replace('"' + v + '"', v)
    return s
[ "Dump", "python", "list", "as", "the", "parameter", "of", "javascript", "function", ":", "param", "parameters", ":", ":", "param", "variables", ":", ":", "return", ":" ]
kinegratii/django-echarts
python
https://github.com/kinegratii/django-echarts/blob/50f9ebb60ccd5e96aeb88176b6e8c789a66b7677/django_echarts/utils/interfaces.py#L81-L94
[ "def", "as_parameters", "(", "*", "parameters", ",", "variables", "=", "None", ")", ":", "s", "=", "json", ".", "dumps", "(", "parameters", ")", "s", "=", "s", "[", "1", ":", "-", "1", "]", "if", "variables", ":", "for", "v", "in", "variables", ":", "if", "v", "in", "parameters", ":", "s", "=", "s", ".", "replace", "(", "'\"'", "+", "v", "+", "'\"'", ",", "v", ")", "return", "s" ]
50f9ebb60ccd5e96aeb88176b6e8c789a66b7677
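A short illustration of what as_parameters produces, based only on the code above and assuming it is exposed as a static helper on JsDumper; the argument values are made up:

# Plain values are JSON-quoted; names listed in `variables` are left bare so
# they refer to javascript variables rather than string literals.
JsDumper.as_parameters('macarons', 'div_id', variables=['div_id'])
# -> '"macarons", div_id'

JsDumper.as_parameters(800, 400)
# -> '800, 400'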
valid
SettingsStore.generate_local_url
Generate the local url for a js file.

:param js_name:
:return:
django_echarts/plugins/store.py
def generate_local_url(self, js_name):
    """
    Generate the local url for a js file.
    :param js_name:
    :return:
    """
    host = self._settings['local_host'].format(**self._host_context).rstrip('/')
    return '{}/{}.js'.format(host, js_name)
def generate_local_url(self, js_name):
    """
    Generate the local url for a js file.
    :param js_name:
    :return:
    """
    host = self._settings['local_host'].format(**self._host_context).rstrip('/')
    return '{}/{}.js'.format(host, js_name)
[ "Generate", "the", "local", "url", "for", "a", "js", "file", ".", ":", "param", "js_name", ":", ":", "return", ":" ]
kinegratii/django-echarts
python
https://github.com/kinegratii/django-echarts/blob/50f9ebb60ccd5e96aeb88176b6e8c789a66b7677/django_echarts/plugins/store.py#L74-L81
[ "def", "generate_local_url", "(", "self", ",", "js_name", ")", ":", "host", "=", "self", ".", "_settings", "[", "'local_host'", "]", ".", "format", "(", "*", "*", "self", ".", "_host_context", ")", ".", "rstrip", "(", "'/'", ")", "return", "'{}/{}.js'", ".", "format", "(", "host", ",", "js_name", ")" ]
50f9ebb60ccd5e96aeb88176b6e8c789a66b7677
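A sketch of the URL this method builds. The 'local_host' value and {STATIC_URL} placeholder below are illustrative assumptions, not the actual django-echarts defaults, and `store` stands for a configured SettingsStore instance:

# Hypothetical configuration, only to exercise the string formatting above
store._settings = {'local_host': '{STATIC_URL}echarts/'}
store._host_context = {'STATIC_URL': '/static/'}

store.generate_local_url('echarts.min')
# -> '/static/echarts/echarts.min.js'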
valid
ifetch_single
getter() g(item, key):pass
django_echarts/datasets/fetch.py
def ifetch_single(iterable, key, default=EMPTY, getter=None):
    """
    getter() g(item, key):pass
    """
    def _getter(item):
        if getter:
            custom_getter = partial(getter, key=key)
            return custom_getter(item)
        else:
            try:
                attrgetter = operator.attrgetter(key)
                return attrgetter(item)
            except AttributeError:
                pass

            try:
                itemgetter = operator.itemgetter(key)
                return itemgetter(item)
            except KeyError:
                pass

            if default is not EMPTY:
                return default
            raise ValueError('Item %r has no attr or key for %r' % (item, key))

    return map(_getter, iterable)
def ifetch_single(iterable, key, default=EMPTY, getter=None):
    """
    getter() g(item, key):pass
    """
    def _getter(item):
        if getter:
            custom_getter = partial(getter, key=key)
            return custom_getter(item)
        else:
            try:
                attrgetter = operator.attrgetter(key)
                return attrgetter(item)
            except AttributeError:
                pass

            try:
                itemgetter = operator.itemgetter(key)
                return itemgetter(item)
            except KeyError:
                pass

            if default is not EMPTY:
                return default
            raise ValueError('Item %r has no attr or key for %r' % (item, key))

    return map(_getter, iterable)
[ "getter", "()", "g", "(", "item", "key", ")", ":", "pass" ]
kinegratii/django-echarts
python
https://github.com/kinegratii/django-echarts/blob/50f9ebb60ccd5e96aeb88176b6e8c789a66b7677/django_echarts/datasets/fetch.py#L19-L46
[ "def", "ifetch_single", "(", "iterable", ",", "key", ",", "default", "=", "EMPTY", ",", "getter", "=", "None", ")", ":", "def", "_getter", "(", "item", ")", ":", "if", "getter", ":", "custom_getter", "=", "partial", "(", "getter", ",", "key", "=", "key", ")", "return", "custom_getter", "(", "item", ")", "else", ":", "try", ":", "attrgetter", "=", "operator", ".", "attrgetter", "(", "key", ")", "return", "attrgetter", "(", "item", ")", "except", "AttributeError", ":", "pass", "try", ":", "itemgetter", "=", "operator", ".", "itemgetter", "(", "key", ")", "return", "itemgetter", "(", "item", ")", "except", "KeyError", ":", "pass", "if", "default", "is", "not", "EMPTY", ":", "return", "default", "raise", "ValueError", "(", "'Item %r has no attr or key for %r'", "%", "(", "item", ",", "key", ")", ")", "return", "map", "(", "_getter", ",", "iterable", ")" ]
50f9ebb60ccd5e96aeb88176b6e8c789a66b7677
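A usage sketch grounded in the code above; the example rows are made up. Note that ifetch_single returns a lazy map object, hence the list() calls:

rows = [{'name': 'alice', 'age': 30}, {'name': 'bob', 'age': 25}]

list(ifetch_single(rows, 'name'))                 # -> ['alice', 'bob']
list(ifetch_single(rows, 'email', default=None))  # -> [None, None]

# A custom getter is called as getter(item, key=key)
list(ifetch_single(rows, 'name', getter=lambda item, key: item[key].upper()))
# -> ['ALICE', 'BOB']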