Dataset columns (name: type, length/class range):

partition: stringclasses (3 values)
func_name: stringlengths (1 to 134)
docstring: stringlengths (1 to 46.9k)
path: stringlengths (4 to 223)
original_string: stringlengths (75 to 104k)
code: stringlengths (75 to 104k)
docstring_tokens: listlengths (1 to 1.97k)
repo: stringlengths (7 to 55)
language: stringclasses (1 value)
url: stringlengths (87 to 315)
code_tokens: listlengths (19 to 28.4k)
sha: stringlengths (40 to 40)
test
DataFrame.sort
Return a sorted DataFrame, sorted by the expression 'by'

{note_copy}

{note_filter}

Example:

>>> import vaex, numpy as np
>>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))
>>> df['y'] = (df.x-1.8)**2
>>> df
  #  s    x     y
  0  a    1  0.64
  1  b    2  0.04
  2  c    3  1.44
  3  d    4  4.84
>>> df.sort('y', ascending=False)  # Note: passing '(x-1.8)**2' gives the same result
  #  s    x     y
  0  d    4  4.84
  1  c    3  1.44
  2  a    1  0.64
  3  b    2  0.04

:param str or expression by: expression to sort by
:param bool ascending: ascending (default, True) or descending (False)
:param str kind: kind of algorithm to use (passed to numpy.argsort)
packages/vaex-core/vaex/dataframe.py
def sort(self, by, ascending=True, kind='quicksort'):
    '''Return a sorted DataFrame, sorted by the expression 'by'

    {note_copy}

    {note_filter}

    Example:

    >>> import vaex, numpy as np
    >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))
    >>> df['y'] = (df.x-1.8)**2
    >>> df
      #  s    x     y
      0  a    1  0.64
      1  b    2  0.04
      2  c    3  1.44
      3  d    4  4.84
    >>> df.sort('y', ascending=False)  # Note: passing '(x-1.8)**2' gives the same result
      #  s    x     y
      0  d    4  4.84
      1  c    3  1.44
      2  a    1  0.64
      3  b    2  0.04

    :param str or expression by: expression to sort by
    :param bool ascending: ascending (default, True) or descending (False)
    :param str kind: kind of algorithm to use (passed to numpy.argsort)
    '''
    self = self.trim()
    values = self.evaluate(by, filtered=False)
    indices = np.argsort(values, kind=kind)
    if not ascending:
        indices = indices[::-1].copy()  # this may be used a lot, so copy for performance
    return self.take(indices)
[ "Return", "a", "sorted", "DataFrame", "sorted", "by", "the", "expression", "by" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L3969-L4003
[ "def", "sort", "(", "self", ",", "by", ",", "ascending", "=", "True", ",", "kind", "=", "'quicksort'", ")", ":", "self", "=", "self", ".", "trim", "(", ")", "values", "=", "self", ".", "evaluate", "(", "by", ",", "filtered", "=", "False", ")", "indices", "=", "np", ".", "argsort", "(", "values", ",", "kind", "=", "kind", ")", "if", "not", "ascending", ":", "indices", "=", "indices", "[", ":", ":", "-", "1", "]", ".", "copy", "(", ")", "# this may be used a lot, so copy for performance", "return", "self", ".", "take", "(", "indices", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.fillna
Return a DataFrame, where missing values/NaN are filled with 'value'.

The original columns will be renamed, and by default they will be hidden columns. No data is lost.

{note_copy}

{note_filter}

Example:

>>> import vaex
>>> import numpy as np
>>> x = np.array([3, 1, np.nan, 10, np.nan])
>>> df = vaex.from_arrays(x=x)
>>> df_filled = df.fillna(value=-1, column_names=['x'])
>>> df_filled
  #    x
  0    3
  1    1
  2   -1
  3   10
  4   -1

:param float value: The value to use for filling nan or masked values.
:param bool fill_nan: If True, fill np.nan values with `value`.
:param bool fill_masked: If True, fill masked values with `value`.
:param list column_names: List of column names in which to fill missing values.
:param str prefix: The prefix to give the original columns.
:param inplace: {inplace}
packages/vaex-core/vaex/dataframe.py
def fillna(self, value, fill_nan=True, fill_masked=True, column_names=None, prefix='__original_', inplace=False):
    '''Return a DataFrame, where missing values/NaN are filled with 'value'.

    The original columns will be renamed, and by default they will be hidden columns. No data is lost.

    {note_copy}

    {note_filter}

    Example:

    >>> import vaex
    >>> import numpy as np
    >>> x = np.array([3, 1, np.nan, 10, np.nan])
    >>> df = vaex.from_arrays(x=x)
    >>> df_filled = df.fillna(value=-1, column_names=['x'])
    >>> df_filled
      #    x
      0    3
      1    1
      2   -1
      3   10
      4   -1

    :param float value: The value to use for filling nan or masked values.
    :param bool fill_nan: If True, fill np.nan values with `value`.
    :param bool fill_masked: If True, fill masked values with `value`.
    :param list column_names: List of column names in which to fill missing values.
    :param str prefix: The prefix to give the original columns.
    :param inplace: {inplace}
    '''
    df = self.trim(inplace=inplace)
    column_names = column_names or list(self)
    for name in column_names:
        column = df.columns.get(name)
        if column is not None:
            new_name = df.rename_column(name, prefix + name)
            expr = df[new_name]
            df[name] = df.func.fillna(expr, value, fill_nan=fill_nan, fill_masked=fill_masked)
        else:
            df[name] = df.func.fillna(df[name], value, fill_nan=fill_nan, fill_masked=fill_masked)
    return df
[ "Return", "a", "DataFrame", "where", "missing", "values", "/", "NaN", "are", "filled", "with", "value", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4006-L4047
[ "def", "fillna", "(", "self", ",", "value", ",", "fill_nan", "=", "True", ",", "fill_masked", "=", "True", ",", "column_names", "=", "None", ",", "prefix", "=", "'__original_'", ",", "inplace", "=", "False", ")", ":", "df", "=", "self", ".", "trim", "(", "inplace", "=", "inplace", ")", "column_names", "=", "column_names", "or", "list", "(", "self", ")", "for", "name", "in", "column_names", ":", "column", "=", "df", ".", "columns", ".", "get", "(", "name", ")", "if", "column", "is", "not", "None", ":", "new_name", "=", "df", ".", "rename_column", "(", "name", ",", "prefix", "+", "name", ")", "expr", "=", "df", "[", "new_name", "]", "df", "[", "name", "]", "=", "df", ".", "func", ".", "fillna", "(", "expr", ",", "value", ",", "fill_nan", "=", "fill_nan", ",", "fill_masked", "=", "fill_masked", ")", "else", ":", "df", "[", "name", "]", "=", "df", ".", "func", ".", "fillna", "(", "df", "[", "name", "]", ",", "value", ",", "fill_nan", "=", "fill_nan", ",", "fill_masked", "=", "fill_masked", ")", "return", "df" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.materialize
Returns a new DataFrame where the virtual column is turned into an in memory numpy array. Example: >>> x = np.arange(1,4) >>> y = np.arange(2,5) >>> df = vaex.from_arrays(x=x, y=y) >>> df['r'] = (df.x**2 + df.y**2)**0.5 # 'r' is a virtual column (computed on the fly) >>> df = df.materialize('r') # now 'r' is a 'real' column (i.e. a numpy array) :param inplace: {inplace}
packages/vaex-core/vaex/dataframe.py
def materialize(self, virtual_column, inplace=False):
    '''Returns a new DataFrame where the virtual column is turned into an in memory numpy array.

    Example:

    >>> x = np.arange(1,4)
    >>> y = np.arange(2,5)
    >>> df = vaex.from_arrays(x=x, y=y)
    >>> df['r'] = (df.x**2 + df.y**2)**0.5  # 'r' is a virtual column (computed on the fly)
    >>> df = df.materialize('r')  # now 'r' is a 'real' column (i.e. a numpy array)

    :param inplace: {inplace}
    '''
    df = self.trim(inplace=inplace)
    virtual_column = _ensure_string_from_expression(virtual_column)
    if virtual_column not in df.virtual_columns:
        raise KeyError('Virtual column not found: %r' % virtual_column)
    ar = df.evaluate(virtual_column, filtered=False)
    del df[virtual_column]
    df.add_column(virtual_column, ar)
    return df
[ "Returns", "a", "new", "DataFrame", "where", "the", "virtual", "column", "is", "turned", "into", "an", "in", "memory", "numpy", "array", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4049-L4069
[ "def", "materialize", "(", "self", ",", "virtual_column", ",", "inplace", "=", "False", ")", ":", "df", "=", "self", ".", "trim", "(", "inplace", "=", "inplace", ")", "virtual_column", "=", "_ensure_string_from_expression", "(", "virtual_column", ")", "if", "virtual_column", "not", "in", "df", ".", "virtual_columns", ":", "raise", "KeyError", "(", "'Virtual column not found: %r'", "%", "virtual_column", ")", "ar", "=", "df", ".", "evaluate", "(", "virtual_column", ",", "filtered", "=", "False", ")", "del", "df", "[", "virtual_column", "]", "df", ".", "add_column", "(", "virtual_column", ",", "ar", ")", "return", "df" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.get_selection
Get the current selection object (mostly for internal use atm).
packages/vaex-core/vaex/dataframe.py
def get_selection(self, name="default"):
    """Get the current selection object (mostly for internal use atm)."""
    name = _normalize_selection_name(name)
    selection_history = self.selection_histories[name]
    index = self.selection_history_indices[name]
    if index == -1:
        return None
    else:
        return selection_history[index]
[ "Get", "the", "current", "selection", "object", "(", "mostly", "for", "internal", "use", "atm", ")", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4071-L4079
[ "def", "get_selection", "(", "self", ",", "name", "=", "\"default\"", ")", ":", "name", "=", "_normalize_selection_name", "(", "name", ")", "selection_history", "=", "self", ".", "selection_histories", "[", "name", "]", "index", "=", "self", ".", "selection_history_indices", "[", "name", "]", "if", "index", "==", "-", "1", ":", "return", "None", "else", ":", "return", "selection_history", "[", "index", "]" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
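A minimal usage sketch for DataFrame.get_selection, using a toy DataFrame in the style of the examples above; the selection itself is made with DataFrame.select (documented later in this section), and the returned value is the Selection object stored in the named history slot.

import vaex
import numpy as np

df = vaex.from_arrays(x=np.arange(1, 5))
df.select('x > 2')               # record a selection in the "default" slot
selection = df.get_selection()   # the Selection object just created
print(selection is None)         # False; without any selection this returns None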
test
DataFrame.selection_undo
Undo selection, for the name.
packages/vaex-core/vaex/dataframe.py
def selection_undo(self, name="default", executor=None):
    """Undo selection, for the name."""
    logger.debug("undo")
    executor = executor or self.executor
    assert self.selection_can_undo(name=name)
    selection_history = self.selection_histories[name]
    index = self.selection_history_indices[name]
    self.selection_history_indices[name] -= 1
    self.signal_selection_changed.emit(self)
    logger.debug("undo: selection history is %r, index is %r", selection_history, self.selection_history_indices[name])
[ "Undo", "selection", "for", "the", "name", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4081-L4090
[ "def", "selection_undo", "(", "self", ",", "name", "=", "\"default\"", ",", "executor", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"undo\"", ")", "executor", "=", "executor", "or", "self", ".", "executor", "assert", "self", ".", "selection_can_undo", "(", "name", "=", "name", ")", "selection_history", "=", "self", ".", "selection_histories", "[", "name", "]", "index", "=", "self", ".", "selection_history_indices", "[", "name", "]", "self", ".", "selection_history_indices", "[", "name", "]", "-=", "1", "self", ".", "signal_selection_changed", ".", "emit", "(", "self", ")", "logger", ".", "debug", "(", "\"undo: selection history is %r, index is %r\"", ",", "selection_history", ",", "self", ".", "selection_history_indices", "[", "name", "]", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
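A sketch of the undo/redo flow around selection_undo, selection_redo and the selection_can_undo/selection_can_redo guards (all shown in this section), assuming the same kind of toy DataFrame as above.

import vaex
import numpy as np

df = vaex.from_arrays(x=np.arange(1, 5))
df.select('x > 1')           # first selection in the "default" history tree
df.select('x > 3')           # second selection replaces it
if df.selection_can_undo():
    df.selection_undo()      # active selection is 'x > 1' again
if df.selection_can_redo():
    df.selection_redo()      # back to 'x > 3'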
test
DataFrame.selection_redo
Redo selection, for the name.
packages/vaex-core/vaex/dataframe.py
def selection_redo(self, name="default", executor=None):
    """Redo selection, for the name."""
    logger.debug("redo")
    executor = executor or self.executor
    assert self.selection_can_redo(name=name)
    selection_history = self.selection_histories[name]
    index = self.selection_history_indices[name]
    next = selection_history[index + 1]
    self.selection_history_indices[name] += 1
    self.signal_selection_changed.emit(self)
    logger.debug("redo: selection history is %r, index is %r", selection_history, index)
[ "Redo", "selection", "for", "the", "name", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4092-L4102
[ "def", "selection_redo", "(", "self", ",", "name", "=", "\"default\"", ",", "executor", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"redo\"", ")", "executor", "=", "executor", "or", "self", ".", "executor", "assert", "self", ".", "selection_can_redo", "(", "name", "=", "name", ")", "selection_history", "=", "self", ".", "selection_histories", "[", "name", "]", "index", "=", "self", ".", "selection_history_indices", "[", "name", "]", "next", "=", "selection_history", "[", "index", "+", "1", "]", "self", ".", "selection_history_indices", "[", "name", "]", "+=", "1", "self", ".", "signal_selection_changed", ".", "emit", "(", "self", ")", "logger", ".", "debug", "(", "\"redo: selection history is %r, index is %r\"", ",", "selection_history", ",", "index", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.selection_can_redo
Can selection name be redone?
packages/vaex-core/vaex/dataframe.py
def selection_can_redo(self, name="default"):
    """Can selection name be redone?"""
    return (self.selection_history_indices[name] + 1) < len(self.selection_histories[name])
[ "Can", "selection", "name", "be", "redone?" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4108-L4110
[ "def", "selection_can_redo", "(", "self", ",", "name", "=", "\"default\"", ")", ":", "return", "(", "self", ".", "selection_history_indices", "[", "name", "]", "+", "1", ")", "<", "len", "(", "self", ".", "selection_histories", "[", "name", "]", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.select
Perform a selection, defined by the boolean expression, and combined with the previous selection using the given mode. Selections are recorded in a history tree, per name, undo/redo can be done for them separately. :param str boolean_expression: Any valid column expression, with comparison operators :param str mode: Possible boolean operator: replace/and/or/xor/subtract :param str name: history tree or selection 'slot' to use :param executor: :return:
packages/vaex-core/vaex/dataframe.py
def select(self, boolean_expression, mode="replace", name="default", executor=None):
    """Perform a selection, defined by the boolean expression, and combined with the previous selection using the given mode.

    Selections are recorded in a history tree, per name, undo/redo can be done for them separately.

    :param str boolean_expression: Any valid column expression, with comparison operators
    :param str mode: Possible boolean operator: replace/and/or/xor/subtract
    :param str name: history tree or selection 'slot' to use
    :param executor:
    :return:
    """
    boolean_expression = _ensure_string_from_expression(boolean_expression)
    if boolean_expression is None and not self.has_selection(name=name):
        pass  # we don't want to pollute the history with many None selections
        self.signal_selection_changed.emit(self)  # TODO: unittest want to know, does this make sense?
    else:
        def create(current):
            return selections.SelectionExpression(boolean_expression, current, mode) if boolean_expression else None
        self._selection(create, name)
[ "Perform", "a", "selection", "defined", "by", "the", "boolean", "expression", "and", "combined", "with", "the", "previous", "selection", "using", "the", "given", "mode", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4112-L4130
[ "def", "select", "(", "self", ",", "boolean_expression", ",", "mode", "=", "\"replace\"", ",", "name", "=", "\"default\"", ",", "executor", "=", "None", ")", ":", "boolean_expression", "=", "_ensure_string_from_expression", "(", "boolean_expression", ")", "if", "boolean_expression", "is", "None", "and", "not", "self", ".", "has_selection", "(", "name", "=", "name", ")", ":", "pass", "# we don't want to pollute the history with many None selections", "self", ".", "signal_selection_changed", ".", "emit", "(", "self", ")", "# TODO: unittest want to know, does this make sense?", "else", ":", "def", "create", "(", "current", ")", ":", "return", "selections", ".", "SelectionExpression", "(", "boolean_expression", ",", "current", ",", "mode", ")", "if", "boolean_expression", "else", "None", "self", ".", "_selection", "(", "create", ",", "name", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
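A usage sketch for DataFrame.select with the mode argument; vaex.from_arrays and the expression syntax follow the examples above, and count(selection=True) is assumed to count only the selected rows, as in current vaex releases.

import vaex
import numpy as np

df = vaex.from_arrays(x=np.arange(10), y=np.arange(10) % 3)
df.select('x > 4')                 # replace the current selection
df.select('y == 0', mode='and')    # intersect with the previous selection
print(df.count(selection=True))    # rows satisfying both conditions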
test
DataFrame.select_non_missing
Create a selection that selects rows having non missing values for all columns in column_names. The name reflects Pandas'; no rows are actually dropped, but a mask keeps track of the selection. :param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values) :param drop_masked: drop rows when there is a masked value in any of the columns :param column_names: The columns to consider, default: all (real, non-virtual) columns :param str mode: Possible boolean operator: replace/and/or/xor/subtract :param str name: history tree or selection 'slot' to use :return:
packages/vaex-core/vaex/dataframe.py
def select_non_missing(self, drop_nan=True, drop_masked=True, column_names=None, mode="replace", name="default"):
    """Create a selection that selects rows having non missing values for all columns in column_names.

    The name reflects Pandas'; no rows are actually dropped, but a mask keeps track of the selection.

    :param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)
    :param drop_masked: drop rows when there is a masked value in any of the columns
    :param column_names: The columns to consider, default: all (real, non-virtual) columns
    :param str mode: Possible boolean operator: replace/and/or/xor/subtract
    :param str name: history tree or selection 'slot' to use
    :return:
    """
    column_names = column_names or self.get_column_names(virtual=False)

    def create(current):
        return selections.SelectionDropNa(drop_nan, drop_masked, column_names, current, mode)
    self._selection(create, name)
[ "Create", "a", "selection", "that", "selects", "rows", "having", "non", "missing", "values", "for", "all", "columns", "in", "column_names", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4132-L4148
[ "def", "select_non_missing", "(", "self", ",", "drop_nan", "=", "True", ",", "drop_masked", "=", "True", ",", "column_names", "=", "None", ",", "mode", "=", "\"replace\"", ",", "name", "=", "\"default\"", ")", ":", "column_names", "=", "column_names", "or", "self", ".", "get_column_names", "(", "virtual", "=", "False", ")", "def", "create", "(", "current", ")", ":", "return", "selections", ".", "SelectionDropNa", "(", "drop_nan", ",", "drop_masked", ",", "column_names", ",", "current", ",", "mode", ")", "self", ".", "_selection", "(", "create", ",", "name", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
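A sketch of select_non_missing on a column containing NaN values; the array mirrors the fillna example earlier, and count(selection=True) is again assumed to report the selected row count.

import vaex
import numpy as np

df = vaex.from_arrays(x=np.array([3, 1, np.nan, 10, np.nan]))
df.select_non_missing(drop_nan=True, column_names=['x'])
print(df.count(selection=True))    # only the three rows where x is not NaN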
test
DataFrame.dropna
Create a shallow copy of a DataFrame, with filtering set using select_non_missing. :param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values) :param drop_masked: drop rows when there is a masked value in any of the columns :param column_names: The columns to consider, default: all (real, non-virtual) columns :rtype: DataFrame
packages/vaex-core/vaex/dataframe.py
def dropna(self, drop_nan=True, drop_masked=True, column_names=None):
    """Create a shallow copy of a DataFrame, with filtering set using select_non_missing.

    :param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)
    :param drop_masked: drop rows when there is a masked value in any of the columns
    :param column_names: The columns to consider, default: all (real, non-virtual) columns
    :rtype: DataFrame
    """
    copy = self.copy()
    copy.select_non_missing(drop_nan=drop_nan, drop_masked=drop_masked, column_names=column_names,
                            name=FILTER_SELECTION_NAME, mode='and')
    return copy
[ "Create", "a", "shallow", "copy", "of", "a", "DataFrame", "with", "filtering", "set", "using", "select_non_missing", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4150-L4161
[ "def", "dropna", "(", "self", ",", "drop_nan", "=", "True", ",", "drop_masked", "=", "True", ",", "column_names", "=", "None", ")", ":", "copy", "=", "self", ".", "copy", "(", ")", "copy", ".", "select_non_missing", "(", "drop_nan", "=", "drop_nan", ",", "drop_masked", "=", "drop_masked", ",", "column_names", "=", "column_names", ",", "name", "=", "FILTER_SELECTION_NAME", ",", "mode", "=", "'and'", ")", "return", "copy" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
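A short sketch of dropna; it only sets a filter on a shallow copy, so the original DataFrame keeps all rows (len() on a vaex DataFrame is assumed to report the filtered length).

import vaex
import numpy as np

df = vaex.from_arrays(x=np.array([3, 1, np.nan, 10, np.nan]))
df_clean = df.dropna(column_names=['x'])
print(len(df))        # 5 -- the original is untouched
print(len(df_clean))  # 3 -- NaN rows are filtered out of the copy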
test
DataFrame.select_rectangle
Select a 2d rectangular box in the space given by x and y, bounded by limits. Example: >>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)]) :param x: expression for the x space :param y: expression for the y space :param limits: sequence of shape [(x1, x2), (y1, y2)] :param mode:
packages/vaex-core/vaex/dataframe.py
def select_rectangle(self, x, y, limits, mode="replace", name="default"):
    """Select a 2d rectangular box in the space given by x and y, bounded by limits.

    Example:

    >>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)])

    :param x: expression for the x space
    :param y: expression for the y space
    :param limits: sequence of shape [(x1, x2), (y1, y2)]
    :param mode:
    """
    self.select_box([x, y], limits, mode=mode, name=name)
[ "Select", "a", "2d", "rectangular", "box", "in", "the", "space", "given", "by", "x", "and", "y", "bounds", "by", "limits", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4169-L4181
[ "def", "select_rectangle", "(", "self", ",", "x", ",", "y", ",", "limits", ",", "mode", "=", "\"replace\"", ",", "name", "=", "\"default\"", ")", ":", "self", ".", "select_box", "(", "[", "x", ",", "y", "]", ",", "limits", ",", "mode", "=", "mode", ",", "name", "=", "name", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.select_box
Select an n-dimensional rectangular box bounded by limits. The following examples are equivalent: >>> df.select_box(['x', 'y'], [(0, 10), (0, 1)]) >>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)]) :param spaces: list of expressions :param limits: sequence of shape [(x1, x2), (y1, y2)] :param mode: :param name: :return:
packages/vaex-core/vaex/dataframe.py
def select_box(self, spaces, limits, mode="replace", name="default"):
    """Select an n-dimensional rectangular box bounded by limits.

    The following examples are equivalent:

    >>> df.select_box(['x', 'y'], [(0, 10), (0, 1)])
    >>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)])

    :param spaces: list of expressions
    :param limits: sequence of shape [(x1, x2), (y1, y2)]
    :param mode:
    :param name:
    :return:
    """
    sorted_limits = [(min(l), max(l)) for l in limits]
    expressions = ["((%s) >= %f) & ((%s) <= %f)" % (expression, lmin, expression, lmax)
                   for (expression, (lmin, lmax)) in zip(spaces, sorted_limits)]
    self.select("&".join(expressions), mode=mode, name=name)
[ "Select", "a", "n", "-", "dimensional", "rectangular", "box", "bounded", "by", "limits", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4183-L4200
[ "def", "select_box", "(", "self", ",", "spaces", ",", "limits", ",", "mode", "=", "\"replace\"", ",", "name", "=", "\"default\"", ")", ":", "sorted_limits", "=", "[", "(", "min", "(", "l", ")", ",", "max", "(", "l", ")", ")", "for", "l", "in", "limits", "]", "expressions", "=", "[", "\"((%s) >= %f) & ((%s) <= %f)\"", "%", "(", "expression", ",", "lmin", ",", "expression", ",", "lmax", ")", "for", "(", "expression", ",", "(", "lmin", ",", "lmax", ")", ")", "in", "zip", "(", "spaces", ",", "sorted_limits", ")", "]", "self", ".", "select", "(", "\"&\"", ".", "join", "(", "expressions", ")", ",", "mode", "=", "mode", ",", "name", "=", "name", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.select_circle
Select a circular region centred on xc, yc, with a radius of r. Example: >>> df.select_circle('x','y',2,3,1) :param x: expression for the x space :param y: expression for the y space :param xc: location of the centre of the circle in x :param yc: location of the centre of the circle in y :param r: the radius of the circle :param name: name of the selection :param mode: :return:
packages/vaex-core/vaex/dataframe.py
def select_circle(self, x, y, xc, yc, r, mode="replace", name="default", inclusive=True):
    """Select a circular region centred on xc, yc, with a radius of r.

    Example:

    >>> df.select_circle('x','y',2,3,1)

    :param x: expression for the x space
    :param y: expression for the y space
    :param xc: location of the centre of the circle in x
    :param yc: location of the centre of the circle in y
    :param r: the radius of the circle
    :param name: name of the selection
    :param mode:
    :return:
    """
    # expr = "({x}-{xc})**2 + ({y}-{yc})**2 <={r}**2".format(**locals())
    if inclusive:
        expr = (self[x] - xc)**2 + (self[y] - yc)**2 <= r**2
    else:
        expr = (self[x] - xc)**2 + (self[y] - yc)**2 < r**2
    self.select(boolean_expression=expr, mode=mode, name=name)
[ "Select", "a", "circular", "region", "centred", "on", "xc", "yc", "with", "a", "radius", "of", "r", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4202-L4226
[ "def", "select_circle", "(", "self", ",", "x", ",", "y", ",", "xc", ",", "yc", ",", "r", ",", "mode", "=", "\"replace\"", ",", "name", "=", "\"default\"", ",", "inclusive", "=", "True", ")", ":", "# expr = \"({x}-{xc})**2 + ({y}-{yc})**2 <={r}**2\".format(**locals())", "if", "inclusive", ":", "expr", "=", "(", "self", "[", "x", "]", "-", "xc", ")", "**", "2", "+", "(", "self", "[", "y", "]", "-", "yc", ")", "**", "2", "<=", "r", "**", "2", "else", ":", "expr", "=", "(", "self", "[", "x", "]", "-", "xc", ")", "**", "2", "+", "(", "self", "[", "y", "]", "-", "yc", ")", "**", "2", "<", "r", "**", "2", "self", ".", "select", "(", "boolean_expression", "=", "expr", ",", "mode", "=", "mode", ",", "name", "=", "name", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.select_ellipse
Select an elliptical region centred on xc, yc, with a certain width, height and angle. Example: >>> df.select_ellipse('x','y', 2, -1, 5,1, 30, name='my_ellipse') :param x: expression for the x space :param y: expression for the y space :param xc: location of the centre of the ellipse in x :param yc: location of the centre of the ellipse in y :param width: the width of the ellipse (diameter) :param height: the height of the ellipse (diameter) :param angle: (degrees) orientation of the ellipse, counter-clockwise measured from the y axis :param name: name of the selection :param mode: :return:
packages/vaex-core/vaex/dataframe.py
def select_ellipse(self, x, y, xc, yc, width, height, angle=0, mode="replace", name="default", radians=False, inclusive=True):
    """Select an elliptical region centred on xc, yc, with a certain width, height and angle.

    Example:

    >>> df.select_ellipse('x','y', 2, -1, 5,1, 30, name='my_ellipse')

    :param x: expression for the x space
    :param y: expression for the y space
    :param xc: location of the centre of the ellipse in x
    :param yc: location of the centre of the ellipse in y
    :param width: the width of the ellipse (diameter)
    :param height: the height of the ellipse (diameter)
    :param angle: (degrees) orientation of the ellipse, counter-clockwise measured from the y axis
    :param name: name of the selection
    :param mode:
    :return:
    """
    # Computing the properties of the ellipse prior to selection
    if radians:
        pass
    else:
        alpha = np.deg2rad(angle)
    xr = width / 2
    yr = height / 2
    r = max(xr, yr)
    a = xr / r
    b = yr / r
    expr = "(({x}-{xc})*cos({alpha})+({y}-{yc})*sin({alpha}))**2/{a}**2 + (({x}-{xc})*sin({alpha})-({y}-{yc})*cos({alpha}))**2/{b}**2 <= {r}**2".format(**locals())

    if inclusive:
        expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))**2 / a**2 + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * np.cos(alpha))**2 / b**2 <= r**2
    else:
        expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))**2 / a**2 + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * np.cos(alpha))**2 / b**2 < r**2

    self.select(boolean_expression=expr, mode=mode, name=name)
[ "Select", "an", "elliptical", "region", "centred", "on", "xc", "yc", "with", "a", "certain", "width", "height", "and", "angle", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4228-L4269
[ "def", "select_ellipse", "(", "self", ",", "x", ",", "y", ",", "xc", ",", "yc", ",", "width", ",", "height", ",", "angle", "=", "0", ",", "mode", "=", "\"replace\"", ",", "name", "=", "\"default\"", ",", "radians", "=", "False", ",", "inclusive", "=", "True", ")", ":", "# Computing the properties of the ellipse prior to selection", "if", "radians", ":", "pass", "else", ":", "alpha", "=", "np", ".", "deg2rad", "(", "angle", ")", "xr", "=", "width", "/", "2", "yr", "=", "height", "/", "2", "r", "=", "max", "(", "xr", ",", "yr", ")", "a", "=", "xr", "/", "r", "b", "=", "yr", "/", "r", "expr", "=", "\"(({x}-{xc})*cos({alpha})+({y}-{yc})*sin({alpha}))**2/{a}**2 + (({x}-{xc})*sin({alpha})-({y}-{yc})*cos({alpha}))**2/{b}**2 <= {r}**2\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "if", "inclusive", ":", "expr", "=", "(", "(", "self", "[", "x", "]", "-", "xc", ")", "*", "np", ".", "cos", "(", "alpha", ")", "+", "(", "self", "[", "y", "]", "-", "yc", ")", "*", "np", ".", "sin", "(", "alpha", ")", ")", "**", "2", "/", "a", "**", "2", "+", "(", "(", "self", "[", "x", "]", "-", "xc", ")", "*", "np", ".", "sin", "(", "alpha", ")", "-", "(", "self", "[", "y", "]", "-", "yc", ")", "*", "np", ".", "cos", "(", "alpha", ")", ")", "**", "2", "/", "b", "**", "2", "<=", "r", "**", "2", "else", ":", "expr", "=", "(", "(", "self", "[", "x", "]", "-", "xc", ")", "*", "np", ".", "cos", "(", "alpha", ")", "+", "(", "self", "[", "y", "]", "-", "yc", ")", "*", "np", ".", "sin", "(", "alpha", ")", ")", "**", "2", "/", "a", "**", "2", "+", "(", "(", "self", "[", "x", "]", "-", "xc", ")", "*", "np", ".", "sin", "(", "alpha", ")", "-", "(", "self", "[", "y", "]", "-", "yc", ")", "*", "np", ".", "cos", "(", "alpha", ")", ")", "**", "2", "/", "b", "**", "2", "<", "r", "**", "2", "self", ".", "select", "(", "boolean_expression", "=", "expr", ",", "mode", "=", "mode", ",", "name", "=", "name", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.select_lasso
For performance reasons, a lasso selection is handled differently. :param str expression_x: Name/expression for the x coordinate :param str expression_y: Name/expression for the y coordinate :param xsequence: list of x numbers defining the lasso, together with y :param ysequence: :param str mode: Possible boolean operator: replace/and/or/xor/subtract :param str name: :param executor: :return:
packages/vaex-core/vaex/dataframe.py
def select_lasso(self, expression_x, expression_y, xsequence, ysequence, mode="replace", name="default", executor=None):
    """For performance reasons, a lasso selection is handled differently.

    :param str expression_x: Name/expression for the x coordinate
    :param str expression_y: Name/expression for the y coordinate
    :param xsequence: list of x numbers defining the lasso, together with y
    :param ysequence:
    :param str mode: Possible boolean operator: replace/and/or/xor/subtract
    :param str name:
    :param executor:
    :return:
    """
    def create(current):
        return selections.SelectionLasso(expression_x, expression_y, xsequence, ysequence, current, mode)
    self._selection(create, name, executor=executor)
[ "For", "performance", "reasons", "a", "lasso", "selection", "is", "handled", "differently", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4271-L4286
[ "def", "select_lasso", "(", "self", ",", "expression_x", ",", "expression_y", ",", "xsequence", ",", "ysequence", ",", "mode", "=", "\"replace\"", ",", "name", "=", "\"default\"", ",", "executor", "=", "None", ")", ":", "def", "create", "(", "current", ")", ":", "return", "selections", ".", "SelectionLasso", "(", "expression_x", ",", "expression_y", ",", "xsequence", ",", "ysequence", ",", "current", ",", "mode", ")", "self", ".", "_selection", "(", "create", ",", "name", ",", "executor", "=", "executor", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
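A sketch of select_lasso with a triangular polygon; the vertex coordinates are made-up illustration values, and count(selection=True) is assumed as in the earlier examples.

import vaex
import numpy as np

df = vaex.from_arrays(x=np.random.uniform(0, 10, 1000),
                      y=np.random.uniform(0, 10, 1000))
# the lasso is given as separate x and y vertex sequences
df.select_lasso('x', 'y', xsequence=[1, 8, 4], ysequence=[1, 2, 9])
print(df.count(selection=True))    # points falling inside the triangle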
test
DataFrame.select_inverse
Invert the selection, i.e. what is selected will not be, and vice versa :param str name: :param executor: :return:
packages/vaex-core/vaex/dataframe.py
def select_inverse(self, name="default", executor=None):
    """Invert the selection, i.e. what is selected will not be, and vice versa

    :param str name:
    :param executor:
    :return:
    """
    def create(current):
        return selections.SelectionInvert(current)
    self._selection(create, name, executor=executor)
[ "Invert", "the", "selection", "i", ".", "e", ".", "what", "is", "selected", "will", "not", "be", "and", "vice", "versa" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4288-L4298
[ "def", "select_inverse", "(", "self", ",", "name", "=", "\"default\"", ",", "executor", "=", "None", ")", ":", "def", "create", "(", "current", ")", ":", "return", "selections", ".", "SelectionInvert", "(", "current", ")", "self", ".", "_selection", "(", "create", ",", "name", ",", "executor", "=", "executor", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
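A minimal sketch of select_inverse: after inverting, the selected and unselected rows swap (count(selection=True) assumed as above).

import vaex
import numpy as np

df = vaex.from_arrays(x=np.arange(10))
df.select('x < 3')
print(df.count(selection=True))    # 3 rows selected
df.select_inverse()
print(df.count(selection=True))    # the remaining 7 rows are selected now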
test
DataFrame.set_selection
Sets the selection object :param selection: Selection object :param name: selection 'slot' :param executor: :return:
packages/vaex-core/vaex/dataframe.py
def set_selection(self, selection, name="default", executor=None):
    """Sets the selection object

    :param selection: Selection object
    :param name: selection 'slot'
    :param executor:
    :return:
    """
    def create(current):
        return selection
    self._selection(create, name, executor=executor, execute_fully=True)
[ "Sets", "the", "selection", "object" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4300-L4310
[ "def", "set_selection", "(", "self", ",", "selection", ",", "name", "=", "\"default\"", ",", "executor", "=", "None", ")", ":", "def", "create", "(", "current", ")", ":", "return", "selection", "self", ".", "_selection", "(", "create", ",", "name", ",", "executor", "=", "executor", ",", "execute_fully", "=", "True", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
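A sketch combining get_selection and set_selection to save and later restore a selection object, using the same toy DataFrame style as the earlier examples.

import vaex
import numpy as np

df = vaex.from_arrays(x=np.arange(1, 5))
df.select('x > 2')
saved = df.get_selection()    # keep a reference to the Selection object
df.select('x > 3')            # overwrite the active selection
df.set_selection(saved)       # restore the saved selection into the "default" slot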
test
DataFrame._selection
select_lasso and select almost share the same code
packages/vaex-core/vaex/dataframe.py
def _selection(self, create_selection, name, executor=None, execute_fully=False):
    """select_lasso and select almost share the same code"""
    selection_history = self.selection_histories[name]
    previous_index = self.selection_history_indices[name]
    current = selection_history[previous_index] if selection_history else None
    selection = create_selection(current)
    executor = executor or self.executor
    selection_history.append(selection)
    self.selection_history_indices[name] += 1
    # clip any redo history
    del selection_history[self.selection_history_indices[name]:-1]
    if 0:
        if self.is_local():
            if selection:
                # result = selection.execute(executor=executor, execute_fully=execute_fully)
                result = vaex.promise.Promise.fulfilled(None)
                self.signal_selection_changed.emit(self)
            else:
                result = vaex.promise.Promise.fulfilled(None)
                self.signal_selection_changed.emit(self)
        else:
            self.signal_selection_changed.emit(self)
            result = vaex.promise.Promise.fulfilled(None)
    self.signal_selection_changed.emit(self)
    result = vaex.promise.Promise.fulfilled(None)
    logger.debug("select selection history is %r, index is %r", selection_history, self.selection_history_indices[name])
    return result
[ "select_lasso", "and", "select", "almost", "share", "the", "same", "code" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4312-L4338
[ "def", "_selection", "(", "self", ",", "create_selection", ",", "name", ",", "executor", "=", "None", ",", "execute_fully", "=", "False", ")", ":", "selection_history", "=", "self", ".", "selection_histories", "[", "name", "]", "previous_index", "=", "self", ".", "selection_history_indices", "[", "name", "]", "current", "=", "selection_history", "[", "previous_index", "]", "if", "selection_history", "else", "None", "selection", "=", "create_selection", "(", "current", ")", "executor", "=", "executor", "or", "self", ".", "executor", "selection_history", ".", "append", "(", "selection", ")", "self", ".", "selection_history_indices", "[", "name", "]", "+=", "1", "# clip any redo history", "del", "selection_history", "[", "self", ".", "selection_history_indices", "[", "name", "]", ":", "-", "1", "]", "if", "0", ":", "if", "self", ".", "is_local", "(", ")", ":", "if", "selection", ":", "# result = selection.execute(executor=executor, execute_fully=execute_fully)", "result", "=", "vaex", ".", "promise", ".", "Promise", ".", "fulfilled", "(", "None", ")", "self", ".", "signal_selection_changed", ".", "emit", "(", "self", ")", "else", ":", "result", "=", "vaex", ".", "promise", ".", "Promise", ".", "fulfilled", "(", "None", ")", "self", ".", "signal_selection_changed", ".", "emit", "(", "self", ")", "else", ":", "self", ".", "signal_selection_changed", ".", "emit", "(", "self", ")", "result", "=", "vaex", ".", "promise", ".", "Promise", ".", "fulfilled", "(", "None", ")", "self", ".", "signal_selection_changed", ".", "emit", "(", "self", ")", "result", "=", "vaex", ".", "promise", ".", "Promise", ".", "fulfilled", "(", "None", ")", "logger", ".", "debug", "(", "\"select selection history is %r, index is %r\"", ",", "selection_history", ",", "self", ".", "selection_history_indices", "[", "name", "]", ")", "return", "result" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame.drop
Drop columns (or a single column). :param columns: List of columns or a single column name :param inplace: {inplace} :param check: When true, it will check if the column is used in virtual columns or the filter, and hide it instead.
packages/vaex-core/vaex/dataframe.py
def drop(self, columns, inplace=False, check=True):
    """Drop columns (or a single column).

    :param columns: List of columns or a single column name
    :param inplace: {inplace}
    :param check: When true, it will check if the column is used in virtual columns or the filter, and hide it instead.
    """
    columns = _ensure_list(columns)
    columns = _ensure_strings_from_expressions(columns)
    df = self if inplace else self.copy()
    depending_columns = df._depending_columns(columns_exclude=columns)
    for column in columns:
        if check and column in depending_columns:
            df._hide_column(column)
        else:
            del df[column]
    return df
[ "Drop", "columns", "(", "or", "a", "single", "column", ")", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4429-L4445
[ "def", "drop", "(", "self", ",", "columns", ",", "inplace", "=", "False", ",", "check", "=", "True", ")", ":", "columns", "=", "_ensure_list", "(", "columns", ")", "columns", "=", "_ensure_strings_from_expressions", "(", "columns", ")", "df", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "depending_columns", "=", "df", ".", "_depending_columns", "(", "columns_exclude", "=", "columns", ")", "for", "column", "in", "columns", ":", "if", "check", "and", "column", "in", "depending_columns", ":", "df", ".", "_hide_column", "(", "column", ")", "else", ":", "del", "df", "[", "column", "]", "return", "df" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
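A usage sketch for drop; the virtual column r depends on x and y, so dropping y with the default check=True hides it (renamed with a '__' prefix by _hide_column, shown below) instead of deleting it.

import vaex
import numpy as np

df = vaex.from_arrays(x=np.arange(1, 4), y=np.arange(2, 5))
df['r'] = (df.x**2 + df.y**2)**0.5   # virtual column depending on x and y
df = df.drop('y')                    # y is still needed by r, so it is hidden, not deleted
print(df.get_column_names())         # the renamed, hidden '__y' column is not listed by default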
test
DataFrame._hide_column
Hides a column by prefixing the name with '__'
packages/vaex-core/vaex/dataframe.py
def _hide_column(self, column):
    '''Hides a column by prefixing the name with \'__\''''
    column = _ensure_string_from_expression(column)
    new_name = self._find_valid_name('__' + column)
    self._rename(column, new_name)
[ "Hides", "a", "column", "by", "prefixing", "the", "name", "with", "\\", "__", "\\" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4447-L4451
[ "def", "_hide_column", "(", "self", ",", "column", ")", ":", "column", "=", "_ensure_string_from_expression", "(", "column", ")", "new_name", "=", "self", ".", "_find_valid_name", "(", "'__'", "+", "column", ")", "self", ".", "_rename", "(", "column", ",", "new_name", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame._find_valid_name
Finds a non-colliding name by optional postfixing
packages/vaex-core/vaex/dataframe.py
def _find_valid_name(self, initial_name):
    '''Finds a non-colliding name by optional postfixing'''
    return vaex.utils.find_valid_name(initial_name, used=self.get_column_names(hidden=True))
[ "Finds", "a", "non", "-", "colliding", "name", "by", "optional", "postfixing" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4453-L4455
[ "def", "_find_valid_name", "(", "self", ",", "initial_name", ")", ":", "return", "vaex", ".", "utils", ".", "find_valid_name", "(", "initial_name", ",", "used", "=", "self", ".", "get_column_names", "(", "hidden", "=", "True", ")", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
DataFrame._depending_columns
Find all depending columns for a set of columns (default all), minus the excluded ones
packages/vaex-core/vaex/dataframe.py
def _depending_columns(self, columns=None, columns_exclude=None, check_filter=True):
    '''Find all depending columns for a set of columns (default all), minus the excluded ones'''
    columns = set(columns or self.get_column_names(hidden=True))
    if columns_exclude:
        columns -= set(columns_exclude)
    depending_columns = set()
    for column in columns:
        expression = self._expr(column)
        depending_columns |= expression.variables()
    depending_columns -= set(columns)
    if check_filter:
        if self.filtered:
            selection = self.get_selection(FILTER_SELECTION_NAME)
            depending_columns |= selection._depending_columns(self)
    return depending_columns
[ "Find", "all", "depending", "column", "for", "a", "set", "of", "column", "(", "default", "all", ")", "minus", "the", "excluded", "ones" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4457-L4471
[ "def", "_depending_columns", "(", "self", ",", "columns", "=", "None", ",", "columns_exclude", "=", "None", ",", "check_filter", "=", "True", ")", ":", "columns", "=", "set", "(", "columns", "or", "self", ".", "get_column_names", "(", "hidden", "=", "True", ")", ")", "if", "columns_exclude", ":", "columns", "-=", "set", "(", "columns_exclude", ")", "depending_columns", "=", "set", "(", ")", "for", "column", "in", "columns", ":", "expression", "=", "self", ".", "_expr", "(", "column", ")", "depending_columns", "|=", "expression", ".", "variables", "(", ")", "depending_columns", "-=", "set", "(", "columns", ")", "if", "check_filter", ":", "if", "self", ".", "filtered", ":", "selection", "=", "self", ".", "get_selection", "(", "FILTER_SELECTION_NAME", ")", "depending_columns", "|=", "selection", ".", "_depending_columns", "(", "self", ")", "return", "depending_columns" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
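A hedged sketch of the dependency mechanism this helper builds on; the expected set in the comment is an assumption based on the implementation above, not verified output.

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(3, dtype=float), y=np.arange(3, dtype=float))
df['r'] = df.x + df.y             # virtual column built from x and y
print((df.x + df.y).variables())  # expression-level dependencies; expected {'x', 'y'}
print(df._depending_columns())    # internal helper aggregating variables() over all columns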
test
DataFrame._root_nodes
Returns a list of strings: the virtual columns that are not used in any other virtual column.
packages/vaex-core/vaex/dataframe.py
def _root_nodes(self): """Returns a list of string which are the virtual columns that are not used in any other virtual column.""" # these lists (~used as ordered set) keep track of leafes and root nodes # root nodes root_nodes = [] leafes = [] def walk(node): # this function recursively walks the expression graph if isinstance(node, six.string_types): # we end up at a leaf leafes.append(node) if node in root_nodes: # so it cannot be a root node root_nodes.remove(node) else: node_repr, fname, fobj, deps = node if node_repr in self.virtual_columns: # we encountered a virtual column, similar behaviour as leaf leafes.append(node_repr) if node_repr in root_nodes: root_nodes.remove(node_repr) # resursive part for dep in deps: walk(dep) for column in self.virtual_columns.keys(): if column not in leafes: root_nodes.append(column) node = self[column]._graph() # we don't do the virtual column itself, just it's depedencies node_repr, fname, fobj, deps = node for dep in deps: walk(dep) return root_nodes
def _root_nodes(self): """Returns a list of string which are the virtual columns that are not used in any other virtual column.""" # these lists (~used as ordered set) keep track of leafes and root nodes # root nodes root_nodes = [] leafes = [] def walk(node): # this function recursively walks the expression graph if isinstance(node, six.string_types): # we end up at a leaf leafes.append(node) if node in root_nodes: # so it cannot be a root node root_nodes.remove(node) else: node_repr, fname, fobj, deps = node if node_repr in self.virtual_columns: # we encountered a virtual column, similar behaviour as leaf leafes.append(node_repr) if node_repr in root_nodes: root_nodes.remove(node_repr) # resursive part for dep in deps: walk(dep) for column in self.virtual_columns.keys(): if column not in leafes: root_nodes.append(column) node = self[column]._graph() # we don't do the virtual column itself, just it's depedencies node_repr, fname, fobj, deps = node for dep in deps: walk(dep) return root_nodes
[ "Returns", "a", "list", "of", "string", "which", "are", "the", "virtual", "columns", "that", "are", "not", "used", "in", "any", "other", "virtual", "column", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4483-L4514
[ "def", "_root_nodes", "(", "self", ")", ":", "# these lists (~used as ordered set) keep track of leafes and root nodes", "# root nodes", "root_nodes", "=", "[", "]", "leafes", "=", "[", "]", "def", "walk", "(", "node", ")", ":", "# this function recursively walks the expression graph", "if", "isinstance", "(", "node", ",", "six", ".", "string_types", ")", ":", "# we end up at a leaf", "leafes", ".", "append", "(", "node", ")", "if", "node", "in", "root_nodes", ":", "# so it cannot be a root node", "root_nodes", ".", "remove", "(", "node", ")", "else", ":", "node_repr", ",", "fname", ",", "fobj", ",", "deps", "=", "node", "if", "node_repr", "in", "self", ".", "virtual_columns", ":", "# we encountered a virtual column, similar behaviour as leaf", "leafes", ".", "append", "(", "node_repr", ")", "if", "node_repr", "in", "root_nodes", ":", "root_nodes", ".", "remove", "(", "node_repr", ")", "# resursive part", "for", "dep", "in", "deps", ":", "walk", "(", "dep", ")", "for", "column", "in", "self", ".", "virtual_columns", ".", "keys", "(", ")", ":", "if", "column", "not", "in", "leafes", ":", "root_nodes", ".", "append", "(", "column", ")", "node", "=", "self", "[", "column", "]", ".", "_graph", "(", ")", "# we don't do the virtual column itself, just it's depedencies", "node_repr", ",", "fname", ",", "fobj", ",", "deps", "=", "node", "for", "dep", "in", "deps", ":", "walk", "(", "dep", ")", "return", "root_nodes" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
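A small sketch, under the assumption (following the implementation above) that a virtual column consumed by another virtual column stops being a root node.

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(3, dtype=float), y=np.arange(3, dtype=float))
df['r'] = (df.x**2 + df.y**2)**0.5  # depends only on real columns
df['r2'] = df.r * 2                 # depends on the virtual column r
print(df._root_nodes())             # expected ['r2']; r is consumed by r2, so it is not a root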
test
DataFrame._graphviz
Return a graphviz.Digraph object with a graph of all virtual columns
packages/vaex-core/vaex/dataframe.py
def _graphviz(self, dot=None): """Return a graphviz.Digraph object with a graph of all virtual columns""" from graphviz import Digraph dot = dot or Digraph(comment='whole dataframe') root_nodes = self._root_nodes() for column in root_nodes: self[column]._graphviz(dot=dot) return dot
def _graphviz(self, dot=None): """Return a graphviz.Digraph object with a graph of all virtual columns""" from graphviz import Digraph dot = dot or Digraph(comment='whole dataframe') root_nodes = self._root_nodes() for column in root_nodes: self[column]._graphviz(dot=dot) return dot
[ "Return", "a", "graphviz", ".", "Digraph", "object", "with", "a", "graph", "of", "all", "virtual", "columns" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4516-L4523
[ "def", "_graphviz", "(", "self", ",", "dot", "=", "None", ")", ":", "from", "graphviz", "import", "Digraph", "dot", "=", "dot", "or", "Digraph", "(", "comment", "=", "'whole dataframe'", ")", "root_nodes", "=", "self", ".", "_root_nodes", "(", ")", "for", "column", "in", "root_nodes", ":", "self", "[", "column", "]", ".", "_graphviz", "(", "dot", "=", "dot", ")", "return", "dot" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
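A usage sketch; it assumes the optional graphviz package is installed and relies only on the Digraph attributes (source, render) from that package.

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(3, dtype=float))
df['y'] = df.x * 2
df['z'] = df.y + 1
dot = df._graphviz()  # graphviz.Digraph describing all virtual columns
print(dot.source)     # the DOT text; dot.render('columns') would write a file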
test
DataFrameLocal.categorize
Mark column as categorical, with given labels, assuming zero indexing
packages/vaex-core/vaex/dataframe.py
def categorize(self, column, labels=None, check=True): """Mark column as categorical, with given labels, assuming zero indexing""" column = _ensure_string_from_expression(column) if check: vmin, vmax = self.minmax(column) if labels is None: N = int(vmax + 1) labels = list(map(str, range(N))) if (vmax - vmin) >= len(labels): raise ValueError('value of {} found, which is larger than number of labels {}'.format(vmax, len(labels))) self._categories[column] = dict(labels=labels, N=len(labels))
def categorize(self, column, labels=None, check=True): """Mark column as categorical, with given labels, assuming zero indexing""" column = _ensure_string_from_expression(column) if check: vmin, vmax = self.minmax(column) if labels is None: N = int(vmax + 1) labels = list(map(str, range(N))) if (vmax - vmin) >= len(labels): raise ValueError('value of {} found, which is larger than number of labels {}'.format(vmax, len(labels))) self._categories[column] = dict(labels=labels, N=len(labels))
[ "Mark", "column", "as", "categorical", "with", "given", "labels", "assuming", "zero", "indexing" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4557-L4567
[ "def", "categorize", "(", "self", ",", "column", ",", "labels", "=", "None", ",", "check", "=", "True", ")", ":", "column", "=", "_ensure_string_from_expression", "(", "column", ")", "if", "check", ":", "vmin", ",", "vmax", "=", "self", ".", "minmax", "(", "column", ")", "if", "labels", "is", "None", ":", "N", "=", "int", "(", "vmax", "+", "1", ")", "labels", "=", "list", "(", "map", "(", "str", ",", "range", "(", "N", ")", ")", ")", "if", "(", "vmax", "-", "vmin", ")", ">=", "len", "(", "labels", ")", ":", "raise", "ValueError", "(", "'value of {} found, which is larger than number of labels {}'", ".", "format", "(", "vmax", ",", "len", "(", "labels", ")", ")", ")", "self", ".", "_categories", "[", "column", "]", "=", "dict", "(", "labels", "=", "labels", ",", "N", "=", "len", "(", "labels", ")", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
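A minimal usage sketch; reading the result back through the internal _categories dict is an assumption that follows from the implementation above.

import numpy as np
import vaex

df = vaex.from_arrays(cls=np.array([0, 1, 2, 1, 0]))
df.categorize('cls', labels=['apple', 'banana', 'cherry'])  # zero-based codes -> labels
print(df._categories['cls']['labels'])                      # ['apple', 'banana', 'cherry']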
test
DataFrameLocal.ordinal_encode
Encode column as ordinal values and mark it as categorical. The existing column is renamed to a hidden column and replaced by a numerical column with values between [0, len(values)-1].
packages/vaex-core/vaex/dataframe.py
def ordinal_encode(self, column, values=None, inplace=False): """Encode column as ordinal values and mark it as categorical. The existing column is renamed to a hidden column and replaced by a numerical columns with values between [0, len(values)-1]. """ column = _ensure_string_from_expression(column) df = self if inplace else self.copy() # for the codes, we need to work on the unfiltered dataset, since the filter # may change, and we also cannot add an array that is smaller in length df_unfiltered = df.copy() # maybe we need some filter manipulation methods df_unfiltered.select_nothing(name=FILTER_SELECTION_NAME) df_unfiltered._length_unfiltered = df._length_original df_unfiltered.set_active_range(0, df._length_original) # codes point to the index of found_values # meaning: found_values[codes[0]] == ds[column].values[0] found_values, codes = df_unfiltered.unique(column, return_inverse=True) if values is None: values = found_values else: # we have specified which values we should support, anything # not found will be masked translation = np.zeros(len(found_values), dtype=np.uint64) # mark values that are in the column, but not in values with a special value missing_value = len(found_values) for i, found_value in enumerate(found_values): try: found_value = found_value.decode('ascii') except: pass if found_value not in values: # not present, we need a missing value translation[i] = missing_value else: translation[i] = values.index(found_value) codes = translation[codes] if missing_value in translation: # all special values will be marked as missing codes = np.ma.masked_array(codes, codes==missing_value) original_column = df.rename_column(column, '__original_' + column, unique=True) labels = [str(k) for k in values] df.add_column(column, codes) df._categories[column] = dict(labels=labels, N=len(values), values=values) return df
def ordinal_encode(self, column, values=None, inplace=False): """Encode column as ordinal values and mark it as categorical. The existing column is renamed to a hidden column and replaced by a numerical columns with values between [0, len(values)-1]. """ column = _ensure_string_from_expression(column) df = self if inplace else self.copy() # for the codes, we need to work on the unfiltered dataset, since the filter # may change, and we also cannot add an array that is smaller in length df_unfiltered = df.copy() # maybe we need some filter manipulation methods df_unfiltered.select_nothing(name=FILTER_SELECTION_NAME) df_unfiltered._length_unfiltered = df._length_original df_unfiltered.set_active_range(0, df._length_original) # codes point to the index of found_values # meaning: found_values[codes[0]] == ds[column].values[0] found_values, codes = df_unfiltered.unique(column, return_inverse=True) if values is None: values = found_values else: # we have specified which values we should support, anything # not found will be masked translation = np.zeros(len(found_values), dtype=np.uint64) # mark values that are in the column, but not in values with a special value missing_value = len(found_values) for i, found_value in enumerate(found_values): try: found_value = found_value.decode('ascii') except: pass if found_value not in values: # not present, we need a missing value translation[i] = missing_value else: translation[i] = values.index(found_value) codes = translation[codes] if missing_value in translation: # all special values will be marked as missing codes = np.ma.masked_array(codes, codes==missing_value) original_column = df.rename_column(column, '__original_' + column, unique=True) labels = [str(k) for k in values] df.add_column(column, codes) df._categories[column] = dict(labels=labels, N=len(values), values=values) return df
[ "Encode", "column", "as", "ordinal", "values", "and", "mark", "it", "as", "categorical", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4569-L4613
[ "def", "ordinal_encode", "(", "self", ",", "column", ",", "values", "=", "None", ",", "inplace", "=", "False", ")", ":", "column", "=", "_ensure_string_from_expression", "(", "column", ")", "df", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "# for the codes, we need to work on the unfiltered dataset, since the filter", "# may change, and we also cannot add an array that is smaller in length", "df_unfiltered", "=", "df", ".", "copy", "(", ")", "# maybe we need some filter manipulation methods", "df_unfiltered", ".", "select_nothing", "(", "name", "=", "FILTER_SELECTION_NAME", ")", "df_unfiltered", ".", "_length_unfiltered", "=", "df", ".", "_length_original", "df_unfiltered", ".", "set_active_range", "(", "0", ",", "df", ".", "_length_original", ")", "# codes point to the index of found_values", "# meaning: found_values[codes[0]] == ds[column].values[0]", "found_values", ",", "codes", "=", "df_unfiltered", ".", "unique", "(", "column", ",", "return_inverse", "=", "True", ")", "if", "values", "is", "None", ":", "values", "=", "found_values", "else", ":", "# we have specified which values we should support, anything", "# not found will be masked", "translation", "=", "np", ".", "zeros", "(", "len", "(", "found_values", ")", ",", "dtype", "=", "np", ".", "uint64", ")", "# mark values that are in the column, but not in values with a special value", "missing_value", "=", "len", "(", "found_values", ")", "for", "i", ",", "found_value", "in", "enumerate", "(", "found_values", ")", ":", "try", ":", "found_value", "=", "found_value", ".", "decode", "(", "'ascii'", ")", "except", ":", "pass", "if", "found_value", "not", "in", "values", ":", "# not present, we need a missing value", "translation", "[", "i", "]", "=", "missing_value", "else", ":", "translation", "[", "i", "]", "=", "values", ".", "index", "(", "found_value", ")", "codes", "=", "translation", "[", "codes", "]", "if", "missing_value", "in", "translation", ":", "# all special values will be marked as missing", "codes", "=", "np", ".", "ma", ".", "masked_array", "(", "codes", ",", "codes", "==", "missing_value", ")", "original_column", "=", "df", ".", "rename_column", "(", "column", ",", "'__original_'", "+", "column", ",", "unique", "=", "True", ")", "labels", "=", "[", "str", "(", "k", ")", "for", "k", "in", "values", "]", "df", ".", "add_column", "(", "column", ",", "codes", ")", "df", ".", "_categories", "[", "column", "]", "=", "dict", "(", "labels", "=", "labels", ",", "N", "=", "len", "(", "values", ")", ",", "values", "=", "values", ")", "return", "df" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
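A hedged usage sketch; the hidden '__original_colour' name and the _categories access are assumptions based on the implementation above.

import numpy as np
import vaex

df = vaex.from_arrays(colour=np.array(['red', 'green', 'red', 'blue']))
df = df.ordinal_encode('colour')           # returns a modified copy (inplace=False)
print(df.evaluate('colour'))               # integer codes pointing into the labels
print(df._categories['colour']['labels'])  # the original values, now as labels
print(df.get_column_names(hidden=True))    # the original data survives as '__original_colour'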
test
DataFrameLocal.data
Gives direct access to the data as numpy arrays. Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion. Only real (i.e. non-virtual) columns can be accessed; for getting the data from virtual columns, use DataFrame.evaluate(...). Columns can be accessed by their names, which are attributes. The attributes are of type numpy.ndarray. Example: >>> df = vaex.example() >>> r = np.sqrt(df.data.x**2 + df.data.y**2)
packages/vaex-core/vaex/dataframe.py
def data(self): """Gives direct access to the data as numpy arrays. Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion. Only real columns (i.e. no virtual) columns can be accessed, for getting the data from virtual columns, use DataFrame.evalulate(...). Columns can be accesed by there names, which are attributes. The attribues are of type numpy.ndarray. Example: >>> df = vaex.example() >>> r = np.sqrt(df.data.x**2 + df.data.y**2) """ class Datas(object): pass datas = Datas() for name, array in self.columns.items(): setattr(datas, name, array) return datas
def data(self): """Gives direct access to the data as numpy arrays. Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion. Only real columns (i.e. no virtual) columns can be accessed, for getting the data from virtual columns, use DataFrame.evalulate(...). Columns can be accesed by there names, which are attributes. The attribues are of type numpy.ndarray. Example: >>> df = vaex.example() >>> r = np.sqrt(df.data.x**2 + df.data.y**2) """ class Datas(object): pass datas = Datas() for name, array in self.columns.items(): setattr(datas, name, array) return datas
[ "Gives", "direct", "access", "to", "the", "data", "as", "numpy", "arrays", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4619-L4640
[ "def", "data", "(", "self", ")", ":", "class", "Datas", "(", "object", ")", ":", "pass", "datas", "=", "Datas", "(", ")", "for", "name", ",", "array", "in", "self", ".", "columns", ".", "items", "(", ")", ":", "setattr", "(", "datas", ",", "name", ",", "array", ")", "return", "datas" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
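The docstring's own example needs vaex.example(); the sketch below shows the same idea on a tiny in-memory DataFrame.

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(5, dtype=float), y=np.arange(5, dtype=float))
r = np.sqrt(df.data.x**2 + df.data.y**2)  # .data.x / .data.y are plain numpy arrays
print(r)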
test
DataFrameLocal.shallow_copy
Creates a (shallow) copy of the DataFrame. It will link to the same data, but will have its own state, e.g. virtual columns, variables, selection etc.
packages/vaex-core/vaex/dataframe.py
def shallow_copy(self, virtual=True, variables=True): """Creates a (shallow) copy of the DataFrame. It will link to the same data, but will have its own state, e.g. virtual columns, variables, selection etc. """ df = DataFrameLocal(self.name, self.path, self.column_names) df.columns.update(self.columns) df._length_unfiltered = self._length_unfiltered df._length_original = self._length_original df._index_end = self._index_end df._index_start = self._index_start df._active_fraction = self._active_fraction if virtual: df.virtual_columns.update(self.virtual_columns) if variables: df.variables.update(self.variables) # half shallow/deep copy # for key, value in self.selection_histories.items(): # df.selection_histories[key] = list(value) # for key, value in self.selection_history_indices.items(): # df.selection_history_indices[key] = value return df
def shallow_copy(self, virtual=True, variables=True): """Creates a (shallow) copy of the DataFrame. It will link to the same data, but will have its own state, e.g. virtual columns, variables, selection etc. """ df = DataFrameLocal(self.name, self.path, self.column_names) df.columns.update(self.columns) df._length_unfiltered = self._length_unfiltered df._length_original = self._length_original df._index_end = self._index_end df._index_start = self._index_start df._active_fraction = self._active_fraction if virtual: df.virtual_columns.update(self.virtual_columns) if variables: df.variables.update(self.variables) # half shallow/deep copy # for key, value in self.selection_histories.items(): # df.selection_histories[key] = list(value) # for key, value in self.selection_history_indices.items(): # df.selection_history_indices[key] = value return df
[ "Creates", "a", "(", "shallow", ")", "copy", "of", "the", "DataFrame", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4694-L4716
[ "def", "shallow_copy", "(", "self", ",", "virtual", "=", "True", ",", "variables", "=", "True", ")", ":", "df", "=", "DataFrameLocal", "(", "self", ".", "name", ",", "self", ".", "path", ",", "self", ".", "column_names", ")", "df", ".", "columns", ".", "update", "(", "self", ".", "columns", ")", "df", ".", "_length_unfiltered", "=", "self", ".", "_length_unfiltered", "df", ".", "_length_original", "=", "self", ".", "_length_original", "df", ".", "_index_end", "=", "self", ".", "_index_end", "df", ".", "_index_start", "=", "self", ".", "_index_start", "df", ".", "_active_fraction", "=", "self", ".", "_active_fraction", "if", "virtual", ":", "df", ".", "virtual_columns", ".", "update", "(", "self", ".", "virtual_columns", ")", "if", "variables", ":", "df", ".", "variables", ".", "update", "(", "self", ".", "variables", ")", "# half shallow/deep copy", "# for key, value in self.selection_histories.items():", "# df.selection_histories[key] = list(value)", "# for key, value in self.selection_history_indices.items():", "# df.selection_history_indices[key] = value", "return", "df" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
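A sketch of what "shallow" means here, based on the implementation above: the column data is shared, while the state dictionaries are separate objects.

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(5, dtype=float))
copy = df.shallow_copy()
print(copy.columns['x'] is df.columns['x'])  # True: the underlying array is shared
print(copy.variables is df.variables)        # False: each copy keeps its own state dicts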
test
DataFrameLocal.length
Get the length of the DataFrame, either of the selection or of the whole DataFrame. If selection is False, it returns len(df). TODO: Implement this in DataFrameRemote, and move the method up in :func:`DataFrame.length` :param selection: When True, will return the number of selected rows :return:
packages/vaex-core/vaex/dataframe.py
def length(self, selection=False): """Get the length of the DataFrames, for the selection of the whole DataFrame. If selection is False, it returns len(df). TODO: Implement this in DataFrameRemote, and move the method up in :func:`DataFrame.length` :param selection: When True, will return the number of selected rows :return: """ if selection: return 0 if self.mask is None else np.sum(self.mask) else: return len(self)
def length(self, selection=False): """Get the length of the DataFrames, for the selection of the whole DataFrame. If selection is False, it returns len(df). TODO: Implement this in DataFrameRemote, and move the method up in :func:`DataFrame.length` :param selection: When True, will return the number of selected rows :return: """ if selection: return 0 if self.mask is None else np.sum(self.mask) else: return len(self)
[ "Get", "the", "length", "of", "the", "DataFrames", "for", "the", "selection", "of", "the", "whole", "DataFrame", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4722-L4735
[ "def", "length", "(", "self", ",", "selection", "=", "False", ")", ":", "if", "selection", ":", "return", "0", "if", "self", ".", "mask", "is", "None", "else", "np", ".", "sum", "(", "self", ".", "mask", ")", "else", ":", "return", "len", "(", "self", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
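A usage sketch; counting selected rows relies on the cached default-selection mask (self.mask in the implementation above), so the second count is only meaningful once that mask has been evaluated.

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(10, dtype=float))
print(df.length())                # same as len(df) when selection=False
df.select(df.x > 4)
print(df.length(selection=True))  # rows in the default selection, via the cached mask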
test
DataFrameLocal._hstack
Join the columns of the other DataFrame to this one, assuming the ordering is the same
packages/vaex-core/vaex/dataframe.py
def _hstack(self, other, prefix=None): """Join the columns of the other DataFrame to this one, assuming the ordering is the same""" assert len(self) == len(other), "does not make sense to horizontally stack DataFrames with different lengths" for name in other.get_column_names(): if prefix: new_name = prefix + name else: new_name = name self.add_column(new_name, other.columns[name])
def _hstack(self, other, prefix=None): """Join the columns of the other DataFrame to this one, assuming the ordering is the same""" assert len(self) == len(other), "does not make sense to horizontally stack DataFrames with different lengths" for name in other.get_column_names(): if prefix: new_name = prefix + name else: new_name = name self.add_column(new_name, other.columns[name])
[ "Join", "the", "columns", "of", "the", "other", "DataFrame", "to", "this", "one", "assuming", "the", "ordering", "is", "the", "same" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4767-L4775
[ "def", "_hstack", "(", "self", ",", "other", ",", "prefix", "=", "None", ")", ":", "assert", "len", "(", "self", ")", "==", "len", "(", "other", ")", ",", "\"does not make sense to horizontally stack DataFrames with different lengths\"", "for", "name", "in", "other", ".", "get_column_names", "(", ")", ":", "if", "prefix", ":", "new_name", "=", "prefix", "+", "name", "else", ":", "new_name", "=", "name", "self", ".", "add_column", "(", "new_name", ",", "other", ".", "columns", "[", "name", "]", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
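A sketch of this internal helper; both DataFrames must have equal length, and the prefix is optional.

import numpy as np
import vaex

df1 = vaex.from_arrays(x=np.arange(3, dtype=float))
df2 = vaex.from_arrays(y=np.arange(3, dtype=float) * 10)
df1._hstack(df2, prefix='other_')  # adds df2's columns to df1 as other_y
print(df1.get_column_names())      # expected ['x', 'other_y']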
test
DataFrameLocal.concat
Concatenates two DataFrames, adding the rows of the other DataFrame to the current one, returned in a new DataFrame. No copy of the data is made. :param other: The other DataFrame that is concatenated with this DataFrame :return: New DataFrame with the rows concatenated :rtype: DataFrameConcatenated
packages/vaex-core/vaex/dataframe.py
def concat(self, other): """Concatenates two DataFrames, adding the rows of one the other DataFrame to the current, returned in a new DataFrame. No copy of the data is made. :param other: The other DataFrame that is concatenated with this DataFrame :return: New DataFrame with the rows concatenated :rtype: DataFrameConcatenated """ dfs = [] if isinstance(self, DataFrameConcatenated): dfs.extend(self.dfs) else: dfs.extend([self]) if isinstance(other, DataFrameConcatenated): dfs.extend(other.dfs) else: dfs.extend([other]) return DataFrameConcatenated(dfs)
def concat(self, other): """Concatenates two DataFrames, adding the rows of one the other DataFrame to the current, returned in a new DataFrame. No copy of the data is made. :param other: The other DataFrame that is concatenated with this DataFrame :return: New DataFrame with the rows concatenated :rtype: DataFrameConcatenated """ dfs = [] if isinstance(self, DataFrameConcatenated): dfs.extend(self.dfs) else: dfs.extend([self]) if isinstance(other, DataFrameConcatenated): dfs.extend(other.dfs) else: dfs.extend([other]) return DataFrameConcatenated(dfs)
[ "Concatenates", "two", "DataFrames", "adding", "the", "rows", "of", "one", "the", "other", "DataFrame", "to", "the", "current", "returned", "in", "a", "new", "DataFrame", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4777-L4795
[ "def", "concat", "(", "self", ",", "other", ")", ":", "dfs", "=", "[", "]", "if", "isinstance", "(", "self", ",", "DataFrameConcatenated", ")", ":", "dfs", ".", "extend", "(", "self", ".", "dfs", ")", "else", ":", "dfs", ".", "extend", "(", "[", "self", "]", ")", "if", "isinstance", "(", "other", ",", "DataFrameConcatenated", ")", ":", "dfs", ".", "extend", "(", "other", ".", "dfs", ")", "else", ":", "dfs", ".", "extend", "(", "[", "other", "]", ")", "return", "DataFrameConcatenated", "(", "dfs", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
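A minimal usage sketch:

import numpy as np
import vaex

df1 = vaex.from_arrays(x=np.array([1.0, 2.0]))
df2 = vaex.from_arrays(x=np.array([3.0, 4.0]))
df = df1.concat(df2)  # rows of df2 follow the rows of df1; no data is copied
print(len(df))        # 4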
test
DataFrameLocal.evaluate
The local implementation of :func:`DataFrame.evaluate`
packages/vaex-core/vaex/dataframe.py
def evaluate(self, expression, i1=None, i2=None, out=None, selection=None, filtered=True, internal=False): """The local implementation of :func:`DataFrame.evaluate`""" expression = _ensure_string_from_expression(expression) selection = _ensure_strings_from_expressions(selection) i1 = i1 or 0 i2 = i2 or (len(self) if (self.filtered and filtered) else self.length_unfiltered()) mask = None if self.filtered and filtered: # if we filter, i1:i2 has a different meaning indices = self._filtered_range_to_unfiltered_indices(i1, i2) i1 = indices[0] i2 = indices[-1] + 1 # +1 to make it inclusive # for both a selection or filtering we have a mask if selection is not None or (self.filtered and filtered): mask = self.evaluate_selection_mask(selection, i1, i2) scope = scopes._BlockScope(self, i1, i2, mask=mask, **self.variables) # value = value[mask] if out is not None: scope.buffers[expression] = out value = scope.evaluate(expression) if isinstance(value, ColumnString) and not internal: value = value.to_numpy() return value
def evaluate(self, expression, i1=None, i2=None, out=None, selection=None, filtered=True, internal=False): """The local implementation of :func:`DataFrame.evaluate`""" expression = _ensure_string_from_expression(expression) selection = _ensure_strings_from_expressions(selection) i1 = i1 or 0 i2 = i2 or (len(self) if (self.filtered and filtered) else self.length_unfiltered()) mask = None if self.filtered and filtered: # if we filter, i1:i2 has a different meaning indices = self._filtered_range_to_unfiltered_indices(i1, i2) i1 = indices[0] i2 = indices[-1] + 1 # +1 to make it inclusive # for both a selection or filtering we have a mask if selection is not None or (self.filtered and filtered): mask = self.evaluate_selection_mask(selection, i1, i2) scope = scopes._BlockScope(self, i1, i2, mask=mask, **self.variables) # value = value[mask] if out is not None: scope.buffers[expression] = out value = scope.evaluate(expression) if isinstance(value, ColumnString) and not internal: value = value.to_numpy() return value
[ "The", "local", "implementation", "of", ":", "func", ":", "DataFrame", ".", "evaluate" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4839-L4861
[ "def", "evaluate", "(", "self", ",", "expression", ",", "i1", "=", "None", ",", "i2", "=", "None", ",", "out", "=", "None", ",", "selection", "=", "None", ",", "filtered", "=", "True", ",", "internal", "=", "False", ")", ":", "expression", "=", "_ensure_string_from_expression", "(", "expression", ")", "selection", "=", "_ensure_strings_from_expressions", "(", "selection", ")", "i1", "=", "i1", "or", "0", "i2", "=", "i2", "or", "(", "len", "(", "self", ")", "if", "(", "self", ".", "filtered", "and", "filtered", ")", "else", "self", ".", "length_unfiltered", "(", ")", ")", "mask", "=", "None", "if", "self", ".", "filtered", "and", "filtered", ":", "# if we filter, i1:i2 has a different meaning", "indices", "=", "self", ".", "_filtered_range_to_unfiltered_indices", "(", "i1", ",", "i2", ")", "i1", "=", "indices", "[", "0", "]", "i2", "=", "indices", "[", "-", "1", "]", "+", "1", "# +1 to make it inclusive", "# for both a selection or filtering we have a mask", "if", "selection", "is", "not", "None", "or", "(", "self", ".", "filtered", "and", "filtered", ")", ":", "mask", "=", "self", ".", "evaluate_selection_mask", "(", "selection", ",", "i1", ",", "i2", ")", "scope", "=", "scopes", ".", "_BlockScope", "(", "self", ",", "i1", ",", "i2", ",", "mask", "=", "mask", ",", "*", "*", "self", ".", "variables", ")", "# value = value[mask]", "if", "out", "is", "not", "None", ":", "scope", ".", "buffers", "[", "expression", "]", "=", "out", "value", "=", "scope", ".", "evaluate", "(", "expression", ")", "if", "isinstance", "(", "value", ",", "ColumnString", ")", "and", "not", "internal", ":", "value", "=", "value", ".", "to_numpy", "(", ")", "return", "value" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
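A usage sketch of the row-range arguments:

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(5, dtype=float))
print(df.evaluate('x**2'))               # full column as a numpy array
print(df.evaluate('x + 1', i1=1, i2=3))  # only rows 1 and 2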
test
DataFrameLocal.compare
Compare two DataFrames and report their differences; use with care for large DataFrames
packages/vaex-core/vaex/dataframe.py
def compare(self, other, report_missing=True, report_difference=False, show=10, orderby=None, column_names=None): """Compare two DataFrames and report their difference, use with care for large DataFrames""" if column_names is None: column_names = self.get_column_names(virtual=False) for other_column_name in other.get_column_names(virtual=False): if other_column_name not in column_names: column_names.append(other_column_name) different_values = [] missing = [] type_mismatch = [] meta_mismatch = [] assert len(self) == len(other) if orderby: index1 = np.argsort(self.columns[orderby]) index2 = np.argsort(other.columns[orderby]) for column_name in column_names: if column_name not in self.get_column_names(virtual=False): missing.append(column_name) if report_missing: print("%s missing from this DataFrame" % column_name) elif column_name not in other.get_column_names(virtual=False): missing.append(column_name) if report_missing: print("%s missing from other DataFrame" % column_name) else: ucd1 = self.ucds.get(column_name) ucd2 = other.ucds.get(column_name) if ucd1 != ucd2: print("ucd mismatch : %r vs %r for %s" % (ucd1, ucd2, column_name)) meta_mismatch.append(column_name) unit1 = self.units.get(column_name) unit2 = other.units.get(column_name) if unit1 != unit2: print("unit mismatch : %r vs %r for %s" % (unit1, unit2, column_name)) meta_mismatch.append(column_name) type1 = self.dtype(column_name) if type1 != str_type: type1 = type1.type type2 = other.dtype(column_name) if type2 != str_type: type2 = type2.type if type1 != type2: print("different dtypes: %s vs %s for %s" % (self.dtype(column_name), other.dtype(column_name), column_name)) type_mismatch.append(column_name) else: # a = self.columns[column_name] # b = other.columns[column_name] # if self.filtered: # a = a[self.evaluate_selection_mask(None)] # if other.filtered: # b = b[other.evaluate_selection_mask(None)] a = self.evaluate(column_name) b = other.evaluate(column_name) if orderby: a = a[index1] b = b[index2] def normalize(ar): if ar.dtype == str_type: return ar if ar.dtype.kind == "f" and hasattr(ar, "mask"): mask = ar.mask ar = ar.copy() ar[mask] = np.nan if ar.dtype.kind in "SU": if hasattr(ar, "mask"): data = ar.data else: data = ar values = [value.strip() for value in data.tolist()] if hasattr(ar, "mask"): ar = np.ma.masked_array(values, ar.mask) else: ar = np.array(values) return ar def equal_mask(a, b): a = normalize(a) b = normalize(b) boolean_mask = (a == b) if self.dtype(column_name) != str_type and self.dtype(column_name).kind == 'f': # floats with nan won't equal itself, i.e. NaN != NaN boolean_mask |= (np.isnan(a) & np.isnan(b)) return boolean_mask boolean_mask = equal_mask(a, b) all_equal = np.all(boolean_mask) if not all_equal: count = np.sum(~boolean_mask) print("%s does not match for both DataFrames, %d rows are diffent out of %d" % (column_name, count, len(self))) different_values.append(column_name) if report_difference: indices = np.arange(len(self))[~boolean_mask] values1 = self.columns[column_name][:][~boolean_mask] values2 = other.columns[column_name][:][~boolean_mask] print("\tshowing difference for the first 10") for i in range(min(len(values1), show)): try: diff = values1[i] - values2[i] except: diff = "does not exists" print("%s[%d] == %s != %s other.%s[%d] (diff = %s)" % (column_name, indices[i], values1[i], values2[i], column_name, indices[i], diff)) return different_values, missing, type_mismatch, meta_mismatch
def compare(self, other, report_missing=True, report_difference=False, show=10, orderby=None, column_names=None): """Compare two DataFrames and report their difference, use with care for large DataFrames""" if column_names is None: column_names = self.get_column_names(virtual=False) for other_column_name in other.get_column_names(virtual=False): if other_column_name not in column_names: column_names.append(other_column_name) different_values = [] missing = [] type_mismatch = [] meta_mismatch = [] assert len(self) == len(other) if orderby: index1 = np.argsort(self.columns[orderby]) index2 = np.argsort(other.columns[orderby]) for column_name in column_names: if column_name not in self.get_column_names(virtual=False): missing.append(column_name) if report_missing: print("%s missing from this DataFrame" % column_name) elif column_name not in other.get_column_names(virtual=False): missing.append(column_name) if report_missing: print("%s missing from other DataFrame" % column_name) else: ucd1 = self.ucds.get(column_name) ucd2 = other.ucds.get(column_name) if ucd1 != ucd2: print("ucd mismatch : %r vs %r for %s" % (ucd1, ucd2, column_name)) meta_mismatch.append(column_name) unit1 = self.units.get(column_name) unit2 = other.units.get(column_name) if unit1 != unit2: print("unit mismatch : %r vs %r for %s" % (unit1, unit2, column_name)) meta_mismatch.append(column_name) type1 = self.dtype(column_name) if type1 != str_type: type1 = type1.type type2 = other.dtype(column_name) if type2 != str_type: type2 = type2.type if type1 != type2: print("different dtypes: %s vs %s for %s" % (self.dtype(column_name), other.dtype(column_name), column_name)) type_mismatch.append(column_name) else: # a = self.columns[column_name] # b = other.columns[column_name] # if self.filtered: # a = a[self.evaluate_selection_mask(None)] # if other.filtered: # b = b[other.evaluate_selection_mask(None)] a = self.evaluate(column_name) b = other.evaluate(column_name) if orderby: a = a[index1] b = b[index2] def normalize(ar): if ar.dtype == str_type: return ar if ar.dtype.kind == "f" and hasattr(ar, "mask"): mask = ar.mask ar = ar.copy() ar[mask] = np.nan if ar.dtype.kind in "SU": if hasattr(ar, "mask"): data = ar.data else: data = ar values = [value.strip() for value in data.tolist()] if hasattr(ar, "mask"): ar = np.ma.masked_array(values, ar.mask) else: ar = np.array(values) return ar def equal_mask(a, b): a = normalize(a) b = normalize(b) boolean_mask = (a == b) if self.dtype(column_name) != str_type and self.dtype(column_name).kind == 'f': # floats with nan won't equal itself, i.e. NaN != NaN boolean_mask |= (np.isnan(a) & np.isnan(b)) return boolean_mask boolean_mask = equal_mask(a, b) all_equal = np.all(boolean_mask) if not all_equal: count = np.sum(~boolean_mask) print("%s does not match for both DataFrames, %d rows are diffent out of %d" % (column_name, count, len(self))) different_values.append(column_name) if report_difference: indices = np.arange(len(self))[~boolean_mask] values1 = self.columns[column_name][:][~boolean_mask] values2 = other.columns[column_name][:][~boolean_mask] print("\tshowing difference for the first 10") for i in range(min(len(values1), show)): try: diff = values1[i] - values2[i] except: diff = "does not exists" print("%s[%d] == %s != %s other.%s[%d] (diff = %s)" % (column_name, indices[i], values1[i], values2[i], column_name, indices[i], diff)) return different_values, missing, type_mismatch, meta_mismatch
[ "Compare", "two", "DataFrames", "and", "report", "their", "difference", "use", "with", "care", "for", "large", "DataFrames" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4867-L4967
[ "def", "compare", "(", "self", ",", "other", ",", "report_missing", "=", "True", ",", "report_difference", "=", "False", ",", "show", "=", "10", ",", "orderby", "=", "None", ",", "column_names", "=", "None", ")", ":", "if", "column_names", "is", "None", ":", "column_names", "=", "self", ".", "get_column_names", "(", "virtual", "=", "False", ")", "for", "other_column_name", "in", "other", ".", "get_column_names", "(", "virtual", "=", "False", ")", ":", "if", "other_column_name", "not", "in", "column_names", ":", "column_names", ".", "append", "(", "other_column_name", ")", "different_values", "=", "[", "]", "missing", "=", "[", "]", "type_mismatch", "=", "[", "]", "meta_mismatch", "=", "[", "]", "assert", "len", "(", "self", ")", "==", "len", "(", "other", ")", "if", "orderby", ":", "index1", "=", "np", ".", "argsort", "(", "self", ".", "columns", "[", "orderby", "]", ")", "index2", "=", "np", ".", "argsort", "(", "other", ".", "columns", "[", "orderby", "]", ")", "for", "column_name", "in", "column_names", ":", "if", "column_name", "not", "in", "self", ".", "get_column_names", "(", "virtual", "=", "False", ")", ":", "missing", ".", "append", "(", "column_name", ")", "if", "report_missing", ":", "print", "(", "\"%s missing from this DataFrame\"", "%", "column_name", ")", "elif", "column_name", "not", "in", "other", ".", "get_column_names", "(", "virtual", "=", "False", ")", ":", "missing", ".", "append", "(", "column_name", ")", "if", "report_missing", ":", "print", "(", "\"%s missing from other DataFrame\"", "%", "column_name", ")", "else", ":", "ucd1", "=", "self", ".", "ucds", ".", "get", "(", "column_name", ")", "ucd2", "=", "other", ".", "ucds", ".", "get", "(", "column_name", ")", "if", "ucd1", "!=", "ucd2", ":", "print", "(", "\"ucd mismatch : %r vs %r for %s\"", "%", "(", "ucd1", ",", "ucd2", ",", "column_name", ")", ")", "meta_mismatch", ".", "append", "(", "column_name", ")", "unit1", "=", "self", ".", "units", ".", "get", "(", "column_name", ")", "unit2", "=", "other", ".", "units", ".", "get", "(", "column_name", ")", "if", "unit1", "!=", "unit2", ":", "print", "(", "\"unit mismatch : %r vs %r for %s\"", "%", "(", "unit1", ",", "unit2", ",", "column_name", ")", ")", "meta_mismatch", ".", "append", "(", "column_name", ")", "type1", "=", "self", ".", "dtype", "(", "column_name", ")", "if", "type1", "!=", "str_type", ":", "type1", "=", "type1", ".", "type", "type2", "=", "other", ".", "dtype", "(", "column_name", ")", "if", "type2", "!=", "str_type", ":", "type2", "=", "type2", ".", "type", "if", "type1", "!=", "type2", ":", "print", "(", "\"different dtypes: %s vs %s for %s\"", "%", "(", "self", ".", "dtype", "(", "column_name", ")", ",", "other", ".", "dtype", "(", "column_name", ")", ",", "column_name", ")", ")", "type_mismatch", ".", "append", "(", "column_name", ")", "else", ":", "# a = self.columns[column_name]", "# b = other.columns[column_name]", "# if self.filtered:", "# a = a[self.evaluate_selection_mask(None)]", "# if other.filtered:", "# b = b[other.evaluate_selection_mask(None)]", "a", "=", "self", ".", "evaluate", "(", "column_name", ")", "b", "=", "other", ".", "evaluate", "(", "column_name", ")", "if", "orderby", ":", "a", "=", "a", "[", "index1", "]", "b", "=", "b", "[", "index2", "]", "def", "normalize", "(", "ar", ")", ":", "if", "ar", ".", "dtype", "==", "str_type", ":", "return", "ar", "if", "ar", ".", "dtype", ".", "kind", "==", "\"f\"", "and", "hasattr", "(", "ar", ",", "\"mask\"", ")", ":", "mask", "=", "ar", ".", "mask", "ar", "=", "ar", ".", "copy", "(", 
")", "ar", "[", "mask", "]", "=", "np", ".", "nan", "if", "ar", ".", "dtype", ".", "kind", "in", "\"SU\"", ":", "if", "hasattr", "(", "ar", ",", "\"mask\"", ")", ":", "data", "=", "ar", ".", "data", "else", ":", "data", "=", "ar", "values", "=", "[", "value", ".", "strip", "(", ")", "for", "value", "in", "data", ".", "tolist", "(", ")", "]", "if", "hasattr", "(", "ar", ",", "\"mask\"", ")", ":", "ar", "=", "np", ".", "ma", ".", "masked_array", "(", "values", ",", "ar", ".", "mask", ")", "else", ":", "ar", "=", "np", ".", "array", "(", "values", ")", "return", "ar", "def", "equal_mask", "(", "a", ",", "b", ")", ":", "a", "=", "normalize", "(", "a", ")", "b", "=", "normalize", "(", "b", ")", "boolean_mask", "=", "(", "a", "==", "b", ")", "if", "self", ".", "dtype", "(", "column_name", ")", "!=", "str_type", "and", "self", ".", "dtype", "(", "column_name", ")", ".", "kind", "==", "'f'", ":", "# floats with nan won't equal itself, i.e. NaN != NaN", "boolean_mask", "|=", "(", "np", ".", "isnan", "(", "a", ")", "&", "np", ".", "isnan", "(", "b", ")", ")", "return", "boolean_mask", "boolean_mask", "=", "equal_mask", "(", "a", ",", "b", ")", "all_equal", "=", "np", ".", "all", "(", "boolean_mask", ")", "if", "not", "all_equal", ":", "count", "=", "np", ".", "sum", "(", "~", "boolean_mask", ")", "print", "(", "\"%s does not match for both DataFrames, %d rows are diffent out of %d\"", "%", "(", "column_name", ",", "count", ",", "len", "(", "self", ")", ")", ")", "different_values", ".", "append", "(", "column_name", ")", "if", "report_difference", ":", "indices", "=", "np", ".", "arange", "(", "len", "(", "self", ")", ")", "[", "~", "boolean_mask", "]", "values1", "=", "self", ".", "columns", "[", "column_name", "]", "[", ":", "]", "[", "~", "boolean_mask", "]", "values2", "=", "other", ".", "columns", "[", "column_name", "]", "[", ":", "]", "[", "~", "boolean_mask", "]", "print", "(", "\"\\tshowing difference for the first 10\"", ")", "for", "i", "in", "range", "(", "min", "(", "len", "(", "values1", ")", ",", "show", ")", ")", ":", "try", ":", "diff", "=", "values1", "[", "i", "]", "-", "values2", "[", "i", "]", "except", ":", "diff", "=", "\"does not exists\"", "print", "(", "\"%s[%d] == %s != %s other.%s[%d] (diff = %s)\"", "%", "(", "column_name", ",", "indices", "[", "i", "]", ",", "values1", "[", "i", "]", ",", "values2", "[", "i", "]", ",", "column_name", ",", "indices", "[", "i", "]", ",", "diff", ")", ")", "return", "different_values", ",", "missing", ",", "type_mismatch", ",", "meta_mismatch" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
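A sketch of the return value; the concrete lists in the comments are assumptions that follow from the implementation above.

import numpy as np
import vaex

df1 = vaex.from_arrays(x=np.array([1.0, 2.0, 3.0]), y=np.array([1.0, 1.0, 1.0]))
df2 = vaex.from_arrays(x=np.array([1.0, 2.0, 4.0]))
different, missing, types, meta = df1.compare(df2)
print(different)  # expected ['x']: the last value differs
print(missing)    # expected ['y']: present in df1 only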
test
DataFrameLocal.join
Return a DataFrame joined with other DataFrames, matched by columns/expression on/left_on/right_on If neither on/left_on/right_on is given, the join is done by simply adding the columns (i.e. on the implicit row index). Note: The filters will be ignored when joining, the full DataFrame will be joined (since filters may change). If either DataFrame is heavily filtered (contains just a small number of rows) consider running :func:`DataFrame.extract` first. Example: >>> a = np.array(['a', 'b', 'c']) >>> x = np.arange(1,4) >>> ds1 = vaex.from_arrays(a=a, x=x) >>> b = np.array(['a', 'b', 'd']) >>> y = x**2 >>> ds2 = vaex.from_arrays(b=b, y=y) >>> ds1.join(ds2, left_on='a', right_on='b') :param other: Other DataFrame to join with (the right side) :param on: default key for the left table (self) :param left_on: key for the left table (self), overrides on :param right_on: default key for the right table (other), overrides on :param lsuffix: suffix to add to the left column names in case of a name collision :param rsuffix: similar for the right :param how: how to join, 'left' keeps all rows on the left, and adds columns (with possible missing values) 'right' is similar with self and other swapped. :param inplace: {inplace} :return:
packages/vaex-core/vaex/dataframe.py
def join(self, other, on=None, left_on=None, right_on=None, lsuffix='', rsuffix='', how='left', inplace=False): """Return a DataFrame joined with other DataFrames, matched by columns/expression on/left_on/right_on If neither on/left_on/right_on is given, the join is done by simply adding the columns (i.e. on the implicit row index). Note: The filters will be ignored when joining, the full DataFrame will be joined (since filters may change). If either DataFrame is heavily filtered (contains just a small number of rows) consider running :func:`DataFrame.extract` first. Example: >>> a = np.array(['a', 'b', 'c']) >>> x = np.arange(1,4) >>> ds1 = vaex.from_arrays(a=a, x=x) >>> b = np.array(['a', 'b', 'd']) >>> y = x**2 >>> ds2 = vaex.from_arrays(b=b, y=y) >>> ds1.join(ds2, left_on='a', right_on='b') :param other: Other DataFrame to join with (the right side) :param on: default key for the left table (self) :param left_on: key for the left table (self), overrides on :param right_on: default key for the right table (other), overrides on :param lsuffix: suffix to add to the left column names in case of a name collision :param rsuffix: similar for the right :param how: how to join, 'left' keeps all rows on the left, and adds columns (with possible missing values) 'right' is similar with self and other swapped. :param inplace: {inplace} :return: """ ds = self if inplace else self.copy() if how == 'left': left = ds right = other elif how == 'right': left = other right = ds lsuffix, rsuffix = rsuffix, lsuffix left_on, right_on = right_on, left_on else: raise ValueError('join type not supported: {}, only left and right'.format(how)) for name in right: if name in left and name + rsuffix == name + lsuffix: raise ValueError('column name collision: {} exists in both column, and no proper suffix given' .format(name)) right = right.extract() # get rid of filters and active_range assert left.length_unfiltered() == left.length_original() N = left.length_unfiltered() N_other = len(right) left_on = left_on or on right_on = right_on or on if left_on is None and right_on is None: for name in right: right_name = name if name in left: left.rename_column(name, name + lsuffix) right_name = name + rsuffix if name in right.virtual_columns: left.add_virtual_column(right_name, right.virtual_columns[name]) else: left.add_column(right_name, right.columns[name]) else: left_values = left.evaluate(left_on, filtered=False) right_values = right.evaluate(right_on) # maps from the left_values to row # if np.ma.isMaskedArray(left_values): mask = ~left_values.mask left_values = left_values.data index_left = dict(zip(left_values[mask], np.arange(N)[mask])) else: index_left = dict(zip(left_values, np.arange(N))) # idem for right if np.ma.isMaskedArray(right_values): mask = ~right_values.mask right_values = right_values.data index_other = dict(zip(right_values[mask], np.arange(N_other)[mask])) else: index_other = dict(zip(right_values, np.arange(N_other))) # we do a left join, find all rows of the right DataFrame # that has an entry on the left # for each row in the right # find which row it needs to go to in the right # from_indices = np.zeros(N_other, dtype=np.int64) # row # of right # to_indices = np.zeros(N_other, dtype=np.int64) # goes to row # on the left # keep a boolean mask of which rows are found left_mask = np.ones(N, dtype=np.bool) # and which row they point to in the right left_row_to_right = np.zeros(N, dtype=np.int64) - 1 for i in range(N_other): left_row = index_left.get(right_values[i]) if left_row is not None: 
left_mask[left_row] = False # unmask, it exists left_row_to_right[left_row] = i lookup = np.ma.array(left_row_to_right, mask=left_mask) for name in right: right_name = name if name in left: left.rename_column(name, name + lsuffix) right_name = name + rsuffix if name in right.virtual_columns: left.add_virtual_column(right_name, right.virtual_columns[name]) else: left.add_column(right_name, ColumnIndexed(right, lookup, name)) return left
def join(self, other, on=None, left_on=None, right_on=None, lsuffix='', rsuffix='', how='left', inplace=False): """Return a DataFrame joined with other DataFrames, matched by columns/expression on/left_on/right_on If neither on/left_on/right_on is given, the join is done by simply adding the columns (i.e. on the implicit row index). Note: The filters will be ignored when joining, the full DataFrame will be joined (since filters may change). If either DataFrame is heavily filtered (contains just a small number of rows) consider running :func:`DataFrame.extract` first. Example: >>> a = np.array(['a', 'b', 'c']) >>> x = np.arange(1,4) >>> ds1 = vaex.from_arrays(a=a, x=x) >>> b = np.array(['a', 'b', 'd']) >>> y = x**2 >>> ds2 = vaex.from_arrays(b=b, y=y) >>> ds1.join(ds2, left_on='a', right_on='b') :param other: Other DataFrame to join with (the right side) :param on: default key for the left table (self) :param left_on: key for the left table (self), overrides on :param right_on: default key for the right table (other), overrides on :param lsuffix: suffix to add to the left column names in case of a name collision :param rsuffix: similar for the right :param how: how to join, 'left' keeps all rows on the left, and adds columns (with possible missing values) 'right' is similar with self and other swapped. :param inplace: {inplace} :return: """ ds = self if inplace else self.copy() if how == 'left': left = ds right = other elif how == 'right': left = other right = ds lsuffix, rsuffix = rsuffix, lsuffix left_on, right_on = right_on, left_on else: raise ValueError('join type not supported: {}, only left and right'.format(how)) for name in right: if name in left and name + rsuffix == name + lsuffix: raise ValueError('column name collision: {} exists in both column, and no proper suffix given' .format(name)) right = right.extract() # get rid of filters and active_range assert left.length_unfiltered() == left.length_original() N = left.length_unfiltered() N_other = len(right) left_on = left_on or on right_on = right_on or on if left_on is None and right_on is None: for name in right: right_name = name if name in left: left.rename_column(name, name + lsuffix) right_name = name + rsuffix if name in right.virtual_columns: left.add_virtual_column(right_name, right.virtual_columns[name]) else: left.add_column(right_name, right.columns[name]) else: left_values = left.evaluate(left_on, filtered=False) right_values = right.evaluate(right_on) # maps from the left_values to row # if np.ma.isMaskedArray(left_values): mask = ~left_values.mask left_values = left_values.data index_left = dict(zip(left_values[mask], np.arange(N)[mask])) else: index_left = dict(zip(left_values, np.arange(N))) # idem for right if np.ma.isMaskedArray(right_values): mask = ~right_values.mask right_values = right_values.data index_other = dict(zip(right_values[mask], np.arange(N_other)[mask])) else: index_other = dict(zip(right_values, np.arange(N_other))) # we do a left join, find all rows of the right DataFrame # that has an entry on the left # for each row in the right # find which row it needs to go to in the right # from_indices = np.zeros(N_other, dtype=np.int64) # row # of right # to_indices = np.zeros(N_other, dtype=np.int64) # goes to row # on the left # keep a boolean mask of which rows are found left_mask = np.ones(N, dtype=np.bool) # and which row they point to in the right left_row_to_right = np.zeros(N, dtype=np.int64) - 1 for i in range(N_other): left_row = index_left.get(right_values[i]) if left_row is not None: 
left_mask[left_row] = False # unmask, it exists left_row_to_right[left_row] = i lookup = np.ma.array(left_row_to_right, mask=left_mask) for name in right: right_name = name if name in left: left.rename_column(name, name + lsuffix) right_name = name + rsuffix if name in right.virtual_columns: left.add_virtual_column(right_name, right.virtual_columns[name]) else: left.add_column(right_name, ColumnIndexed(right, lookup, name)) return left
[ "Return", "a", "DataFrame", "joined", "with", "other", "DataFrames", "matched", "by", "columns", "/", "expression", "on", "/", "left_on", "/", "right_on" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L4970-L5078
[ "def", "join", "(", "self", ",", "other", ",", "on", "=", "None", ",", "left_on", "=", "None", ",", "right_on", "=", "None", ",", "lsuffix", "=", "''", ",", "rsuffix", "=", "''", ",", "how", "=", "'left'", ",", "inplace", "=", "False", ")", ":", "ds", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "if", "how", "==", "'left'", ":", "left", "=", "ds", "right", "=", "other", "elif", "how", "==", "'right'", ":", "left", "=", "other", "right", "=", "ds", "lsuffix", ",", "rsuffix", "=", "rsuffix", ",", "lsuffix", "left_on", ",", "right_on", "=", "right_on", ",", "left_on", "else", ":", "raise", "ValueError", "(", "'join type not supported: {}, only left and right'", ".", "format", "(", "how", ")", ")", "for", "name", "in", "right", ":", "if", "name", "in", "left", "and", "name", "+", "rsuffix", "==", "name", "+", "lsuffix", ":", "raise", "ValueError", "(", "'column name collision: {} exists in both column, and no proper suffix given'", ".", "format", "(", "name", ")", ")", "right", "=", "right", ".", "extract", "(", ")", "# get rid of filters and active_range", "assert", "left", ".", "length_unfiltered", "(", ")", "==", "left", ".", "length_original", "(", ")", "N", "=", "left", ".", "length_unfiltered", "(", ")", "N_other", "=", "len", "(", "right", ")", "left_on", "=", "left_on", "or", "on", "right_on", "=", "right_on", "or", "on", "if", "left_on", "is", "None", "and", "right_on", "is", "None", ":", "for", "name", "in", "right", ":", "right_name", "=", "name", "if", "name", "in", "left", ":", "left", ".", "rename_column", "(", "name", ",", "name", "+", "lsuffix", ")", "right_name", "=", "name", "+", "rsuffix", "if", "name", "in", "right", ".", "virtual_columns", ":", "left", ".", "add_virtual_column", "(", "right_name", ",", "right", ".", "virtual_columns", "[", "name", "]", ")", "else", ":", "left", ".", "add_column", "(", "right_name", ",", "right", ".", "columns", "[", "name", "]", ")", "else", ":", "left_values", "=", "left", ".", "evaluate", "(", "left_on", ",", "filtered", "=", "False", ")", "right_values", "=", "right", ".", "evaluate", "(", "right_on", ")", "# maps from the left_values to row #", "if", "np", ".", "ma", ".", "isMaskedArray", "(", "left_values", ")", ":", "mask", "=", "~", "left_values", ".", "mask", "left_values", "=", "left_values", ".", "data", "index_left", "=", "dict", "(", "zip", "(", "left_values", "[", "mask", "]", ",", "np", ".", "arange", "(", "N", ")", "[", "mask", "]", ")", ")", "else", ":", "index_left", "=", "dict", "(", "zip", "(", "left_values", ",", "np", ".", "arange", "(", "N", ")", ")", ")", "# idem for right", "if", "np", ".", "ma", ".", "isMaskedArray", "(", "right_values", ")", ":", "mask", "=", "~", "right_values", ".", "mask", "right_values", "=", "right_values", ".", "data", "index_other", "=", "dict", "(", "zip", "(", "right_values", "[", "mask", "]", ",", "np", ".", "arange", "(", "N_other", ")", "[", "mask", "]", ")", ")", "else", ":", "index_other", "=", "dict", "(", "zip", "(", "right_values", ",", "np", ".", "arange", "(", "N_other", ")", ")", ")", "# we do a left join, find all rows of the right DataFrame", "# that has an entry on the left", "# for each row in the right", "# find which row it needs to go to in the right", "# from_indices = np.zeros(N_other, dtype=np.int64) # row # of right", "# to_indices = np.zeros(N_other, dtype=np.int64) # goes to row # on the left", "# keep a boolean mask of which rows are found", "left_mask", "=", "np", ".", "ones", "(", "N", ",", "dtype", "=", "np", ".", "bool", ")", "# and 
which row they point to in the right", "left_row_to_right", "=", "np", ".", "zeros", "(", "N", ",", "dtype", "=", "np", ".", "int64", ")", "-", "1", "for", "i", "in", "range", "(", "N_other", ")", ":", "left_row", "=", "index_left", ".", "get", "(", "right_values", "[", "i", "]", ")", "if", "left_row", "is", "not", "None", ":", "left_mask", "[", "left_row", "]", "=", "False", "# unmask, it exists", "left_row_to_right", "[", "left_row", "]", "=", "i", "lookup", "=", "np", ".", "ma", ".", "array", "(", "left_row_to_right", ",", "mask", "=", "left_mask", ")", "for", "name", "in", "right", ":", "right_name", "=", "name", "if", "name", "in", "left", ":", "left", ".", "rename_column", "(", "name", ",", "name", "+", "lsuffix", ")", "right_name", "=", "name", "+", "rsuffix", "if", "name", "in", "right", ".", "virtual_columns", ":", "left", ".", "add_virtual_column", "(", "right_name", ",", "right", ".", "virtual_columns", "[", "name", "]", ")", "else", ":", "left", ".", "add_column", "(", "right_name", ",", "ColumnIndexed", "(", "right", ",", "lookup", ",", "name", ")", ")", "return", "left" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
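As a quick illustration of the left join implemented above, here is a minimal, hypothetical usage sketch (not part of the record; the column names key, x and y are invented, and rows of the left DataFrame without a match end up masked):

>>> import vaex, numpy as np
>>> left = vaex.from_arrays(key=np.array([1, 2, 3]), x=np.array([10, 20, 30]))
>>> right = vaex.from_arrays(key=np.array([2, 3, 4]), y=np.array([200, 300, 400]))
>>> # join on 'key'; rsuffix avoids the column-name collision on 'key'
>>> joined = left.join(right, on='key', rsuffix='_r')
>>> joined.get_column_names()  # expected: ['key', 'x', 'key_r', 'y']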
test
DataFrameLocal.export
Exports the DataFrame to a file written with arrow :param DataFrameLocal df: DataFrame to export :param str path: path for file :param list[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian (not supported for fits) :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return:
packages/vaex-core/vaex/dataframe.py
def export(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True): """Exports the DataFrame to a file written with arrow :param DataFrameLocal df: DataFrame to export :param str path: path for file :param lis[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian (not supported for fits) :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param: bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return: """ if path.endswith('.arrow'): self.export_arrow(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) elif path.endswith('.hdf5'): self.export_hdf5(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) elif path.endswith('.fits'): self.export_fits(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) if path.endswith('.parquet'): self.export_parquet(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
def export(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True): """Exports the DataFrame to a file written with arrow :param DataFrameLocal df: DataFrame to export :param str path: path for file :param lis[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian (not supported for fits) :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param: bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return: """ if path.endswith('.arrow'): self.export_arrow(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) elif path.endswith('.hdf5'): self.export_hdf5(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) elif path.endswith('.fits'): self.export_fits(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) if path.endswith('.parquet'): self.export_parquet(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
[ "Exports", "the", "DataFrame", "to", "a", "file", "written", "with", "arrow" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L5080-L5103
[ "def", "export", "(", "self", ",", "path", ",", "column_names", "=", "None", ",", "byteorder", "=", "\"=\"", ",", "shuffle", "=", "False", ",", "selection", "=", "False", ",", "progress", "=", "None", ",", "virtual", "=", "False", ",", "sort", "=", "None", ",", "ascending", "=", "True", ")", ":", "if", "path", ".", "endswith", "(", "'.arrow'", ")", ":", "self", ".", "export_arrow", "(", "path", ",", "column_names", ",", "byteorder", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")", "elif", "path", ".", "endswith", "(", "'.hdf5'", ")", ":", "self", ".", "export_hdf5", "(", "path", ",", "column_names", ",", "byteorder", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")", "elif", "path", ".", "endswith", "(", "'.fits'", ")", ":", "self", ".", "export_fits", "(", "path", ",", "column_names", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")", "if", "path", ".", "endswith", "(", "'.parquet'", ")", ":", "self", ".", "export_parquet", "(", "path", ",", "column_names", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
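Not part of the record above, but a small hedged sketch of how the extension-based dispatch is typically used (the paths are placeholders, and the hdf5/arrow writers require the corresponding optional vaex packages to be installed):

>>> import vaex, numpy as np
>>> df = vaex.from_arrays(x=np.arange(5), y=np.arange(5) ** 2)
>>> df.export('/tmp/example.hdf5')    # '.hdf5' suffix -> export_hdf5
>>> df.export('/tmp/example.arrow')   # '.arrow' suffix -> export_arrow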
test
DataFrameLocal.export_arrow
Exports the DataFrame to a file written with arrow :param DataFrameLocal df: DataFrame to export :param str path: path for file :param list[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return:
packages/vaex-core/vaex/dataframe.py
def export_arrow(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True): """Exports the DataFrame to a file written with arrow :param DataFrameLocal df: DataFrame to export :param str path: path for file :param lis[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param: bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return: """ import vaex_arrow.export vaex_arrow.export.export(self, path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
def export_arrow(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True): """Exports the DataFrame to a file written with arrow :param DataFrameLocal df: DataFrame to export :param str path: path for file :param lis[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param: bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return: """ import vaex_arrow.export vaex_arrow.export.export(self, path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
[ "Exports", "the", "DataFrame", "to", "a", "file", "written", "with", "arrow" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L5105-L5122
[ "def", "export_arrow", "(", "self", ",", "path", ",", "column_names", "=", "None", ",", "byteorder", "=", "\"=\"", ",", "shuffle", "=", "False", ",", "selection", "=", "False", ",", "progress", "=", "None", ",", "virtual", "=", "False", ",", "sort", "=", "None", ",", "ascending", "=", "True", ")", ":", "import", "vaex_arrow", ".", "export", "vaex_arrow", ".", "export", ".", "export", "(", "self", ",", "path", ",", "column_names", ",", "byteorder", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
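A hypothetical call of the thin wrapper above; it simply forwards to vaex_arrow.export.export, so the vaex-arrow package must be importable, and the shuffle flag shown here just exercises one of the pass-through arguments:

>>> import vaex, numpy as np
>>> df = vaex.from_arrays(x=np.arange(10))
>>> df.export_arrow('/tmp/shuffled.arrow', shuffle=True)  # rows written in random order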
test
DataFrameLocal.export_hdf5
Exports the DataFrame to a vaex hdf5 file :param DataFrameLocal df: DataFrame to export :param str path: path for file :param list[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return:
packages/vaex-core/vaex/dataframe.py
def export_hdf5(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True): """Exports the DataFrame to a vaex hdf5 file :param DataFrameLocal df: DataFrame to export :param str path: path for file :param lis[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param: bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return: """ import vaex.export vaex.export.export_hdf5(self, path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
def export_hdf5(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True): """Exports the DataFrame to a vaex hdf5 file :param DataFrameLocal df: DataFrame to export :param str path: path for file :param lis[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param: bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return: """ import vaex.export vaex.export.export_hdf5(self, path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
[ "Exports", "the", "DataFrame", "to", "a", "vaex", "hdf5", "file" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L5143-L5160
[ "def", "export_hdf5", "(", "self", ",", "path", ",", "column_names", "=", "None", ",", "byteorder", "=", "\"=\"", ",", "shuffle", "=", "False", ",", "selection", "=", "False", ",", "progress", "=", "None", ",", "virtual", "=", "False", ",", "sort", "=", "None", ",", "ascending", "=", "True", ")", ":", "import", "vaex", ".", "export", "vaex", ".", "export", ".", "export_hdf5", "(", "self", ",", "path", ",", "column_names", ",", "byteorder", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
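Likewise for the hdf5 wrapper, a sketch (file name is made up) that exercises the sort/ascending pass-through arguments:

>>> import vaex, numpy as np
>>> df = vaex.from_arrays(x=np.random.rand(100))
>>> df.export_hdf5('/tmp/sorted.hdf5', sort='x', ascending=False)  # write rows sorted by x, descending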
test
DataFrameLocal.groupby
Return a :class:`GroupBy` or :class:`DataFrame` object when agg is not None Examples: >>> import vaex >>> import numpy as np >>> np.random.seed(42) >>> x = np.random.randint(1, 5, 10) >>> y = x**2 >>> df = vaex.from_arrays(x=x, y=y) >>> df.groupby(df.x, agg='count') # x y_count 0 3 4 1 4 2 2 1 3 3 2 1 >>> df.groupby(df.x, agg=[vaex.agg.count('y'), vaex.agg.mean('y')]) # x y_count y_mean 0 3 4 9 1 4 2 16 2 1 3 1 3 2 1 4 >>> df.groupby(df.x, agg={'z': [vaex.agg.count('y'), vaex.agg.mean('y')]}) # x z_count z_mean 0 3 4 9 1 4 2 16 2 1 3 1 3 2 1 4 Example using datetime: >>> import vaex >>> import numpy as np >>> t = np.arange('2015-01-01', '2015-02-01', dtype=np.datetime64) >>> y = np.arange(len(t)) >>> df = vaex.from_arrays(t=t, y=y) >>> df.groupby(vaex.BinnerTime.per_week(df.t)).agg({'y' : 'sum'}) # t y 0 2015-01-01 00:00:00 21 1 2015-01-08 00:00:00 70 2 2015-01-15 00:00:00 119 3 2015-01-22 00:00:00 168 4 2015-01-29 00:00:00 87 :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary where the keys indicate the target column names, and the values the operations, or the a list of aggregates. When not given, it will return the groupby object. :return: :class:`DataFrame` or :class:`GroupBy` object.
packages/vaex-core/vaex/dataframe.py
def groupby(self, by=None, agg=None): """Return a :class:`GroupBy` or :class:`DataFrame` object when agg is not None Examples: >>> import vaex >>> import numpy as np >>> np.random.seed(42) >>> x = np.random.randint(1, 5, 10) >>> y = x**2 >>> df = vaex.from_arrays(x=x, y=y) >>> df.groupby(df.x, agg='count') # x y_count 0 3 4 1 4 2 2 1 3 3 2 1 >>> df.groupby(df.x, agg=[vaex.agg.count('y'), vaex.agg.mean('y')]) # x y_count y_mean 0 3 4 9 1 4 2 16 2 1 3 1 3 2 1 4 >>> df.groupby(df.x, agg={'z': [vaex.agg.count('y'), vaex.agg.mean('y')]}) # x z_count z_mean 0 3 4 9 1 4 2 16 2 1 3 1 3 2 1 4 Example using datetime: >>> import vaex >>> import numpy as np >>> t = np.arange('2015-01-01', '2015-02-01', dtype=np.datetime64) >>> y = np.arange(len(t)) >>> df = vaex.from_arrays(t=t, y=y) >>> df.groupby(vaex.BinnerTime.per_week(df.t)).agg({'y' : 'sum'}) # t y 0 2015-01-01 00:00:00 21 1 2015-01-08 00:00:00 70 2 2015-01-15 00:00:00 119 3 2015-01-22 00:00:00 168 4 2015-01-29 00:00:00 87 :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary where the keys indicate the target column names, and the values the operations, or the a list of aggregates. When not given, it will return the groupby object. :return: :class:`DataFrame` or :class:`GroupBy` object. """ from .groupby import GroupBy groupby = GroupBy(self, by=by) if agg is None: return groupby else: return groupby.agg(agg)
def groupby(self, by=None, agg=None): """Return a :class:`GroupBy` or :class:`DataFrame` object when agg is not None Examples: >>> import vaex >>> import numpy as np >>> np.random.seed(42) >>> x = np.random.randint(1, 5, 10) >>> y = x**2 >>> df = vaex.from_arrays(x=x, y=y) >>> df.groupby(df.x, agg='count') # x y_count 0 3 4 1 4 2 2 1 3 3 2 1 >>> df.groupby(df.x, agg=[vaex.agg.count('y'), vaex.agg.mean('y')]) # x y_count y_mean 0 3 4 9 1 4 2 16 2 1 3 1 3 2 1 4 >>> df.groupby(df.x, agg={'z': [vaex.agg.count('y'), vaex.agg.mean('y')]}) # x z_count z_mean 0 3 4 9 1 4 2 16 2 1 3 1 3 2 1 4 Example using datetime: >>> import vaex >>> import numpy as np >>> t = np.arange('2015-01-01', '2015-02-01', dtype=np.datetime64) >>> y = np.arange(len(t)) >>> df = vaex.from_arrays(t=t, y=y) >>> df.groupby(vaex.BinnerTime.per_week(df.t)).agg({'y' : 'sum'}) # t y 0 2015-01-01 00:00:00 21 1 2015-01-08 00:00:00 70 2 2015-01-15 00:00:00 119 3 2015-01-22 00:00:00 168 4 2015-01-29 00:00:00 87 :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary where the keys indicate the target column names, and the values the operations, or the a list of aggregates. When not given, it will return the groupby object. :return: :class:`DataFrame` or :class:`GroupBy` object. """ from .groupby import GroupBy groupby = GroupBy(self, by=by) if agg is None: return groupby else: return groupby.agg(agg)
[ "Return", "a", ":", "class", ":", "GroupBy", "or", ":", "class", ":", "DataFrame", "object", "when", "agg", "is", "not", "None" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L5202-L5258
[ "def", "groupby", "(", "self", ",", "by", "=", "None", ",", "agg", "=", "None", ")", ":", "from", ".", "groupby", "import", "GroupBy", "groupby", "=", "GroupBy", "(", "self", ",", "by", "=", "by", ")", "if", "agg", "is", "None", ":", "return", "groupby", "else", ":", "return", "groupby", ".", "agg", "(", "agg", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
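The docstring shows the one-shot form; as a small additional sketch, the two-step form implied by "When not given, it will return the groupby object" would look roughly like this:

>>> import vaex, numpy as np
>>> df = vaex.from_arrays(x=np.array([1, 1, 2, 2, 2]), y=np.arange(5))
>>> g = df.groupby(df.x)       # agg=None: a GroupBy object, no aggregation yet
>>> g.agg({'y': 'sum'})        # aggregate afterwards, same result as the one-shot call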
test
DataFrameLocal.binby
Return a :class:`BinBy` or :class:`DataArray` object when agg is not None. The binby operation does not return a 'flat' DataFrame, instead it returns an N-d grid in the form of an xarray. :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary where the keys indicate the target column names, and the values the operations, or a list of aggregates. When not given, it will return the binby object. :return: :class:`DataArray` or :class:`BinBy` object.
packages/vaex-core/vaex/dataframe.py
def binby(self, by=None, agg=None): """Return a :class:`BinBy` or :class:`DataArray` object when agg is not None The binby operations does not return a 'flat' DataFrame, instead it returns an N-d grid in the form of an xarray. :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary where the keys indicate the target column names, and the values the operations, or the a list of aggregates. When not given, it will return the binby object. :return: :class:`DataArray` or :class:`BinBy` object. """ from .groupby import BinBy binby = BinBy(self, by=by) if agg is None: return binby else: return binby.agg(agg)
def binby(self, by=None, agg=None): """Return a :class:`BinBy` or :class:`DataArray` object when agg is not None The binby operations does not return a 'flat' DataFrame, instead it returns an N-d grid in the form of an xarray. :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary where the keys indicate the target column names, and the values the operations, or the a list of aggregates. When not given, it will return the binby object. :return: :class:`DataArray` or :class:`BinBy` object. """ from .groupby import BinBy binby = BinBy(self, by=by) if agg is None: return binby else: return binby.agg(agg)
[ "Return", "a", ":", "class", ":", "BinBy", "or", ":", "class", ":", "DataArray", "object", "when", "agg", "is", "not", "None" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L5260-L5277
[ "def", "binby", "(", "self", ",", "by", "=", "None", ",", "agg", "=", "None", ")", ":", "from", ".", "groupby", "import", "BinBy", "binby", "=", "BinBy", "(", "self", ",", "by", "=", "by", ")", "if", "agg", "is", "None", ":", "return", "binby", "else", ":", "return", "binby", ".", "agg", "(", "agg", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
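The binby docstring carries no example; a minimal, hedged sketch (assuming default binning is applied when a plain expression is passed as `by`) could look like:

>>> import vaex, numpy as np
>>> df = vaex.from_arrays(x=np.random.rand(1000), y=np.random.rand(1000))
>>> # returns an xarray-style grid of counts binned over x, not a flat DataFrame
>>> grid = df.binby(df.x, agg=vaex.agg.count('y'))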
test
DataFrameArrays.add_column
Add a column to the DataFrame :param str name: name of column :param data: numpy array with the data
packages/vaex-core/vaex/dataframe.py
def add_column(self, name, data): """Add a column to the DataFrame :param str name: name of column :param data: numpy array with the data """ # assert _is_array_type_ok(data), "dtype not supported: %r, %r" % (data.dtype, data.dtype.type) # self._length = len(data) # if self._length_unfiltered is None: # self._length_unfiltered = len(data) # self._length_original = len(data) # self._index_end = self._length_unfiltered super(DataFrameArrays, self).add_column(name, data) self._length_unfiltered = int(round(self._length_original * self._active_fraction))
def add_column(self, name, data): """Add a column to the DataFrame :param str name: name of column :param data: numpy array with the data """ # assert _is_array_type_ok(data), "dtype not supported: %r, %r" % (data.dtype, data.dtype.type) # self._length = len(data) # if self._length_unfiltered is None: # self._length_unfiltered = len(data) # self._length_original = len(data) # self._index_end = self._length_unfiltered super(DataFrameArrays, self).add_column(name, data) self._length_unfiltered = int(round(self._length_original * self._active_fraction))
[ "Add", "a", "column", "to", "the", "DataFrame" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L5420-L5433
[ "def", "add_column", "(", "self", ",", "name", ",", "data", ")", ":", "# assert _is_array_type_ok(data), \"dtype not supported: %r, %r\" % (data.dtype, data.dtype.type)", "# self._length = len(data)", "# if self._length_unfiltered is None:", "# self._length_unfiltered = len(data)", "# self._length_original = len(data)", "# self._index_end = self._length_unfiltered", "super", "(", "DataFrameArrays", ",", "self", ")", ".", "add_column", "(", "name", ",", "data", ")", "self", ".", "_length_unfiltered", "=", "int", "(", "round", "(", "self", ".", "_length_original", "*", "self", ".", "_active_fraction", ")", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
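A short usage sketch of add_column (the column name and data are invented); note from the code above that the unfiltered length is recomputed from the active fraction after the column is added:

>>> import vaex, numpy as np
>>> df = vaex.from_arrays(x=np.arange(5))
>>> df.add_column('y', np.arange(5) ** 2)  # new column must match the existing length
>>> df.get_column_names()  # expected: ['x', 'y']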
test
Promise.then
This method takes two optional arguments. The first argument is used if the "self promise" is fulfilled and the other is used if the "self promise" is rejected. In either case, this method returns another promise that effectively represents the result of either the first of the second argument (in the case that the "self promise" is fulfilled or rejected, respectively). Each argument can be either: * None - Meaning no action is taken * A function - which will be called with either the value of the "self promise" or the reason for rejection of the "self promise". The function may return: * A value - which will be used to fulfill the promise returned by this method. * A promise - which, when fulfilled or rejected, will cascade its value or reason to the promise returned by this method. * A value - which will be assigned as either the value or the reason for the promise returned by this method when the "self promise" is either fulfilled or rejected, respectively. :type success: (object) -> object :type failure: (object) -> object :rtype : Promise
packages/vaex-core/vaex/promise.py
def then(self, success=None, failure=None): """ This method takes two optional arguments. The first argument is used if the "self promise" is fulfilled and the other is used if the "self promise" is rejected. In either case, this method returns another promise that effectively represents the result of either the first of the second argument (in the case that the "self promise" is fulfilled or rejected, respectively). Each argument can be either: * None - Meaning no action is taken * A function - which will be called with either the value of the "self promise" or the reason for rejection of the "self promise". The function may return: * A value - which will be used to fulfill the promise returned by this method. * A promise - which, when fulfilled or rejected, will cascade its value or reason to the promise returned by this method. * A value - which will be assigned as either the value or the reason for the promise returned by this method when the "self promise" is either fulfilled or rejected, respectively. :type success: (object) -> object :type failure: (object) -> object :rtype : Promise """ ret = self.create_next() def callAndFulfill(v): """ A callback to be invoked if the "self promise" is fulfilled. """ try: if aplus._isFunction(success): ret.fulfill(success(v)) else: ret.fulfill(v) except Exception as e: Promise.last_exc_info = sys.exc_info() e.exc_info = sys.exc_info() ret.reject(e) def callAndReject(r): """ A callback to be invoked if the "self promise" is rejected. """ try: if aplus._isFunction(failure): ret.fulfill(failure(r)) else: ret.reject(r) except Exception as e: Promise.last_exc_info = sys.exc_info() e.exc_info = sys.exc_info() ret.reject(e) self.done(callAndFulfill, callAndReject) return ret
def then(self, success=None, failure=None): """ This method takes two optional arguments. The first argument is used if the "self promise" is fulfilled and the other is used if the "self promise" is rejected. In either case, this method returns another promise that effectively represents the result of either the first of the second argument (in the case that the "self promise" is fulfilled or rejected, respectively). Each argument can be either: * None - Meaning no action is taken * A function - which will be called with either the value of the "self promise" or the reason for rejection of the "self promise". The function may return: * A value - which will be used to fulfill the promise returned by this method. * A promise - which, when fulfilled or rejected, will cascade its value or reason to the promise returned by this method. * A value - which will be assigned as either the value or the reason for the promise returned by this method when the "self promise" is either fulfilled or rejected, respectively. :type success: (object) -> object :type failure: (object) -> object :rtype : Promise """ ret = self.create_next() def callAndFulfill(v): """ A callback to be invoked if the "self promise" is fulfilled. """ try: if aplus._isFunction(success): ret.fulfill(success(v)) else: ret.fulfill(v) except Exception as e: Promise.last_exc_info = sys.exc_info() e.exc_info = sys.exc_info() ret.reject(e) def callAndReject(r): """ A callback to be invoked if the "self promise" is rejected. """ try: if aplus._isFunction(failure): ret.fulfill(failure(r)) else: ret.reject(r) except Exception as e: Promise.last_exc_info = sys.exc_info() e.exc_info = sys.exc_info() ret.reject(e) self.done(callAndFulfill, callAndReject) return ret
[ "This", "method", "takes", "two", "optional", "arguments", ".", "The", "first", "argument", "is", "used", "if", "the", "self", "promise", "is", "fulfilled", "and", "the", "other", "is", "used", "if", "the", "self", "promise", "is", "rejected", ".", "In", "either", "case", "this", "method", "returns", "another", "promise", "that", "effectively", "represents", "the", "result", "of", "either", "the", "first", "of", "the", "second", "argument", "(", "in", "the", "case", "that", "the", "self", "promise", "is", "fulfilled", "or", "rejected", "respectively", ")", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/promise.py#L57-L120
[ "def", "then", "(", "self", ",", "success", "=", "None", ",", "failure", "=", "None", ")", ":", "ret", "=", "self", ".", "create_next", "(", ")", "def", "callAndFulfill", "(", "v", ")", ":", "\"\"\"\n A callback to be invoked if the \"self promise\"\n is fulfilled.\n \"\"\"", "try", ":", "if", "aplus", ".", "_isFunction", "(", "success", ")", ":", "ret", ".", "fulfill", "(", "success", "(", "v", ")", ")", "else", ":", "ret", ".", "fulfill", "(", "v", ")", "except", "Exception", "as", "e", ":", "Promise", ".", "last_exc_info", "=", "sys", ".", "exc_info", "(", ")", "e", ".", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "ret", ".", "reject", "(", "e", ")", "def", "callAndReject", "(", "r", ")", ":", "\"\"\"\n A callback to be invoked if the \"self promise\"\n is rejected.\n \"\"\"", "try", ":", "if", "aplus", ".", "_isFunction", "(", "failure", ")", ":", "ret", ".", "fulfill", "(", "failure", "(", "r", ")", ")", "else", ":", "ret", ".", "reject", "(", "r", ")", "except", "Exception", "as", "e", ":", "Promise", ".", "last_exc_info", "=", "sys", ".", "exc_info", "(", ")", "e", ".", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "ret", ".", "reject", "(", "e", ")", "self", ".", "done", "(", "callAndFulfill", ",", "callAndReject", ")", "return", "ret" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
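A minimal chaining sketch, assuming the Promise class in vaex/promise.py follows the standard aplus API (fulfill/reject/done) that the code above relies on:

>>> from vaex.promise import Promise
>>> p = Promise()
>>> chained = p.then(lambda value: value * 2).then(lambda value: print(value))
>>> p.fulfill(21)  # the second callback prints 42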
test
patch
Adds method f to the DataFrame class
packages/vaex-viz/vaex/viz/mpl.py
def patch(f): '''Adds method f to the DataFrame class''' name = f.__name__ setattr(DataFrame, name, f) return f
def patch(f): '''Adds method f to the DataFrame class''' name = f.__name__ setattr(DataFrame, name, f) return f
[ "Adds", "method", "f", "to", "the", "DataFrame", "class" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-viz/vaex/viz/mpl.py#L21-L25
[ "def", "patch", "(", "f", ")", ":", "name", "=", "f", ".", "__name__", "setattr", "(", "DataFrame", ",", "name", ",", "f", ")", "return", "f" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
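The patch decorator is a plain monkey-patching helper; below is a self-contained sketch of the same pattern applied to a stand-in class, so it runs without vaex installed:

>>> class Example:
...     pass
>>> def patch(f):
...     # attach f to Example under its own name, mirroring the decorator above
...     setattr(Example, f.__name__, f)
...     return f
>>> @patch
... def greet(self):
...     return 'hi'
>>> Example().greet()
'hi'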
test
plot1d
Viz data in 1d (histograms, running means etc) Example >>> df.plot1d(df.x) >>> df.plot1d(df.x, limits=[0, 100], shape=100) >>> df.plot1d(df.x, what='mean(y)', limits=[0, 100], shape=100) If you want to do a computation yourself, pass the grid argument, but you are responsible for passing the same limits arguments: >>> counts = df.mean(df.y, binby=df.x, limits=[0, 100], shape=100)/100. >>> df.plot1d(df.x, limits=[0, 100], shape=100, grid=means, label='mean(y)/100') :param x: Expression to bin in the x direction :param what: What to plot, count(*) will show a N-d histogram, mean('x'), the mean of the x column, sum('x') the sum :param grid: If the binning is done before by yourself, you can pass it :param facet: Expression to produce facetted plots ( facet='x:0,1,12' will produce 12 plots with x in a range between 0 and 1) :param limits: list of [xmin, xmax], or a description such as 'minmax', '99%' :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size :param f: transform values by: 'identity' does nothing 'log' or 'log10' will show the log of the value :param n: normalization function, currently only 'normalize' is supported, or None for no normalization :param normalize_axis: which axes to normalize on, None means normalize by the global maximum. :param xlabel: String for label on x axis (may contain latex) :param ylabel: Same for y axis :param tight_layout: call pylab.tight_layout or not :param kwargs: extra arguments passed to pylab.plot :return:
packages/vaex-viz/vaex/viz/mpl.py
def plot1d(self, x=None, what="count(*)", grid=None, shape=64, facet=None, limits=None, figsize=None, f="identity", n=None, normalize_axis=None, xlabel=None, ylabel=None, label=None, selection=None, show=False, tight_layout=True, hardcopy=None, **kwargs): """Viz data in 1d (histograms, running means etc) Example >>> df.plot1d(df.x) >>> df.plot1d(df.x, limits=[0, 100], shape=100) >>> df.plot1d(df.x, what='mean(y)', limits=[0, 100], shape=100) If you want to do a computation yourself, pass the grid argument, but you are responsible for passing the same limits arguments: >>> counts = df.mean(df.y, binby=df.x, limits=[0, 100], shape=100)/100. >>> df.plot1d(df.x, limits=[0, 100], shape=100, grid=means, label='mean(y)/100') :param x: Expression to bin in the x direction :param what: What to plot, count(*) will show a N-d histogram, mean('x'), the mean of the x column, sum('x') the sum :param grid: If the binning is done before by yourself, you can pass it :param facet: Expression to produce facetted plots ( facet='x:0,1,12' will produce 12 plots with x in a range between 0 and 1) :param limits: list of [xmin, xmax], or a description such as 'minmax', '99%' :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size :param f: transform values by: 'identity' does nothing 'log' or 'log10' will show the log of the value :param n: normalization function, currently only 'normalize' is supported, or None for no normalization :param normalize_axis: which axes to normalize on, None means normalize by the global maximum. :param normalize_axis: :param xlabel: String for label on x axis (may contain latex) :param ylabel: Same for y axis :param: tight_layout: call pylab.tight_layout or not :param kwargs: extra argument passed to pylab.plot :return: """ import pylab f = _parse_f(f) n = _parse_n(n) if type(shape) == int: shape = (shape,) binby = [] x = _ensure_strings_from_expressions(x) for expression in [x]: if expression is not None: binby = [expression] + binby limits = self.limits(binby, limits) if figsize is not None: pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k') fig = pylab.gcf() import re if facet is not None: match = re.match("(.*):(.*),(.*),(.*)", facet) if match: groups = match.groups() facet_expression = groups[0] facet_limits = [ast.literal_eval(groups[1]), ast.literal_eval(groups[2])] facet_count = ast.literal_eval(groups[3]) limits.append(facet_limits) binby.append(facet_expression) shape = (facet_count,) + shape else: raise ValueError("Could not understand 'facet' argument %r, expected something in form: 'column:-1,10:5'" % facet) if grid is None: if what: if isinstance(what, (vaex.stat.Expression)): grid = what.calculate(self, binby=binby, limits=limits, shape=shape, selection=selection) else: what = what.strip() index = what.index("(") import re groups = re.match("(.*)\((.*)\)", what).groups() if groups and len(groups) == 2: function = groups[0] arguments = groups[1].strip() functions = ["mean", "sum", "std", "count"] if function in functions: # grid = getattr(self, function)(arguments, binby, limits=limits, shape=shape, selection=selection) grid = getattr(vaex.stat, function)(arguments).calculate(self, binby=binby, limits=limits, shape=shape, selection=selection) elif function == "count" and arguments == "*": grid = self.count(binby=binby, shape=shape, limits=limits, selection=selection) elif function == "cumulative" and arguments == "*": # TODO: comulative should also include the tails outside limits grid = self.count(binby=binby, 
shape=shape, limits=limits, selection=selection) grid = np.cumsum(grid) else: raise ValueError("Could not understand method: %s, expected one of %r'" % (function, functions)) else: raise ValueError("Could not understand 'what' argument %r, expected something in form: 'count(*)', 'mean(x)'" % what) else: grid = self.histogram(binby, size=shape, limits=limits, selection=selection) fgrid = f(grid) if n is not None: # ngrid = n(fgrid, axis=normalize_axis) ngrid = fgrid / fgrid.sum() else: ngrid = fgrid # reductions = [_parse_reduction(r, colormap, colors) for r in reduce] # rgrid = ngrid * 1. # for r in reduce: # r = _parse_reduction(r, colormap, colors) # rgrid = r(rgrid) # grid = self.reduce(grid, ) xmin, xmax = limits[-1] if facet: N = len(grid[-1]) else: N = len(grid) xexpression = binby[0] xar = np.arange(N + 1) / (N - 0.) * (xmax - xmin) + xmin label = str(label or selection or x) if facet: import math rows, columns = int(math.ceil(facet_count / 4.)), 4 values = np.linspace(facet_limits[0], facet_limits[1], facet_count + 1) for i in range(facet_count): ax = pylab.subplot(rows, columns, i + 1) value = ax.plot(xar, ngrid[i], drawstyle="steps-mid", label=label, **kwargs) v1, v2 = values[i], values[i + 1] pylab.xlabel(xlabel or x) pylab.ylabel(ylabel or what) ax.set_title("%3f <= %s < %3f" % (v1, facet_expression, v2)) if self.iscategory(xexpression): labels = self.category_labels(xexpression) step = len(labels) // max_labels pylab.xticks(range(len(labels))[::step], labels[::step], size='small') else: # im = pylab.imshow(rgrid, extent=np.array(limits[:2]).flatten(), origin="lower", aspect=aspect) pylab.xlabel(xlabel or self.label(x)) pylab.ylabel(ylabel or what) # print(xar, ngrid) # repeat the first element, that's how plot/steps likes it.. g = np.concatenate([ngrid[0:1], ngrid]) value = pylab.plot(xar, g, drawstyle="steps-pre", label=label, **kwargs) if self.iscategory(xexpression): labels = self.category_labels(xexpression) step = len(labels) // max_labels pylab.xticks(range(len(labels))[::step], labels[::step], size='small') if tight_layout: pylab.tight_layout() if hardcopy: pylab.savefig(hardcopy) if show: pylab.show() return value
def plot1d(self, x=None, what="count(*)", grid=None, shape=64, facet=None, limits=None, figsize=None, f="identity", n=None, normalize_axis=None, xlabel=None, ylabel=None, label=None, selection=None, show=False, tight_layout=True, hardcopy=None, **kwargs): """Viz data in 1d (histograms, running means etc) Example >>> df.plot1d(df.x) >>> df.plot1d(df.x, limits=[0, 100], shape=100) >>> df.plot1d(df.x, what='mean(y)', limits=[0, 100], shape=100) If you want to do a computation yourself, pass the grid argument, but you are responsible for passing the same limits arguments: >>> counts = df.mean(df.y, binby=df.x, limits=[0, 100], shape=100)/100. >>> df.plot1d(df.x, limits=[0, 100], shape=100, grid=means, label='mean(y)/100') :param x: Expression to bin in the x direction :param what: What to plot, count(*) will show a N-d histogram, mean('x'), the mean of the x column, sum('x') the sum :param grid: If the binning is done before by yourself, you can pass it :param facet: Expression to produce facetted plots ( facet='x:0,1,12' will produce 12 plots with x in a range between 0 and 1) :param limits: list of [xmin, xmax], or a description such as 'minmax', '99%' :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size :param f: transform values by: 'identity' does nothing 'log' or 'log10' will show the log of the value :param n: normalization function, currently only 'normalize' is supported, or None for no normalization :param normalize_axis: which axes to normalize on, None means normalize by the global maximum. :param normalize_axis: :param xlabel: String for label on x axis (may contain latex) :param ylabel: Same for y axis :param: tight_layout: call pylab.tight_layout or not :param kwargs: extra argument passed to pylab.plot :return: """ import pylab f = _parse_f(f) n = _parse_n(n) if type(shape) == int: shape = (shape,) binby = [] x = _ensure_strings_from_expressions(x) for expression in [x]: if expression is not None: binby = [expression] + binby limits = self.limits(binby, limits) if figsize is not None: pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k') fig = pylab.gcf() import re if facet is not None: match = re.match("(.*):(.*),(.*),(.*)", facet) if match: groups = match.groups() facet_expression = groups[0] facet_limits = [ast.literal_eval(groups[1]), ast.literal_eval(groups[2])] facet_count = ast.literal_eval(groups[3]) limits.append(facet_limits) binby.append(facet_expression) shape = (facet_count,) + shape else: raise ValueError("Could not understand 'facet' argument %r, expected something in form: 'column:-1,10:5'" % facet) if grid is None: if what: if isinstance(what, (vaex.stat.Expression)): grid = what.calculate(self, binby=binby, limits=limits, shape=shape, selection=selection) else: what = what.strip() index = what.index("(") import re groups = re.match("(.*)\((.*)\)", what).groups() if groups and len(groups) == 2: function = groups[0] arguments = groups[1].strip() functions = ["mean", "sum", "std", "count"] if function in functions: # grid = getattr(self, function)(arguments, binby, limits=limits, shape=shape, selection=selection) grid = getattr(vaex.stat, function)(arguments).calculate(self, binby=binby, limits=limits, shape=shape, selection=selection) elif function == "count" and arguments == "*": grid = self.count(binby=binby, shape=shape, limits=limits, selection=selection) elif function == "cumulative" and arguments == "*": # TODO: comulative should also include the tails outside limits grid = self.count(binby=binby, 
shape=shape, limits=limits, selection=selection) grid = np.cumsum(grid) else: raise ValueError("Could not understand method: %s, expected one of %r'" % (function, functions)) else: raise ValueError("Could not understand 'what' argument %r, expected something in form: 'count(*)', 'mean(x)'" % what) else: grid = self.histogram(binby, size=shape, limits=limits, selection=selection) fgrid = f(grid) if n is not None: # ngrid = n(fgrid, axis=normalize_axis) ngrid = fgrid / fgrid.sum() else: ngrid = fgrid # reductions = [_parse_reduction(r, colormap, colors) for r in reduce] # rgrid = ngrid * 1. # for r in reduce: # r = _parse_reduction(r, colormap, colors) # rgrid = r(rgrid) # grid = self.reduce(grid, ) xmin, xmax = limits[-1] if facet: N = len(grid[-1]) else: N = len(grid) xexpression = binby[0] xar = np.arange(N + 1) / (N - 0.) * (xmax - xmin) + xmin label = str(label or selection or x) if facet: import math rows, columns = int(math.ceil(facet_count / 4.)), 4 values = np.linspace(facet_limits[0], facet_limits[1], facet_count + 1) for i in range(facet_count): ax = pylab.subplot(rows, columns, i + 1) value = ax.plot(xar, ngrid[i], drawstyle="steps-mid", label=label, **kwargs) v1, v2 = values[i], values[i + 1] pylab.xlabel(xlabel or x) pylab.ylabel(ylabel or what) ax.set_title("%3f <= %s < %3f" % (v1, facet_expression, v2)) if self.iscategory(xexpression): labels = self.category_labels(xexpression) step = len(labels) // max_labels pylab.xticks(range(len(labels))[::step], labels[::step], size='small') else: # im = pylab.imshow(rgrid, extent=np.array(limits[:2]).flatten(), origin="lower", aspect=aspect) pylab.xlabel(xlabel or self.label(x)) pylab.ylabel(ylabel or what) # print(xar, ngrid) # repeat the first element, that's how plot/steps likes it.. g = np.concatenate([ngrid[0:1], ngrid]) value = pylab.plot(xar, g, drawstyle="steps-pre", label=label, **kwargs) if self.iscategory(xexpression): labels = self.category_labels(xexpression) step = len(labels) // max_labels pylab.xticks(range(len(labels))[::step], labels[::step], size='small') if tight_layout: pylab.tight_layout() if hardcopy: pylab.savefig(hardcopy) if show: pylab.show() return value
[ "Viz", "data", "in", "1d", "(", "histograms", "running", "means", "etc", ")" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-viz/vaex/viz/mpl.py#L36-L180
[ "def", "plot1d", "(", "self", ",", "x", "=", "None", ",", "what", "=", "\"count(*)\"", ",", "grid", "=", "None", ",", "shape", "=", "64", ",", "facet", "=", "None", ",", "limits", "=", "None", ",", "figsize", "=", "None", ",", "f", "=", "\"identity\"", ",", "n", "=", "None", ",", "normalize_axis", "=", "None", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "label", "=", "None", ",", "selection", "=", "None", ",", "show", "=", "False", ",", "tight_layout", "=", "True", ",", "hardcopy", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "pylab", "f", "=", "_parse_f", "(", "f", ")", "n", "=", "_parse_n", "(", "n", ")", "if", "type", "(", "shape", ")", "==", "int", ":", "shape", "=", "(", "shape", ",", ")", "binby", "=", "[", "]", "x", "=", "_ensure_strings_from_expressions", "(", "x", ")", "for", "expression", "in", "[", "x", "]", ":", "if", "expression", "is", "not", "None", ":", "binby", "=", "[", "expression", "]", "+", "binby", "limits", "=", "self", ".", "limits", "(", "binby", ",", "limits", ")", "if", "figsize", "is", "not", "None", ":", "pylab", ".", "figure", "(", "num", "=", "None", ",", "figsize", "=", "figsize", ",", "dpi", "=", "80", ",", "facecolor", "=", "'w'", ",", "edgecolor", "=", "'k'", ")", "fig", "=", "pylab", ".", "gcf", "(", ")", "import", "re", "if", "facet", "is", "not", "None", ":", "match", "=", "re", ".", "match", "(", "\"(.*):(.*),(.*),(.*)\"", ",", "facet", ")", "if", "match", ":", "groups", "=", "match", ".", "groups", "(", ")", "facet_expression", "=", "groups", "[", "0", "]", "facet_limits", "=", "[", "ast", ".", "literal_eval", "(", "groups", "[", "1", "]", ")", ",", "ast", ".", "literal_eval", "(", "groups", "[", "2", "]", ")", "]", "facet_count", "=", "ast", ".", "literal_eval", "(", "groups", "[", "3", "]", ")", "limits", ".", "append", "(", "facet_limits", ")", "binby", ".", "append", "(", "facet_expression", ")", "shape", "=", "(", "facet_count", ",", ")", "+", "shape", "else", ":", "raise", "ValueError", "(", "\"Could not understand 'facet' argument %r, expected something in form: 'column:-1,10:5'\"", "%", "facet", ")", "if", "grid", "is", "None", ":", "if", "what", ":", "if", "isinstance", "(", "what", ",", "(", "vaex", ".", "stat", ".", "Expression", ")", ")", ":", "grid", "=", "what", ".", "calculate", "(", "self", ",", "binby", "=", "binby", ",", "limits", "=", "limits", ",", "shape", "=", "shape", ",", "selection", "=", "selection", ")", "else", ":", "what", "=", "what", ".", "strip", "(", ")", "index", "=", "what", ".", "index", "(", "\"(\"", ")", "import", "re", "groups", "=", "re", ".", "match", "(", "\"(.*)\\((.*)\\)\"", ",", "what", ")", ".", "groups", "(", ")", "if", "groups", "and", "len", "(", "groups", ")", "==", "2", ":", "function", "=", "groups", "[", "0", "]", "arguments", "=", "groups", "[", "1", "]", ".", "strip", "(", ")", "functions", "=", "[", "\"mean\"", ",", "\"sum\"", ",", "\"std\"", ",", "\"count\"", "]", "if", "function", "in", "functions", ":", "# grid = getattr(self, function)(arguments, binby, limits=limits, shape=shape, selection=selection)", "grid", "=", "getattr", "(", "vaex", ".", "stat", ",", "function", ")", "(", "arguments", ")", ".", "calculate", "(", "self", ",", "binby", "=", "binby", ",", "limits", "=", "limits", ",", "shape", "=", "shape", ",", "selection", "=", "selection", ")", "elif", "function", "==", "\"count\"", "and", "arguments", "==", "\"*\"", ":", "grid", "=", "self", ".", "count", "(", "binby", "=", "binby", ",", "shape", "=", "shape", ",", "limits", "=", "limits", ",", 
"selection", "=", "selection", ")", "elif", "function", "==", "\"cumulative\"", "and", "arguments", "==", "\"*\"", ":", "# TODO: comulative should also include the tails outside limits", "grid", "=", "self", ".", "count", "(", "binby", "=", "binby", ",", "shape", "=", "shape", ",", "limits", "=", "limits", ",", "selection", "=", "selection", ")", "grid", "=", "np", ".", "cumsum", "(", "grid", ")", "else", ":", "raise", "ValueError", "(", "\"Could not understand method: %s, expected one of %r'\"", "%", "(", "function", ",", "functions", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Could not understand 'what' argument %r, expected something in form: 'count(*)', 'mean(x)'\"", "%", "what", ")", "else", ":", "grid", "=", "self", ".", "histogram", "(", "binby", ",", "size", "=", "shape", ",", "limits", "=", "limits", ",", "selection", "=", "selection", ")", "fgrid", "=", "f", "(", "grid", ")", "if", "n", "is", "not", "None", ":", "# ngrid = n(fgrid, axis=normalize_axis)", "ngrid", "=", "fgrid", "/", "fgrid", ".", "sum", "(", ")", "else", ":", "ngrid", "=", "fgrid", "# reductions = [_parse_reduction(r, colormap, colors) for r in reduce]", "# rgrid = ngrid * 1.", "# for r in reduce:", "# r = _parse_reduction(r, colormap, colors)", "# rgrid = r(rgrid)", "# grid = self.reduce(grid, )", "xmin", ",", "xmax", "=", "limits", "[", "-", "1", "]", "if", "facet", ":", "N", "=", "len", "(", "grid", "[", "-", "1", "]", ")", "else", ":", "N", "=", "len", "(", "grid", ")", "xexpression", "=", "binby", "[", "0", "]", "xar", "=", "np", ".", "arange", "(", "N", "+", "1", ")", "/", "(", "N", "-", "0.", ")", "*", "(", "xmax", "-", "xmin", ")", "+", "xmin", "label", "=", "str", "(", "label", "or", "selection", "or", "x", ")", "if", "facet", ":", "import", "math", "rows", ",", "columns", "=", "int", "(", "math", ".", "ceil", "(", "facet_count", "/", "4.", ")", ")", ",", "4", "values", "=", "np", ".", "linspace", "(", "facet_limits", "[", "0", "]", ",", "facet_limits", "[", "1", "]", ",", "facet_count", "+", "1", ")", "for", "i", "in", "range", "(", "facet_count", ")", ":", "ax", "=", "pylab", ".", "subplot", "(", "rows", ",", "columns", ",", "i", "+", "1", ")", "value", "=", "ax", ".", "plot", "(", "xar", ",", "ngrid", "[", "i", "]", ",", "drawstyle", "=", "\"steps-mid\"", ",", "label", "=", "label", ",", "*", "*", "kwargs", ")", "v1", ",", "v2", "=", "values", "[", "i", "]", ",", "values", "[", "i", "+", "1", "]", "pylab", ".", "xlabel", "(", "xlabel", "or", "x", ")", "pylab", ".", "ylabel", "(", "ylabel", "or", "what", ")", "ax", ".", "set_title", "(", "\"%3f <= %s < %3f\"", "%", "(", "v1", ",", "facet_expression", ",", "v2", ")", ")", "if", "self", ".", "iscategory", "(", "xexpression", ")", ":", "labels", "=", "self", ".", "category_labels", "(", "xexpression", ")", "step", "=", "len", "(", "labels", ")", "//", "max_labels", "pylab", ".", "xticks", "(", "range", "(", "len", "(", "labels", ")", ")", "[", ":", ":", "step", "]", ",", "labels", "[", ":", ":", "step", "]", ",", "size", "=", "'small'", ")", "else", ":", "# im = pylab.imshow(rgrid, extent=np.array(limits[:2]).flatten(), origin=\"lower\", aspect=aspect)", "pylab", ".", "xlabel", "(", "xlabel", "or", "self", ".", "label", "(", "x", ")", ")", "pylab", ".", "ylabel", "(", "ylabel", "or", "what", ")", "# print(xar, ngrid)", "# repeat the first element, that's how plot/steps likes it..", "g", "=", "np", ".", "concatenate", "(", "[", "ngrid", "[", "0", ":", "1", "]", ",", "ngrid", "]", ")", "value", "=", "pylab", ".", "plot", "(", "xar", ",", "g", ",", 
"drawstyle", "=", "\"steps-pre\"", ",", "label", "=", "label", ",", "*", "*", "kwargs", ")", "if", "self", ".", "iscategory", "(", "xexpression", ")", ":", "labels", "=", "self", ".", "category_labels", "(", "xexpression", ")", "step", "=", "len", "(", "labels", ")", "//", "max_labels", "pylab", ".", "xticks", "(", "range", "(", "len", "(", "labels", ")", ")", "[", ":", ":", "step", "]", ",", "labels", "[", ":", ":", "step", "]", ",", "size", "=", "'small'", ")", "if", "tight_layout", ":", "pylab", ".", "tight_layout", "(", ")", "if", "hardcopy", ":", "pylab", ".", "savefig", "(", "hardcopy", ")", "if", "show", ":", "pylab", ".", "show", "(", ")", "return", "value" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
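A hedged one-liner usage sketch of plot1d (requires matplotlib; the data is random and only for illustration):

>>> import vaex, numpy as np
>>> df = vaex.from_arrays(x=np.random.normal(size=1000))
>>> df.plot1d(df.x, shape=32, limits='minmax', show=True)  # histogram of x with 32 bins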
test
scatter
Viz (small amounts) of data in 2d using a scatter plot. Convenience wrapper around pylab.scatter for working with small DataFrames or selections :param x: Expression for x axis :param y: Idem for y :param s_expr: When given, use it for the s (size) argument of pylab.scatter :param c_expr: When given, use it for the c (color) argument of pylab.scatter :param labels: Annotate the points with these text values :param selection: Single selection expression, or None :param length_limit: maximum number of rows it will plot :param length_check: should we do the maximum row check or not? :param label: label for the legend :param xlabel: label for x axis, if None .label(x) is used :param ylabel: label for y axis, if None .label(y) is used :param errorbar_kwargs: extra dict with arguments passed to plt.errorbar :param kwargs: extra arguments passed to pylab.scatter :return:
packages/vaex-viz/vaex/viz/mpl.py
def scatter(self, x, y, xerr=None, yerr=None, cov=None, corr=None, s_expr=None, c_expr=None, labels=None, selection=None, length_limit=50000, length_check=True, label=None, xlabel=None, ylabel=None, errorbar_kwargs={}, ellipse_kwargs={}, **kwargs): """Viz (small amounts) of data in 2d using a scatter plot Convenience wrapper around pylab.scatter when for working with small DataFrames or selections :param x: Expression for x axis :param y: Idem for y :param s_expr: When given, use if for the s (size) argument of pylab.scatter :param c_expr: When given, use if for the c (color) argument of pylab.scatter :param labels: Annotate the points with these text values :param selection: Single selection expression, or None :param length_limit: maximum number of rows it will plot :param length_check: should we do the maximum row check or not? :param label: label for the legend :param xlabel: label for x axis, if None .label(x) is used :param ylabel: label for y axis, if None .label(y) is used :param errorbar_kwargs: extra dict with arguments passed to plt.errorbar :param kwargs: extra arguments passed to pylab.scatter :return: """ import pylab as plt x = _ensure_strings_from_expressions(x) y = _ensure_strings_from_expressions(y) label = str(label or selection) selection = _ensure_strings_from_expressions(selection) if length_check: count = self.count(selection=selection) if count > length_limit: raise ValueError("the number of rows (%d) is above the limit (%d), pass length_check=False, or increase length_limit" % (count, length_limit)) x_values = self.evaluate(x, selection=selection) y_values = self.evaluate(y, selection=selection) if s_expr: kwargs["s"] = self.evaluate(s_expr, selection=selection) if c_expr: kwargs["c"] = self.evaluate(c_expr, selection=selection) plt.xlabel(xlabel or self.label(x)) plt.ylabel(ylabel or self.label(y)) s = plt.scatter(x_values, y_values, label=label, **kwargs) if labels: label_values = self.evaluate(labels, selection=selection) for i, label_value in enumerate(label_values): plt.annotate(label_value, (x_values[i], y_values[i])) xerr_values = None yerr_values = None if cov is not None or corr is not None: from matplotlib.patches import Ellipse sx = self.evaluate(xerr, selection=selection) sy = self.evaluate(yerr, selection=selection) if corr is not None: sxy = self.evaluate(corr, selection=selection) * sx * sy elif cov is not None: sxy = self.evaluate(cov, selection=selection) cov_matrix = np.zeros((len(sx), 2, 2)) cov_matrix[:,0,0] = sx**2 cov_matrix[:,1,1] = sy**2 cov_matrix[:,0,1] = cov_matrix[:,1,0] = sxy ax = plt.gca() ellipse_kwargs = dict(ellipse_kwargs) ellipse_kwargs['facecolor'] = ellipse_kwargs.get('facecolor', 'none') ellipse_kwargs['edgecolor'] = ellipse_kwargs.get('edgecolor', 'black') for i in range(len(sx)): eigen_values, eigen_vectors = np.linalg.eig(cov_matrix[i]) indices = np.argsort(eigen_values)[::-1] eigen_values = eigen_values[indices] eigen_vectors = eigen_vectors[:,indices] v1 = eigen_vectors[:, 0] v2 = eigen_vectors[:, 1] varx = cov_matrix[i, 0, 0] vary = cov_matrix[i, 1, 1] angle = np.arctan2(v1[1], v1[0]) # round off errors cause negative values? 
if eigen_values[1] < 0 and abs((eigen_values[1]/eigen_values[0])) < 1e-10: eigen_values[1] = 0 if eigen_values[0] < 0 or eigen_values[1] < 0: raise ValueError('neg val') width, height = np.sqrt(np.max(eigen_values)), np.sqrt(np.min(eigen_values)) e = Ellipse(xy=(x_values[i], y_values[i]), width=width, height=height, angle=np.degrees(angle), **ellipse_kwargs) ax.add_artist(e) else: if xerr is not None: if _issequence(xerr): assert len(xerr) == 2, "if xerr is a sequence it should be of length 2" xerr_values = [self.evaluate(xerr[0], selection=selection), self.evaluate(xerr[1], selection=selection)] else: xerr_values = self.evaluate(xerr, selection=selection) if yerr is not None: if _issequence(yerr): assert len(yerr) == 2, "if yerr is a sequence it should be of length 2" yerr_values = [self.evaluate(yerr[0], selection=selection), self.evaluate(yerr[1], selection=selection)] else: yerr_values = self.evaluate(yerr, selection=selection) if xerr_values is not None or yerr_values is not None: errorbar_kwargs = dict(errorbar_kwargs) errorbar_kwargs['fmt'] = errorbar_kwargs.get('fmt', 'none') plt.errorbar(x_values, y_values, yerr=yerr_values, xerr=xerr_values, **errorbar_kwargs) return s
def scatter(self, x, y, xerr=None, yerr=None, cov=None, corr=None, s_expr=None, c_expr=None, labels=None, selection=None, length_limit=50000, length_check=True, label=None, xlabel=None, ylabel=None, errorbar_kwargs={}, ellipse_kwargs={}, **kwargs): """Viz (small amounts) of data in 2d using a scatter plot Convenience wrapper around pylab.scatter when for working with small DataFrames or selections :param x: Expression for x axis :param y: Idem for y :param s_expr: When given, use if for the s (size) argument of pylab.scatter :param c_expr: When given, use if for the c (color) argument of pylab.scatter :param labels: Annotate the points with these text values :param selection: Single selection expression, or None :param length_limit: maximum number of rows it will plot :param length_check: should we do the maximum row check or not? :param label: label for the legend :param xlabel: label for x axis, if None .label(x) is used :param ylabel: label for y axis, if None .label(y) is used :param errorbar_kwargs: extra dict with arguments passed to plt.errorbar :param kwargs: extra arguments passed to pylab.scatter :return: """ import pylab as plt x = _ensure_strings_from_expressions(x) y = _ensure_strings_from_expressions(y) label = str(label or selection) selection = _ensure_strings_from_expressions(selection) if length_check: count = self.count(selection=selection) if count > length_limit: raise ValueError("the number of rows (%d) is above the limit (%d), pass length_check=False, or increase length_limit" % (count, length_limit)) x_values = self.evaluate(x, selection=selection) y_values = self.evaluate(y, selection=selection) if s_expr: kwargs["s"] = self.evaluate(s_expr, selection=selection) if c_expr: kwargs["c"] = self.evaluate(c_expr, selection=selection) plt.xlabel(xlabel or self.label(x)) plt.ylabel(ylabel or self.label(y)) s = plt.scatter(x_values, y_values, label=label, **kwargs) if labels: label_values = self.evaluate(labels, selection=selection) for i, label_value in enumerate(label_values): plt.annotate(label_value, (x_values[i], y_values[i])) xerr_values = None yerr_values = None if cov is not None or corr is not None: from matplotlib.patches import Ellipse sx = self.evaluate(xerr, selection=selection) sy = self.evaluate(yerr, selection=selection) if corr is not None: sxy = self.evaluate(corr, selection=selection) * sx * sy elif cov is not None: sxy = self.evaluate(cov, selection=selection) cov_matrix = np.zeros((len(sx), 2, 2)) cov_matrix[:,0,0] = sx**2 cov_matrix[:,1,1] = sy**2 cov_matrix[:,0,1] = cov_matrix[:,1,0] = sxy ax = plt.gca() ellipse_kwargs = dict(ellipse_kwargs) ellipse_kwargs['facecolor'] = ellipse_kwargs.get('facecolor', 'none') ellipse_kwargs['edgecolor'] = ellipse_kwargs.get('edgecolor', 'black') for i in range(len(sx)): eigen_values, eigen_vectors = np.linalg.eig(cov_matrix[i]) indices = np.argsort(eigen_values)[::-1] eigen_values = eigen_values[indices] eigen_vectors = eigen_vectors[:,indices] v1 = eigen_vectors[:, 0] v2 = eigen_vectors[:, 1] varx = cov_matrix[i, 0, 0] vary = cov_matrix[i, 1, 1] angle = np.arctan2(v1[1], v1[0]) # round off errors cause negative values? 
if eigen_values[1] < 0 and abs((eigen_values[1]/eigen_values[0])) < 1e-10: eigen_values[1] = 0 if eigen_values[0] < 0 or eigen_values[1] < 0: raise ValueError('neg val') width, height = np.sqrt(np.max(eigen_values)), np.sqrt(np.min(eigen_values)) e = Ellipse(xy=(x_values[i], y_values[i]), width=width, height=height, angle=np.degrees(angle), **ellipse_kwargs) ax.add_artist(e) else: if xerr is not None: if _issequence(xerr): assert len(xerr) == 2, "if xerr is a sequence it should be of length 2" xerr_values = [self.evaluate(xerr[0], selection=selection), self.evaluate(xerr[1], selection=selection)] else: xerr_values = self.evaluate(xerr, selection=selection) if yerr is not None: if _issequence(yerr): assert len(yerr) == 2, "if yerr is a sequence it should be of length 2" yerr_values = [self.evaluate(yerr[0], selection=selection), self.evaluate(yerr[1], selection=selection)] else: yerr_values = self.evaluate(yerr, selection=selection) if xerr_values is not None or yerr_values is not None: errorbar_kwargs = dict(errorbar_kwargs) errorbar_kwargs['fmt'] = errorbar_kwargs.get('fmt', 'none') plt.errorbar(x_values, y_values, yerr=yerr_values, xerr=xerr_values, **errorbar_kwargs) return s
[ "Viz", "(", "small", "amounts", ")", "of", "data", "in", "2d", "using", "a", "scatter", "plot" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-viz/vaex/viz/mpl.py#L188-L284
[ "def", "scatter", "(", "self", ",", "x", ",", "y", ",", "xerr", "=", "None", ",", "yerr", "=", "None", ",", "cov", "=", "None", ",", "corr", "=", "None", ",", "s_expr", "=", "None", ",", "c_expr", "=", "None", ",", "labels", "=", "None", ",", "selection", "=", "None", ",", "length_limit", "=", "50000", ",", "length_check", "=", "True", ",", "label", "=", "None", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "errorbar_kwargs", "=", "{", "}", ",", "ellipse_kwargs", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "import", "pylab", "as", "plt", "x", "=", "_ensure_strings_from_expressions", "(", "x", ")", "y", "=", "_ensure_strings_from_expressions", "(", "y", ")", "label", "=", "str", "(", "label", "or", "selection", ")", "selection", "=", "_ensure_strings_from_expressions", "(", "selection", ")", "if", "length_check", ":", "count", "=", "self", ".", "count", "(", "selection", "=", "selection", ")", "if", "count", ">", "length_limit", ":", "raise", "ValueError", "(", "\"the number of rows (%d) is above the limit (%d), pass length_check=False, or increase length_limit\"", "%", "(", "count", ",", "length_limit", ")", ")", "x_values", "=", "self", ".", "evaluate", "(", "x", ",", "selection", "=", "selection", ")", "y_values", "=", "self", ".", "evaluate", "(", "y", ",", "selection", "=", "selection", ")", "if", "s_expr", ":", "kwargs", "[", "\"s\"", "]", "=", "self", ".", "evaluate", "(", "s_expr", ",", "selection", "=", "selection", ")", "if", "c_expr", ":", "kwargs", "[", "\"c\"", "]", "=", "self", ".", "evaluate", "(", "c_expr", ",", "selection", "=", "selection", ")", "plt", ".", "xlabel", "(", "xlabel", "or", "self", ".", "label", "(", "x", ")", ")", "plt", ".", "ylabel", "(", "ylabel", "or", "self", ".", "label", "(", "y", ")", ")", "s", "=", "plt", ".", "scatter", "(", "x_values", ",", "y_values", ",", "label", "=", "label", ",", "*", "*", "kwargs", ")", "if", "labels", ":", "label_values", "=", "self", ".", "evaluate", "(", "labels", ",", "selection", "=", "selection", ")", "for", "i", ",", "label_value", "in", "enumerate", "(", "label_values", ")", ":", "plt", ".", "annotate", "(", "label_value", ",", "(", "x_values", "[", "i", "]", ",", "y_values", "[", "i", "]", ")", ")", "xerr_values", "=", "None", "yerr_values", "=", "None", "if", "cov", "is", "not", "None", "or", "corr", "is", "not", "None", ":", "from", "matplotlib", ".", "patches", "import", "Ellipse", "sx", "=", "self", ".", "evaluate", "(", "xerr", ",", "selection", "=", "selection", ")", "sy", "=", "self", ".", "evaluate", "(", "yerr", ",", "selection", "=", "selection", ")", "if", "corr", "is", "not", "None", ":", "sxy", "=", "self", ".", "evaluate", "(", "corr", ",", "selection", "=", "selection", ")", "*", "sx", "*", "sy", "elif", "cov", "is", "not", "None", ":", "sxy", "=", "self", ".", "evaluate", "(", "cov", ",", "selection", "=", "selection", ")", "cov_matrix", "=", "np", ".", "zeros", "(", "(", "len", "(", "sx", ")", ",", "2", ",", "2", ")", ")", "cov_matrix", "[", ":", ",", "0", ",", "0", "]", "=", "sx", "**", "2", "cov_matrix", "[", ":", ",", "1", ",", "1", "]", "=", "sy", "**", "2", "cov_matrix", "[", ":", ",", "0", ",", "1", "]", "=", "cov_matrix", "[", ":", ",", "1", ",", "0", "]", "=", "sxy", "ax", "=", "plt", ".", "gca", "(", ")", "ellipse_kwargs", "=", "dict", "(", "ellipse_kwargs", ")", "ellipse_kwargs", "[", "'facecolor'", "]", "=", "ellipse_kwargs", ".", "get", "(", "'facecolor'", ",", "'none'", ")", "ellipse_kwargs", "[", "'edgecolor'", "]", "=", "ellipse_kwargs", ".", "get", "(", 
"'edgecolor'", ",", "'black'", ")", "for", "i", "in", "range", "(", "len", "(", "sx", ")", ")", ":", "eigen_values", ",", "eigen_vectors", "=", "np", ".", "linalg", ".", "eig", "(", "cov_matrix", "[", "i", "]", ")", "indices", "=", "np", ".", "argsort", "(", "eigen_values", ")", "[", ":", ":", "-", "1", "]", "eigen_values", "=", "eigen_values", "[", "indices", "]", "eigen_vectors", "=", "eigen_vectors", "[", ":", ",", "indices", "]", "v1", "=", "eigen_vectors", "[", ":", ",", "0", "]", "v2", "=", "eigen_vectors", "[", ":", ",", "1", "]", "varx", "=", "cov_matrix", "[", "i", ",", "0", ",", "0", "]", "vary", "=", "cov_matrix", "[", "i", ",", "1", ",", "1", "]", "angle", "=", "np", ".", "arctan2", "(", "v1", "[", "1", "]", ",", "v1", "[", "0", "]", ")", "# round off errors cause negative values?", "if", "eigen_values", "[", "1", "]", "<", "0", "and", "abs", "(", "(", "eigen_values", "[", "1", "]", "/", "eigen_values", "[", "0", "]", ")", ")", "<", "1e-10", ":", "eigen_values", "[", "1", "]", "=", "0", "if", "eigen_values", "[", "0", "]", "<", "0", "or", "eigen_values", "[", "1", "]", "<", "0", ":", "raise", "ValueError", "(", "'neg val'", ")", "width", ",", "height", "=", "np", ".", "sqrt", "(", "np", ".", "max", "(", "eigen_values", ")", ")", ",", "np", ".", "sqrt", "(", "np", ".", "min", "(", "eigen_values", ")", ")", "e", "=", "Ellipse", "(", "xy", "=", "(", "x_values", "[", "i", "]", ",", "y_values", "[", "i", "]", ")", ",", "width", "=", "width", ",", "height", "=", "height", ",", "angle", "=", "np", ".", "degrees", "(", "angle", ")", ",", "*", "*", "ellipse_kwargs", ")", "ax", ".", "add_artist", "(", "e", ")", "else", ":", "if", "xerr", "is", "not", "None", ":", "if", "_issequence", "(", "xerr", ")", ":", "assert", "len", "(", "xerr", ")", "==", "2", ",", "\"if xerr is a sequence it should be of length 2\"", "xerr_values", "=", "[", "self", ".", "evaluate", "(", "xerr", "[", "0", "]", ",", "selection", "=", "selection", ")", ",", "self", ".", "evaluate", "(", "xerr", "[", "1", "]", ",", "selection", "=", "selection", ")", "]", "else", ":", "xerr_values", "=", "self", ".", "evaluate", "(", "xerr", ",", "selection", "=", "selection", ")", "if", "yerr", "is", "not", "None", ":", "if", "_issequence", "(", "yerr", ")", ":", "assert", "len", "(", "yerr", ")", "==", "2", ",", "\"if yerr is a sequence it should be of length 2\"", "yerr_values", "=", "[", "self", ".", "evaluate", "(", "yerr", "[", "0", "]", ",", "selection", "=", "selection", ")", ",", "self", ".", "evaluate", "(", "yerr", "[", "1", "]", ",", "selection", "=", "selection", ")", "]", "else", ":", "yerr_values", "=", "self", ".", "evaluate", "(", "yerr", ",", "selection", "=", "selection", ")", "if", "xerr_values", "is", "not", "None", "or", "yerr_values", "is", "not", "None", ":", "errorbar_kwargs", "=", "dict", "(", "errorbar_kwargs", ")", "errorbar_kwargs", "[", "'fmt'", "]", "=", "errorbar_kwargs", ".", "get", "(", "'fmt'", ",", "'none'", ")", "plt", ".", "errorbar", "(", "x_values", ",", "y_values", ",", "yerr", "=", "yerr_values", ",", "xerr", "=", "xerr_values", ",", "*", "*", "errorbar_kwargs", ")", "return", "s" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
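The cov/corr branch of the scatter code above turns each point's x/y errors and their covariance into an error ellipse by eigendecomposing a 2x2 covariance matrix. A minimal standalone sketch of that step is shown below; it is not part of vaex, and the helper name error_ellipse and the sample numbers are illustrative assumptions.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse

def error_ellipse(x, y, sx, sy, corr, **kwargs):
    # Build the 2x2 covariance matrix from the errors and the correlation coefficient.
    sxy = corr * sx * sy
    cov = np.array([[sx**2, sxy], [sxy, sy**2]])
    # Eigendecompose and sort so the largest eigenvalue (major axis) comes first.
    eigen_values, eigen_vectors = np.linalg.eig(cov)
    order = np.argsort(eigen_values)[::-1]
    eigen_values = eigen_values[order]
    v1 = eigen_vectors[:, order][:, 0]
    # Orientation of the major axis, and axis lengths from the square roots of the
    # eigenvalues (this mirrors the convention used in the code above).
    angle = np.degrees(np.arctan2(v1[1], v1[0]))
    width, height = np.sqrt(eigen_values)
    return Ellipse(xy=(x, y), width=width, height=height, angle=angle, **kwargs)

fig, ax = plt.subplots()
ax.add_artist(error_ellipse(0.0, 0.0, sx=1.0, sy=0.5, corr=0.8,
                            facecolor='none', edgecolor='black'))
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
plt.show()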
test
plot
Viz data in a 2d histogram/heatmap. Declarative plotting of statistical plots using matplotlib, supports subplots, selections, layers.

Instead of passing x and y, pass a list as the x argument for multiple panels. Give what a list of options to have multiple panels. When both are present, they will be organized in a column/row order.

This method creates a 6 dimensional 'grid', where each dimension can map to a visual dimension. The grid dimensions are:

* x: shape determined by shape, content by x argument or the first dimension of each space
* y: ,,
* z: related to the z argument
* selection: shape equals length of selection argument
* what: shape equals length of what argument
* space: shape equals length of x argument if multiple values are given

By default, its shape is (1, 1, 1, 1, shape, shape) (where x is the last dimension).

The visual dimensions are:

* x: x coordinate on a plot / image (default maps to grid's x)
* y: y ,, (default maps to grid's y)
* layer: each image in this dimension is blended together to one image (default maps to z)
* fade: each image is shown faded after the next image (default maps to selection)
* row: rows of subplots (default maps to space)
* columns: columns of subplots (default maps to what)

All these mappings can be changed by the visual argument, some examples:

>>> df.plot('x', 'y', what=['mean(x)', 'correlation(vx, vy)'])

Will plot each 'what' as a column.

>>> df.plot('x', 'y', selection=['FeH < -3', '(FeH >= -3) & (FeH < -2)'], visual=dict(column='selection'))

Will plot each selection as a column, instead of faded on top of each other.

:param x: Expression to bin in the x direction (by default maps to x), or list of pairs, like [['x', 'y'], ['x', 'z']]; if multiple pairs are given, this dimension maps to rows by default
:param y: y (by default maps to y)
:param z: Expression to bin in the z direction, followed by a :start,end,shape signature, like 'FeH:-3,1:5' will produce 5 layers between -3 and 1 (by default maps to layer)
:param what: What to plot, count(*) will show a N-d histogram, mean('x') the mean of the x column, sum('x') the sum, std('x') the standard deviation, correlation('vx', 'vy') the correlation coefficient. Can also be a list of values, like ['count(x)', std('vx')] (by default maps to column)
:param reduce:
:param f: transform values by: 'identity' does nothing, 'log' or 'log10' will show the log of the value
:param normalize: normalization function, currently only 'normalize' is supported
:param normalize_axis: which axes to normalize on, None means normalize by the global maximum.
:param vmin: instead of automatic normalization (using normalize and normalize_axis), scale the data between vmin and vmax to [0, 1]
:param vmax: see vmin
:param shape: shape/size of the n-D histogram grid
:param limits: list of [[xmin, xmax], [ymin, ymax]], or a description such as 'minmax', '99%'
:param grid: if the binning is done beforehand by yourself, you can pass it
:param colormap: matplotlib colormap to use
:param figsize: (x, y) tuple passed to pylab.figure for setting the figure size
:param xlabel:
:param ylabel:
:param aspect:
:param tight_layout: call pylab.tight_layout or not
:param colorbar: plot a colorbar or not
:param interpolation: interpolation for imshow, possible options are: 'nearest', 'bilinear', 'bicubic', see matplotlib for more
:param return_extra:
:return:
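The docstring examples above can be tried against the bundled example dataset. The short usage sketch below assumes vaex.example() provides the columns x, y, vx, vy and FeH used in those examples; it only illustrates the default grid-to-visual mapping and the column='selection' override.

import vaex

df = vaex.example()  # assumption: example dataset with x, y, vx, vy, FeH columns

# Two 'what' statistics map to subplot columns by default.
df.plot('x', 'y', what=['mean(x)', 'correlation(vx, vy)'])

# Two selections are normally faded on top of each other; mapping the
# 'column' visual to 'selection' shows them side by side instead.
df.plot('x', 'y',
        selection=['FeH < -3', '(FeH >= -3) & (FeH < -2)'],
        visual=dict(column='selection'))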
packages/vaex-viz/vaex/viz/mpl.py
def plot(self, x=None, y=None, z=None, what="count(*)", vwhat=None, reduce=["colormap"], f=None, normalize="normalize", normalize_axis="what", vmin=None, vmax=None, shape=256, vshape=32, limits=None, grid=None, colormap="afmhot", # colors=["red", "green", "blue"], figsize=None, xlabel=None, ylabel=None, aspect="auto", tight_layout=True, interpolation="nearest", show=False, colorbar=True, colorbar_label=None, selection=None, selection_labels=None, title=None, background_color="white", pre_blend=False, background_alpha=1., visual=dict(x="x", y="y", layer="z", fade="selection", row="subspace", column="what"), smooth_pre=None, smooth_post=None, wrap=True, wrap_columns=4, return_extra=False, hardcopy=None): """Viz data in a 2d histogram/heatmap. Declarative plotting of statistical plots using matplotlib, supports subplots, selections, layers. Instead of passing x and y, pass a list as x argument for multiple panels. Give what a list of options to have multiple panels. When both are present then will be origanized in a column/row order. This methods creates a 6 dimensional 'grid', where each dimension can map the a visual dimension. The grid dimensions are: * x: shape determined by shape, content by x argument or the first dimension of each space * y: ,, * z: related to the z argument * selection: shape equals length of selection argument * what: shape equals length of what argument * space: shape equals length of x argument if multiple values are given By default, this its shape is (1, 1, 1, 1, shape, shape) (where x is the last dimension) The visual dimensions are * x: x coordinate on a plot / image (default maps to grid's x) * y: y ,, (default maps to grid's y) * layer: each image in this dimension is blended togeher to one image (default maps to z) * fade: each image is shown faded after the next image (default mapt to selection) * row: rows of subplots (default maps to space) * columns: columns of subplot (default maps to what) All these mappings can be changes by the visual argument, some examples: >>> df.plot('x', 'y', what=['mean(x)', 'correlation(vx, vy)']) Will plot each 'what' as a column. >>> df.plot('x', 'y', selection=['FeH < -3', '(FeH >= -3) & (FeH < -2)'], visual=dict(column='selection')) Will plot each selection as a column, instead of a faded on top of each other. :param x: Expression to bin in the x direction (by default maps to x), or list of pairs, like [['x', 'y'], ['x', 'z']], if multiple pairs are given, this dimension maps to rows by default :param y: y (by default maps to y) :param z: Expression to bin in the z direction, followed by a :start,end,shape signature, like 'FeH:-3,1:5' will produce 5 layers between -10 and 10 (by default maps to layer) :param what: What to plot, count(*) will show a N-d histogram, mean('x'), the mean of the x column, sum('x') the sum, std('x') the standard deviation, correlation('vx', 'vy') the correlation coefficient. Can also be a list of values, like ['count(x)', std('vx')], (by default maps to column) :param reduce: :param f: transform values by: 'identity' does nothing 'log' or 'log10' will show the log of the value :param normalize: normalization function, currently only 'normalize' is supported :param normalize_axis: which axes to normalize on, None means normalize by the global maximum. 
:param vmin: instead of automatic normalization, (using normalize and normalization_axis) scale the data between vmin and vmax to [0, 1] :param vmax: see vmin :param shape: shape/size of the n-D histogram grid :param limits: list of [[xmin, xmax], [ymin, ymax]], or a description such as 'minmax', '99%' :param grid: if the binning is done before by yourself, you can pass it :param colormap: matplotlib colormap to use :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size :param xlabel: :param ylabel: :param aspect: :param tight_layout: call pylab.tight_layout or not :param colorbar: plot a colorbar or not :param interpolation: interpolation for imshow, possible options are: 'nearest', 'bilinear', 'bicubic', see matplotlib for more :param return_extra: :return: """ import pylab import matplotlib n = _parse_n(normalize) if type(shape) == int: shape = (shape,) * 2 binby = [] x = _ensure_strings_from_expressions(x) y = _ensure_strings_from_expressions(y) for expression in [y, x]: if expression is not None: binby = [expression] + binby fig = pylab.gcf() if figsize is not None: fig.set_size_inches(*figsize) import re what_units = None whats = _ensure_list(what) selections = _ensure_list(selection) selections = _ensure_strings_from_expressions(selections) if y is None: waslist, [x, ] = vaex.utils.listify(x) else: waslist, [x, y] = vaex.utils.listify(x, y) x = list(zip(x, y)) limits = [limits] # every plot has its own vwhat for now vwhats = _expand_limits(vwhat, len(x)) # TODO: we're abusing this function.. logger.debug("x: %s", x) limits, shape = self.limits(x, limits, shape=shape) shape = shape[0] logger.debug("limits: %r", limits) # mapping of a grid axis to a label labels = {} shape = _expand_shape(shape, 2) vshape = _expand_shape(shape, 2) if z is not None: match = re.match("(.*):(.*),(.*),(.*)", z) if match: groups = match.groups() import ast z_expression = groups[0] logger.debug("found groups: %r", list(groups)) z_limits = [ast.literal_eval(groups[1]), ast.literal_eval(groups[2])] z_shape = ast.literal_eval(groups[3]) # for pair in x: x = [[z_expression] + list(k) for k in x] limits = np.array([[z_limits] + list(k) for k in limits]) shape = (z_shape,) + shape vshape = (z_shape,) + vshape logger.debug("x = %r", x) values = np.linspace(z_limits[0], z_limits[1], num=z_shape + 1) labels["z"] = list(["%s <= %s < %s" % (v1, z_expression, v2) for v1, v2 in zip(values[:-1], values[1:])]) else: raise ValueError("Could not understand 'z' argument %r, expected something in form: 'column:-1,10:5'" % facet) else: z_shape = 1 # z == 1 if z is None: total_grid = np.zeros((len(x), len(whats), len(selections), 1) + shape, dtype=float) total_vgrid = np.zeros((len(x), len(whats), len(selections), 1) + vshape, dtype=float) else: total_grid = np.zeros((len(x), len(whats), len(selections)) + shape, dtype=float) total_vgrid = np.zeros((len(x), len(whats), len(selections)) + vshape, dtype=float) logger.debug("shape of total grid: %r", total_grid.shape) axis = dict(plot=0, what=1, selection=2) xlimits = limits grid_axes = dict(x=-1, y=-2, z=-3, selection=-4, what=-5, subspace=-6) visual_axes = dict(x=-1, y=-2, layer=-3, fade=-4, column=-5, row=-6) # visual_default=dict(x="x", y="y", z="layer", selection="fade", subspace="row", what="column") # visual: mapping of a plot axis, to a grid axis visual_default = dict(x="x", y="y", layer="z", fade="selection", row="subspace", column="what") def invert(x): return dict((v, k) for k, v in x.items()) # visual_default_reverse = invert(visual_default) # 
visual_ = visual_default # visual = dict(visual) # copy for modification # add entries to avoid mapping multiple times to the same axis free_visual_axes = list(visual_default.keys()) # visual_reverse = invert(visual) logger.debug("1: %r %r", visual, free_visual_axes) for visual_name, grid_name in visual.items(): if visual_name in free_visual_axes: free_visual_axes.remove(visual_name) else: raise ValueError("visual axes %s used multiple times" % visual_name) logger.debug("2: %r %r", visual, free_visual_axes) for visual_name, grid_name in visual_default.items(): if visual_name in free_visual_axes and grid_name not in visual.values(): free_visual_axes.remove(visual_name) visual[visual_name] = grid_name logger.debug("3: %r %r", visual, free_visual_axes) for visual_name, grid_name in visual_default.items(): if visual_name not in free_visual_axes and grid_name not in visual.values(): visual[free_visual_axes.pop(0)] = grid_name logger.debug("4: %r %r", visual, free_visual_axes) visual_reverse = invert(visual) # TODO: the meaning of visual and visual_reverse is changed below this line, super confusing visual, visual_reverse = visual_reverse, visual # so now, visual: mapping of a grid axis to plot axis # visual_reverse: mapping of a grid axis to plot axis move = {} for grid_name, visual_name in visual.items(): if visual_axes[visual_name] in visual.values(): index = visual.values().find(visual_name) key = visual.keys()[index] raise ValueError("trying to map %s to %s while, it is already mapped by %s" % (grid_name, visual_name, key)) move[grid_axes[grid_name]] = visual_axes[visual_name] # normalize_axis = _ensure_list(normalize_axis) fs = _expand(f, total_grid.shape[grid_axes[normalize_axis]]) # assert len(vwhat) # labels["y"] = ylabels what_labels = [] if grid is None: grid_of_grids = [] for i, (binby, limits) in enumerate(zip(x, xlimits)): grid_of_grids.append([]) for j, what in enumerate(whats): if isinstance(what, vaex.stat.Expression): grid = what.calculate(self, binby=binby, shape=shape, limits=limits, selection=selections, delay=True) else: what = what.strip() index = what.index("(") import re groups = re.match("(.*)\((.*)\)", what).groups() if groups and len(groups) == 2: function = groups[0] arguments = groups[1].strip() if "," in arguments: arguments = arguments.split(",") functions = ["mean", "sum", "std", "var", "correlation", "covar", "min", "max", "median_approx"] unit_expression = None if function in ["mean", "sum", "std", "min", "max", "median"]: unit_expression = arguments if function in ["var"]: unit_expression = "(%s) * (%s)" % (arguments, arguments) if function in ["covar"]: unit_expression = "(%s) * (%s)" % arguments if unit_expression: unit = self.unit(unit_expression) if unit: what_units = unit.to_string('latex_inline') if function in functions: grid = getattr(self, function)(arguments, binby=binby, limits=limits, shape=shape, selection=selections, delay=True) elif function == "count": grid = self.count(arguments, binby, shape=shape, limits=limits, selection=selections, delay=True) else: raise ValueError("Could not understand method: %s, expected one of %r'" % (function, functions)) else: raise ValueError("Could not understand 'what' argument %r, expected something in form: 'count(*)', 'mean(x)'" % what) if i == 0: # and j == 0: what_label = str(whats[j]) if what_units: what_label += " (%s)" % what_units if fs[j]: what_label = fs[j] + " " + what_label what_labels.append(what_label) grid_of_grids[-1].append(grid) self.executor.execute() for i, (binby, limits) in enumerate(zip(x, 
xlimits)): for j, what in enumerate(whats): grid = grid_of_grids[i][j].get() total_grid[i, j, :, :] = grid[:, None, ...] labels["what"] = what_labels else: dims_left = 6 - len(grid.shape) total_grid = np.broadcast_to(grid, (1,) * dims_left + grid.shape) # visual=dict(x="x", y="y", selection="fade", subspace="facet1", what="facet2",) def _selection_name(name): if name in [None, False]: return "selection: all" elif name in ["default", True]: return "selection: default" else: return "selection: %s" % name if selection_labels is None: labels["selection"] = list([_selection_name(k) for k in selections]) else: labels["selection"] = selection_labels # visual_grid = np.moveaxis(total_grid, move.keys(), move.values()) # np.moveaxis is in np 1.11 only?, use transpose axes = [None] * len(move) for key, value in move.items(): axes[value] = key visual_grid = np.transpose(total_grid, axes) logger.debug("grid shape: %r", total_grid.shape) logger.debug("visual: %r", visual.items()) logger.debug("move: %r", move) logger.debug("visual grid shape: %r", visual_grid.shape) xexpressions = [] yexpressions = [] for i, (binby, limits) in enumerate(zip(x, xlimits)): xexpressions.append(binby[0]) yexpressions.append(binby[1]) if xlabel is None: xlabels = [] ylabels = [] for i, (binby, limits) in enumerate(zip(x, xlimits)): if z is not None: xlabels.append(self.label(binby[1])) ylabels.append(self.label(binby[2])) else: xlabels.append(self.label(binby[0])) ylabels.append(self.label(binby[1])) else: Nl = visual_grid.shape[visual_axes['row']] xlabels = _expand(xlabel, Nl) ylabels = _expand(ylabel, Nl) #labels[visual["x"]] = (xlabels, ylabels) labels["x"] = xlabels labels["y"] = ylabels # grid = total_grid # print(grid.shape) # grid = self.reduce(grid, ) axes = [] # cax = pylab.subplot(1,1,1) background_color = np.array(matplotlib.colors.colorConverter.to_rgb(background_color)) # if grid.shape[axis["selection"]] > 1:# and not facet: # rgrid = vaex.image.fade(rgrid) # finite_mask = np.any(finite_mask, axis=0) # do we really need this # print(rgrid.shape) # facet_row_axis = axis["what"] import math facet_columns = None facets = visual_grid.shape[visual_axes["row"]] * visual_grid.shape[visual_axes["column"]] if visual_grid.shape[visual_axes["column"]] == 1 and wrap: facet_columns = min(wrap_columns, visual_grid.shape[visual_axes["row"]]) wrapped = True elif visual_grid.shape[visual_axes["row"]] == 1 and wrap: facet_columns = min(wrap_columns, visual_grid.shape[visual_axes["column"]]) wrapped = True else: wrapped = False facet_columns = visual_grid.shape[visual_axes["column"]] facet_rows = int(math.ceil(facets / facet_columns)) logger.debug("facet_rows: %r", facet_rows) logger.debug("facet_columns: %r", facet_columns) # if visual_grid.shape[visual_axes["row"]] > 1: # and not wrap: # #facet_row_axis = axis["what"] # facet_columns = visual_grid.shape[visual_axes["column"]] # else: # facet_columns = min(wrap_columns, facets) # if grid.shape[axis["plot"]] > 1:# and not facet: # this loop could be done using axis arguments everywhere # assert len(normalize_axis) == 1, "currently only 1 normalization axis supported" grid = visual_grid * 1. fgrid = visual_grid * 1. ngrid = visual_grid * 1. 
# colorgrid = np.zeros(ngrid.shape + (4,), float) # print "norma", normalize_axis, visual_grid.shape[visual_axes[visual[normalize_axis]]] vmins = _expand(vmin, visual_grid.shape[visual_axes[visual[normalize_axis]]], type=list) vmaxs = _expand(vmax, visual_grid.shape[visual_axes[visual[normalize_axis]]], type=list) # for name in normalize_axis: visual_grid if smooth_pre: grid = vaex.grids.gf(grid, smooth_pre) if 1: axis = visual_axes[visual[normalize_axis]] for i in range(visual_grid.shape[axis]): item = [slice(None, None, None), ] * len(visual_grid.shape) item[axis] = i item = tuple(item) f = _parse_f(fs[i]) with np.errstate(divide='ignore', invalid='ignore'): # these are fine, we are ok with nan's in vaex fgrid.__setitem__(item, f(grid.__getitem__(item))) # print vmins[i], vmaxs[i] if vmins[i] is not None and vmaxs[i] is not None: nsubgrid = fgrid.__getitem__(item) * 1 nsubgrid -= vmins[i] nsubgrid /= (vmaxs[i] - vmins[i]) else: nsubgrid, vmin, vmax = n(fgrid.__getitem__(item)) vmins[i] = vmin vmaxs[i] = vmax # print " ", vmins[i], vmaxs[i] ngrid.__setitem__(item, nsubgrid) if 0: # TODO: above should be like the code below, with custom vmin and vmax grid = visual_grid[i] f = _parse_f(fs[i]) fgrid = f(grid) finite_mask = np.isfinite(grid) finite_mask = np.any(finite_mask, axis=0) if vmin is not None and vmax is not None: ngrid = fgrid * 1 ngrid -= vmin ngrid /= (vmax - vmin) ngrid = np.clip(ngrid, 0, 1) else: ngrid, vmin, vmax = n(fgrid) # vmin, vmax = np.nanmin(fgrid), np.nanmax(fgrid) # every 'what', should have its own colorbar, check if what corresponds to # rows or columns in facets, if so, do a colorbar per row or per column rows, columns = int(math.ceil(facets / float(facet_columns))), facet_columns colorbar_location = "individual" if visual["what"] == "row" and visual_grid.shape[1] == facet_columns: colorbar_location = "per_row" if visual["what"] == "column" and visual_grid.shape[0] == facet_rows: colorbar_location = "per_column" # values = np.linspace(facet_limits[0], facet_limits[1], facet_count+1) logger.debug("rows: %r, columns: %r", rows, columns) import matplotlib.gridspec as gridspec column_scale = 1 row_scale = 1 row_offset = 0 if facets > 1: if colorbar_location == "per_row": column_scale = 4 gs = gridspec.GridSpec(rows, columns * column_scale + 1) elif colorbar_location == "per_column": row_offset = 1 row_scale = 4 gs = gridspec.GridSpec(rows * row_scale + 1, columns) else: gs = gridspec.GridSpec(rows, columns) facet_index = 0 fs = _expand(f, len(whats)) colormaps = _expand(colormap, len(whats)) # row for i in range(visual_grid.shape[0]): # column for j in range(visual_grid.shape[1]): if colorbar and colorbar_location == "per_column" and i == 0: norm = matplotlib.colors.Normalize(vmins[j], vmaxs[j]) sm = matplotlib.cm.ScalarMappable(norm, colormaps[j]) sm.set_array(1) # make matplotlib happy (strange behavious) if facets > 1: ax = pylab.subplot(gs[0, j]) colorbar = fig.colorbar(sm, cax=ax, orientation="horizontal") else: colorbar = fig.colorbar(sm) if "what" in labels: label = labels["what"][j] if facets > 1: colorbar.ax.set_title(label) else: colorbar.ax.set_ylabel(colorbar_label or label) if colorbar and colorbar_location == "per_row" and j == 0: norm = matplotlib.colors.Normalize(vmins[i], vmaxs[i]) sm = matplotlib.cm.ScalarMappable(norm, colormaps[i]) sm.set_array(1) # make matplotlib happy (strange behavious) if facets > 1: ax = pylab.subplot(gs[i, -1]) colorbar = fig.colorbar(sm, cax=ax) else: colorbar = fig.colorbar(sm) label = labels["what"][i] 
colorbar.ax.set_ylabel(colorbar_label or label) rgrid = ngrid[i, j] * 1. # print rgrid.shape for k in range(rgrid.shape[0]): for l in range(rgrid.shape[0]): if smooth_post is not None: rgrid[k, l] = vaex.grids.gf(rgrid, smooth_post) if visual["what"] == "column": what_index = j elif visual["what"] == "row": what_index = i else: what_index = 0 if visual[normalize_axis] == "column": normalize_index = j elif visual[normalize_axis] == "row": normalize_index = i else: normalize_index = 0 for r in reduce: r = _parse_reduction(r, colormaps[what_index], []) rgrid = r(rgrid) row = facet_index // facet_columns column = facet_index % facet_columns if colorbar and colorbar_location == "individual": # visual_grid.shape[visual_axes[visual[normalize_axis]]] norm = matplotlib.colors.Normalize(vmins[normalize_index], vmaxs[normalize_index]) sm = matplotlib.cm.ScalarMappable(norm, colormaps[what_index]) sm.set_array(1) # make matplotlib happy (strange behavious) if facets > 1: ax = pylab.subplot(gs[row, column]) colorbar = fig.colorbar(sm, ax=ax) else: colorbar = fig.colorbar(sm) label = labels["what"][what_index] colorbar.ax.set_ylabel(colorbar_label or label) if facets > 1: ax = pylab.subplot(gs[row_offset + row * row_scale:row_offset + (row + 1) * row_scale, column * column_scale:(column + 1) * column_scale]) else: ax = pylab.gca() axes.append(ax) logger.debug("rgrid: %r", rgrid.shape) plot_rgrid = rgrid assert plot_rgrid.shape[1] == 1, "no layers supported yet" plot_rgrid = plot_rgrid[:, 0] if plot_rgrid.shape[0] > 1: plot_rgrid = vaex.image.fade(plot_rgrid[::-1]) else: plot_rgrid = plot_rgrid[0] extend = None if visual["subspace"] == "row": subplot_index = i elif visual["subspace"] == "column": subplot_index = j else: subplot_index = 0 extend = np.array(xlimits[subplot_index][-2:]).flatten() # extend = np.array(xlimits[i]).flatten() logger.debug("plot rgrid: %r", plot_rgrid.shape) plot_rgrid = np.transpose(plot_rgrid, (1, 0, 2)) im = ax.imshow(plot_rgrid, extent=extend.tolist(), origin="lower", aspect=aspect, interpolation=interpolation) # v1, v2 = values[i], values[i+1] def label(index, label, expression): if label and _issequence(label): return label[i] else: return self.label(expression) if visual_reverse["x"] =='x': labelsx = labels['x'] pylab.xlabel(labelsx[subplot_index]) if visual_reverse["x"] =='x': labelsy = labels['y'] pylab.ylabel(labelsy[subplot_index]) if visual["z"] in ['row']: labelsz = labels['z'] ax.set_title(labelsz[i]) if visual["z"] in ['column']: labelsz = labels['z'] ax.set_title(labelsz[j]) max_labels = 10 # xexpression = xexpressions[i] # if self.iscategory(xexpression): # labels = self.category_labels(xexpression) # step = len(labels) // max_labels # pylab.xticks(np.arange(len(labels))[::step], labels[::step], size='small') # yexpression = yexpressions[i] # if self.iscategory(yexpression): # labels = self.category_labels(yexpression) # step = len(labels) // max_labels # pylab.yticks(np.arange(len(labels))[::step], labels[::step], size='small') facet_index += 1 if title: fig.suptitle(title, fontsize="x-large") if tight_layout: if title: pylab.tight_layout(rect=[0, 0.03, 1, 0.95]) else: pylab.tight_layout() if hardcopy: pylab.savefig(hardcopy) if show: pylab.show() if return_extra: return im, grid, fgrid, ngrid, rgrid else: return im
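The core of the function above is the remapping of the 6-d grid axes (subspace, what, selection, z, y, x) onto the visual axes (row, column, fade, layer, y, x) via a move dict and np.transpose. The standalone numpy sketch below reproduces just that step; the non-default mapping (selections to columns, 'what' to fade) is an assumption chosen only to make the permutation visible.

import numpy as np

grid_axes   = dict(x=-1, y=-2, z=-3, selection=-4, what=-5, subspace=-6)
visual_axes = dict(x=-1, y=-2, layer=-3, fade=-4, column=-5, row=-6)

# Grid-axis name -> visual-axis name (already inverted, as the code above does
# after swapping visual and visual_reverse).
visual = dict(x='x', y='y', z='layer', selection='column', subspace='row', what='fade')

# Same construction as in the source: source axis index -> destination axis index,
# then a permutation list for np.transpose.
move = {grid_axes[g]: visual_axes[v] for g, v in visual.items()}
axes = [None] * len(move)
for src, dst in move.items():
    axes[dst] = src

# Toy grid shaped (subspace=2, what=3, selection=4, z=1, y=16, x=16).
total_grid = np.zeros((2, 3, 4, 1, 16, 16))
visual_grid = np.transpose(total_grid, axes)
print(visual_grid.shape)  # (2, 4, 3, 1, 16, 16): selections became columns, 'what' became the fade axis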
def plot(self, x=None, y=None, z=None, what="count(*)", vwhat=None, reduce=["colormap"], f=None, normalize="normalize", normalize_axis="what", vmin=None, vmax=None, shape=256, vshape=32, limits=None, grid=None, colormap="afmhot", # colors=["red", "green", "blue"], figsize=None, xlabel=None, ylabel=None, aspect="auto", tight_layout=True, interpolation="nearest", show=False, colorbar=True, colorbar_label=None, selection=None, selection_labels=None, title=None, background_color="white", pre_blend=False, background_alpha=1., visual=dict(x="x", y="y", layer="z", fade="selection", row="subspace", column="what"), smooth_pre=None, smooth_post=None, wrap=True, wrap_columns=4, return_extra=False, hardcopy=None): """Viz data in a 2d histogram/heatmap. Declarative plotting of statistical plots using matplotlib, supports subplots, selections, layers. Instead of passing x and y, pass a list as x argument for multiple panels. Give what a list of options to have multiple panels. When both are present then will be origanized in a column/row order. This methods creates a 6 dimensional 'grid', where each dimension can map the a visual dimension. The grid dimensions are: * x: shape determined by shape, content by x argument or the first dimension of each space * y: ,, * z: related to the z argument * selection: shape equals length of selection argument * what: shape equals length of what argument * space: shape equals length of x argument if multiple values are given By default, this its shape is (1, 1, 1, 1, shape, shape) (where x is the last dimension) The visual dimensions are * x: x coordinate on a plot / image (default maps to grid's x) * y: y ,, (default maps to grid's y) * layer: each image in this dimension is blended togeher to one image (default maps to z) * fade: each image is shown faded after the next image (default mapt to selection) * row: rows of subplots (default maps to space) * columns: columns of subplot (default maps to what) All these mappings can be changes by the visual argument, some examples: >>> df.plot('x', 'y', what=['mean(x)', 'correlation(vx, vy)']) Will plot each 'what' as a column. >>> df.plot('x', 'y', selection=['FeH < -3', '(FeH >= -3) & (FeH < -2)'], visual=dict(column='selection')) Will plot each selection as a column, instead of a faded on top of each other. :param x: Expression to bin in the x direction (by default maps to x), or list of pairs, like [['x', 'y'], ['x', 'z']], if multiple pairs are given, this dimension maps to rows by default :param y: y (by default maps to y) :param z: Expression to bin in the z direction, followed by a :start,end,shape signature, like 'FeH:-3,1:5' will produce 5 layers between -10 and 10 (by default maps to layer) :param what: What to plot, count(*) will show a N-d histogram, mean('x'), the mean of the x column, sum('x') the sum, std('x') the standard deviation, correlation('vx', 'vy') the correlation coefficient. Can also be a list of values, like ['count(x)', std('vx')], (by default maps to column) :param reduce: :param f: transform values by: 'identity' does nothing 'log' or 'log10' will show the log of the value :param normalize: normalization function, currently only 'normalize' is supported :param normalize_axis: which axes to normalize on, None means normalize by the global maximum. 
:param vmin: instead of automatic normalization, (using normalize and normalization_axis) scale the data between vmin and vmax to [0, 1] :param vmax: see vmin :param shape: shape/size of the n-D histogram grid :param limits: list of [[xmin, xmax], [ymin, ymax]], or a description such as 'minmax', '99%' :param grid: if the binning is done before by yourself, you can pass it :param colormap: matplotlib colormap to use :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size :param xlabel: :param ylabel: :param aspect: :param tight_layout: call pylab.tight_layout or not :param colorbar: plot a colorbar or not :param interpolation: interpolation for imshow, possible options are: 'nearest', 'bilinear', 'bicubic', see matplotlib for more :param return_extra: :return: """ import pylab import matplotlib n = _parse_n(normalize) if type(shape) == int: shape = (shape,) * 2 binby = [] x = _ensure_strings_from_expressions(x) y = _ensure_strings_from_expressions(y) for expression in [y, x]: if expression is not None: binby = [expression] + binby fig = pylab.gcf() if figsize is not None: fig.set_size_inches(*figsize) import re what_units = None whats = _ensure_list(what) selections = _ensure_list(selection) selections = _ensure_strings_from_expressions(selections) if y is None: waslist, [x, ] = vaex.utils.listify(x) else: waslist, [x, y] = vaex.utils.listify(x, y) x = list(zip(x, y)) limits = [limits] # every plot has its own vwhat for now vwhats = _expand_limits(vwhat, len(x)) # TODO: we're abusing this function.. logger.debug("x: %s", x) limits, shape = self.limits(x, limits, shape=shape) shape = shape[0] logger.debug("limits: %r", limits) # mapping of a grid axis to a label labels = {} shape = _expand_shape(shape, 2) vshape = _expand_shape(shape, 2) if z is not None: match = re.match("(.*):(.*),(.*),(.*)", z) if match: groups = match.groups() import ast z_expression = groups[0] logger.debug("found groups: %r", list(groups)) z_limits = [ast.literal_eval(groups[1]), ast.literal_eval(groups[2])] z_shape = ast.literal_eval(groups[3]) # for pair in x: x = [[z_expression] + list(k) for k in x] limits = np.array([[z_limits] + list(k) for k in limits]) shape = (z_shape,) + shape vshape = (z_shape,) + vshape logger.debug("x = %r", x) values = np.linspace(z_limits[0], z_limits[1], num=z_shape + 1) labels["z"] = list(["%s <= %s < %s" % (v1, z_expression, v2) for v1, v2 in zip(values[:-1], values[1:])]) else: raise ValueError("Could not understand 'z' argument %r, expected something in form: 'column:-1,10:5'" % facet) else: z_shape = 1 # z == 1 if z is None: total_grid = np.zeros((len(x), len(whats), len(selections), 1) + shape, dtype=float) total_vgrid = np.zeros((len(x), len(whats), len(selections), 1) + vshape, dtype=float) else: total_grid = np.zeros((len(x), len(whats), len(selections)) + shape, dtype=float) total_vgrid = np.zeros((len(x), len(whats), len(selections)) + vshape, dtype=float) logger.debug("shape of total grid: %r", total_grid.shape) axis = dict(plot=0, what=1, selection=2) xlimits = limits grid_axes = dict(x=-1, y=-2, z=-3, selection=-4, what=-5, subspace=-6) visual_axes = dict(x=-1, y=-2, layer=-3, fade=-4, column=-5, row=-6) # visual_default=dict(x="x", y="y", z="layer", selection="fade", subspace="row", what="column") # visual: mapping of a plot axis, to a grid axis visual_default = dict(x="x", y="y", layer="z", fade="selection", row="subspace", column="what") def invert(x): return dict((v, k) for k, v in x.items()) # visual_default_reverse = invert(visual_default) # 
visual_ = visual_default # visual = dict(visual) # copy for modification # add entries to avoid mapping multiple times to the same axis free_visual_axes = list(visual_default.keys()) # visual_reverse = invert(visual) logger.debug("1: %r %r", visual, free_visual_axes) for visual_name, grid_name in visual.items(): if visual_name in free_visual_axes: free_visual_axes.remove(visual_name) else: raise ValueError("visual axes %s used multiple times" % visual_name) logger.debug("2: %r %r", visual, free_visual_axes) for visual_name, grid_name in visual_default.items(): if visual_name in free_visual_axes and grid_name not in visual.values(): free_visual_axes.remove(visual_name) visual[visual_name] = grid_name logger.debug("3: %r %r", visual, free_visual_axes) for visual_name, grid_name in visual_default.items(): if visual_name not in free_visual_axes and grid_name not in visual.values(): visual[free_visual_axes.pop(0)] = grid_name logger.debug("4: %r %r", visual, free_visual_axes) visual_reverse = invert(visual) # TODO: the meaning of visual and visual_reverse is changed below this line, super confusing visual, visual_reverse = visual_reverse, visual # so now, visual: mapping of a grid axis to plot axis # visual_reverse: mapping of a grid axis to plot axis move = {} for grid_name, visual_name in visual.items(): if visual_axes[visual_name] in visual.values(): index = visual.values().find(visual_name) key = visual.keys()[index] raise ValueError("trying to map %s to %s while, it is already mapped by %s" % (grid_name, visual_name, key)) move[grid_axes[grid_name]] = visual_axes[visual_name] # normalize_axis = _ensure_list(normalize_axis) fs = _expand(f, total_grid.shape[grid_axes[normalize_axis]]) # assert len(vwhat) # labels["y"] = ylabels what_labels = [] if grid is None: grid_of_grids = [] for i, (binby, limits) in enumerate(zip(x, xlimits)): grid_of_grids.append([]) for j, what in enumerate(whats): if isinstance(what, vaex.stat.Expression): grid = what.calculate(self, binby=binby, shape=shape, limits=limits, selection=selections, delay=True) else: what = what.strip() index = what.index("(") import re groups = re.match("(.*)\((.*)\)", what).groups() if groups and len(groups) == 2: function = groups[0] arguments = groups[1].strip() if "," in arguments: arguments = arguments.split(",") functions = ["mean", "sum", "std", "var", "correlation", "covar", "min", "max", "median_approx"] unit_expression = None if function in ["mean", "sum", "std", "min", "max", "median"]: unit_expression = arguments if function in ["var"]: unit_expression = "(%s) * (%s)" % (arguments, arguments) if function in ["covar"]: unit_expression = "(%s) * (%s)" % arguments if unit_expression: unit = self.unit(unit_expression) if unit: what_units = unit.to_string('latex_inline') if function in functions: grid = getattr(self, function)(arguments, binby=binby, limits=limits, shape=shape, selection=selections, delay=True) elif function == "count": grid = self.count(arguments, binby, shape=shape, limits=limits, selection=selections, delay=True) else: raise ValueError("Could not understand method: %s, expected one of %r'" % (function, functions)) else: raise ValueError("Could not understand 'what' argument %r, expected something in form: 'count(*)', 'mean(x)'" % what) if i == 0: # and j == 0: what_label = str(whats[j]) if what_units: what_label += " (%s)" % what_units if fs[j]: what_label = fs[j] + " " + what_label what_labels.append(what_label) grid_of_grids[-1].append(grid) self.executor.execute() for i, (binby, limits) in enumerate(zip(x, 
xlimits)): for j, what in enumerate(whats): grid = grid_of_grids[i][j].get() total_grid[i, j, :, :] = grid[:, None, ...] labels["what"] = what_labels else: dims_left = 6 - len(grid.shape) total_grid = np.broadcast_to(grid, (1,) * dims_left + grid.shape) # visual=dict(x="x", y="y", selection="fade", subspace="facet1", what="facet2",) def _selection_name(name): if name in [None, False]: return "selection: all" elif name in ["default", True]: return "selection: default" else: return "selection: %s" % name if selection_labels is None: labels["selection"] = list([_selection_name(k) for k in selections]) else: labels["selection"] = selection_labels # visual_grid = np.moveaxis(total_grid, move.keys(), move.values()) # np.moveaxis is in np 1.11 only?, use transpose axes = [None] * len(move) for key, value in move.items(): axes[value] = key visual_grid = np.transpose(total_grid, axes) logger.debug("grid shape: %r", total_grid.shape) logger.debug("visual: %r", visual.items()) logger.debug("move: %r", move) logger.debug("visual grid shape: %r", visual_grid.shape) xexpressions = [] yexpressions = [] for i, (binby, limits) in enumerate(zip(x, xlimits)): xexpressions.append(binby[0]) yexpressions.append(binby[1]) if xlabel is None: xlabels = [] ylabels = [] for i, (binby, limits) in enumerate(zip(x, xlimits)): if z is not None: xlabels.append(self.label(binby[1])) ylabels.append(self.label(binby[2])) else: xlabels.append(self.label(binby[0])) ylabels.append(self.label(binby[1])) else: Nl = visual_grid.shape[visual_axes['row']] xlabels = _expand(xlabel, Nl) ylabels = _expand(ylabel, Nl) #labels[visual["x"]] = (xlabels, ylabels) labels["x"] = xlabels labels["y"] = ylabels # grid = total_grid # print(grid.shape) # grid = self.reduce(grid, ) axes = [] # cax = pylab.subplot(1,1,1) background_color = np.array(matplotlib.colors.colorConverter.to_rgb(background_color)) # if grid.shape[axis["selection"]] > 1:# and not facet: # rgrid = vaex.image.fade(rgrid) # finite_mask = np.any(finite_mask, axis=0) # do we really need this # print(rgrid.shape) # facet_row_axis = axis["what"] import math facet_columns = None facets = visual_grid.shape[visual_axes["row"]] * visual_grid.shape[visual_axes["column"]] if visual_grid.shape[visual_axes["column"]] == 1 and wrap: facet_columns = min(wrap_columns, visual_grid.shape[visual_axes["row"]]) wrapped = True elif visual_grid.shape[visual_axes["row"]] == 1 and wrap: facet_columns = min(wrap_columns, visual_grid.shape[visual_axes["column"]]) wrapped = True else: wrapped = False facet_columns = visual_grid.shape[visual_axes["column"]] facet_rows = int(math.ceil(facets / facet_columns)) logger.debug("facet_rows: %r", facet_rows) logger.debug("facet_columns: %r", facet_columns) # if visual_grid.shape[visual_axes["row"]] > 1: # and not wrap: # #facet_row_axis = axis["what"] # facet_columns = visual_grid.shape[visual_axes["column"]] # else: # facet_columns = min(wrap_columns, facets) # if grid.shape[axis["plot"]] > 1:# and not facet: # this loop could be done using axis arguments everywhere # assert len(normalize_axis) == 1, "currently only 1 normalization axis supported" grid = visual_grid * 1. fgrid = visual_grid * 1. ngrid = visual_grid * 1. 
# colorgrid = np.zeros(ngrid.shape + (4,), float) # print "norma", normalize_axis, visual_grid.shape[visual_axes[visual[normalize_axis]]] vmins = _expand(vmin, visual_grid.shape[visual_axes[visual[normalize_axis]]], type=list) vmaxs = _expand(vmax, visual_grid.shape[visual_axes[visual[normalize_axis]]], type=list) # for name in normalize_axis: visual_grid if smooth_pre: grid = vaex.grids.gf(grid, smooth_pre) if 1: axis = visual_axes[visual[normalize_axis]] for i in range(visual_grid.shape[axis]): item = [slice(None, None, None), ] * len(visual_grid.shape) item[axis] = i item = tuple(item) f = _parse_f(fs[i]) with np.errstate(divide='ignore', invalid='ignore'): # these are fine, we are ok with nan's in vaex fgrid.__setitem__(item, f(grid.__getitem__(item))) # print vmins[i], vmaxs[i] if vmins[i] is not None and vmaxs[i] is not None: nsubgrid = fgrid.__getitem__(item) * 1 nsubgrid -= vmins[i] nsubgrid /= (vmaxs[i] - vmins[i]) else: nsubgrid, vmin, vmax = n(fgrid.__getitem__(item)) vmins[i] = vmin vmaxs[i] = vmax # print " ", vmins[i], vmaxs[i] ngrid.__setitem__(item, nsubgrid) if 0: # TODO: above should be like the code below, with custom vmin and vmax grid = visual_grid[i] f = _parse_f(fs[i]) fgrid = f(grid) finite_mask = np.isfinite(grid) finite_mask = np.any(finite_mask, axis=0) if vmin is not None and vmax is not None: ngrid = fgrid * 1 ngrid -= vmin ngrid /= (vmax - vmin) ngrid = np.clip(ngrid, 0, 1) else: ngrid, vmin, vmax = n(fgrid) # vmin, vmax = np.nanmin(fgrid), np.nanmax(fgrid) # every 'what', should have its own colorbar, check if what corresponds to # rows or columns in facets, if so, do a colorbar per row or per column rows, columns = int(math.ceil(facets / float(facet_columns))), facet_columns colorbar_location = "individual" if visual["what"] == "row" and visual_grid.shape[1] == facet_columns: colorbar_location = "per_row" if visual["what"] == "column" and visual_grid.shape[0] == facet_rows: colorbar_location = "per_column" # values = np.linspace(facet_limits[0], facet_limits[1], facet_count+1) logger.debug("rows: %r, columns: %r", rows, columns) import matplotlib.gridspec as gridspec column_scale = 1 row_scale = 1 row_offset = 0 if facets > 1: if colorbar_location == "per_row": column_scale = 4 gs = gridspec.GridSpec(rows, columns * column_scale + 1) elif colorbar_location == "per_column": row_offset = 1 row_scale = 4 gs = gridspec.GridSpec(rows * row_scale + 1, columns) else: gs = gridspec.GridSpec(rows, columns) facet_index = 0 fs = _expand(f, len(whats)) colormaps = _expand(colormap, len(whats)) # row for i in range(visual_grid.shape[0]): # column for j in range(visual_grid.shape[1]): if colorbar and colorbar_location == "per_column" and i == 0: norm = matplotlib.colors.Normalize(vmins[j], vmaxs[j]) sm = matplotlib.cm.ScalarMappable(norm, colormaps[j]) sm.set_array(1) # make matplotlib happy (strange behavious) if facets > 1: ax = pylab.subplot(gs[0, j]) colorbar = fig.colorbar(sm, cax=ax, orientation="horizontal") else: colorbar = fig.colorbar(sm) if "what" in labels: label = labels["what"][j] if facets > 1: colorbar.ax.set_title(label) else: colorbar.ax.set_ylabel(colorbar_label or label) if colorbar and colorbar_location == "per_row" and j == 0: norm = matplotlib.colors.Normalize(vmins[i], vmaxs[i]) sm = matplotlib.cm.ScalarMappable(norm, colormaps[i]) sm.set_array(1) # make matplotlib happy (strange behavious) if facets > 1: ax = pylab.subplot(gs[i, -1]) colorbar = fig.colorbar(sm, cax=ax) else: colorbar = fig.colorbar(sm) label = labels["what"][i] 
colorbar.ax.set_ylabel(colorbar_label or label) rgrid = ngrid[i, j] * 1. # print rgrid.shape for k in range(rgrid.shape[0]): for l in range(rgrid.shape[0]): if smooth_post is not None: rgrid[k, l] = vaex.grids.gf(rgrid, smooth_post) if visual["what"] == "column": what_index = j elif visual["what"] == "row": what_index = i else: what_index = 0 if visual[normalize_axis] == "column": normalize_index = j elif visual[normalize_axis] == "row": normalize_index = i else: normalize_index = 0 for r in reduce: r = _parse_reduction(r, colormaps[what_index], []) rgrid = r(rgrid) row = facet_index // facet_columns column = facet_index % facet_columns if colorbar and colorbar_location == "individual": # visual_grid.shape[visual_axes[visual[normalize_axis]]] norm = matplotlib.colors.Normalize(vmins[normalize_index], vmaxs[normalize_index]) sm = matplotlib.cm.ScalarMappable(norm, colormaps[what_index]) sm.set_array(1) # make matplotlib happy (strange behavious) if facets > 1: ax = pylab.subplot(gs[row, column]) colorbar = fig.colorbar(sm, ax=ax) else: colorbar = fig.colorbar(sm) label = labels["what"][what_index] colorbar.ax.set_ylabel(colorbar_label or label) if facets > 1: ax = pylab.subplot(gs[row_offset + row * row_scale:row_offset + (row + 1) * row_scale, column * column_scale:(column + 1) * column_scale]) else: ax = pylab.gca() axes.append(ax) logger.debug("rgrid: %r", rgrid.shape) plot_rgrid = rgrid assert plot_rgrid.shape[1] == 1, "no layers supported yet" plot_rgrid = plot_rgrid[:, 0] if plot_rgrid.shape[0] > 1: plot_rgrid = vaex.image.fade(plot_rgrid[::-1]) else: plot_rgrid = plot_rgrid[0] extend = None if visual["subspace"] == "row": subplot_index = i elif visual["subspace"] == "column": subplot_index = j else: subplot_index = 0 extend = np.array(xlimits[subplot_index][-2:]).flatten() # extend = np.array(xlimits[i]).flatten() logger.debug("plot rgrid: %r", plot_rgrid.shape) plot_rgrid = np.transpose(plot_rgrid, (1, 0, 2)) im = ax.imshow(plot_rgrid, extent=extend.tolist(), origin="lower", aspect=aspect, interpolation=interpolation) # v1, v2 = values[i], values[i+1] def label(index, label, expression): if label and _issequence(label): return label[i] else: return self.label(expression) if visual_reverse["x"] =='x': labelsx = labels['x'] pylab.xlabel(labelsx[subplot_index]) if visual_reverse["x"] =='x': labelsy = labels['y'] pylab.ylabel(labelsy[subplot_index]) if visual["z"] in ['row']: labelsz = labels['z'] ax.set_title(labelsz[i]) if visual["z"] in ['column']: labelsz = labels['z'] ax.set_title(labelsz[j]) max_labels = 10 # xexpression = xexpressions[i] # if self.iscategory(xexpression): # labels = self.category_labels(xexpression) # step = len(labels) // max_labels # pylab.xticks(np.arange(len(labels))[::step], labels[::step], size='small') # yexpression = yexpressions[i] # if self.iscategory(yexpression): # labels = self.category_labels(yexpression) # step = len(labels) // max_labels # pylab.yticks(np.arange(len(labels))[::step], labels[::step], size='small') facet_index += 1 if title: fig.suptitle(title, fontsize="x-large") if tight_layout: if title: pylab.tight_layout(rect=[0, 0.03, 1, 0.95]) else: pylab.tight_layout() if hardcopy: pylab.savefig(hardcopy) if show: pylab.show() if return_extra: return im, grid, fgrid, ngrid, rgrid else: return im
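When a z expression is given, the code above splits its signature with the regex "(.*):(.*),(.*),(.*)" and ast.literal_eval. The standalone sketch below isolates that parsing step; parse_z is a hypothetical helper, not a vaex function. Note that the regex expects comma separators, i.e. 'FeH:-3,1,5', even though the docstring example writes a colon before the shape.

import ast
import re

def parse_z(z):
    # Split 'expression:start,end,shape' into its parts, as the code above does.
    match = re.match("(.*):(.*),(.*),(.*)", z)
    if match is None:
        raise ValueError("Could not understand %r, expected something like 'FeH:-3,1,5'" % z)
    expression, start, end, shape = match.groups()
    return expression, [ast.literal_eval(start), ast.literal_eval(end)], ast.literal_eval(shape)

print(parse_z('FeH:-3,1,5'))  # ('FeH', [-3, 1], 5)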
[ "Viz", "data", "in", "a", "2d", "histogram", "/", "heatmap", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-viz/vaex/viz/mpl.py#L290-L848
[ "def", "plot", "(", "self", ",", "x", "=", "None", ",", "y", "=", "None", ",", "z", "=", "None", ",", "what", "=", "\"count(*)\"", ",", "vwhat", "=", "None", ",", "reduce", "=", "[", "\"colormap\"", "]", ",", "f", "=", "None", ",", "normalize", "=", "\"normalize\"", ",", "normalize_axis", "=", "\"what\"", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "shape", "=", "256", ",", "vshape", "=", "32", ",", "limits", "=", "None", ",", "grid", "=", "None", ",", "colormap", "=", "\"afmhot\"", ",", "# colors=[\"red\", \"green\", \"blue\"],", "figsize", "=", "None", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "aspect", "=", "\"auto\"", ",", "tight_layout", "=", "True", ",", "interpolation", "=", "\"nearest\"", ",", "show", "=", "False", ",", "colorbar", "=", "True", ",", "colorbar_label", "=", "None", ",", "selection", "=", "None", ",", "selection_labels", "=", "None", ",", "title", "=", "None", ",", "background_color", "=", "\"white\"", ",", "pre_blend", "=", "False", ",", "background_alpha", "=", "1.", ",", "visual", "=", "dict", "(", "x", "=", "\"x\"", ",", "y", "=", "\"y\"", ",", "layer", "=", "\"z\"", ",", "fade", "=", "\"selection\"", ",", "row", "=", "\"subspace\"", ",", "column", "=", "\"what\"", ")", ",", "smooth_pre", "=", "None", ",", "smooth_post", "=", "None", ",", "wrap", "=", "True", ",", "wrap_columns", "=", "4", ",", "return_extra", "=", "False", ",", "hardcopy", "=", "None", ")", ":", "import", "pylab", "import", "matplotlib", "n", "=", "_parse_n", "(", "normalize", ")", "if", "type", "(", "shape", ")", "==", "int", ":", "shape", "=", "(", "shape", ",", ")", "*", "2", "binby", "=", "[", "]", "x", "=", "_ensure_strings_from_expressions", "(", "x", ")", "y", "=", "_ensure_strings_from_expressions", "(", "y", ")", "for", "expression", "in", "[", "y", ",", "x", "]", ":", "if", "expression", "is", "not", "None", ":", "binby", "=", "[", "expression", "]", "+", "binby", "fig", "=", "pylab", ".", "gcf", "(", ")", "if", "figsize", "is", "not", "None", ":", "fig", ".", "set_size_inches", "(", "*", "figsize", ")", "import", "re", "what_units", "=", "None", "whats", "=", "_ensure_list", "(", "what", ")", "selections", "=", "_ensure_list", "(", "selection", ")", "selections", "=", "_ensure_strings_from_expressions", "(", "selections", ")", "if", "y", "is", "None", ":", "waslist", ",", "[", "x", ",", "]", "=", "vaex", ".", "utils", ".", "listify", "(", "x", ")", "else", ":", "waslist", ",", "[", "x", ",", "y", "]", "=", "vaex", ".", "utils", ".", "listify", "(", "x", ",", "y", ")", "x", "=", "list", "(", "zip", "(", "x", ",", "y", ")", ")", "limits", "=", "[", "limits", "]", "# every plot has its own vwhat for now", "vwhats", "=", "_expand_limits", "(", "vwhat", ",", "len", "(", "x", ")", ")", "# TODO: we're abusing this function..", "logger", ".", "debug", "(", "\"x: %s\"", ",", "x", ")", "limits", ",", "shape", "=", "self", ".", "limits", "(", "x", ",", "limits", ",", "shape", "=", "shape", ")", "shape", "=", "shape", "[", "0", "]", "logger", ".", "debug", "(", "\"limits: %r\"", ",", "limits", ")", "# mapping of a grid axis to a label", "labels", "=", "{", "}", "shape", "=", "_expand_shape", "(", "shape", ",", "2", ")", "vshape", "=", "_expand_shape", "(", "shape", ",", "2", ")", "if", "z", "is", "not", "None", ":", "match", "=", "re", ".", "match", "(", "\"(.*):(.*),(.*),(.*)\"", ",", "z", ")", "if", "match", ":", "groups", "=", "match", ".", "groups", "(", ")", "import", "ast", "z_expression", "=", "groups", "[", "0", "]", "logger", ".", "debug", "(", "\"found 
groups: %r\"", ",", "list", "(", "groups", ")", ")", "z_limits", "=", "[", "ast", ".", "literal_eval", "(", "groups", "[", "1", "]", ")", ",", "ast", ".", "literal_eval", "(", "groups", "[", "2", "]", ")", "]", "z_shape", "=", "ast", ".", "literal_eval", "(", "groups", "[", "3", "]", ")", "# for pair in x:", "x", "=", "[", "[", "z_expression", "]", "+", "list", "(", "k", ")", "for", "k", "in", "x", "]", "limits", "=", "np", ".", "array", "(", "[", "[", "z_limits", "]", "+", "list", "(", "k", ")", "for", "k", "in", "limits", "]", ")", "shape", "=", "(", "z_shape", ",", ")", "+", "shape", "vshape", "=", "(", "z_shape", ",", ")", "+", "vshape", "logger", ".", "debug", "(", "\"x = %r\"", ",", "x", ")", "values", "=", "np", ".", "linspace", "(", "z_limits", "[", "0", "]", ",", "z_limits", "[", "1", "]", ",", "num", "=", "z_shape", "+", "1", ")", "labels", "[", "\"z\"", "]", "=", "list", "(", "[", "\"%s <= %s < %s\"", "%", "(", "v1", ",", "z_expression", ",", "v2", ")", "for", "v1", ",", "v2", "in", "zip", "(", "values", "[", ":", "-", "1", "]", ",", "values", "[", "1", ":", "]", ")", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Could not understand 'z' argument %r, expected something in form: 'column:-1,10:5'\"", "%", "facet", ")", "else", ":", "z_shape", "=", "1", "# z == 1", "if", "z", "is", "None", ":", "total_grid", "=", "np", ".", "zeros", "(", "(", "len", "(", "x", ")", ",", "len", "(", "whats", ")", ",", "len", "(", "selections", ")", ",", "1", ")", "+", "shape", ",", "dtype", "=", "float", ")", "total_vgrid", "=", "np", ".", "zeros", "(", "(", "len", "(", "x", ")", ",", "len", "(", "whats", ")", ",", "len", "(", "selections", ")", ",", "1", ")", "+", "vshape", ",", "dtype", "=", "float", ")", "else", ":", "total_grid", "=", "np", ".", "zeros", "(", "(", "len", "(", "x", ")", ",", "len", "(", "whats", ")", ",", "len", "(", "selections", ")", ")", "+", "shape", ",", "dtype", "=", "float", ")", "total_vgrid", "=", "np", ".", "zeros", "(", "(", "len", "(", "x", ")", ",", "len", "(", "whats", ")", ",", "len", "(", "selections", ")", ")", "+", "vshape", ",", "dtype", "=", "float", ")", "logger", ".", "debug", "(", "\"shape of total grid: %r\"", ",", "total_grid", ".", "shape", ")", "axis", "=", "dict", "(", "plot", "=", "0", ",", "what", "=", "1", ",", "selection", "=", "2", ")", "xlimits", "=", "limits", "grid_axes", "=", "dict", "(", "x", "=", "-", "1", ",", "y", "=", "-", "2", ",", "z", "=", "-", "3", ",", "selection", "=", "-", "4", ",", "what", "=", "-", "5", ",", "subspace", "=", "-", "6", ")", "visual_axes", "=", "dict", "(", "x", "=", "-", "1", ",", "y", "=", "-", "2", ",", "layer", "=", "-", "3", ",", "fade", "=", "-", "4", ",", "column", "=", "-", "5", ",", "row", "=", "-", "6", ")", "# visual_default=dict(x=\"x\", y=\"y\", z=\"layer\", selection=\"fade\", subspace=\"row\", what=\"column\")", "# visual: mapping of a plot axis, to a grid axis", "visual_default", "=", "dict", "(", "x", "=", "\"x\"", ",", "y", "=", "\"y\"", ",", "layer", "=", "\"z\"", ",", "fade", "=", "\"selection\"", ",", "row", "=", "\"subspace\"", ",", "column", "=", "\"what\"", ")", "def", "invert", "(", "x", ")", ":", "return", "dict", "(", "(", "v", ",", "k", ")", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", ")", "# visual_default_reverse = invert(visual_default)", "# visual_ = visual_default", "# visual = dict(visual) # copy for modification", "# add entries to avoid mapping multiple times to the same axis", "free_visual_axes", "=", "list", "(", "visual_default", ".", "keys", "(", ")", ")", 
"# visual_reverse = invert(visual)", "logger", ".", "debug", "(", "\"1: %r %r\"", ",", "visual", ",", "free_visual_axes", ")", "for", "visual_name", ",", "grid_name", "in", "visual", ".", "items", "(", ")", ":", "if", "visual_name", "in", "free_visual_axes", ":", "free_visual_axes", ".", "remove", "(", "visual_name", ")", "else", ":", "raise", "ValueError", "(", "\"visual axes %s used multiple times\"", "%", "visual_name", ")", "logger", ".", "debug", "(", "\"2: %r %r\"", ",", "visual", ",", "free_visual_axes", ")", "for", "visual_name", ",", "grid_name", "in", "visual_default", ".", "items", "(", ")", ":", "if", "visual_name", "in", "free_visual_axes", "and", "grid_name", "not", "in", "visual", ".", "values", "(", ")", ":", "free_visual_axes", ".", "remove", "(", "visual_name", ")", "visual", "[", "visual_name", "]", "=", "grid_name", "logger", ".", "debug", "(", "\"3: %r %r\"", ",", "visual", ",", "free_visual_axes", ")", "for", "visual_name", ",", "grid_name", "in", "visual_default", ".", "items", "(", ")", ":", "if", "visual_name", "not", "in", "free_visual_axes", "and", "grid_name", "not", "in", "visual", ".", "values", "(", ")", ":", "visual", "[", "free_visual_axes", ".", "pop", "(", "0", ")", "]", "=", "grid_name", "logger", ".", "debug", "(", "\"4: %r %r\"", ",", "visual", ",", "free_visual_axes", ")", "visual_reverse", "=", "invert", "(", "visual", ")", "# TODO: the meaning of visual and visual_reverse is changed below this line, super confusing", "visual", ",", "visual_reverse", "=", "visual_reverse", ",", "visual", "# so now, visual: mapping of a grid axis to plot axis", "# visual_reverse: mapping of a grid axis to plot axis", "move", "=", "{", "}", "for", "grid_name", ",", "visual_name", "in", "visual", ".", "items", "(", ")", ":", "if", "visual_axes", "[", "visual_name", "]", "in", "visual", ".", "values", "(", ")", ":", "index", "=", "visual", ".", "values", "(", ")", ".", "find", "(", "visual_name", ")", "key", "=", "visual", ".", "keys", "(", ")", "[", "index", "]", "raise", "ValueError", "(", "\"trying to map %s to %s while, it is already mapped by %s\"", "%", "(", "grid_name", ",", "visual_name", ",", "key", ")", ")", "move", "[", "grid_axes", "[", "grid_name", "]", "]", "=", "visual_axes", "[", "visual_name", "]", "# normalize_axis = _ensure_list(normalize_axis)", "fs", "=", "_expand", "(", "f", ",", "total_grid", ".", "shape", "[", "grid_axes", "[", "normalize_axis", "]", "]", ")", "# assert len(vwhat)", "# labels[\"y\"] = ylabels", "what_labels", "=", "[", "]", "if", "grid", "is", "None", ":", "grid_of_grids", "=", "[", "]", "for", "i", ",", "(", "binby", ",", "limits", ")", "in", "enumerate", "(", "zip", "(", "x", ",", "xlimits", ")", ")", ":", "grid_of_grids", ".", "append", "(", "[", "]", ")", "for", "j", ",", "what", "in", "enumerate", "(", "whats", ")", ":", "if", "isinstance", "(", "what", ",", "vaex", ".", "stat", ".", "Expression", ")", ":", "grid", "=", "what", ".", "calculate", "(", "self", ",", "binby", "=", "binby", ",", "shape", "=", "shape", ",", "limits", "=", "limits", ",", "selection", "=", "selections", ",", "delay", "=", "True", ")", "else", ":", "what", "=", "what", ".", "strip", "(", ")", "index", "=", "what", ".", "index", "(", "\"(\"", ")", "import", "re", "groups", "=", "re", ".", "match", "(", "\"(.*)\\((.*)\\)\"", ",", "what", ")", ".", "groups", "(", ")", "if", "groups", "and", "len", "(", "groups", ")", "==", "2", ":", "function", "=", "groups", "[", "0", "]", "arguments", "=", "groups", "[", "1", "]", ".", "strip", "(", ")", "if", "\",\"", 
"in", "arguments", ":", "arguments", "=", "arguments", ".", "split", "(", "\",\"", ")", "functions", "=", "[", "\"mean\"", ",", "\"sum\"", ",", "\"std\"", ",", "\"var\"", ",", "\"correlation\"", ",", "\"covar\"", ",", "\"min\"", ",", "\"max\"", ",", "\"median_approx\"", "]", "unit_expression", "=", "None", "if", "function", "in", "[", "\"mean\"", ",", "\"sum\"", ",", "\"std\"", ",", "\"min\"", ",", "\"max\"", ",", "\"median\"", "]", ":", "unit_expression", "=", "arguments", "if", "function", "in", "[", "\"var\"", "]", ":", "unit_expression", "=", "\"(%s) * (%s)\"", "%", "(", "arguments", ",", "arguments", ")", "if", "function", "in", "[", "\"covar\"", "]", ":", "unit_expression", "=", "\"(%s) * (%s)\"", "%", "arguments", "if", "unit_expression", ":", "unit", "=", "self", ".", "unit", "(", "unit_expression", ")", "if", "unit", ":", "what_units", "=", "unit", ".", "to_string", "(", "'latex_inline'", ")", "if", "function", "in", "functions", ":", "grid", "=", "getattr", "(", "self", ",", "function", ")", "(", "arguments", ",", "binby", "=", "binby", ",", "limits", "=", "limits", ",", "shape", "=", "shape", ",", "selection", "=", "selections", ",", "delay", "=", "True", ")", "elif", "function", "==", "\"count\"", ":", "grid", "=", "self", ".", "count", "(", "arguments", ",", "binby", ",", "shape", "=", "shape", ",", "limits", "=", "limits", ",", "selection", "=", "selections", ",", "delay", "=", "True", ")", "else", ":", "raise", "ValueError", "(", "\"Could not understand method: %s, expected one of %r'\"", "%", "(", "function", ",", "functions", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Could not understand 'what' argument %r, expected something in form: 'count(*)', 'mean(x)'\"", "%", "what", ")", "if", "i", "==", "0", ":", "# and j == 0:", "what_label", "=", "str", "(", "whats", "[", "j", "]", ")", "if", "what_units", ":", "what_label", "+=", "\" (%s)\"", "%", "what_units", "if", "fs", "[", "j", "]", ":", "what_label", "=", "fs", "[", "j", "]", "+", "\" \"", "+", "what_label", "what_labels", ".", "append", "(", "what_label", ")", "grid_of_grids", "[", "-", "1", "]", ".", "append", "(", "grid", ")", "self", ".", "executor", ".", "execute", "(", ")", "for", "i", ",", "(", "binby", ",", "limits", ")", "in", "enumerate", "(", "zip", "(", "x", ",", "xlimits", ")", ")", ":", "for", "j", ",", "what", "in", "enumerate", "(", "whats", ")", ":", "grid", "=", "grid_of_grids", "[", "i", "]", "[", "j", "]", ".", "get", "(", ")", "total_grid", "[", "i", ",", "j", ",", ":", ",", ":", "]", "=", "grid", "[", ":", ",", "None", ",", "...", "]", "labels", "[", "\"what\"", "]", "=", "what_labels", "else", ":", "dims_left", "=", "6", "-", "len", "(", "grid", ".", "shape", ")", "total_grid", "=", "np", ".", "broadcast_to", "(", "grid", ",", "(", "1", ",", ")", "*", "dims_left", "+", "grid", ".", "shape", ")", "# visual=dict(x=\"x\", y=\"y\", selection=\"fade\", subspace=\"facet1\", what=\"facet2\",)", "def", "_selection_name", "(", "name", ")", ":", "if", "name", "in", "[", "None", ",", "False", "]", ":", "return", "\"selection: all\"", "elif", "name", "in", "[", "\"default\"", ",", "True", "]", ":", "return", "\"selection: default\"", "else", ":", "return", "\"selection: %s\"", "%", "name", "if", "selection_labels", "is", "None", ":", "labels", "[", "\"selection\"", "]", "=", "list", "(", "[", "_selection_name", "(", "k", ")", "for", "k", "in", "selections", "]", ")", "else", ":", "labels", "[", "\"selection\"", "]", "=", "selection_labels", "# visual_grid = np.moveaxis(total_grid, move.keys(), 
move.values())", "# np.moveaxis is in np 1.11 only?, use transpose", "axes", "=", "[", "None", "]", "*", "len", "(", "move", ")", "for", "key", ",", "value", "in", "move", ".", "items", "(", ")", ":", "axes", "[", "value", "]", "=", "key", "visual_grid", "=", "np", ".", "transpose", "(", "total_grid", ",", "axes", ")", "logger", ".", "debug", "(", "\"grid shape: %r\"", ",", "total_grid", ".", "shape", ")", "logger", ".", "debug", "(", "\"visual: %r\"", ",", "visual", ".", "items", "(", ")", ")", "logger", ".", "debug", "(", "\"move: %r\"", ",", "move", ")", "logger", ".", "debug", "(", "\"visual grid shape: %r\"", ",", "visual_grid", ".", "shape", ")", "xexpressions", "=", "[", "]", "yexpressions", "=", "[", "]", "for", "i", ",", "(", "binby", ",", "limits", ")", "in", "enumerate", "(", "zip", "(", "x", ",", "xlimits", ")", ")", ":", "xexpressions", ".", "append", "(", "binby", "[", "0", "]", ")", "yexpressions", ".", "append", "(", "binby", "[", "1", "]", ")", "if", "xlabel", "is", "None", ":", "xlabels", "=", "[", "]", "ylabels", "=", "[", "]", "for", "i", ",", "(", "binby", ",", "limits", ")", "in", "enumerate", "(", "zip", "(", "x", ",", "xlimits", ")", ")", ":", "if", "z", "is", "not", "None", ":", "xlabels", ".", "append", "(", "self", ".", "label", "(", "binby", "[", "1", "]", ")", ")", "ylabels", ".", "append", "(", "self", ".", "label", "(", "binby", "[", "2", "]", ")", ")", "else", ":", "xlabels", ".", "append", "(", "self", ".", "label", "(", "binby", "[", "0", "]", ")", ")", "ylabels", ".", "append", "(", "self", ".", "label", "(", "binby", "[", "1", "]", ")", ")", "else", ":", "Nl", "=", "visual_grid", ".", "shape", "[", "visual_axes", "[", "'row'", "]", "]", "xlabels", "=", "_expand", "(", "xlabel", ",", "Nl", ")", "ylabels", "=", "_expand", "(", "ylabel", ",", "Nl", ")", "#labels[visual[\"x\"]] = (xlabels, ylabels)", "labels", "[", "\"x\"", "]", "=", "xlabels", "labels", "[", "\"y\"", "]", "=", "ylabels", "# grid = total_grid", "# print(grid.shape)", "# grid = self.reduce(grid, )", "axes", "=", "[", "]", "# cax = pylab.subplot(1,1,1)", "background_color", "=", "np", ".", "array", "(", "matplotlib", ".", "colors", ".", "colorConverter", ".", "to_rgb", "(", "background_color", ")", ")", "# if grid.shape[axis[\"selection\"]] > 1:# and not facet:", "# rgrid = vaex.image.fade(rgrid)", "# finite_mask = np.any(finite_mask, axis=0) # do we really need this", "# print(rgrid.shape)", "# facet_row_axis = axis[\"what\"]", "import", "math", "facet_columns", "=", "None", "facets", "=", "visual_grid", ".", "shape", "[", "visual_axes", "[", "\"row\"", "]", "]", "*", "visual_grid", ".", "shape", "[", "visual_axes", "[", "\"column\"", "]", "]", "if", "visual_grid", ".", "shape", "[", "visual_axes", "[", "\"column\"", "]", "]", "==", "1", "and", "wrap", ":", "facet_columns", "=", "min", "(", "wrap_columns", ",", "visual_grid", ".", "shape", "[", "visual_axes", "[", "\"row\"", "]", "]", ")", "wrapped", "=", "True", "elif", "visual_grid", ".", "shape", "[", "visual_axes", "[", "\"row\"", "]", "]", "==", "1", "and", "wrap", ":", "facet_columns", "=", "min", "(", "wrap_columns", ",", "visual_grid", ".", "shape", "[", "visual_axes", "[", "\"column\"", "]", "]", ")", "wrapped", "=", "True", "else", ":", "wrapped", "=", "False", "facet_columns", "=", "visual_grid", ".", "shape", "[", "visual_axes", "[", "\"column\"", "]", "]", "facet_rows", "=", "int", "(", "math", ".", "ceil", "(", "facets", "/", "facet_columns", ")", ")", "logger", ".", "debug", "(", "\"facet_rows: %r\"", ",", "facet_rows", ")", 
"logger", ".", "debug", "(", "\"facet_columns: %r\"", ",", "facet_columns", ")", "# if visual_grid.shape[visual_axes[\"row\"]] > 1: # and not wrap:", "# #facet_row_axis = axis[\"what\"]", "# facet_columns = visual_grid.shape[visual_axes[\"column\"]]", "# else:", "# facet_columns = min(wrap_columns, facets)", "# if grid.shape[axis[\"plot\"]] > 1:# and not facet:", "# this loop could be done using axis arguments everywhere", "# assert len(normalize_axis) == 1, \"currently only 1 normalization axis supported\"", "grid", "=", "visual_grid", "*", "1.", "fgrid", "=", "visual_grid", "*", "1.", "ngrid", "=", "visual_grid", "*", "1.", "# colorgrid = np.zeros(ngrid.shape + (4,), float)", "# print \"norma\", normalize_axis, visual_grid.shape[visual_axes[visual[normalize_axis]]]", "vmins", "=", "_expand", "(", "vmin", ",", "visual_grid", ".", "shape", "[", "visual_axes", "[", "visual", "[", "normalize_axis", "]", "]", "]", ",", "type", "=", "list", ")", "vmaxs", "=", "_expand", "(", "vmax", ",", "visual_grid", ".", "shape", "[", "visual_axes", "[", "visual", "[", "normalize_axis", "]", "]", "]", ",", "type", "=", "list", ")", "# for name in normalize_axis:", "visual_grid", "if", "smooth_pre", ":", "grid", "=", "vaex", ".", "grids", ".", "gf", "(", "grid", ",", "smooth_pre", ")", "if", "1", ":", "axis", "=", "visual_axes", "[", "visual", "[", "normalize_axis", "]", "]", "for", "i", "in", "range", "(", "visual_grid", ".", "shape", "[", "axis", "]", ")", ":", "item", "=", "[", "slice", "(", "None", ",", "None", ",", "None", ")", ",", "]", "*", "len", "(", "visual_grid", ".", "shape", ")", "item", "[", "axis", "]", "=", "i", "item", "=", "tuple", "(", "item", ")", "f", "=", "_parse_f", "(", "fs", "[", "i", "]", ")", "with", "np", ".", "errstate", "(", "divide", "=", "'ignore'", ",", "invalid", "=", "'ignore'", ")", ":", "# these are fine, we are ok with nan's in vaex", "fgrid", ".", "__setitem__", "(", "item", ",", "f", "(", "grid", ".", "__getitem__", "(", "item", ")", ")", ")", "# print vmins[i], vmaxs[i]", "if", "vmins", "[", "i", "]", "is", "not", "None", "and", "vmaxs", "[", "i", "]", "is", "not", "None", ":", "nsubgrid", "=", "fgrid", ".", "__getitem__", "(", "item", ")", "*", "1", "nsubgrid", "-=", "vmins", "[", "i", "]", "nsubgrid", "/=", "(", "vmaxs", "[", "i", "]", "-", "vmins", "[", "i", "]", ")", "else", ":", "nsubgrid", ",", "vmin", ",", "vmax", "=", "n", "(", "fgrid", ".", "__getitem__", "(", "item", ")", ")", "vmins", "[", "i", "]", "=", "vmin", "vmaxs", "[", "i", "]", "=", "vmax", "# print \" \", vmins[i], vmaxs[i]", "ngrid", ".", "__setitem__", "(", "item", ",", "nsubgrid", ")", "if", "0", ":", "# TODO: above should be like the code below, with custom vmin and vmax", "grid", "=", "visual_grid", "[", "i", "]", "f", "=", "_parse_f", "(", "fs", "[", "i", "]", ")", "fgrid", "=", "f", "(", "grid", ")", "finite_mask", "=", "np", ".", "isfinite", "(", "grid", ")", "finite_mask", "=", "np", ".", "any", "(", "finite_mask", ",", "axis", "=", "0", ")", "if", "vmin", "is", "not", "None", "and", "vmax", "is", "not", "None", ":", "ngrid", "=", "fgrid", "*", "1", "ngrid", "-=", "vmin", "ngrid", "/=", "(", "vmax", "-", "vmin", ")", "ngrid", "=", "np", ".", "clip", "(", "ngrid", ",", "0", ",", "1", ")", "else", ":", "ngrid", ",", "vmin", ",", "vmax", "=", "n", "(", "fgrid", ")", "# vmin, vmax = np.nanmin(fgrid), np.nanmax(fgrid)", "# every 'what', should have its own colorbar, check if what corresponds to", "# rows or columns in facets, if so, do a colorbar per row or per column", "rows", ",", "columns", 
"=", "int", "(", "math", ".", "ceil", "(", "facets", "/", "float", "(", "facet_columns", ")", ")", ")", ",", "facet_columns", "colorbar_location", "=", "\"individual\"", "if", "visual", "[", "\"what\"", "]", "==", "\"row\"", "and", "visual_grid", ".", "shape", "[", "1", "]", "==", "facet_columns", ":", "colorbar_location", "=", "\"per_row\"", "if", "visual", "[", "\"what\"", "]", "==", "\"column\"", "and", "visual_grid", ".", "shape", "[", "0", "]", "==", "facet_rows", ":", "colorbar_location", "=", "\"per_column\"", "# values = np.linspace(facet_limits[0], facet_limits[1], facet_count+1)", "logger", ".", "debug", "(", "\"rows: %r, columns: %r\"", ",", "rows", ",", "columns", ")", "import", "matplotlib", ".", "gridspec", "as", "gridspec", "column_scale", "=", "1", "row_scale", "=", "1", "row_offset", "=", "0", "if", "facets", ">", "1", ":", "if", "colorbar_location", "==", "\"per_row\"", ":", "column_scale", "=", "4", "gs", "=", "gridspec", ".", "GridSpec", "(", "rows", ",", "columns", "*", "column_scale", "+", "1", ")", "elif", "colorbar_location", "==", "\"per_column\"", ":", "row_offset", "=", "1", "row_scale", "=", "4", "gs", "=", "gridspec", ".", "GridSpec", "(", "rows", "*", "row_scale", "+", "1", ",", "columns", ")", "else", ":", "gs", "=", "gridspec", ".", "GridSpec", "(", "rows", ",", "columns", ")", "facet_index", "=", "0", "fs", "=", "_expand", "(", "f", ",", "len", "(", "whats", ")", ")", "colormaps", "=", "_expand", "(", "colormap", ",", "len", "(", "whats", ")", ")", "# row", "for", "i", "in", "range", "(", "visual_grid", ".", "shape", "[", "0", "]", ")", ":", "# column", "for", "j", "in", "range", "(", "visual_grid", ".", "shape", "[", "1", "]", ")", ":", "if", "colorbar", "and", "colorbar_location", "==", "\"per_column\"", "and", "i", "==", "0", ":", "norm", "=", "matplotlib", ".", "colors", ".", "Normalize", "(", "vmins", "[", "j", "]", ",", "vmaxs", "[", "j", "]", ")", "sm", "=", "matplotlib", ".", "cm", ".", "ScalarMappable", "(", "norm", ",", "colormaps", "[", "j", "]", ")", "sm", ".", "set_array", "(", "1", ")", "# make matplotlib happy (strange behavious)", "if", "facets", ">", "1", ":", "ax", "=", "pylab", ".", "subplot", "(", "gs", "[", "0", ",", "j", "]", ")", "colorbar", "=", "fig", ".", "colorbar", "(", "sm", ",", "cax", "=", "ax", ",", "orientation", "=", "\"horizontal\"", ")", "else", ":", "colorbar", "=", "fig", ".", "colorbar", "(", "sm", ")", "if", "\"what\"", "in", "labels", ":", "label", "=", "labels", "[", "\"what\"", "]", "[", "j", "]", "if", "facets", ">", "1", ":", "colorbar", ".", "ax", ".", "set_title", "(", "label", ")", "else", ":", "colorbar", ".", "ax", ".", "set_ylabel", "(", "colorbar_label", "or", "label", ")", "if", "colorbar", "and", "colorbar_location", "==", "\"per_row\"", "and", "j", "==", "0", ":", "norm", "=", "matplotlib", ".", "colors", ".", "Normalize", "(", "vmins", "[", "i", "]", ",", "vmaxs", "[", "i", "]", ")", "sm", "=", "matplotlib", ".", "cm", ".", "ScalarMappable", "(", "norm", ",", "colormaps", "[", "i", "]", ")", "sm", ".", "set_array", "(", "1", ")", "# make matplotlib happy (strange behavious)", "if", "facets", ">", "1", ":", "ax", "=", "pylab", ".", "subplot", "(", "gs", "[", "i", ",", "-", "1", "]", ")", "colorbar", "=", "fig", ".", "colorbar", "(", "sm", ",", "cax", "=", "ax", ")", "else", ":", "colorbar", "=", "fig", ".", "colorbar", "(", "sm", ")", "label", "=", "labels", "[", "\"what\"", "]", "[", "i", "]", "colorbar", ".", "ax", ".", "set_ylabel", "(", "colorbar_label", "or", "label", ")", "rgrid", "=", "ngrid", 
"[", "i", ",", "j", "]", "*", "1.", "# print rgrid.shape", "for", "k", "in", "range", "(", "rgrid", ".", "shape", "[", "0", "]", ")", ":", "for", "l", "in", "range", "(", "rgrid", ".", "shape", "[", "0", "]", ")", ":", "if", "smooth_post", "is", "not", "None", ":", "rgrid", "[", "k", ",", "l", "]", "=", "vaex", ".", "grids", ".", "gf", "(", "rgrid", ",", "smooth_post", ")", "if", "visual", "[", "\"what\"", "]", "==", "\"column\"", ":", "what_index", "=", "j", "elif", "visual", "[", "\"what\"", "]", "==", "\"row\"", ":", "what_index", "=", "i", "else", ":", "what_index", "=", "0", "if", "visual", "[", "normalize_axis", "]", "==", "\"column\"", ":", "normalize_index", "=", "j", "elif", "visual", "[", "normalize_axis", "]", "==", "\"row\"", ":", "normalize_index", "=", "i", "else", ":", "normalize_index", "=", "0", "for", "r", "in", "reduce", ":", "r", "=", "_parse_reduction", "(", "r", ",", "colormaps", "[", "what_index", "]", ",", "[", "]", ")", "rgrid", "=", "r", "(", "rgrid", ")", "row", "=", "facet_index", "//", "facet_columns", "column", "=", "facet_index", "%", "facet_columns", "if", "colorbar", "and", "colorbar_location", "==", "\"individual\"", ":", "# visual_grid.shape[visual_axes[visual[normalize_axis]]]", "norm", "=", "matplotlib", ".", "colors", ".", "Normalize", "(", "vmins", "[", "normalize_index", "]", ",", "vmaxs", "[", "normalize_index", "]", ")", "sm", "=", "matplotlib", ".", "cm", ".", "ScalarMappable", "(", "norm", ",", "colormaps", "[", "what_index", "]", ")", "sm", ".", "set_array", "(", "1", ")", "# make matplotlib happy (strange behavious)", "if", "facets", ">", "1", ":", "ax", "=", "pylab", ".", "subplot", "(", "gs", "[", "row", ",", "column", "]", ")", "colorbar", "=", "fig", ".", "colorbar", "(", "sm", ",", "ax", "=", "ax", ")", "else", ":", "colorbar", "=", "fig", ".", "colorbar", "(", "sm", ")", "label", "=", "labels", "[", "\"what\"", "]", "[", "what_index", "]", "colorbar", ".", "ax", ".", "set_ylabel", "(", "colorbar_label", "or", "label", ")", "if", "facets", ">", "1", ":", "ax", "=", "pylab", ".", "subplot", "(", "gs", "[", "row_offset", "+", "row", "*", "row_scale", ":", "row_offset", "+", "(", "row", "+", "1", ")", "*", "row_scale", ",", "column", "*", "column_scale", ":", "(", "column", "+", "1", ")", "*", "column_scale", "]", ")", "else", ":", "ax", "=", "pylab", ".", "gca", "(", ")", "axes", ".", "append", "(", "ax", ")", "logger", ".", "debug", "(", "\"rgrid: %r\"", ",", "rgrid", ".", "shape", ")", "plot_rgrid", "=", "rgrid", "assert", "plot_rgrid", ".", "shape", "[", "1", "]", "==", "1", ",", "\"no layers supported yet\"", "plot_rgrid", "=", "plot_rgrid", "[", ":", ",", "0", "]", "if", "plot_rgrid", ".", "shape", "[", "0", "]", ">", "1", ":", "plot_rgrid", "=", "vaex", ".", "image", ".", "fade", "(", "plot_rgrid", "[", ":", ":", "-", "1", "]", ")", "else", ":", "plot_rgrid", "=", "plot_rgrid", "[", "0", "]", "extend", "=", "None", "if", "visual", "[", "\"subspace\"", "]", "==", "\"row\"", ":", "subplot_index", "=", "i", "elif", "visual", "[", "\"subspace\"", "]", "==", "\"column\"", ":", "subplot_index", "=", "j", "else", ":", "subplot_index", "=", "0", "extend", "=", "np", ".", "array", "(", "xlimits", "[", "subplot_index", "]", "[", "-", "2", ":", "]", ")", ".", "flatten", "(", ")", "# extend = np.array(xlimits[i]).flatten()", "logger", ".", "debug", "(", "\"plot rgrid: %r\"", ",", "plot_rgrid", ".", "shape", ")", "plot_rgrid", "=", "np", ".", "transpose", "(", "plot_rgrid", ",", "(", "1", ",", "0", ",", "2", ")", ")", "im", "=", "ax", ".", 
"imshow", "(", "plot_rgrid", ",", "extent", "=", "extend", ".", "tolist", "(", ")", ",", "origin", "=", "\"lower\"", ",", "aspect", "=", "aspect", ",", "interpolation", "=", "interpolation", ")", "# v1, v2 = values[i], values[i+1]", "def", "label", "(", "index", ",", "label", ",", "expression", ")", ":", "if", "label", "and", "_issequence", "(", "label", ")", ":", "return", "label", "[", "i", "]", "else", ":", "return", "self", ".", "label", "(", "expression", ")", "if", "visual_reverse", "[", "\"x\"", "]", "==", "'x'", ":", "labelsx", "=", "labels", "[", "'x'", "]", "pylab", ".", "xlabel", "(", "labelsx", "[", "subplot_index", "]", ")", "if", "visual_reverse", "[", "\"x\"", "]", "==", "'x'", ":", "labelsy", "=", "labels", "[", "'y'", "]", "pylab", ".", "ylabel", "(", "labelsy", "[", "subplot_index", "]", ")", "if", "visual", "[", "\"z\"", "]", "in", "[", "'row'", "]", ":", "labelsz", "=", "labels", "[", "'z'", "]", "ax", ".", "set_title", "(", "labelsz", "[", "i", "]", ")", "if", "visual", "[", "\"z\"", "]", "in", "[", "'column'", "]", ":", "labelsz", "=", "labels", "[", "'z'", "]", "ax", ".", "set_title", "(", "labelsz", "[", "j", "]", ")", "max_labels", "=", "10", "# xexpression = xexpressions[i]", "# if self.iscategory(xexpression):", "# labels = self.category_labels(xexpression)", "# step = len(labels) // max_labels", "# pylab.xticks(np.arange(len(labels))[::step], labels[::step], size='small')", "# yexpression = yexpressions[i]", "# if self.iscategory(yexpression):", "# labels = self.category_labels(yexpression)", "# step = len(labels) // max_labels", "# pylab.yticks(np.arange(len(labels))[::step], labels[::step], size='small')", "facet_index", "+=", "1", "if", "title", ":", "fig", ".", "suptitle", "(", "title", ",", "fontsize", "=", "\"x-large\"", ")", "if", "tight_layout", ":", "if", "title", ":", "pylab", ".", "tight_layout", "(", "rect", "=", "[", "0", ",", "0.03", ",", "1", ",", "0.95", "]", ")", "else", ":", "pylab", ".", "tight_layout", "(", ")", "if", "hardcopy", ":", "pylab", ".", "savefig", "(", "hardcopy", ")", "if", "show", ":", "pylab", ".", "show", "(", ")", "if", "return_extra", ":", "return", "im", ",", "grid", ",", "fgrid", ",", "ngrid", ",", "rgrid", "else", ":", "return", "im" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
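The tokenized function body ending at this record belongs to a faceted plotting routine; among other things it applies an optional transform f to each grid slice and then rescales the result using supplied or derived vmin/vmax values. Purely as a hedged, stand-alone NumPy sketch of that normalization step, detached from the surrounding plotting machinery (the helper name normalize_grid and the sample counts are illustrative assumptions, not part of the record):

# Hedged sketch of the per-grid normalization visible in the tokens above:
# optionally transform the grid, then scale it to [0, 1] using supplied or
# derived vmin/vmax. All names and sample values here are illustrative only.
import numpy as np

def normalize_grid(grid, f=None, vmin=None, vmax=None):
    fgrid = f(grid) if f is not None else grid.astype(float)
    if vmin is None or vmax is None:
        vmin, vmax = np.nanmin(fgrid), np.nanmax(fgrid)
    ngrid = (fgrid - vmin) / (vmax - vmin)
    return ngrid, vmin, vmax

counts = np.array([[0.0, 10.0], [5.0, 20.0]])
ngrid, vmin, vmax = normalize_grid(counts, f=np.log1p)
print(ngrid, vmin, vmax)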
test
register_function
Decorator to register a new function with vaex. Example: >>> import vaex >>> df = vaex.example() >>> @vaex.register_function() ... def invert(x): ...     return 1/x >>> df.x.invert() >>> import numpy as np >>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64')) >>> @vaex.register_function(as_property=True, scope='dt') ... def dt_relative_day(x): ...     return vaex.functions.dt_dayofyear(x)/365. >>> df.departure.dt.relative_day
packages/vaex-core/vaex/functions.py
def register_function(scope=None, as_property=False, name=None): """Decorator to register a new function with vaex. Example: >>> import vaex >>> df = vaex.example() >>> @vaex.register_function() >>> def invert(x): >>> return 1/x >>> df.x.invert() >>> import numpy as np >>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64')) >>> @vaex.register_function(as_property=True, scope='dt') >>> def dt_relative_day(x): >>> return vaex.functions.dt_dayofyear(x)/365. >>> df.departure.dt.relative_day """ prefix = '' if scope: prefix = scope + "_" if scope not in scopes: raise KeyError("unknown scope") def wrapper(f, name=name): name = name or f.__name__ # remove possible prefix if name.startswith(prefix): name = name[len(prefix):] full_name = prefix + name if scope: def closure(name=name, full_name=full_name, function=f): def wrapper(self, *args, **kwargs): lazy_func = getattr(self.expression.ds.func, full_name) args = (self.expression, ) + args return lazy_func(*args, **kwargs) return functools.wraps(function)(wrapper) if as_property: setattr(scopes[scope], name, property(closure())) else: setattr(scopes[scope], name, closure()) else: def closure(name=name, full_name=full_name, function=f): def wrapper(self, *args, **kwargs): lazy_func = getattr(self.ds.func, full_name) args = (self, ) + args return lazy_func(*args, **kwargs) return functools.wraps(function)(wrapper) setattr(vaex.expression.Expression, name, closure()) vaex.expression.expression_namespace[prefix + name] = f return f # we leave the original function as is return wrapper
def register_function(scope=None, as_property=False, name=None): """Decorator to register a new function with vaex. Example: >>> import vaex >>> df = vaex.example() >>> @vaex.register_function() >>> def invert(x): >>> return 1/x >>> df.x.invert() >>> import numpy as np >>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64')) >>> @vaex.register_function(as_property=True, scope='dt') >>> def dt_relative_day(x): >>> return vaex.functions.dt_dayofyear(x)/365. >>> df.departure.dt.relative_day """ prefix = '' if scope: prefix = scope + "_" if scope not in scopes: raise KeyError("unknown scope") def wrapper(f, name=name): name = name or f.__name__ # remove possible prefix if name.startswith(prefix): name = name[len(prefix):] full_name = prefix + name if scope: def closure(name=name, full_name=full_name, function=f): def wrapper(self, *args, **kwargs): lazy_func = getattr(self.expression.ds.func, full_name) args = (self.expression, ) + args return lazy_func(*args, **kwargs) return functools.wraps(function)(wrapper) if as_property: setattr(scopes[scope], name, property(closure())) else: setattr(scopes[scope], name, closure()) else: def closure(name=name, full_name=full_name, function=f): def wrapper(self, *args, **kwargs): lazy_func = getattr(self.ds.func, full_name) args = (self, ) + args return lazy_func(*args, **kwargs) return functools.wraps(function)(wrapper) setattr(vaex.expression.Expression, name, closure()) vaex.expression.expression_namespace[prefix + name] = f return f # we leave the original function as is return wrapper
[ "Decorator", "to", "register", "a", "new", "function", "with", "vaex", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L19-L71
[ "def", "register_function", "(", "scope", "=", "None", ",", "as_property", "=", "False", ",", "name", "=", "None", ")", ":", "prefix", "=", "''", "if", "scope", ":", "prefix", "=", "scope", "+", "\"_\"", "if", "scope", "not", "in", "scopes", ":", "raise", "KeyError", "(", "\"unknown scope\"", ")", "def", "wrapper", "(", "f", ",", "name", "=", "name", ")", ":", "name", "=", "name", "or", "f", ".", "__name__", "# remove possible prefix", "if", "name", ".", "startswith", "(", "prefix", ")", ":", "name", "=", "name", "[", "len", "(", "prefix", ")", ":", "]", "full_name", "=", "prefix", "+", "name", "if", "scope", ":", "def", "closure", "(", "name", "=", "name", ",", "full_name", "=", "full_name", ",", "function", "=", "f", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lazy_func", "=", "getattr", "(", "self", ".", "expression", ".", "ds", ".", "func", ",", "full_name", ")", "args", "=", "(", "self", ".", "expression", ",", ")", "+", "args", "return", "lazy_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "functools", ".", "wraps", "(", "function", ")", "(", "wrapper", ")", "if", "as_property", ":", "setattr", "(", "scopes", "[", "scope", "]", ",", "name", ",", "property", "(", "closure", "(", ")", ")", ")", "else", ":", "setattr", "(", "scopes", "[", "scope", "]", ",", "name", ",", "closure", "(", ")", ")", "else", ":", "def", "closure", "(", "name", "=", "name", ",", "full_name", "=", "full_name", ",", "function", "=", "f", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lazy_func", "=", "getattr", "(", "self", ".", "ds", ".", "func", ",", "full_name", ")", "args", "=", "(", "self", ",", ")", "+", "args", "return", "lazy_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "functools", ".", "wraps", "(", "function", ")", "(", "wrapper", ")", "setattr", "(", "vaex", ".", "expression", ".", "Expression", ",", "name", ",", "closure", "(", ")", ")", "vaex", ".", "expression", ".", "expression_namespace", "[", "prefix", "+", "name", "]", "=", "f", "return", "f", "# we leave the original function as is", "return", "wrapper" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
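As a quick usage check for the register_function record above, here is a minimal hedged sketch of the decorator in action; the function name reciprocal and the toy column x are illustrative assumptions, while the registration and call pattern follows the record's own docstring.

# Hedged sketch: registering a custom function and calling it on an expression.
# Assumes a working vaex installation; names and data below are illustrative.
import numpy as np
import vaex

@vaex.register_function()
def reciprocal(x):
    # receives the evaluated chunks (NumPy arrays) when the expression is computed
    return 1 / x

df = vaex.from_arrays(x=np.arange(1.0, 5.0))
df['inv_x'] = df.x.reciprocal()  # lazy virtual column using the registered function
print(df)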
test
fillna
Returns an array where missing values are replaced by value. If the dtype is object, nan values and 'nan' string values are replaced by value when fill_nan==True.
packages/vaex-core/vaex/functions.py
def fillna(ar, value, fill_nan=True, fill_masked=True): '''Returns an array where missing values are replaced by value. If the dtype is object, nan values and 'nan' string values are replaced by value when fill_nan==True. ''' ar = ar if not isinstance(ar, column.Column) else ar.to_numpy() if ar.dtype.kind in 'O' and fill_nan: strings = ar.astype(str) mask = strings == 'nan' ar = ar.copy() ar[mask] = value elif ar.dtype.kind in 'f' and fill_nan: mask = np.isnan(ar) if np.any(mask): ar = ar.copy() ar[mask] = value if fill_masked and np.ma.isMaskedArray(ar): mask = ar.mask if np.any(mask): ar = ar.data.copy() ar[mask] = value return ar
def fillna(ar, value, fill_nan=True, fill_masked=True): '''Returns an array where missing values are replaced by value. If the dtype is object, nan values and 'nan' string values are replaced by value when fill_nan==True. ''' ar = ar if not isinstance(ar, column.Column) else ar.to_numpy() if ar.dtype.kind in 'O' and fill_nan: strings = ar.astype(str) mask = strings == 'nan' ar = ar.copy() ar[mask] = value elif ar.dtype.kind in 'f' and fill_nan: mask = np.isnan(ar) if np.any(mask): ar = ar.copy() ar[mask] = value if fill_masked and np.ma.isMaskedArray(ar): mask = ar.mask if np.any(mask): ar = ar.data.copy() ar[mask] = value return ar
[ "Returns", "an", "array", "where", "missing", "values", "are", "replaced", "by", "value", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L122-L144
[ "def", "fillna", "(", "ar", ",", "value", ",", "fill_nan", "=", "True", ",", "fill_masked", "=", "True", ")", ":", "ar", "=", "ar", "if", "not", "isinstance", "(", "ar", ",", "column", ".", "Column", ")", "else", "ar", ".", "to_numpy", "(", ")", "if", "ar", ".", "dtype", ".", "kind", "in", "'O'", "and", "fill_nan", ":", "strings", "=", "ar", ".", "astype", "(", "str", ")", "mask", "=", "strings", "==", "'nan'", "ar", "=", "ar", ".", "copy", "(", ")", "ar", "[", "mask", "]", "=", "value", "elif", "ar", ".", "dtype", ".", "kind", "in", "'f'", "and", "fill_nan", ":", "mask", "=", "np", ".", "isnan", "(", "ar", ")", "if", "np", ".", "any", "(", "mask", ")", ":", "ar", "=", "ar", ".", "copy", "(", ")", "ar", "[", "mask", "]", "=", "value", "if", "fill_masked", "and", "np", ".", "ma", ".", "isMaskedArray", "(", "ar", ")", ":", "mask", "=", "ar", ".", "mask", "if", "np", ".", "any", "(", "mask", ")", ":", "ar", "=", "ar", ".", "data", ".", "copy", "(", ")", "ar", "[", "mask", "]", "=", "value", "return", "ar" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
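The fillna record above documents behaviour for object, float, and masked arrays but ships no example; below is a small hedged sketch exercising it directly. The sample arrays are made up, and the import path simply follows the record's path field.

# Hedged sketch of vaex.functions.fillna on a float array and a masked array.
# Sample data is illustrative only.
import numpy as np
from vaex.functions import fillna

floats = np.array([1.0, np.nan, 3.0])
print(fillna(floats, value=0.0))   # NaN replaced: [1. 0. 3.]

masked = np.ma.masked_array([1, 2, 3], mask=[False, True, False])
print(fillna(masked, value=-1))    # masked entry replaced: [ 1 -1  3]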
test
dt_dayofweek
Obtain the day of the week with Monday=0 and Sunday=6 :returns: an expression containing the day of week. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.dayofweek Expression = dt_dayofweek(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 0 1 3 2 3
packages/vaex-core/vaex/functions.py
def dt_dayofweek(x): """Obtain the day of the week with Monday=0 and Sunday=6 :returns: an expression containing the day of week. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.dayofweek Expression = dt_dayofweek(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 0 1 3 2 3 """ import pandas as pd return pd.Series(x).dt.dayofweek.values
def dt_dayofweek(x): """Obtain the day of the week with Monday=0 and Sunday=6 :returns: an expression containing the day of week. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.dayofweek Expression = dt_dayofweek(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 0 1 3 2 3 """ import pandas as pd return pd.Series(x).dt.dayofweek.values
[ "Obtain", "the", "day", "of", "the", "week", "with", "Monday", "=", "0", "and", "Sunday", "=", "6" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L160-L186
[ "def", "dt_dayofweek", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "dayofweek", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_dayofyear
The ordinal day of the year. :returns: an expression containing the ordinal day of the year. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.dayofyear Expression = dt_dayofyear(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 285 1 42 2 316
packages/vaex-core/vaex/functions.py
def dt_dayofyear(x): """The ordinal day of the year. :returns: an expression containing the ordinal day of the year. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.dayofyear Expression = dt_dayofyear(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 285 1 42 2 316 """ import pandas as pd return pd.Series(x).dt.dayofyear.values
def dt_dayofyear(x): """The ordinal day of the year. :returns: an expression containing the ordinal day of the year. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.dayofyear Expression = dt_dayofyear(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 285 1 42 2 316 """ import pandas as pd return pd.Series(x).dt.dayofyear.values
[ "The", "ordinal", "day", "of", "the", "year", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L189-L215
[ "def", "dt_dayofyear", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "dayofyear", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_is_leap_year
Check whether a year is a leap year. :returns: an expression which evaluates to True if a year is a leap year, and to False otherwise. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.is_leap_year Expression = dt_is_leap_year(date) Length: 3 dtype: bool (expression) ---------------------------------- 0 False 1 True 2 False
packages/vaex-core/vaex/functions.py
def dt_is_leap_year(x): """Check whether a year is a leap year. :returns: an expression which evaluates to True if a year is a leap year, and to False otherwise. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.is_leap_year Expression = dt_is_leap_year(date) Length: 3 dtype: bool (expression) ---------------------------------- 0 False 1 True 2 False """ import pandas as pd return pd.Series(x).dt.is_leap_year.values
def dt_is_leap_year(x): """Check whether a year is a leap year. :returns: an expression which evaluates to True if a year is a leap year, and to False otherwise. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.is_leap_year Expression = dt_is_leap_year(date) Length: 3 dtype: bool (expression) ---------------------------------- 0 False 1 True 2 False """ import pandas as pd return pd.Series(x).dt.is_leap_year.values
[ "Check", "whether", "a", "year", "is", "a", "leap", "year", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L218-L244
[ "def", "dt_is_leap_year", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "is_leap_year", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_year
Extracts the year out of a datetime sample. :returns: an expression containing the year extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.year Expression = dt_year(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 2009 1 2016 2 2015
packages/vaex-core/vaex/functions.py
def dt_year(x): """Extracts the year out of a datetime sample. :returns: an expression containing the year extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.year Expression = dt_year(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 2009 1 2016 2 2015 """ import pandas as pd return pd.Series(x).dt.year.values
def dt_year(x): """Extracts the year out of a datetime sample. :returns: an expression containing the year extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.year Expression = dt_year(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 2009 1 2016 2 2015 """ import pandas as pd return pd.Series(x).dt.year.values
[ "Extracts", "the", "year", "out", "of", "a", "datetime", "sample", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L247-L273
[ "def", "dt_year", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "year", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_month
Extracts the month out of a datetime sample. :returns: an expression containing the month extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.month Expression = dt_month(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 10 1 2 2 11
packages/vaex-core/vaex/functions.py
def dt_month(x): """Extracts the month out of a datetime sample. :returns: an expression containing the month extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.month Expression = dt_month(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 10 1 2 2 11 """ import pandas as pd return pd.Series(x).dt.month.values
def dt_month(x): """Extracts the month out of a datetime sample. :returns: an expression containing the month extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.month Expression = dt_month(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 10 1 2 2 11 """ import pandas as pd return pd.Series(x).dt.month.values
[ "Extracts", "the", "month", "out", "of", "a", "datetime", "sample", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L276-L302
[ "def", "dt_month", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "month", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_month_name
Returns the month names of a datetime sample in English. :returns: an expression containing the month names extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.month_name Expression = dt_month_name(date) Length: 3 dtype: str (expression) --------------------------------- 0 October 1 February 2 November
packages/vaex-core/vaex/functions.py
def dt_month_name(x): """Returns the month names of a datetime sample in English. :returns: an expression containing the month names extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.month_name Expression = dt_month_name(date) Length: 3 dtype: str (expression) --------------------------------- 0 October 1 February 2 November """ import pandas as pd return pd.Series(_pandas_dt_fix(x)).dt.month_name().values.astype(str)
def dt_month_name(x): """Returns the month names of a datetime sample in English. :returns: an expression containing the month names extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.month_name Expression = dt_month_name(date) Length: 3 dtype: str (expression) --------------------------------- 0 October 1 February 2 November """ import pandas as pd return pd.Series(_pandas_dt_fix(x)).dt.month_name().values.astype(str)
[ "Returns", "the", "month", "names", "of", "a", "datetime", "sample", "in", "English", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L305-L331
[ "def", "dt_month_name", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "_pandas_dt_fix", "(", "x", ")", ")", ".", "dt", ".", "month_name", "(", ")", ".", "values", ".", "astype", "(", "str", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_day
Extracts the day from a datetime sample. :returns: an expression containing the day extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.day Expression = dt_day(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 12 1 11 2 12
packages/vaex-core/vaex/functions.py
def dt_day(x): """Extracts the day from a datetime sample. :returns: an expression containing the day extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.day Expression = dt_day(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 12 1 11 2 12 """ import pandas as pd return pd.Series(x).dt.day.values
def dt_day(x): """Extracts the day from a datetime sample. :returns: an expression containing the day extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.day Expression = dt_day(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 12 1 11 2 12 """ import pandas as pd return pd.Series(x).dt.day.values
[ "Extracts", "the", "day", "from", "a", "datetime", "sample", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L334-L360
[ "def", "dt_day", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "day", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_day_name
Returns the day names of a datetime sample in English. :returns: an expression containing the day names extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.day_name Expression = dt_day_name(date) Length: 3 dtype: str (expression) --------------------------------- 0 Monday 1 Thursday 2 Thursday
packages/vaex-core/vaex/functions.py
def dt_day_name(x): """Returns the day names of a datetime sample in English. :returns: an expression containing the day names extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.day_name Expression = dt_day_name(date) Length: 3 dtype: str (expression) --------------------------------- 0 Monday 1 Thursday 2 Thursday """ import pandas as pd return pd.Series(_pandas_dt_fix(x)).dt.day_name().values.astype(str)
def dt_day_name(x): """Returns the day names of a datetime sample in English. :returns: an expression containing the day names extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.day_name Expression = dt_day_name(date) Length: 3 dtype: str (expression) --------------------------------- 0 Monday 1 Thursday 2 Thursday """ import pandas as pd return pd.Series(_pandas_dt_fix(x)).dt.day_name().values.astype(str)
[ "Returns", "the", "day", "names", "of", "a", "datetime", "sample", "in", "English", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L363-L389
[ "def", "dt_day_name", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "_pandas_dt_fix", "(", "x", ")", ")", ".", "dt", ".", "day_name", "(", ")", ".", "values", ".", "astype", "(", "str", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_weekofyear
Returns the week ordinal of the year. :returns: an expression containing the week ordinal of the year, extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.weekofyear Expression = dt_weekofyear(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 42 1 6 2 46
packages/vaex-core/vaex/functions.py
def dt_weekofyear(x): """Returns the week ordinal of the year. :returns: an expression containing the week ordinal of the year, extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.weekofyear Expression = dt_weekofyear(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 42 1 6 2 46 """ import pandas as pd return pd.Series(x).dt.weekofyear.values
def dt_weekofyear(x): """Returns the week ordinal of the year. :returns: an expression containing the week ordinal of the year, extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.weekofyear Expression = dt_weekofyear(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 42 1 6 2 46 """ import pandas as pd return pd.Series(x).dt.weekofyear.values
[ "Returns", "the", "week", "ordinal", "of", "the", "year", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L392-L418
[ "def", "dt_weekofyear", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "weekofyear", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_hour
Extracts the hour out of a datetime sample. :returns: an expression containing the hour extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.hour Expression = dt_hour(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 3 1 10 2 11
packages/vaex-core/vaex/functions.py
def dt_hour(x): """Extracts the hour out of a datetime samples. :returns: an expression containing the hour extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.hour Expression = dt_hour(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 3 1 10 2 11 """ import pandas as pd return pd.Series(x).dt.hour.values
def dt_hour(x): """Extracts the hour out of a datetime samples. :returns: an expression containing the hour extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.hour Expression = dt_hour(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 3 1 10 2 11 """ import pandas as pd return pd.Series(x).dt.hour.values
[ "Extracts", "the", "hour", "out", "of", "a", "datetime", "samples", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L421-L447
[ "def", "dt_hour", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "hour", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_minute
Extracts the minute out of a datetime sample. :returns: an expression containing the minute extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.minute Expression = dt_minute(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 31 1 17 2 34
packages/vaex-core/vaex/functions.py
def dt_minute(x): """Extracts the minute out of a datetime samples. :returns: an expression containing the minute extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.minute Expression = dt_minute(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 31 1 17 2 34 """ import pandas as pd return pd.Series(x).dt.minute.values
def dt_minute(x): """Extracts the minute out of a datetime samples. :returns: an expression containing the minute extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.minute Expression = dt_minute(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 31 1 17 2 34 """ import pandas as pd return pd.Series(x).dt.minute.values
[ "Extracts", "the", "minute", "out", "of", "a", "datetime", "samples", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L450-L476
[ "def", "dt_minute", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "minute", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
dt_second
Extracts the second out of a datetime samples. :returns: an expression containing the second extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.second Expression = dt_second(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 0 1 34 2 22
packages/vaex-core/vaex/functions.py
def dt_second(x): """Extracts the second out of a datetime samples. :returns: an expression containing the second extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.second Expression = dt_second(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 0 1 34 2 22 """ import pandas as pd return pd.Series(x).dt.second.values
def dt_second(x): """Extracts the second out of a datetime samples. :returns: an expression containing the second extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.second Expression = dt_second(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 0 1 34 2 22 """ import pandas as pd return pd.Series(x).dt.second.values
[ "Extracts", "the", "second", "out", "of", "a", "datetime", "samples", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L479-L505
[ "def", "dt_second", "(", "x", ")", ":", "import", "pandas", "as", "pd", "return", "pd", ".", "Series", "(", "x", ")", ".", "dt", ".", "second", ".", "values" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
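The hour, minute and second accessors shown above compose like any other expression, so a derived quantity such as seconds since midnight can be built with plain arithmetic. A small sketch; the column name seconds_since_midnight is made up for illustration:

import numpy as np
import vaex

date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34'], dtype=np.datetime64)
df = vaex.from_arrays(date=date)
# combine the three datetime accessors into one virtual column
df['seconds_since_midnight'] = (df.date.dt.hour * 3600
                                + df.date.dt.minute * 60
                                + df.date.dt.second)
print(df)  # expected values: 12660 and 37054 for the two rows above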
test
str_capitalize
Capitalize the first letter of a string sample. :returns: an expression containing the capitalized strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.capitalize() Expression = str_capitalize(text) Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 Very pretty 2 Is coming 3 Our 4 Way.
packages/vaex-core/vaex/functions.py
def str_capitalize(x): """Capitalize the first letter of a string sample. :returns: an expression containing the capitalized strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.capitalize() Expression = str_capitalize(text) Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 Very pretty 2 Is coming 3 Our 4 Way. """ sl = _to_string_sequence(x).capitalize() return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_capitalize(x): """Capitalize the first letter of a string sample. :returns: an expression containing the capitalized strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.capitalize() Expression = str_capitalize(text) Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 Very pretty 2 Is coming 3 Our 4 Way. """ sl = _to_string_sequence(x).capitalize() return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Capitalize", "the", "first", "letter", "of", "a", "string", "sample", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L511-L540
[ "def", "str_capitalize", "(", "x", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "capitalize", "(", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_cat
Concatenate two string columns on a row-by-row basis. :param expression other: The expression of the other column to be concatenated. :returns: an expression containing the concatenated columns. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.cat(df.text) Expression = str_cat(text, text) Length: 5 dtype: str (expression) --------------------------------- 0 SomethingSomething 1 very prettyvery pretty 2 is comingis coming 3 ourour 4 way.way.
packages/vaex-core/vaex/functions.py
def str_cat(x, other): """Concatenate two string columns on a row-by-row basis. :param expression other: The expression of the other column to be concatenated. :returns: an expression containing the concatenated columns. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.cat(df.text) Expression = str_cat(text, text) Length: 5 dtype: str (expression) --------------------------------- 0 SomethingSomething 1 very prettyvery pretty 2 is comingis coming 3 ourour 4 way.way. """ sl1 = _to_string_sequence(x) sl2 = _to_string_sequence(other) sl = sl1.concat(sl2) return column.ColumnStringArrow.from_string_sequence(sl)
def str_cat(x, other): """Concatenate two string columns on a row-by-row basis. :param expression other: The expression of the other column to be concatenated. :returns: an expression containing the concatenated columns. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.cat(df.text) Expression = str_cat(text, text) Length: 5 dtype: str (expression) --------------------------------- 0 SomethingSomething 1 very prettyvery pretty 2 is comingis coming 3 ourour 4 way.way. """ sl1 = _to_string_sequence(x) sl2 = _to_string_sequence(other) sl = sl1.concat(sl2) return column.ColumnStringArrow.from_string_sequence(sl)
[ "Concatenate", "two", "string", "columns", "on", "a", "row", "-", "by", "-", "row", "basis", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L543-L575
[ "def", "str_cat", "(", "x", ",", "other", ")", ":", "sl1", "=", "_to_string_sequence", "(", "x", ")", "sl2", "=", "_to_string_sequence", "(", "other", ")", "sl", "=", "sl1", ".", "concat", "(", "sl2", ")", "return", "column", ".", "ColumnStringArrow", ".", "from_string_sequence", "(", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
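str_cat works element-wise on any two string columns, not just a column with itself as in the example above. A hedged sketch with two made-up columns (the names first and last are illustrative only):

import vaex

df = vaex.from_arrays(first=['John', 'Jane'], last=['Doe', 'Smith'])
# element-wise concatenation of two different string columns
full = df.first.str.cat(df.last)
print(full)  # expected: 'JohnDoe', 'JaneSmith'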
test
str_contains
Check if a string pattern or regex is contained within a sample of a string column. :param str pattern: A string or regex pattern :param bool regex: If True, treat the pattern as a regular expression; if False, match it as a literal string. :returns: an expression which evaluates to True if the pattern is found in a given sample, and to False otherwise. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.contains('very') Expression = str_contains(text, 'very') Length: 5 dtype: bool (expression) ---------------------------------- 0 False 1 True 2 False 3 False 4 False
packages/vaex-core/vaex/functions.py
def str_contains(x, pattern, regex=True): """Check if a string pattern or regex is contained within a sample of a string column. :param str pattern: A string or regex pattern :param bool regex: If True, :returns: an expression which is evaluated to True if the pattern is found in a given sample, and it is False otherwise. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.contains('very') Expression = str_contains(text, 'very') Length: 5 dtype: bool (expression) ---------------------------------- 0 False 1 True 2 False 3 False 4 False """ return _to_string_sequence(x).search(pattern, regex)
def str_contains(x, pattern, regex=True): """Check if a string pattern or regex is contained within a sample of a string column. :param str pattern: A string or regex pattern :param bool regex: If True, :returns: an expression which is evaluated to True if the pattern is found in a given sample, and it is False otherwise. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.contains('very') Expression = str_contains(text, 'very') Length: 5 dtype: bool (expression) ---------------------------------- 0 False 1 True 2 False 3 False 4 False """ return _to_string_sequence(x).search(pattern, regex)
[ "Check", "if", "a", "string", "pattern", "or", "regex", "is", "contained", "within", "a", "sample", "of", "a", "string", "column", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L612-L642
[ "def", "str_contains", "(", "x", ",", "pattern", ",", "regex", "=", "True", ")", ":", "return", "_to_string_sequence", "(", "x", ")", ".", "search", "(", "pattern", ",", "regex", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
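Because str_contains returns a boolean expression, it can be used directly to filter the DataFrame, and the regex flag switches between regex and literal matching. A sketch under the assumption that boolean expressions can be used for filtering via df[...] and that ordinary regex alternation is supported:

import vaex

df = vaex.from_arrays(text=['Something', 'very pretty', 'is coming', 'our', 'way.'])
mask_literal = df.text.str.contains('very', regex=False)        # literal match
mask_regex = df.text.str.contains('coming|pretty', regex=True)  # simple regex alternation
df_filtered = df[mask_regex]
print(df_filtered)  # expected rows: 'very pretty' and 'is coming'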
test
str_count
Count the occurrences of a pattern in each sample of a string column. :param str pat: A string or regex pattern :param bool regex: If True, treat the pattern as a regular expression; if False, count literal matches. :returns: an expression containing the number of times a pattern is found in each sample. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.count(pat="et", regex=False) Expression = str_count(text, pat='et', regex=False) Length: 5 dtype: int64 (expression) ----------------------------------- 0 1 1 1 2 0 3 0 4 0
packages/vaex-core/vaex/functions.py
def str_count(x, pat, regex=False): """Count the occurences of a pattern in sample of a string column. :param str pat: A string or regex pattern :param bool regex: If True, :returns: an expression containing the number of times a pattern is found in each sample. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.count(pat="et", regex=False) Expression = str_count(text, pat='et', regex=False) Length: 5 dtype: int64 (expression) ----------------------------------- 0 1 1 1 2 0 3 0 4 0 """ return _to_string_sequence(x).count(pat, regex)
def str_count(x, pat, regex=False): """Count the occurences of a pattern in sample of a string column. :param str pat: A string or regex pattern :param bool regex: If True, :returns: an expression containing the number of times a pattern is found in each sample. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.count(pat="et", regex=False) Expression = str_count(text, pat='et', regex=False) Length: 5 dtype: int64 (expression) ----------------------------------- 0 1 1 1 2 0 3 0 4 0 """ return _to_string_sequence(x).count(pat, regex)
[ "Count", "the", "occurences", "of", "a", "pattern", "in", "sample", "of", "a", "string", "column", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L646-L676
[ "def", "str_count", "(", "x", ",", "pat", ",", "regex", "=", "False", ")", ":", "return", "_to_string_sequence", "(", "x", ")", ".", "count", "(", "pat", ",", "regex", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
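With regex=True the pattern is treated as a regular expression, which makes counting character classes straightforward. A small sketch; character-class support is assumed to follow the usual regex rules:

import vaex

df = vaex.from_arrays(text=['Something', 'very pretty', 'is coming', 'our', 'way.'])
vowels = df.text.str.count('[aeiou]', regex=True)  # count vowels per sample
print(vowels)  # expected: 3, 2, 3, 2, 1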
test
str_find
Returns the lowest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the lowest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.find(sub="et") Expression = str_find(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1
packages/vaex-core/vaex/functions.py
def str_find(x, sub, start=0, end=None): """Returns the lowest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the lowest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.find(sub="et") Expression = str_find(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1 """ return _to_string_sequence(x).find(sub, start, 0 if end is None else end, end is None, True)
def str_find(x, sub, start=0, end=None): """Returns the lowest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the lowest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.find(sub="et") Expression = str_find(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1 """ return _to_string_sequence(x).find(sub, start, 0 if end is None else end, end is None, True)
[ "Returns", "the", "lowest", "indices", "in", "each", "string", "in", "a", "column", "where", "the", "provided", "substring", "is", "fully", "contained", "between", "within", "a", "sample", ".", "If", "the", "substring", "is", "not", "found", "-", "1", "is", "returned", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L731-L763
[ "def", "str_find", "(", "x", ",", "sub", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "_to_string_sequence", "(", "x", ")", ".", "find", "(", "sub", ",", "start", ",", "0", "if", "end", "is", "None", "else", "end", ",", "end", "is", "None", ",", "True", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
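The start and end parameters, left undescribed above, restrict the search window; the sketch below assumes they behave like the corresponding arguments of Python's str.find, which is not confirmed by this entry:

import vaex

df = vaex.from_arrays(text=['Something', 'very pretty', 'is coming', 'our', 'way.'])
# only search within the first five characters of every sample
idx = df.text.str.find('et', start=0, end=5)
print(idx)  # expected: 3 for 'Something', -1 elsewhere ('very pretty' has 'et' only at position 7)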
test
str_get
Extract a character at the specified position from each sample of a string column. Note that if the specified position is out of bounds of the string sample, this method returns '', while pandas returns nan. :param int i: The index location, at which to extract the character. :returns: an expression containing the extracted characters. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.get(5) Expression = str_get(text, 5) Length: 5 dtype: str (expression) --------------------------------- 0 h 1 p 2 m 3 4
packages/vaex-core/vaex/functions.py
def str_get(x, i): """Extract a character from each sample at the specified position from a string column. Note that if the specified position is out of bound of the string sample, this method returns '', while pandas retunrs nan. :param int i: The index location, at which to extract the character. :returns: an expression containing the extracted characters. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.get(5) Expression = str_get(text, 5) Length: 5 dtype: str (expression) --------------------------------- 0 h 1 p 2 m 3 4 """ x = _to_string_sequence(x) if i == -1: sl = x.slice_string_end(-1) else: sl = x.slice_string(i, i+1) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_get(x, i): """Extract a character from each sample at the specified position from a string column. Note that if the specified position is out of bound of the string sample, this method returns '', while pandas retunrs nan. :param int i: The index location, at which to extract the character. :returns: an expression containing the extracted characters. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.get(5) Expression = str_get(text, 5) Length: 5 dtype: str (expression) --------------------------------- 0 h 1 p 2 m 3 4 """ x = _to_string_sequence(x) if i == -1: sl = x.slice_string_end(-1) else: sl = x.slice_string(i, i+1) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Extract", "a", "character", "from", "each", "sample", "at", "the", "specified", "position", "from", "a", "string", "column", ".", "Note", "that", "if", "the", "specified", "position", "is", "out", "of", "bound", "of", "the", "string", "sample", "this", "method", "returns", "while", "pandas", "retunrs", "nan", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L770-L805
[ "def", "str_get", "(", "x", ",", "i", ")", ":", "x", "=", "_to_string_sequence", "(", "x", ")", "if", "i", "==", "-", "1", ":", "sl", "=", "x", ".", "slice_string_end", "(", "-", "1", ")", "else", ":", "sl", "=", "x", ".", "slice_string", "(", "i", ",", "i", "+", "1", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
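The implementation above special-cases i == -1, which makes it easy to grab the last character of every sample; out-of-range positions yield '' as noted in the docstring. A short sketch:

import vaex

df = vaex.from_arrays(text=['Something', 'very pretty', 'is coming', 'our', 'way.'])
last_chars = df.text.str.get(-1)  # i == -1 takes the last character of each sample
print(last_chars)                 # expected: 'g', 'y', 'g', 'r', '.'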
test
str_index
Returns the lowest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. It is the same as `str.find`. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the lowest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.index(sub="et") Expression = str_find(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1
packages/vaex-core/vaex/functions.py
def str_index(x, sub, start=0, end=None): """Returns the lowest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. It is the same as `str.find`. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the lowest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.index(sub="et") Expression = str_find(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1 """ return str_find(x, sub, start, end)
def str_index(x, sub, start=0, end=None): """Returns the lowest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. It is the same as `str.find`. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the lowest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.index(sub="et") Expression = str_find(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1 """ return str_find(x, sub, start, end)
[ "Returns", "the", "lowest", "indices", "in", "each", "string", "in", "a", "column", "where", "the", "provided", "substring", "is", "fully", "contained", "between", "within", "a", "sample", ".", "If", "the", "substring", "is", "not", "found", "-", "1", "is", "returned", ".", "It", "is", "the", "same", "as", "str", ".", "find", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L808-L840
[ "def", "str_index", "(", "x", ",", "sub", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "str_find", "(", "x", ",", "sub", ",", "start", ",", "end", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_join
Joins the list of strings in each sample into a single string, using the given separator.
packages/vaex-core/vaex/functions.py
def str_join(x, sep): """Same as find (difference with pandas is that it does not raise a ValueError)""" sl = _to_string_list_sequence(x).join(sep) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_join(x, sep): """Same as find (difference with pandas is that it does not raise a ValueError)""" sl = _to_string_list_sequence(x).join(sep) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Same", "as", "find", "(", "difference", "with", "pandas", "is", "that", "it", "does", "not", "raise", "a", "ValueError", ")" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L843-L846
[ "def", "str_join", "(", "x", ",", "sep", ")", ":", "sl", "=", "_to_string_list_sequence", "(", "x", ")", ".", "join", "(", "sep", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
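The docstring above gives no usage example; join operates on samples that are lists of strings (note the _to_string_list_sequence call), so it is typically paired with a split step. The sketch below assumes a str.split method is available in this version to produce such a list column; treat it as illustrative rather than verified:

import vaex

df = vaex.from_arrays(text=['very pretty', 'is coming'])
words = df.text.str.split(' ')  # assumed API: yields a list-of-strings column
dashed = words.str.join('-')    # join the pieces of each sample with a dash
print(dashed)                   # expected: 'very-pretty', 'is-coming'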
test
str_lower
Converts string samples to lower case. :returns: an expression containing the converted strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.lower() Expression = str_lower(text) Length: 5 dtype: str (expression) --------------------------------- 0 something 1 very pretty 2 is coming 3 our 4 way.
packages/vaex-core/vaex/functions.py
def str_lower(x): """Converts string samples to lower case. :returns: an expression containing the converted strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.lower() Expression = str_lower(text) Length: 5 dtype: str (expression) --------------------------------- 0 something 1 very pretty 2 is coming 3 our 4 way. """ sl = _to_string_sequence(x).lower() return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_lower(x): """Converts string samples to lower case. :returns: an expression containing the converted strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.lower() Expression = str_lower(text) Length: 5 dtype: str (expression) --------------------------------- 0 something 1 very pretty 2 is coming 3 our 4 way. """ sl = _to_string_sequence(x).lower() return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Converts", "string", "samples", "to", "lower", "case", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L945-L974
[ "def", "str_lower", "(", "x", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "lower", "(", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_lstrip
Remove leading characters from a string sample. :param str to_strip: The string to be removed :returns: an expression containing the modified string column. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.lstrip(to_strip='very ') Expression = str_lstrip(text, to_strip='very ') Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 pretty 2 is coming 3 our 4 way.
packages/vaex-core/vaex/functions.py
def str_lstrip(x, to_strip=None): """Remove leading characters from a string sample. :param str to_strip: The string to be removed :returns: an expression containing the modified string column. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.lstrip(to_strip='very ') Expression = str_lstrip(text, to_strip='very ') Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 pretty 2 is coming 3 our 4 way. """ # in c++ we give empty string the same meaning as None sl = _to_string_sequence(x).lstrip('' if to_strip is None else to_strip) if to_strip != '' else x return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_lstrip(x, to_strip=None): """Remove leading characters from a string sample. :param str to_strip: The string to be removed :returns: an expression containing the modified string column. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.lstrip(to_strip='very ') Expression = str_lstrip(text, to_strip='very ') Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 pretty 2 is coming 3 our 4 way. """ # in c++ we give empty string the same meaning as None sl = _to_string_sequence(x).lstrip('' if to_strip is None else to_strip) if to_strip != '' else x return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Remove", "leading", "characters", "from", "a", "string", "sample", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L977-L1008
[ "def", "str_lstrip", "(", "x", ",", "to_strip", "=", "None", ")", ":", "# in c++ we give empty string the same meaning as None", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "lstrip", "(", "''", "if", "to_strip", "is", "None", "else", "to_strip", ")", "if", "to_strip", "!=", "''", "else", "x", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_pad
Pad strings in a given column. :param int width: The total width of the string :param str side: If 'left' then pad on the left, if 'right' then pad on the right side of the string; 'both' pads on both sides. :param str fillchar: The character used for padding. :returns: an expression containing the padded strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.pad(width=10, side='left', fillchar='!') Expression = str_pad(text, width=10, side='left', fillchar='!') Length: 5 dtype: str (expression) --------------------------------- 0 !Something 1 very pretty 2 !is coming 3 !!!!!!!our 4 !!!!!!way.
packages/vaex-core/vaex/functions.py
def str_pad(x, width, side='left', fillchar=' '): """Pad strings in a given column. :param int width: The total width of the string :param str side: If 'left' than pad on the left, if 'right' than pad on the right side the string. :param str fillchar: The character used for padding. :returns: an expression containing the padded strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.pad(width=10, side='left', fillchar='!') Expression = str_pad(text, width=10, side='left', fillchar='!') Length: 5 dtype: str (expression) --------------------------------- 0 !Something 1 very pretty 2 !is coming 3 !!!!!!!our 4 !!!!!!way. """ sl = _to_string_sequence(x).pad(width, fillchar, side in ['left', 'both'], side in ['right', 'both']) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_pad(x, width, side='left', fillchar=' '): """Pad strings in a given column. :param int width: The total width of the string :param str side: If 'left' than pad on the left, if 'right' than pad on the right side the string. :param str fillchar: The character used for padding. :returns: an expression containing the padded strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.pad(width=10, side='left', fillchar='!') Expression = str_pad(text, width=10, side='left', fillchar='!') Length: 5 dtype: str (expression) --------------------------------- 0 !Something 1 very pretty 2 !is coming 3 !!!!!!!our 4 !!!!!!way. """ sl = _to_string_sequence(x).pad(width, fillchar, side in ['left', 'both'], side in ['right', 'both']) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Pad", "strings", "in", "a", "given", "column", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1045-L1077
[ "def", "str_pad", "(", "x", ",", "width", ",", "side", "=", "'left'", ",", "fillchar", "=", "' '", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "pad", "(", "width", ",", "fillchar", ",", "side", "in", "[", "'left'", ",", "'both'", "]", ",", "side", "in", "[", "'right'", ",", "'both'", "]", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
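Besides 'left' and 'right', the pad call in the implementation above also accepts side='both', which pads (roughly centers) the strings; a small sketch, with the caveat that how an odd amount of padding is split between the two sides is not documented here:

import vaex

df = vaex.from_arrays(text=['our', 'way.'])
centered = df.text.str.pad(width=8, side='both', fillchar='-')
print(centered)  # expected: 'our' and 'way.' padded with '-' on both sides up to width 8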
test
str_repeat
Duplicate each string in a column. :param int repeats: number of times each string sample is to be duplicated. :returns: an expression containing the duplicated strings Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.repeat(3) Expression = str_repeat(text, 3) Length: 5 dtype: str (expression) --------------------------------- 0 SomethingSomethingSomething 1 very prettyvery prettyvery pretty 2 is comingis comingis coming 3 ourourour 4 way.way.way.
packages/vaex-core/vaex/functions.py
def str_repeat(x, repeats): """Duplicate each string in a column. :param int repeats: number of times each string sample is to be duplicated. :returns: an expression containing the duplicated strings Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.repeat(3) Expression = str_repeat(text, 3) Length: 5 dtype: str (expression) --------------------------------- 0 SomethingSomethingSomething 1 very prettyvery prettyvery pretty 2 is comingis comingis coming 3 ourourour 4 way.way.way. """ sl = _to_string_sequence(x).repeat(repeats) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_repeat(x, repeats): """Duplicate each string in a column. :param int repeats: number of times each string sample is to be duplicated. :returns: an expression containing the duplicated strings Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.repeat(3) Expression = str_repeat(text, 3) Length: 5 dtype: str (expression) --------------------------------- 0 SomethingSomethingSomething 1 very prettyvery prettyvery pretty 2 is comingis comingis coming 3 ourourour 4 way.way.way. """ sl = _to_string_sequence(x).repeat(repeats) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Duplicate", "each", "string", "in", "a", "column", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1080-L1110
[ "def", "str_repeat", "(", "x", ",", "repeats", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "repeat", "(", "repeats", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_replace
Replace occurrences of a pattern/regex in a column with some other string. :param str pat: string or a regex pattern :param str repl: a replacement string :param int n: number of replacements to be made from the start. If -1 make all replacements. :param int flags: flags passed through to the underlying replace implementation. :param bool regex: If True, treat the pattern as a regular expression; if False, replace literal occurrences. :returns: an expression containing the string replacements. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.replace(pat='et', repl='__') Expression = str_replace(text, pat='et', repl='__') Length: 5 dtype: str (expression) --------------------------------- 0 Som__hing 1 very pr__ty 2 is coming 3 our 4 way.
packages/vaex-core/vaex/functions.py
def str_replace(x, pat, repl, n=-1, flags=0, regex=False): """Replace occurences of a pattern/regex in a column with some other string. :param str pattern: string or a regex pattern :param str replace: a replacement string :param int n: number of replacements to be made from the start. If -1 make all replacements. :param int flags: ?? :param bool regex: If True, ...? :returns: an expression containing the string replacements. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.replace(pat='et', repl='__') Expression = str_replace(text, pat='et', repl='__') Length: 5 dtype: str (expression) --------------------------------- 0 Som__hing 1 very pr__ty 2 is coming 3 our 4 way. """ sl = _to_string_sequence(x).replace(pat, repl, n, flags, regex) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_replace(x, pat, repl, n=-1, flags=0, regex=False): """Replace occurences of a pattern/regex in a column with some other string. :param str pattern: string or a regex pattern :param str replace: a replacement string :param int n: number of replacements to be made from the start. If -1 make all replacements. :param int flags: ?? :param bool regex: If True, ...? :returns: an expression containing the string replacements. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.replace(pat='et', repl='__') Expression = str_replace(text, pat='et', repl='__') Length: 5 dtype: str (expression) --------------------------------- 0 Som__hing 1 very pr__ty 2 is coming 3 our 4 way. """ sl = _to_string_sequence(x).replace(pat, repl, n, flags, regex) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Replace", "occurences", "of", "a", "pattern", "/", "regex", "in", "a", "column", "with", "some", "other", "string", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1113-L1147
[ "def", "str_replace", "(", "x", ",", "pat", ",", "repl", ",", "n", "=", "-", "1", ",", "flags", "=", "0", ",", "regex", "=", "False", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "replace", "(", "pat", ",", "repl", ",", "n", ",", "flags", ",", "regex", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
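A short sketch of the n and regex parameters; flags is passed straight through to the underlying implementation and is not exercised here, and regex support is assumed to follow the usual rules:

import vaex

df = vaex.from_arrays(text=['Something', 'very pretty', 'is coming'])
first_only = df.text.str.replace('t', '_', n=1)               # replace only the first literal match per sample
no_vowels = df.text.str.replace('[aeiou]+', '*', regex=True)  # collapse runs of vowels
print(first_only)  # expected: 'Some_hing', 'very pre_ty', 'is coming' (no 't' to replace in the last row)
print(no_vowels)   # expected: 'S*m*th*ng', 'v*ry pr*tty', '*s c*m*ng'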
test
str_rfind
Returns the highest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the highest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rfind(sub="et") Expression = str_rfind(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1
packages/vaex-core/vaex/functions.py
def str_rfind(x, sub, start=0, end=None): """Returns the highest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the highest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rfind(sub="et") Expression = str_rfind(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1 """ return _to_string_sequence(x).find(sub, start, 0 if end is None else end, end is None, False)
def str_rfind(x, sub, start=0, end=None): """Returns the highest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the highest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rfind(sub="et") Expression = str_rfind(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1 """ return _to_string_sequence(x).find(sub, start, 0 if end is None else end, end is None, False)
[ "Returns", "the", "highest", "indices", "in", "each", "string", "in", "a", "column", "where", "the", "provided", "substring", "is", "fully", "contained", "between", "within", "a", "sample", ".", "If", "the", "substring", "is", "not", "found", "-", "1", "is", "returned", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1150-L1182
[ "def", "str_rfind", "(", "x", ",", "sub", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "_to_string_sequence", "(", "x", ")", ".", "find", "(", "sub", ",", "start", ",", "0", "if", "end", "is", "None", "else", "end", ",", "end", "is", "None", ",", "False", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_rindex
Returns the highest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. Same as `str.rfind`. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the highest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rindex(sub="et") Expression = str_rindex(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1
packages/vaex-core/vaex/functions.py
def str_rindex(x, sub, start=0, end=None): """Returns the highest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. Same as `str.rfind`. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the highest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rindex(sub="et") Expression = str_rindex(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1 """ return str_rfind(x, sub, start, end)
def str_rindex(x, sub, start=0, end=None): """Returns the highest indices in each string in a column, where the provided substring is fully contained between within a sample. If the substring is not found, -1 is returned. Same as `str.rfind`. :param str sub: A substring to be found in the samples :param int start: :param int end: :returns: an expression containing the highest indices specifying the start of the substring. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rindex(sub="et") Expression = str_rindex(text, sub='et') Length: 5 dtype: int64 (expression) ----------------------------------- 0 3 1 7 2 -1 3 -1 4 -1 """ return str_rfind(x, sub, start, end)
[ "Returns", "the", "highest", "indices", "in", "each", "string", "in", "a", "column", "where", "the", "provided", "substring", "is", "fully", "contained", "between", "within", "a", "sample", ".", "If", "the", "substring", "is", "not", "found", "-", "1", "is", "returned", ".", "Same", "as", "str", ".", "rfind", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1185-L1217
[ "def", "str_rindex", "(", "x", ",", "sub", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "return", "str_rfind", "(", "x", ",", "sub", ",", "start", ",", "end", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_rjust
Fills the left side of string samples with a specified character such that the strings are right justified. :param int width: The minimal width of the strings. :param str fillchar: The character used for filling. :returns: an expression containing the filled strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rjust(width=10, fillchar='!') Expression = str_rjust(text, width=10, fillchar='!') Length: 5 dtype: str (expression) --------------------------------- 0 !Something 1 very pretty 2 !is coming 3 !!!!!!!our 4 !!!!!!way.
packages/vaex-core/vaex/functions.py
def str_rjust(x, width, fillchar=' '): """Fills the left side of string samples with a specified character such that the strings are left-hand justified. :param int width: The minimal width of the strings. :param str fillchar: The character used for filling. :returns: an expression containing the filled strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rjust(width=10, fillchar='!') Expression = str_rjust(text, width=10, fillchar='!') Length: 5 dtype: str (expression) --------------------------------- 0 !Something 1 very pretty 2 !is coming 3 !!!!!!!our 4 !!!!!!way. """ sl = _to_string_sequence(x).pad(width, fillchar, True, False) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_rjust(x, width, fillchar=' '): """Fills the left side of string samples with a specified character such that the strings are left-hand justified. :param int width: The minimal width of the strings. :param str fillchar: The character used for filling. :returns: an expression containing the filled strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rjust(width=10, fillchar='!') Expression = str_rjust(text, width=10, fillchar='!') Length: 5 dtype: str (expression) --------------------------------- 0 !Something 1 very pretty 2 !is coming 3 !!!!!!!our 4 !!!!!!way. """ sl = _to_string_sequence(x).pad(width, fillchar, True, False) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Fills", "the", "left", "side", "of", "string", "samples", "with", "a", "specified", "character", "such", "that", "the", "strings", "are", "left", "-", "hand", "justified", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1220-L1251
[ "def", "str_rjust", "(", "x", ",", "width", ",", "fillchar", "=", "' '", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "pad", "(", "width", ",", "fillchar", ",", "True", ",", "False", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_rstrip
Remove trailing characters from a string sample. :param str to_strip: The string to be removed :returns: an expression containing the modified string column. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rstrip(to_strip='ing') Expression = str_rstrip(text, to_strip='ing') Length: 5 dtype: str (expression) --------------------------------- 0 Someth 1 very pretty 2 is com 3 our 4 way.
packages/vaex-core/vaex/functions.py
def str_rstrip(x, to_strip=None): """Remove trailing characters from a string sample. :param str to_strip: The string to be removed :returns: an expression containing the modified string column. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rstrip(to_strip='ing') Expression = str_rstrip(text, to_strip='ing') Length: 5 dtype: str (expression) --------------------------------- 0 Someth 1 very pretty 2 is com 3 our 4 way. """ # in c++ we give empty string the same meaning as None sl = _to_string_sequence(x).rstrip('' if to_strip is None else to_strip) if to_strip != '' else x return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_rstrip(x, to_strip=None): """Remove trailing characters from a string sample. :param str to_strip: The string to be removed :returns: an expression containing the modified string column. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rstrip(to_strip='ing') Expression = str_rstrip(text, to_strip='ing') Length: 5 dtype: str (expression) --------------------------------- 0 Someth 1 very pretty 2 is com 3 our 4 way. """ # in c++ we give empty string the same meaning as None sl = _to_string_sequence(x).rstrip('' if to_strip is None else to_strip) if to_strip != '' else x return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Remove", "trailing", "characters", "from", "a", "string", "sample", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1256-L1287
[ "def", "str_rstrip", "(", "x", ",", "to_strip", "=", "None", ")", ":", "# in c++ we give empty string the same meaning as None", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "rstrip", "(", "''", "if", "to_strip", "is", "None", "else", "to_strip", ")", "if", "to_strip", "!=", "''", "else", "x", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_slice
Slice substrings from each string element in a column. :param int start: The start position for the slice operation. :param int end: The stop position for the slice operation. :returns: an expression containing the sliced substrings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.slice(start=2, stop=5) Expression = str_pandas_slice(text, start=2, stop=5) Length: 5 dtype: str (expression) --------------------------------- 0 met 1 ry 2 co 3 r 4 y.
packages/vaex-core/vaex/functions.py
def str_slice(x, start=0, stop=None): # TODO: support n """Slice substrings from each string element in a column. :param int start: The start position for the slice operation. :param int end: The stop position for the slice operation. :returns: an expression containing the sliced substrings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.slice(start=2, stop=5) Expression = str_pandas_slice(text, start=2, stop=5) Length: 5 dtype: str (expression) --------------------------------- 0 met 1 ry 2 co 3 r 4 y. """ if stop is None: sll = _to_string_sequence(x).slice_string_end(start) else: sll = _to_string_sequence(x).slice_string(start, stop) return sll
def str_slice(x, start=0, stop=None): # TODO: support n """Slice substrings from each string element in a column. :param int start: The start position for the slice operation. :param int end: The stop position for the slice operation. :returns: an expression containing the sliced substrings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.slice(start=2, stop=5) Expression = str_pandas_slice(text, start=2, stop=5) Length: 5 dtype: str (expression) --------------------------------- 0 met 1 ry 2 co 3 r 4 y. """ if stop is None: sll = _to_string_sequence(x).slice_string_end(start) else: sll = _to_string_sequence(x).slice_string(start, stop) return sll
[ "Slice", "substrings", "from", "each", "string", "element", "in", "a", "column", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1290-L1324
[ "def", "str_slice", "(", "x", ",", "start", "=", "0", ",", "stop", "=", "None", ")", ":", "# TODO: support n", "if", "stop", "is", "None", ":", "sll", "=", "_to_string_sequence", "(", "x", ")", ".", "slice_string_end", "(", "start", ")", "else", ":", "sll", "=", "_to_string_sequence", "(", "x", ")", ".", "slice_string", "(", "start", ",", "stop", ")", "return", "sll" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
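When stop is omitted the slice runs to the end of each string (the slice_string_end branch in the implementation above); a quick sketch:

import vaex

df = vaex.from_arrays(text=['Something', 'very pretty', 'is coming'])
tails = df.text.str.slice(start=5)  # no stop: slice to the end of each sample
print(tails)                        # expected: 'hing', 'pretty', 'ming'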
test
str_strip
Removes leading and trailing characters. Strips whitespace (including new lines), or a set of specified characters from each string sample in a column, from both the left and right sides. :param str to_strip: The characters to be removed. All combinations of the characters will be removed. If None, it removes whitespace. :returns: an expression containing the modified string samples. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.strip(to_strip='very') Expression = str_strip(text, to_strip='very') Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 prett 2 is coming 3 ou 4 way.
packages/vaex-core/vaex/functions.py
def str_strip(x, to_strip=None): """Removes leading and trailing characters. Strips whitespaces (including new lines), or a set of specified characters from each string saple in a column, both from the left right sides. :param str to_strip: The characters to be removed. All combinations of the characters will be removed. If None, it removes whitespaces. :param returns: an expression containing the modified string samples. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.strip(to_strip='very') Expression = str_strip(text, to_strip='very') Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 prett 2 is coming 3 ou 4 way. """ # in c++ we give empty string the same meaning as None sl = _to_string_sequence(x).strip('' if to_strip is None else to_strip) if to_strip != '' else x return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_strip(x, to_strip=None): """Removes leading and trailing characters. Strips whitespaces (including new lines), or a set of specified characters from each string saple in a column, both from the left right sides. :param str to_strip: The characters to be removed. All combinations of the characters will be removed. If None, it removes whitespaces. :param returns: an expression containing the modified string samples. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.strip(to_strip='very') Expression = str_strip(text, to_strip='very') Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 prett 2 is coming 3 ou 4 way. """ # in c++ we give empty string the same meaning as None sl = _to_string_sequence(x).strip('' if to_strip is None else to_strip) if to_strip != '' else x return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Removes", "leading", "and", "trailing", "characters", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1371-L1407
[ "def", "str_strip", "(", "x", ",", "to_strip", "=", "None", ")", ":", "# in c++ we give empty string the same meaning as None", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "strip", "(", "''", "if", "to_strip", "is", "None", "else", "to_strip", ")", "if", "to_strip", "!=", "''", "else", "x", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_title
Converts all string samples to titlecase. :returns: an expression containing the converted strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.title() Expression = str_title(text) Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 Very Pretty 2 Is Coming 3 Our 4 Way.
packages/vaex-core/vaex/functions.py
def str_title(x): """Converts all string samples to titlecase. :returns: an expression containing the converted strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.title() Expression = str_title(text) Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 Very Pretty 2 Is Coming 3 Our 4 Way. """ sl = _to_string_sequence(x).title() return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_title(x): """Converts all string samples to titlecase. :returns: an expression containing the converted strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.title() Expression = str_title(text) Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 Very Pretty 2 Is Coming 3 Our 4 Way. """ sl = _to_string_sequence(x).title() return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Converts", "all", "string", "samples", "to", "titlecase", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1412-L1441
[ "def", "str_title", "(", "x", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "title", "(", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
str_upper
Converts all strings in a column to uppercase. :returns: an expression containing the converted strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.upper() Expression = str_upper(text) Length: 5 dtype: str (expression) --------------------------------- 0 SOMETHING 1 VERY PRETTY 2 IS COMING 3 OUR 4 WAY.
packages/vaex-core/vaex/functions.py
def str_upper(x): """Converts all strings in a column to uppercase. :returns: an expression containing the converted strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.upper() Expression = str_upper(text) Length: 5 dtype: str (expression) --------------------------------- 0 SOMETHING 1 VERY PRETTY 2 IS COMING 3 OUR 4 WAY. """ sl = _to_string_sequence(x).upper() return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def str_upper(x): """Converts all strings in a column to uppercase. :returns: an expression containing the converted strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.upper() Expression = str_upper(text) Length: 5 dtype: str (expression) --------------------------------- 0 SOMETHING 1 VERY PRETTY 2 IS COMING 3 OUR 4 WAY. """ sl = _to_string_sequence(x).upper() return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Converts", "all", "strings", "in", "a", "column", "to", "uppercase", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1445-L1476
[ "def", "str_upper", "(", "x", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "upper", "(", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
format
Uses http://www.cplusplus.com/reference/string/to_string/ for formatting
packages/vaex-core/vaex/functions.py
def format(x, format): """Uses http://www.cplusplus.com/reference/string/to_string/ for formatting""" # don't change the dtype, otherwise for each block the dtype may be different (string length) sl = vaex.strings.format(x, format) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
def format(x, format): """Uses http://www.cplusplus.com/reference/string/to_string/ for formatting""" # don't change the dtype, otherwise for each block the dtype may be different (string length) sl = vaex.strings.format(x, format) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "Uses", "http", ":", "//", "www", ".", "cplusplus", ".", "com", "/", "reference", "/", "string", "/", "to_string", "/", "for", "formatting" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L1720-L1724
[ "def", "format", "(", "x", ",", "format", ")", ":", "# don't change the dtype, otherwise for each block the dtype may be different (string length)", "sl", "=", "vaex", ".", "strings", ".", "format", "(", "x", ",", "format", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
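A hedged illustration of what a printf/to_string-style format spec produces; this uses plain Python/NumPy rather than the C++ vaex.strings.format path wrapped above, and the column values are made up.

import numpy as np

values = np.array([3.14159, 2.71828, 1.41421])
# '%.2f' is a to_string/printf-style spec like the one the wrapper forwards to C++
formatted = ['%.2f' % v for v in values]
print(formatted)  # ['3.14', '2.72', '1.41']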
test
Hdf5MemoryMapped.write_meta
ucds, descriptions and units are written as attributes in the hdf5 file, instead of in a separate file as the default :func:`Dataset.write_meta` does.
packages/vaex-hdf5/vaex/hdf5/dataset.py
def write_meta(self): """ucds, descriptions and units are written as attributes in the hdf5 file, instead of a seperate file as the default :func:`Dataset.write_meta`. """ with h5py.File(self.filename, "r+") as h5file_output: h5table_root = h5file_output[self.h5table_root_name] if self.description is not None: h5table_root.attrs["description"] = self.description h5columns = h5table_root if self._version == 1 else h5table_root['columns'] for column_name in self.columns.keys(): h5dataset = None if column_name in h5columns: h5dataset = h5columns[column_name] else: for group in h5columns.values(): if 'type' in group.attrs: if group.attrs['type'] in ['csr_matrix']: for name, column in group.items(): if name == column_name: h5dataset = column if h5dataset is None: raise ValueError('column {} not found'.format(column_name)) for name, values in [("ucd", self.ucds), ("unit", self.units), ("description", self.descriptions)]: if column_name in values: value = ensure_string(values[column_name], cast=True) h5dataset.attrs[name] = value else: if name in h5columns.attrs: del h5dataset.attrs[name]
def write_meta(self): """ucds, descriptions and units are written as attributes in the hdf5 file, instead of a seperate file as the default :func:`Dataset.write_meta`. """ with h5py.File(self.filename, "r+") as h5file_output: h5table_root = h5file_output[self.h5table_root_name] if self.description is not None: h5table_root.attrs["description"] = self.description h5columns = h5table_root if self._version == 1 else h5table_root['columns'] for column_name in self.columns.keys(): h5dataset = None if column_name in h5columns: h5dataset = h5columns[column_name] else: for group in h5columns.values(): if 'type' in group.attrs: if group.attrs['type'] in ['csr_matrix']: for name, column in group.items(): if name == column_name: h5dataset = column if h5dataset is None: raise ValueError('column {} not found'.format(column_name)) for name, values in [("ucd", self.ucds), ("unit", self.units), ("description", self.descriptions)]: if column_name in values: value = ensure_string(values[column_name], cast=True) h5dataset.attrs[name] = value else: if name in h5columns.attrs: del h5dataset.attrs[name]
[ "ucds", "descriptions", "and", "units", "are", "written", "as", "attributes", "in", "the", "hdf5", "file", "instead", "of", "a", "seperate", "file", "as", "the", "default", ":", "func", ":", "Dataset", ".", "write_meta", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-hdf5/vaex/hdf5/dataset.py#L70-L98
[ "def", "write_meta", "(", "self", ")", ":", "with", "h5py", ".", "File", "(", "self", ".", "filename", ",", "\"r+\"", ")", "as", "h5file_output", ":", "h5table_root", "=", "h5file_output", "[", "self", ".", "h5table_root_name", "]", "if", "self", ".", "description", "is", "not", "None", ":", "h5table_root", ".", "attrs", "[", "\"description\"", "]", "=", "self", ".", "description", "h5columns", "=", "h5table_root", "if", "self", ".", "_version", "==", "1", "else", "h5table_root", "[", "'columns'", "]", "for", "column_name", "in", "self", ".", "columns", ".", "keys", "(", ")", ":", "h5dataset", "=", "None", "if", "column_name", "in", "h5columns", ":", "h5dataset", "=", "h5columns", "[", "column_name", "]", "else", ":", "for", "group", "in", "h5columns", ".", "values", "(", ")", ":", "if", "'type'", "in", "group", ".", "attrs", ":", "if", "group", ".", "attrs", "[", "'type'", "]", "in", "[", "'csr_matrix'", "]", ":", "for", "name", ",", "column", "in", "group", ".", "items", "(", ")", ":", "if", "name", "==", "column_name", ":", "h5dataset", "=", "column", "if", "h5dataset", "is", "None", ":", "raise", "ValueError", "(", "'column {} not found'", ".", "format", "(", "column_name", ")", ")", "for", "name", ",", "values", "in", "[", "(", "\"ucd\"", ",", "self", ".", "ucds", ")", ",", "(", "\"unit\"", ",", "self", ".", "units", ")", ",", "(", "\"description\"", ",", "self", ".", "descriptions", ")", "]", ":", "if", "column_name", "in", "values", ":", "value", "=", "ensure_string", "(", "values", "[", "column_name", "]", ",", "cast", "=", "True", ")", "h5dataset", ".", "attrs", "[", "name", "]", "=", "value", "else", ":", "if", "name", "in", "h5columns", ".", "attrs", ":", "del", "h5dataset", ".", "attrs", "[", "name", "]" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
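A hedged usage sketch for write_meta; the file name and column name are hypothetical, and the dict-style ucds/units/descriptions accessors are assumed from the attributes the method reads above.

import vaex

ds = vaex.open('catalog.hdf5')            # hypothetical existing vaex hdf5 file
ds.descriptions['x'] = 'x position'       # written as a 'description' attribute
ds.units['x'] = 'km'                      # written as a 'unit' attribute (cast to str)
ds.ucds['x'] = 'pos.cartesian.x'          # written as a 'ucd' attribute
ds.write_meta()                           # persists the metadata into the hdf5 file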
test
Hdf5MemoryMapped.create
Create a new (empty) hdf5 file with columns given by column names, of length N Optionally, numpy dtypes can be passed, default is floats
packages/vaex-hdf5/vaex/hdf5/dataset.py
def create(cls, path, N, column_names, dtypes=None, write=True): """Create a new (empty) hdf5 file with columns given by column names, of length N Optionally, numpy dtypes can be passed, default is floats """ dtypes = dtypes or [np.float] * len(column_names) if N == 0: raise ValueError("Cannot export empty table") with h5py.File(path, "w") as h5file_output: h5data_output = h5file_output.require_group("data") for column_name, dtype in zip(column_names, dtypes): shape = (N,) print(dtype) if dtype.type == np.datetime64: array = h5file_output.require_dataset("/data/%s" % column_name, shape=shape, dtype=np.int64) array.attrs["dtype"] = dtype.name else: array = h5file_output.require_dataset("/data/%s" % column_name, shape=shape, dtype=dtype) array[0] = array[0] # make sure the array really exists return Hdf5MemoryMapped(path, write=write)
def create(cls, path, N, column_names, dtypes=None, write=True): """Create a new (empty) hdf5 file with columns given by column names, of length N Optionally, numpy dtypes can be passed, default is floats """ dtypes = dtypes or [np.float] * len(column_names) if N == 0: raise ValueError("Cannot export empty table") with h5py.File(path, "w") as h5file_output: h5data_output = h5file_output.require_group("data") for column_name, dtype in zip(column_names, dtypes): shape = (N,) print(dtype) if dtype.type == np.datetime64: array = h5file_output.require_dataset("/data/%s" % column_name, shape=shape, dtype=np.int64) array.attrs["dtype"] = dtype.name else: array = h5file_output.require_dataset("/data/%s" % column_name, shape=shape, dtype=dtype) array[0] = array[0] # make sure the array really exists return Hdf5MemoryMapped(path, write=write)
[ "Create", "a", "new", "(", "empty", ")", "hdf5", "file", "with", "columns", "given", "by", "column", "names", "of", "length", "N" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-hdf5/vaex/hdf5/dataset.py#L101-L122
[ "def", "create", "(", "cls", ",", "path", ",", "N", ",", "column_names", ",", "dtypes", "=", "None", ",", "write", "=", "True", ")", ":", "dtypes", "=", "dtypes", "or", "[", "np", ".", "float", "]", "*", "len", "(", "column_names", ")", "if", "N", "==", "0", ":", "raise", "ValueError", "(", "\"Cannot export empty table\"", ")", "with", "h5py", ".", "File", "(", "path", ",", "\"w\"", ")", "as", "h5file_output", ":", "h5data_output", "=", "h5file_output", ".", "require_group", "(", "\"data\"", ")", "for", "column_name", ",", "dtype", "in", "zip", "(", "column_names", ",", "dtypes", ")", ":", "shape", "=", "(", "N", ",", ")", "print", "(", "dtype", ")", "if", "dtype", ".", "type", "==", "np", ".", "datetime64", ":", "array", "=", "h5file_output", ".", "require_dataset", "(", "\"/data/%s\"", "%", "column_name", ",", "shape", "=", "shape", ",", "dtype", "=", "np", ".", "int64", ")", "array", ".", "attrs", "[", "\"dtype\"", "]", "=", "dtype", ".", "name", "else", ":", "array", "=", "h5file_output", ".", "require_dataset", "(", "\"/data/%s\"", "%", "column_name", ",", "shape", "=", "shape", ",", "dtype", "=", "dtype", ")", "array", "[", "0", "]", "=", "array", "[", "0", "]", "# make sure the array really exists", "return", "Hdf5MemoryMapped", "(", "path", ",", "write", "=", "write", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
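A hedged sketch built only from the signature above; the output path is hypothetical, and dtypes are passed as explicit numpy dtype objects since the method inspects dtype.type.

import numpy as np
from vaex.hdf5.dataset import Hdf5MemoryMapped

ds = Hdf5MemoryMapped.create('empty.hdf5', N=1000,
                             column_names=['x', 'y', 'flag'],
                             dtypes=[np.dtype('f8'), np.dtype('f8'), np.dtype('i4')])
# 'empty.hdf5' now holds three zero-filled, memory-mapped columns of length 1000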
test
readcol
The default return is a two dimensional float array. If you want a list of columns output instead of a 2D array, pass 'twod=False'. In this case, each column's data type will be automatically detected. Example usage: CASE 1) a table has the format: X Y Z 0.0 2.4 8.2 1.0 3.4 5.6 0.7 3.2 2.1 ... names,(x,y,z)=readcol("myfile.tbl",names=True,twod=False) or x,y,z=readcol("myfile.tbl",skipline=1,twod=False) or names,xx = readcol("myfile.tbl",names=True) or xxdict = readcol("myfile.tbl",asdict=True) or xxstruct = readcol("myfile.tbl",asStruct=True) CASE 2) no title is contained into the table, then there is no need to skipline: x,y,z=readcol("myfile.tbl") CASE 3) there is a names column and then more descriptive text: X Y Z (deg) (deg) (km/s) 0.0 2.4 8.2 1.0 3.4. 5.6 ... then use: names,x,y,z=readcol("myfile.tbl",names=True,skipline=1,twod=False) or x,y,z=readcol("myfile.tbl",skipline=2,twod=False) INPUTS: fsep - field separator, e.g. for comma separated value (csv) files skipline - number of lines to ignore at the start of the file names - read / don't read in the first line as a list of column names can specify an integer line number too, though it will be the line number after skipping lines twod - two dimensional or one dimensional output nullval - if specified, all instances of this value will be replaced with a floating NaN asdict - zips names with data to create a dict with column headings tied to column data. If asdict=True, names will be set to True asStruct - same as asdict, but returns a structure instead of a dictionary (i.e. you call struct.key instead of struct['key']) fixedformat - if you have a fixed format file, this is a python list of column lengths. e.g. the first table above would be [3,5,5]. Note that if you specify the wrong fixed format, you will get junk; if your format total is greater than the line length, the last entries will all be blank but readcol will not report an error. namecomment - assumed that "Name" row is on a comment line. If it is not - e.g., it is the first non-comment line, change this to False removeblanks - remove all blank entries from split lines. This can cause lost data if you have blank entries on some lines. header_badchars - remove these characters from a header before parsing it (helpful for IPAC tables that are delimited with | ) If you get this error: "scipy could not be imported. Your table must have full rows." it means readcol cannot automatically guess which columns contain data. If you have scipy and columns of varying length, readcol will read in all of the rows with length=mode(row lengths).
packages/vaex-core/vaex/ext/readcol.py
def readcol(filename,skipline=0,skipafter=0,names=False,fsep=None,twod=True, fixedformat=None,asdict=False,comment='#',verbose=True,nullval=None, asStruct=False,namecomment=True,removeblanks=False,header_badchars=None, asRecArray=False): """ The default return is a two dimensional float array. If you want a list of columns output instead of a 2D array, pass 'twod=False'. In this case, each column's data type will be automatically detected. Example usage: CASE 1) a table has the format: X Y Z 0.0 2.4 8.2 1.0 3.4 5.6 0.7 3.2 2.1 ... names,(x,y,z)=readcol("myfile.tbl",names=True,twod=False) or x,y,z=readcol("myfile.tbl",skipline=1,twod=False) or names,xx = readcol("myfile.tbl",names=True) or xxdict = readcol("myfile.tbl",asdict=True) or xxstruct = readcol("myfile.tbl",asStruct=True) CASE 2) no title is contained into the table, then there is no need to skipline: x,y,z=readcol("myfile.tbl") CASE 3) there is a names column and then more descriptive text: X Y Z (deg) (deg) (km/s) 0.0 2.4 8.2 1.0 3.4. 5.6 ... then use: names,x,y,z=readcol("myfile.tbl",names=True,skipline=1,twod=False) or x,y,z=readcol("myfile.tbl",skipline=2,twod=False) INPUTS: fsep - field separator, e.g. for comma separated value (csv) files skipline - number of lines to ignore at the start of the file names - read / don't read in the first line as a list of column names can specify an integer line number too, though it will be the line number after skipping lines twod - two dimensional or one dimensional output nullval - if specified, all instances of this value will be replaced with a floating NaN asdict - zips names with data to create a dict with column headings tied to column data. If asdict=True, names will be set to True asStruct - same as asdict, but returns a structure instead of a dictionary (i.e. you call struct.key instead of struct['key']) fixedformat - if you have a fixed format file, this is a python list of column lengths. e.g. the first table above would be [3,5,5]. Note that if you specify the wrong fixed format, you will get junk; if your format total is greater than the line length, the last entries will all be blank but readcol will not report an error. namecomment - assumed that "Name" row is on a comment line. If it is not - e.g., it is the first non-comment line, change this to False removeblanks - remove all blank entries from split lines. This can cause lost data if you have blank entries on some lines. header_badchars - remove these characters from a header before parsing it (helpful for IPAC tables that are delimited with | ) If you get this error: "scipy could not be imported. Your table must have full rows." it means readcol cannot automatically guess which columns contain data. If you have scipy and columns of varying length, readcol will read in all of the rows with length=mode(row lengths). 
""" with open(filename,'r') as f: f = f.readlines() null=[f.pop(0) for i in range(skipline)] commentfilter = make_commentfilter(comment) if not asStruct: asStruct = asRecArray if namecomment is False and (names or asdict or asStruct): while 1: line = f.pop(0) if line[0] != comment: nameline = line if header_badchars: for c in header_badchars: nameline = nameline.replace(c,' ') nms=nameline.split(fsep) break elif len(f) == 0: raise Exception("No uncommented lines found.") else: if names or asdict or asStruct: # can specify name line if type(names) == type(1): nameline = f.pop(names) else: nameline = f.pop(0) if nameline[0]==comment: nameline = nameline[1:] if header_badchars: for c in header_badchars: nameline = nameline.replace(c,' ') nms=list([name.strip() for name in nameline.split(fsep)]) null=[f.pop(0) for i in range(skipafter)] if fixedformat: myreadff = lambda x: readff(x,fixedformat) splitarr = list(map(myreadff,f)) splitarr = list(filter(commentfilter,splitarr)) else: fstrip = list(map(str.strip,f)) fseps = [ fsep for i in range(len(f)) ] splitarr = list(map(str.split,fstrip,fseps)) if removeblanks: for i in range(splitarr.count([''])): splitarr.remove(['']) splitarr = list(filter(commentfilter,splitarr)) # check to make sure each line has the same number of columns to avoid # "ValueError: setting an array element with a sequence." nperline = list(map(len,splitarr)) if hasmode: ncols,nrows = mode(nperline) if nrows != len(splitarr): if verbose: print("Removing %i rows that don't match most common length %i. \ \n%i rows read into array." % (len(splitarr) - nrows,ncols,nrows)) for i in range(len(splitarr)-1,-1,-1): # need to go backwards if nperline[i] != ncols: splitarr.pop(i) try: x = numpy.asarray( splitarr , dtype='float') except ValueError: if verbose: print("WARNING: reading as string array because %s array failed" % 'float') try: x = numpy.asarray( splitarr , dtype='S') except ValueError: if hasmode: raise Exception( "ValueError when converting data to array." + \ " You have scipy.mode on your system, so this is " + \ "probably not an issue of differing row lengths." ) else: raise Exception( "Conversion to array error. You probably " + \ "have different row lengths and scipy.mode was not " + \ "imported." ) if nullval is not None: x[x==nullval] = numpy.nan x = get_autotype(x) if asdict or asStruct: mydict = OrderedDict(zip(nms,x.T)) for k,v in mydict.items(): mydict[k] = get_autotype(v) if asdict: return mydict elif asRecArray: return Struct(mydict).as_recarray() elif asStruct: return Struct(mydict) elif names and twod: return nms,x elif names: # if not returning a twod array, try to return each vector as the spec. type return nms,[ get_autotype(x.T[i]) for i in range(x.shape[1]) ] else: if twod: return x else: return [ get_autotype(x.T[i]) for i in range(x.shape[1]) ]
def readcol(filename,skipline=0,skipafter=0,names=False,fsep=None,twod=True, fixedformat=None,asdict=False,comment='#',verbose=True,nullval=None, asStruct=False,namecomment=True,removeblanks=False,header_badchars=None, asRecArray=False): """ The default return is a two dimensional float array. If you want a list of columns output instead of a 2D array, pass 'twod=False'. In this case, each column's data type will be automatically detected. Example usage: CASE 1) a table has the format: X Y Z 0.0 2.4 8.2 1.0 3.4 5.6 0.7 3.2 2.1 ... names,(x,y,z)=readcol("myfile.tbl",names=True,twod=False) or x,y,z=readcol("myfile.tbl",skipline=1,twod=False) or names,xx = readcol("myfile.tbl",names=True) or xxdict = readcol("myfile.tbl",asdict=True) or xxstruct = readcol("myfile.tbl",asStruct=True) CASE 2) no title is contained into the table, then there is no need to skipline: x,y,z=readcol("myfile.tbl") CASE 3) there is a names column and then more descriptive text: X Y Z (deg) (deg) (km/s) 0.0 2.4 8.2 1.0 3.4. 5.6 ... then use: names,x,y,z=readcol("myfile.tbl",names=True,skipline=1,twod=False) or x,y,z=readcol("myfile.tbl",skipline=2,twod=False) INPUTS: fsep - field separator, e.g. for comma separated value (csv) files skipline - number of lines to ignore at the start of the file names - read / don't read in the first line as a list of column names can specify an integer line number too, though it will be the line number after skipping lines twod - two dimensional or one dimensional output nullval - if specified, all instances of this value will be replaced with a floating NaN asdict - zips names with data to create a dict with column headings tied to column data. If asdict=True, names will be set to True asStruct - same as asdict, but returns a structure instead of a dictionary (i.e. you call struct.key instead of struct['key']) fixedformat - if you have a fixed format file, this is a python list of column lengths. e.g. the first table above would be [3,5,5]. Note that if you specify the wrong fixed format, you will get junk; if your format total is greater than the line length, the last entries will all be blank but readcol will not report an error. namecomment - assumed that "Name" row is on a comment line. If it is not - e.g., it is the first non-comment line, change this to False removeblanks - remove all blank entries from split lines. This can cause lost data if you have blank entries on some lines. header_badchars - remove these characters from a header before parsing it (helpful for IPAC tables that are delimited with | ) If you get this error: "scipy could not be imported. Your table must have full rows." it means readcol cannot automatically guess which columns contain data. If you have scipy and columns of varying length, readcol will read in all of the rows with length=mode(row lengths). 
""" with open(filename,'r') as f: f = f.readlines() null=[f.pop(0) for i in range(skipline)] commentfilter = make_commentfilter(comment) if not asStruct: asStruct = asRecArray if namecomment is False and (names or asdict or asStruct): while 1: line = f.pop(0) if line[0] != comment: nameline = line if header_badchars: for c in header_badchars: nameline = nameline.replace(c,' ') nms=nameline.split(fsep) break elif len(f) == 0: raise Exception("No uncommented lines found.") else: if names or asdict or asStruct: # can specify name line if type(names) == type(1): nameline = f.pop(names) else: nameline = f.pop(0) if nameline[0]==comment: nameline = nameline[1:] if header_badchars: for c in header_badchars: nameline = nameline.replace(c,' ') nms=list([name.strip() for name in nameline.split(fsep)]) null=[f.pop(0) for i in range(skipafter)] if fixedformat: myreadff = lambda x: readff(x,fixedformat) splitarr = list(map(myreadff,f)) splitarr = list(filter(commentfilter,splitarr)) else: fstrip = list(map(str.strip,f)) fseps = [ fsep for i in range(len(f)) ] splitarr = list(map(str.split,fstrip,fseps)) if removeblanks: for i in range(splitarr.count([''])): splitarr.remove(['']) splitarr = list(filter(commentfilter,splitarr)) # check to make sure each line has the same number of columns to avoid # "ValueError: setting an array element with a sequence." nperline = list(map(len,splitarr)) if hasmode: ncols,nrows = mode(nperline) if nrows != len(splitarr): if verbose: print("Removing %i rows that don't match most common length %i. \ \n%i rows read into array." % (len(splitarr) - nrows,ncols,nrows)) for i in range(len(splitarr)-1,-1,-1): # need to go backwards if nperline[i] != ncols: splitarr.pop(i) try: x = numpy.asarray( splitarr , dtype='float') except ValueError: if verbose: print("WARNING: reading as string array because %s array failed" % 'float') try: x = numpy.asarray( splitarr , dtype='S') except ValueError: if hasmode: raise Exception( "ValueError when converting data to array." + \ " You have scipy.mode on your system, so this is " + \ "probably not an issue of differing row lengths." ) else: raise Exception( "Conversion to array error. You probably " + \ "have different row lengths and scipy.mode was not " + \ "imported." ) if nullval is not None: x[x==nullval] = numpy.nan x = get_autotype(x) if asdict or asStruct: mydict = OrderedDict(zip(nms,x.T)) for k,v in mydict.items(): mydict[k] = get_autotype(v) if asdict: return mydict elif asRecArray: return Struct(mydict).as_recarray() elif asStruct: return Struct(mydict) elif names and twod: return nms,x elif names: # if not returning a twod array, try to return each vector as the spec. type return nms,[ get_autotype(x.T[i]) for i in range(x.shape[1]) ] else: if twod: return x else: return [ get_autotype(x.T[i]) for i in range(x.shape[1]) ]
[ "The", "default", "return", "is", "a", "two", "dimensional", "float", "array", ".", "If", "you", "want", "a", "list", "of", "columns", "output", "instead", "of", "a", "2D", "array", "pass", "twod", "=", "False", ".", "In", "this", "case", "each", "column", "s", "data", "type", "will", "be", "automatically", "detected", ".", "Example", "usage", ":", "CASE", "1", ")", "a", "table", "has", "the", "format", ":", "X", "Y", "Z", "0", ".", "0", "2", ".", "4", "8", ".", "2", "1", ".", "0", "3", ".", "4", "5", ".", "6", "0", ".", "7", "3", ".", "2", "2", ".", "1", "...", "names", "(", "x", "y", "z", ")", "=", "readcol", "(", "myfile", ".", "tbl", "names", "=", "True", "twod", "=", "False", ")", "or", "x", "y", "z", "=", "readcol", "(", "myfile", ".", "tbl", "skipline", "=", "1", "twod", "=", "False", ")", "or", "names", "xx", "=", "readcol", "(", "myfile", ".", "tbl", "names", "=", "True", ")", "or", "xxdict", "=", "readcol", "(", "myfile", ".", "tbl", "asdict", "=", "True", ")", "or", "xxstruct", "=", "readcol", "(", "myfile", ".", "tbl", "asStruct", "=", "True", ")" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/readcol.py#L45-L221
[ "def", "readcol", "(", "filename", ",", "skipline", "=", "0", ",", "skipafter", "=", "0", ",", "names", "=", "False", ",", "fsep", "=", "None", ",", "twod", "=", "True", ",", "fixedformat", "=", "None", ",", "asdict", "=", "False", ",", "comment", "=", "'#'", ",", "verbose", "=", "True", ",", "nullval", "=", "None", ",", "asStruct", "=", "False", ",", "namecomment", "=", "True", ",", "removeblanks", "=", "False", ",", "header_badchars", "=", "None", ",", "asRecArray", "=", "False", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "f", "=", "f", ".", "readlines", "(", ")", "null", "=", "[", "f", ".", "pop", "(", "0", ")", "for", "i", "in", "range", "(", "skipline", ")", "]", "commentfilter", "=", "make_commentfilter", "(", "comment", ")", "if", "not", "asStruct", ":", "asStruct", "=", "asRecArray", "if", "namecomment", "is", "False", "and", "(", "names", "or", "asdict", "or", "asStruct", ")", ":", "while", "1", ":", "line", "=", "f", ".", "pop", "(", "0", ")", "if", "line", "[", "0", "]", "!=", "comment", ":", "nameline", "=", "line", "if", "header_badchars", ":", "for", "c", "in", "header_badchars", ":", "nameline", "=", "nameline", ".", "replace", "(", "c", ",", "' '", ")", "nms", "=", "nameline", ".", "split", "(", "fsep", ")", "break", "elif", "len", "(", "f", ")", "==", "0", ":", "raise", "Exception", "(", "\"No uncommented lines found.\"", ")", "else", ":", "if", "names", "or", "asdict", "or", "asStruct", ":", "# can specify name line", "if", "type", "(", "names", ")", "==", "type", "(", "1", ")", ":", "nameline", "=", "f", ".", "pop", "(", "names", ")", "else", ":", "nameline", "=", "f", ".", "pop", "(", "0", ")", "if", "nameline", "[", "0", "]", "==", "comment", ":", "nameline", "=", "nameline", "[", "1", ":", "]", "if", "header_badchars", ":", "for", "c", "in", "header_badchars", ":", "nameline", "=", "nameline", ".", "replace", "(", "c", ",", "' '", ")", "nms", "=", "list", "(", "[", "name", ".", "strip", "(", ")", "for", "name", "in", "nameline", ".", "split", "(", "fsep", ")", "]", ")", "null", "=", "[", "f", ".", "pop", "(", "0", ")", "for", "i", "in", "range", "(", "skipafter", ")", "]", "if", "fixedformat", ":", "myreadff", "=", "lambda", "x", ":", "readff", "(", "x", ",", "fixedformat", ")", "splitarr", "=", "list", "(", "map", "(", "myreadff", ",", "f", ")", ")", "splitarr", "=", "list", "(", "filter", "(", "commentfilter", ",", "splitarr", ")", ")", "else", ":", "fstrip", "=", "list", "(", "map", "(", "str", ".", "strip", ",", "f", ")", ")", "fseps", "=", "[", "fsep", "for", "i", "in", "range", "(", "len", "(", "f", ")", ")", "]", "splitarr", "=", "list", "(", "map", "(", "str", ".", "split", ",", "fstrip", ",", "fseps", ")", ")", "if", "removeblanks", ":", "for", "i", "in", "range", "(", "splitarr", ".", "count", "(", "[", "''", "]", ")", ")", ":", "splitarr", ".", "remove", "(", "[", "''", "]", ")", "splitarr", "=", "list", "(", "filter", "(", "commentfilter", ",", "splitarr", ")", ")", "# check to make sure each line has the same number of columns to avoid", "# \"ValueError: setting an array element with a sequence.\"", "nperline", "=", "list", "(", "map", "(", "len", ",", "splitarr", ")", ")", "if", "hasmode", ":", "ncols", ",", "nrows", "=", "mode", "(", "nperline", ")", "if", "nrows", "!=", "len", "(", "splitarr", ")", ":", "if", "verbose", ":", "print", "(", "\"Removing %i rows that don't match most common length %i. 
\\\n \\n%i rows read into array.\"", "%", "(", "len", "(", "splitarr", ")", "-", "nrows", ",", "ncols", ",", "nrows", ")", ")", "for", "i", "in", "range", "(", "len", "(", "splitarr", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "# need to go backwards", "if", "nperline", "[", "i", "]", "!=", "ncols", ":", "splitarr", ".", "pop", "(", "i", ")", "try", ":", "x", "=", "numpy", ".", "asarray", "(", "splitarr", ",", "dtype", "=", "'float'", ")", "except", "ValueError", ":", "if", "verbose", ":", "print", "(", "\"WARNING: reading as string array because %s array failed\"", "%", "'float'", ")", "try", ":", "x", "=", "numpy", ".", "asarray", "(", "splitarr", ",", "dtype", "=", "'S'", ")", "except", "ValueError", ":", "if", "hasmode", ":", "raise", "Exception", "(", "\"ValueError when converting data to array.\"", "+", "\" You have scipy.mode on your system, so this is \"", "+", "\"probably not an issue of differing row lengths.\"", ")", "else", ":", "raise", "Exception", "(", "\"Conversion to array error. You probably \"", "+", "\"have different row lengths and scipy.mode was not \"", "+", "\"imported.\"", ")", "if", "nullval", "is", "not", "None", ":", "x", "[", "x", "==", "nullval", "]", "=", "numpy", ".", "nan", "x", "=", "get_autotype", "(", "x", ")", "if", "asdict", "or", "asStruct", ":", "mydict", "=", "OrderedDict", "(", "zip", "(", "nms", ",", "x", ".", "T", ")", ")", "for", "k", ",", "v", "in", "mydict", ".", "items", "(", ")", ":", "mydict", "[", "k", "]", "=", "get_autotype", "(", "v", ")", "if", "asdict", ":", "return", "mydict", "elif", "asRecArray", ":", "return", "Struct", "(", "mydict", ")", ".", "as_recarray", "(", ")", "elif", "asStruct", ":", "return", "Struct", "(", "mydict", ")", "elif", "names", "and", "twod", ":", "return", "nms", ",", "x", "elif", "names", ":", "# if not returning a twod array, try to return each vector as the spec. type", "return", "nms", ",", "[", "get_autotype", "(", "x", ".", "T", "[", "i", "]", ")", "for", "i", "in", "range", "(", "x", ".", "shape", "[", "1", "]", ")", "]", "else", ":", "if", "twod", ":", "return", "x", "else", ":", "return", "[", "get_autotype", "(", "x", ".", "T", "[", "i", "]", ")", "for", "i", "in", "range", "(", "x", ".", "shape", "[", "1", "]", ")", "]" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
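A self-contained round trip for readcol, using a tiny throwaway table written next to the script; the file name is made up.

from vaex.ext.readcol import readcol

with open('tiny.tbl', 'w') as f:
    f.write("X Y Z\n"
            "0.0 2.4 8.2\n"
            "1.0 3.4 5.6\n")

names, (x, y, z) = readcol('tiny.tbl', names=True, twod=False)
print(names)      # ['X', 'Y', 'Z']
print(x, y, z)    # one array per column; whole-number columns come back as ints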
test
get_autotype
Attempts to return a numpy array converted to the most sensible dtype Value errors will be caught and simply return the original array Tries to make dtype int, then float, then no change
packages/vaex-core/vaex/ext/readcol.py
def get_autotype(arr): """ Attempts to return a numpy array converted to the most sensible dtype Value errors will be caught and simply return the original array Tries to make dtype int, then float, then no change """ try: narr = arr.astype('float') if (narr < sys.maxsize).all() and (narr % 1).sum() == 0: return narr.astype('int') else: return narr except ValueError: return arr
def get_autotype(arr): """ Attempts to return a numpy array converted to the most sensible dtype Value errors will be caught and simply return the original array Tries to make dtype int, then float, then no change """ try: narr = arr.astype('float') if (narr < sys.maxsize).all() and (narr % 1).sum() == 0: return narr.astype('int') else: return narr except ValueError: return arr
[ "Attempts", "to", "return", "a", "numpy", "array", "converted", "to", "the", "most", "sensible", "dtype", "Value", "errors", "will", "be", "caught", "and", "simply", "return", "the", "original", "array", "Tries", "to", "make", "dtype", "int", "then", "float", "then", "no", "change" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/readcol.py#L223-L236
[ "def", "get_autotype", "(", "arr", ")", ":", "try", ":", "narr", "=", "arr", ".", "astype", "(", "'float'", ")", "if", "(", "narr", "<", "sys", ".", "maxsize", ")", ".", "all", "(", ")", "and", "(", "narr", "%", "1", ")", ".", "sum", "(", ")", "==", "0", ":", "return", "narr", ".", "astype", "(", "'int'", ")", "else", ":", "return", "narr", "except", "ValueError", ":", "return", "arr" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
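A self-contained demonstration of the int, then float, then unchanged fall-through; the helper is copied inline so the snippet runs on its own.

import sys
import numpy as np

def get_autotype(arr):
    # same logic as the function above
    try:
        narr = arr.astype('float')
        if (narr < sys.maxsize).all() and (narr % 1).sum() == 0:
            return narr.astype('int')
        return narr
    except ValueError:
        return arr

print(get_autotype(np.array(['1', '2', '3'])).dtype)   # integer dtype: all whole numbers
print(get_autotype(np.array(['1.5', '2.5'])).dtype)    # float dtype: fractional parts remain
print(get_autotype(np.array(['a', 'b'])))              # unchanged: float conversion fails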
test
readff
Fixed-format reader Pass in a single line string (s) and a format list, which needs to be a python list of string lengths
packages/vaex-core/vaex/ext/readcol.py
def readff(s,format): """ Fixed-format reader Pass in a single line string (s) and a format list, which needs to be a python list of string lengths """ F = numpy.array([0]+format).cumsum() bothF = zip(F[:-1],F[1:]) strarr = [s[l:u] for l,u in bothF] return strarr
def readff(s,format): """ Fixed-format reader Pass in a single line string (s) and a format list, which needs to be a python list of string lengths """ F = numpy.array([0]+format).cumsum() bothF = zip(F[:-1],F[1:]) strarr = [s[l:u] for l,u in bothF] return strarr
[ "Fixed", "-", "format", "reader", "Pass", "in", "a", "single", "line", "string", "(", "s", ")", "and", "a", "format", "list", "which", "needs", "to", "be", "a", "python", "list", "of", "string", "lengths" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/readcol.py#L270-L281
[ "def", "readff", "(", "s", ",", "format", ")", ":", "F", "=", "numpy", ".", "array", "(", "[", "0", "]", "+", "format", ")", ".", "cumsum", "(", ")", "bothF", "=", "zip", "(", "F", "[", ":", "-", "1", "]", ",", "F", "[", "1", ":", "]", ")", "strarr", "=", "[", "s", "[", "l", ":", "u", "]", "for", "l", ",", "u", "in", "bothF", "]", "return", "strarr" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
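A self-contained demo of the fixed-format slicing; the helper is copied inline, and the sample line and widths are made up.

import numpy

def readff(s, widths):
    # same logic as the function above: cumulative offsets, then slice
    F = numpy.array([0] + widths).cumsum()
    return [s[l:u] for l, u in zip(F[:-1], F[1:])]

print(readff("abc  1.25  42.0", [3, 6, 6]))   # ['abc', '  1.25', '  42.0']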
test
Struct.as_recarray
Convert into numpy recordarray
packages/vaex-core/vaex/ext/readcol.py
def as_recarray(self): """ Convert into numpy recordarray """ dtype = [(k,v.dtype) for k,v in self.__dict__.iteritems()] R = numpy.recarray(len(self.__dict__[k]),dtype=dtype) for key in self.__dict__: R[key] = self.__dict__[key] return R
def as_recarray(self): """ Convert into numpy recordarray """ dtype = [(k,v.dtype) for k,v in self.__dict__.iteritems()] R = numpy.recarray(len(self.__dict__[k]),dtype=dtype) for key in self.__dict__: R[key] = self.__dict__[key] return R
[ "Convert", "into", "numpy", "recordarray" ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/readcol.py#L259-L265
[ "def", "as_recarray", "(", "self", ")", ":", "dtype", "=", "[", "(", "k", ",", "v", ".", "dtype", ")", "for", "k", ",", "v", "in", "self", ".", "__dict__", ".", "iteritems", "(", ")", "]", "R", "=", "numpy", ".", "recarray", "(", "len", "(", "self", ".", "__dict__", "[", "k", "]", ")", ",", "dtype", "=", "dtype", ")", "for", "key", "in", "self", ".", "__dict__", ":", "R", "[", "key", "]", "=", "self", ".", "__dict__", "[", "key", "]", "return", "R" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
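A hedged Python 3 sketch of the same conversion (the method above depends on Python 2's dict.iteritems and on a comprehension variable that no longer leaks); the column names and values are invented.

import numpy

columns = {'x': numpy.array([1.0, 2.0, 3.0]),
           'label': numpy.array(['a', 'b', 'c'])}

dtype = [(name, col.dtype) for name, col in columns.items()]
n_rows = len(next(iter(columns.values())))
R = numpy.recarray(n_rows, dtype=dtype)
for name, col in columns.items():
    R[name] = col
print(R.x, R.label)   # attribute-style access, as with the Struct above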
test
store_properties
Writes properties to the file in Java properties format. :param fh: a writable file-like object :param props: a mapping (dict) or iterable of key/value pairs :param comment: comment to write to the beginning of the file :param timestamp: boolean indicating whether to write a timestamp comment
packages/vaex-core/vaex/ext/jprops.py
def store_properties(fh, props, comment=None, timestamp=True): """ Writes properties to the file in Java properties format. :param fh: a writable file-like object :param props: a mapping (dict) or iterable of key/value pairs :param comment: comment to write to the beginning of the file :param timestamp: boolean indicating whether to write a timestamp comment """ if comment is not None: write_comment(fh, comment) if timestamp: write_comment(fh, time.strftime('%a %b %d %H:%M:%S %Z %Y')) if hasattr(props, 'keys'): for key in props: write_property(fh, key, props[key]) else: for key, value in props: write_property(fh, key, value)
def store_properties(fh, props, comment=None, timestamp=True): """ Writes properties to the file in Java properties format. :param fh: a writable file-like object :param props: a mapping (dict) or iterable of key/value pairs :param comment: comment to write to the beginning of the file :param timestamp: boolean indicating whether to write a timestamp comment """ if comment is not None: write_comment(fh, comment) if timestamp: write_comment(fh, time.strftime('%a %b %d %H:%M:%S %Z %Y')) if hasattr(props, 'keys'): for key in props: write_property(fh, key, props[key]) else: for key, value in props: write_property(fh, key, value)
[ "Writes", "properties", "to", "the", "file", "in", "Java", "properties", "format", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/jprops.py#L33-L53
[ "def", "store_properties", "(", "fh", ",", "props", ",", "comment", "=", "None", ",", "timestamp", "=", "True", ")", ":", "if", "comment", "is", "not", "None", ":", "write_comment", "(", "fh", ",", "comment", ")", "if", "timestamp", ":", "write_comment", "(", "fh", ",", "time", ".", "strftime", "(", "'%a %b %d %H:%M:%S %Z %Y'", ")", ")", "if", "hasattr", "(", "props", ",", "'keys'", ")", ":", "for", "key", "in", "props", ":", "write_property", "(", "fh", ",", "key", ",", "props", "[", "key", "]", ")", "else", ":", "for", "key", ",", "value", "in", "props", ":", "write_property", "(", "fh", ",", "key", ",", "value", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
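A hedged usage sketch; the file name and property values are hypothetical. The writer emits bytes (note the b'\n' writes above), so the file is opened in binary mode.

from vaex.ext import jprops

props = {'db.host': 'localhost', 'db.port': '5432'}
with open('settings.properties', 'wb') as fh:
    jprops.store_properties(fh, props, comment='generated example', timestamp=False)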
test
write_comment
Writes a comment to the file in Java properties format. Newlines in the comment text are automatically turned into a continuation of the comment by adding a "#" to the beginning of each line. :param fh: a writable file-like object :param comment: comment string to write
packages/vaex-core/vaex/ext/jprops.py
def write_comment(fh, comment): """ Writes a comment to the file in Java properties format. Newlines in the comment text are automatically turned into a continuation of the comment by adding a "#" to the beginning of each line. :param fh: a writable file-like object :param comment: comment string to write """ _require_string(comment, 'comments') fh.write(_escape_comment(comment)) fh.write(b'\n')
def write_comment(fh, comment): """ Writes a comment to the file in Java properties format. Newlines in the comment text are automatically turned into a continuation of the comment by adding a "#" to the beginning of each line. :param fh: a writable file-like object :param comment: comment string to write """ _require_string(comment, 'comments') fh.write(_escape_comment(comment)) fh.write(b'\n')
[ "Writes", "a", "comment", "to", "the", "file", "in", "Java", "properties", "format", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/jprops.py#L56-L68
[ "def", "write_comment", "(", "fh", ",", "comment", ")", ":", "_require_string", "(", "comment", ",", "'comments'", ")", "fh", ".", "write", "(", "_escape_comment", "(", "comment", ")", ")", "fh", ".", "write", "(", "b'\\n'", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
test
write_property
Write a single property to the file in Java properties format. :param fh: a writable file-like object :param key: the key to write :param value: the value to write
packages/vaex-core/vaex/ext/jprops.py
def write_property(fh, key, value): """ Write a single property to the file in Java properties format. :param fh: a writable file-like object :param key: the key to write :param value: the value to write """ if key is COMMENT: write_comment(fh, value) return _require_string(key, 'keys') _require_string(value, 'values') fh.write(_escape_key(key)) fh.write(b'=') fh.write(_escape_value(value)) fh.write(b'\n')
def write_property(fh, key, value): """ Write a single property to the file in Java properties format. :param fh: a writable file-like object :param key: the key to write :param value: the value to write """ if key is COMMENT: write_comment(fh, value) return _require_string(key, 'keys') _require_string(value, 'values') fh.write(_escape_key(key)) fh.write(b'=') fh.write(_escape_value(value)) fh.write(b'\n')
[ "Write", "a", "single", "property", "to", "the", "file", "in", "Java", "properties", "format", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/jprops.py#L80-L98
[ "def", "write_property", "(", "fh", ",", "key", ",", "value", ")", ":", "if", "key", "is", "COMMENT", ":", "write_comment", "(", "fh", ",", "value", ")", "return", "_require_string", "(", "key", ",", "'keys'", ")", "_require_string", "(", "value", ",", "'values'", ")", "fh", ".", "write", "(", "_escape_key", "(", "key", ")", ")", "fh", ".", "write", "(", "b'='", ")", "fh", ".", "write", "(", "_escape_value", "(", "value", ")", ")", "fh", ".", "write", "(", "b'\\n'", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
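A hedged sketch writing one escaped key=value line to an in-memory buffer, so nothing touches disk; the key and value are made up.

import io
from vaex.ext import jprops

buf = io.BytesIO()
jprops.write_property(buf, 'greeting', 'hello world')
print(buf.getvalue())   # one escaped 'greeting=hello world' line ending in \n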
test
iter_properties
Incrementally read properties from a Java .properties file. Yields tuples of key/value pairs. If ``comments`` is `True`, comments will be included with ``jprops.COMMENT`` in place of the key. :param fh: a readable file-like object :param comments: should include comments (default: False)
packages/vaex-core/vaex/ext/jprops.py
def iter_properties(fh, comments=False): """ Incrementally read properties from a Java .properties file. Yields tuples of key/value pairs. If ``comments`` is `True`, comments will be included with ``jprops.COMMENT`` in place of the key. :param fh: a readable file-like object :param comments: should include comments (default: False) """ for line in _property_lines(fh): key, value = _split_key_value(line) if key is not COMMENT: key = _unescape(key) elif not comments: continue yield key, _unescape(value)
def iter_properties(fh, comments=False): """ Incrementally read properties from a Java .properties file. Yields tuples of key/value pairs. If ``comments`` is `True`, comments will be included with ``jprops.COMMENT`` in place of the key. :param fh: a readable file-like object :param comments: should include comments (default: False) """ for line in _property_lines(fh): key, value = _split_key_value(line) if key is not COMMENT: key = _unescape(key) elif not comments: continue yield key, _unescape(value)
[ "Incrementally", "read", "properties", "from", "a", "Java", ".", "properties", "file", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/jprops.py#L101-L119
[ "def", "iter_properties", "(", "fh", ",", "comments", "=", "False", ")", ":", "for", "line", "in", "_property_lines", "(", "fh", ")", ":", "key", ",", "value", "=", "_split_key_value", "(", "line", ")", "if", "key", "is", "not", "COMMENT", ":", "key", "=", "_unescape", "(", "key", ")", "elif", "not", "comments", ":", "continue", "yield", "key", ",", "_unescape", "(", "value", ")" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
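A hedged round-trip sketch reading properties back; the file name is hypothetical, and the file is opened in binary mode to match the byte-oriented writer above.

from vaex.ext import jprops

with open('settings.properties', 'rb') as fh:
    for key, value in jprops.iter_properties(fh):
        print(key, '=', value)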
test
_universal_newlines
Wrap a file to convert newlines regardless of whether the file was opened with the "universal newlines" option or not.
packages/vaex-core/vaex/ext/jprops.py
def _universal_newlines(fp): """ Wrap a file to convert newlines regardless of whether the file was opened with the "universal newlines" option or not. """ # if file was opened with universal newline support we don't need to convert if 'U' in getattr(fp, 'mode', ''): for line in fp: yield line else: for line in fp: line = line.replace(b'\r\n', b'\n').replace(b'\r', b'\n') for piece in line.split(b'\n'): yield piece
def _universal_newlines(fp): """ Wrap a file to convert newlines regardless of whether the file was opened with the "universal newlines" option or not. """ # if file was opened with universal newline support we don't need to convert if 'U' in getattr(fp, 'mode', ''): for line in fp: yield line else: for line in fp: line = line.replace(b'\r\n', b'\n').replace(b'\r', b'\n') for piece in line.split(b'\n'): yield piece
[ "Wrap", "a", "file", "to", "convert", "newlines", "regardless", "of", "whether", "the", "file", "was", "opened", "with", "the", "universal", "newlines", "option", "or", "not", "." ]
vaexio/vaex
python
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/jprops.py#L260-L273
[ "def", "_universal_newlines", "(", "fp", ")", ":", "# if file was opened with universal newline support we don't need to convert", "if", "'U'", "in", "getattr", "(", "fp", ",", "'mode'", ",", "''", ")", ":", "for", "line", "in", "fp", ":", "yield", "line", "else", ":", "for", "line", "in", "fp", ":", "line", "=", "line", ".", "replace", "(", "b'\\r\\n'", ",", "b'\\n'", ")", ".", "replace", "(", "b'\\r'", ",", "b'\\n'", ")", "for", "piece", "in", "line", ".", "split", "(", "b'\\n'", ")", ":", "yield", "piece" ]
a45b672f8287afca2ada8e36b74b604b9b28dd85
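A self-contained demo on an in-memory byte stream with mixed line endings; the helper is private (leading underscore), so this is purely illustrative.

import io
from vaex.ext.jprops import _universal_newlines

data = io.BytesIO(b'first\r\nsecond\rthird\n')
# yields byte pieces split on normalized newlines (empty pieces mark chunk-final newlines)
print(list(_universal_newlines(data)))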
test
show_versions
Print the version information for all librosa dependencies.
librosa/version.py
def show_versions(): '''Return the version information for all librosa dependencies.''' core_deps = ['audioread', 'numpy', 'scipy', 'sklearn', 'joblib', 'decorator', 'six', 'soundfile', 'resampy', 'numba'] extra_deps = ['numpydoc', 'sphinx', 'sphinx_rtd_theme', 'sphinxcontrib.versioning', 'sphinx-gallery', 'pytest', 'pytest-mpl', 'pytest-cov', 'matplotlib'] print('INSTALLED VERSIONS') print('------------------') print('python: {}\n'.format(sys.version)) print('librosa: {}\n'.format(version)) for dep in core_deps: print('{}: {}'.format(dep, __get_mod_version(dep))) print('') for dep in extra_deps: print('{}: {}'.format(dep, __get_mod_version(dep))) pass
def show_versions(): '''Return the version information for all librosa dependencies.''' core_deps = ['audioread', 'numpy', 'scipy', 'sklearn', 'joblib', 'decorator', 'six', 'soundfile', 'resampy', 'numba'] extra_deps = ['numpydoc', 'sphinx', 'sphinx_rtd_theme', 'sphinxcontrib.versioning', 'sphinx-gallery', 'pytest', 'pytest-mpl', 'pytest-cov', 'matplotlib'] print('INSTALLED VERSIONS') print('------------------') print('python: {}\n'.format(sys.version)) print('librosa: {}\n'.format(version)) for dep in core_deps: print('{}: {}'.format(dep, __get_mod_version(dep))) print('') for dep in extra_deps: print('{}: {}'.format(dep, __get_mod_version(dep))) pass
[ "Return", "the", "version", "information", "for", "all", "librosa", "dependencies", "." ]
librosa/librosa
python
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/version.py#L28-L61
[ "def", "show_versions", "(", ")", ":", "core_deps", "=", "[", "'audioread'", ",", "'numpy'", ",", "'scipy'", ",", "'sklearn'", ",", "'joblib'", ",", "'decorator'", ",", "'six'", ",", "'soundfile'", ",", "'resampy'", ",", "'numba'", "]", "extra_deps", "=", "[", "'numpydoc'", ",", "'sphinx'", ",", "'sphinx_rtd_theme'", ",", "'sphinxcontrib.versioning'", ",", "'sphinx-gallery'", ",", "'pytest'", ",", "'pytest-mpl'", ",", "'pytest-cov'", ",", "'matplotlib'", "]", "print", "(", "'INSTALLED VERSIONS'", ")", "print", "(", "'------------------'", ")", "print", "(", "'python: {}\\n'", ".", "format", "(", "sys", ".", "version", ")", ")", "print", "(", "'librosa: {}\\n'", ".", "format", "(", "version", ")", ")", "for", "dep", "in", "core_deps", ":", "print", "(", "'{}: {}'", ".", "format", "(", "dep", ",", "__get_mod_version", "(", "dep", ")", ")", ")", "print", "(", "''", ")", "for", "dep", "in", "extra_deps", ":", "print", "(", "'{}: {}'", ".", "format", "(", "dep", ",", "__get_mod_version", "(", "dep", ")", ")", ")", "pass" ]
180e8e6eb8f958fa6b20b8cba389f7945d508247
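Usage is a single call; the import path below follows the file path given in the record.

from librosa.version import show_versions

show_versions()   # prints Python, librosa, and core/extra dependency versions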
test
rename_kw
Handle renamed arguments. Parameters ---------- old_name : str old_value The name and value of the old argument new_name : str new_value The name and value of the new argument version_deprecated : str The version at which the old name became deprecated version_removed : str The version at which the old name will be removed Returns ------- value - `new_value` if `old_value` of type `Deprecated` - `old_value` otherwise Warnings -------- if `old_value` is not of type `Deprecated`
librosa/util/deprecation.py
def rename_kw(old_name, old_value, new_name, new_value, version_deprecated, version_removed): '''Handle renamed arguments. Parameters ---------- old_name : str old_value The name and value of the old argument new_name : str new_value The name and value of the new argument version_deprecated : str The version at which the old name became deprecated version_removed : str The version at which the old name will be removed Returns ------- value - `new_value` if `old_value` of type `Deprecated` - `old_value` otherwise Warnings -------- if `old_value` is not of type `Deprecated` ''' if isinstance(old_value, Deprecated): return new_value else: stack = inspect.stack() dep_func = stack[1] caller = stack[2] warnings.warn_explicit("{:s}() keyword argument '{:s}' has been " "renamed to '{:s}' in version {:}." "\n\tThis alias will be removed in version " "{:}.".format(dep_func[3], old_name, new_name, version_deprecated, version_removed), category=DeprecationWarning, filename=caller[1], lineno=caller[2]) return old_value
def rename_kw(old_name, old_value, new_name, new_value, version_deprecated, version_removed): '''Handle renamed arguments. Parameters ---------- old_name : str old_value The name and value of the old argument new_name : str new_value The name and value of the new argument version_deprecated : str The version at which the old name became deprecated version_removed : str The version at which the old name will be removed Returns ------- value - `new_value` if `old_value` of type `Deprecated` - `old_value` otherwise Warnings -------- if `old_value` is not of type `Deprecated` ''' if isinstance(old_value, Deprecated): return new_value else: stack = inspect.stack() dep_func = stack[1] caller = stack[2] warnings.warn_explicit("{:s}() keyword argument '{:s}' has been " "renamed to '{:s}' in version {:}." "\n\tThis alias will be removed in version " "{:}.".format(dep_func[3], old_name, new_name, version_deprecated, version_removed), category=DeprecationWarning, filename=caller[1], lineno=caller[2]) return old_value
[ "Handle", "renamed", "arguments", "." ]
librosa/librosa
python
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/deprecation.py#L15-L64
[ "def", "rename_kw", "(", "old_name", ",", "old_value", ",", "new_name", ",", "new_value", ",", "version_deprecated", ",", "version_removed", ")", ":", "if", "isinstance", "(", "old_value", ",", "Deprecated", ")", ":", "return", "new_value", "else", ":", "stack", "=", "inspect", ".", "stack", "(", ")", "dep_func", "=", "stack", "[", "1", "]", "caller", "=", "stack", "[", "2", "]", "warnings", ".", "warn_explicit", "(", "\"{:s}() keyword argument '{:s}' has been \"", "\"renamed to '{:s}' in version {:}.\"", "\"\\n\\tThis alias will be removed in version \"", "\"{:}.\"", ".", "format", "(", "dep_func", "[", "3", "]", ",", "old_name", ",", "new_name", ",", "version_deprecated", ",", "version_removed", ")", ",", "category", "=", "DeprecationWarning", ",", "filename", "=", "caller", "[", "1", "]", ",", "lineno", "=", "caller", "[", "2", "]", ")", "return", "old_value" ]
180e8e6eb8f958fa6b20b8cba389f7945d508247
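A hedged sketch of how rename_kw is typically wired into a caller whose keyword argument has been renamed. The wrapper function and both argument names below are invented for illustration; only rename_kw comes from the record above, and the Deprecated sentinel is assumed to live in the same librosa.util.deprecation module:

from librosa.util.deprecation import Deprecated, rename_kw

def my_feature(y, frame_length=2048, window_size=Deprecated()):
    # 'window_size' was renamed to 'frame_length'; keep accepting the old
    # keyword for one deprecation cycle, warning whenever a caller uses it.
    frame_length = rename_kw('window_size', window_size,
                             'frame_length', frame_length,
                             version_deprecated='0.6.0',
                             version_removed='0.7.0')
    # ... compute the feature using frame_length ...
    return frame_length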
test
set_fftlib
Set the FFT library used by librosa. Parameters ---------- lib : None or module Must implement an interface compatible with `numpy.fft`. If `None`, reverts to `numpy.fft`. Examples -------- Use `pyfftw`: >>> import pyfftw >>> librosa.set_fftlib(pyfftw.interfaces.numpy_fft) Reset to default `numpy` implementation >>> librosa.set_fftlib()
librosa/core/fft.py
def set_fftlib(lib=None): '''Set the FFT library used by librosa. Parameters ---------- lib : None or module Must implement an interface compatible with `numpy.fft`. If `None`, reverts to `numpy.fft`. Examples -------- Use `pyfftw`: >>> import pyfftw >>> librosa.set_fftlib(pyfftw.interfaces.numpy_fft) Reset to default `numpy` implementation >>> librosa.set_fftlib() ''' global __FFTLIB if lib is None: from numpy import fft lib = fft __FFTLIB = lib
def set_fftlib(lib=None): '''Set the FFT library used by librosa. Parameters ---------- lib : None or module Must implement an interface compatible with `numpy.fft`. If `None`, reverts to `numpy.fft`. Examples -------- Use `pyfftw`: >>> import pyfftw >>> librosa.set_fftlib(pyfftw.interfaces.numpy_fft) Reset to default `numpy` implementation >>> librosa.set_fftlib() ''' global __FFTLIB if lib is None: from numpy import fft lib = fft __FFTLIB = lib
[ "Set", "the", "FFT", "library", "used", "by", "librosa", "." ]
librosa/librosa
python
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/fft.py#L11-L38
[ "def", "set_fftlib", "(", "lib", "=", "None", ")", ":", "global", "__FFTLIB", "if", "lib", "is", "None", ":", "from", "numpy", "import", "fft", "lib", "=", "fft", "__FFTLIB", "=", "lib" ]
180e8e6eb8f958fa6b20b8cba389f7945d508247
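Beyond the pyfftw example in the docstring above, any module exposing a numpy.fft-compatible interface should be accepted. For instance, SciPy's FFT module can be swapped in (this assumes scipy >= 1.4, where scipy.fft mirrors the numpy.fft API closely enough for librosa's transforms):

>>> import scipy.fft
>>> librosa.set_fftlib(scipy.fft)
>>> # subsequent librosa.stft / librosa.istft calls now use the SciPy backend
>>> librosa.set_fftlib()  # revert to the default numpy.fft implementation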
test
beat_track
Beat tracking function :parameters: - input_file : str Path to input audio file (wav, mp3, m4a, flac, etc.) - output_csv : str Path to save beat event timestamps as a CSV file
examples/beat_tracker.py
def beat_track(input_file, output_csv): '''Beat tracking function :parameters: - input_file : str Path to input audio file (wav, mp3, m4a, flac, etc.) - output_csv : str Path to save beat event timestamps as a CSV file ''' print('Loading ', input_file) y, sr = librosa.load(input_file, sr=22050) # Use a default hop size of 512 samples @ 22KHz ~= 23ms hop_length = 512 # This is the window length used by default in stft print('Tracking beats') tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=hop_length) print('Estimated tempo: {:0.2f} beats per minute'.format(tempo)) # save output # 'beats' will contain the frame numbers of beat events. beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=hop_length) print('Saving output to ', output_csv) librosa.output.times_csv(output_csv, beat_times) print('done!')
def beat_track(input_file, output_csv): '''Beat tracking function :parameters: - input_file : str Path to input audio file (wav, mp3, m4a, flac, etc.) - output_csv : str Path to save beat event timestamps as a CSV file ''' print('Loading ', input_file) y, sr = librosa.load(input_file, sr=22050) # Use a default hop size of 512 samples @ 22KHz ~= 23ms hop_length = 512 # This is the window length used by default in stft print('Tracking beats') tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=hop_length) print('Estimated tempo: {:0.2f} beats per minute'.format(tempo)) # save output # 'beats' will contain the frame numbers of beat events. beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=hop_length) print('Saving output to ', output_csv) librosa.output.times_csv(output_csv, beat_times) print('done!')
[ "Beat", "tracking", "function" ]
librosa/librosa
python
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/examples/beat_tracker.py#L16-L45
[ "def", "beat_track", "(", "input_file", ",", "output_csv", ")", ":", "print", "(", "'Loading '", ",", "input_file", ")", "y", ",", "sr", "=", "librosa", ".", "load", "(", "input_file", ",", "sr", "=", "22050", ")", "# Use a default hop size of 512 samples @ 22KHz ~= 23ms", "hop_length", "=", "512", "# This is the window length used by default in stft", "print", "(", "'Tracking beats'", ")", "tempo", ",", "beats", "=", "librosa", ".", "beat", ".", "beat_track", "(", "y", "=", "y", ",", "sr", "=", "sr", ",", "hop_length", "=", "hop_length", ")", "print", "(", "'Estimated tempo: {:0.2f} beats per minute'", ".", "format", "(", "tempo", ")", ")", "# save output", "# 'beats' will contain the frame numbers of beat events.", "beat_times", "=", "librosa", ".", "frames_to_time", "(", "beats", ",", "sr", "=", "sr", ",", "hop_length", "=", "hop_length", ")", "print", "(", "'Saving output to '", ",", "output_csv", ")", "librosa", ".", "output", ".", "times_csv", "(", "output_csv", ",", "beat_times", ")", "print", "(", "'done!'", ")" ]
180e8e6eb8f958fa6b20b8cba389f7945d508247
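A hedged sketch of the same beat-tracking pipeline used inline rather than through the example script. The input file name is a placeholder, and the CSV is written with numpy.savetxt here so the snippet does not depend on the librosa.output module used above:

import numpy as np
import librosa

y, sr = librosa.load('example.mp3', sr=22050)   # 'example.mp3' is a placeholder path
hop_length = 512
tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=hop_length)
# 'beats' holds frame indices; convert them to timestamps in seconds.
beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=hop_length)
# One timestamp per row, mirroring what librosa.output.times_csv would write.
np.savetxt('beat_times.csv', beat_times, fmt='%.3f')
print('Estimated tempo: {:0.2f} beats per minute'.format(tempo))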
test
adjust_tuning
Load audio, estimate tuning, apply pitch correction, and save.
examples/adjust_tuning.py
def adjust_tuning(input_file, output_file): '''Load audio, estimate tuning, apply pitch correction, and save.''' print('Loading ', input_file) y, sr = librosa.load(input_file) print('Separating harmonic component ... ') y_harm = librosa.effects.harmonic(y) print('Estimating tuning ... ') # Just track the pitches associated with high magnitude tuning = librosa.estimate_tuning(y=y_harm, sr=sr) print('{:+0.2f} cents'.format(100 * tuning)) print('Applying pitch-correction of {:+0.2f} cents'.format(-100 * tuning)) y_tuned = librosa.effects.pitch_shift(y, sr, -tuning) print('Saving tuned audio to: ', output_file) librosa.output.write_wav(output_file, y_tuned, sr)
def adjust_tuning(input_file, output_file): '''Load audio, estimate tuning, apply pitch correction, and save.''' print('Loading ', input_file) y, sr = librosa.load(input_file) print('Separating harmonic component ... ') y_harm = librosa.effects.harmonic(y) print('Estimating tuning ... ') # Just track the pitches associated with high magnitude tuning = librosa.estimate_tuning(y=y_harm, sr=sr) print('{:+0.2f} cents'.format(100 * tuning)) print('Applying pitch-correction of {:+0.2f} cents'.format(-100 * tuning)) y_tuned = librosa.effects.pitch_shift(y, sr, -tuning) print('Saving tuned audio to: ', output_file) librosa.output.write_wav(output_file, y_tuned, sr)
[ "Load", "audio", "estimate", "tuning", "apply", "pitch", "correction", "and", "save", "." ]
librosa/librosa
python
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/examples/adjust_tuning.py#L15-L32
[ "def", "adjust_tuning", "(", "input_file", ",", "output_file", ")", ":", "print", "(", "'Loading '", ",", "input_file", ")", "y", ",", "sr", "=", "librosa", ".", "load", "(", "input_file", ")", "print", "(", "'Separating harmonic component ... '", ")", "y_harm", "=", "librosa", ".", "effects", ".", "harmonic", "(", "y", ")", "print", "(", "'Estimating tuning ... '", ")", "# Just track the pitches associated with high magnitude", "tuning", "=", "librosa", ".", "estimate_tuning", "(", "y", "=", "y_harm", ",", "sr", "=", "sr", ")", "print", "(", "'{:+0.2f} cents'", ".", "format", "(", "100", "*", "tuning", ")", ")", "print", "(", "'Applying pitch-correction of {:+0.2f} cents'", ".", "format", "(", "-", "100", "*", "tuning", ")", ")", "y_tuned", "=", "librosa", ".", "effects", ".", "pitch_shift", "(", "y", ",", "sr", ",", "-", "tuning", ")", "print", "(", "'Saving tuned audio to: '", ",", "output_file", ")", "librosa", ".", "output", ".", "write_wav", "(", "output_file", ",", "y_tuned", ",", "sr", ")" ]
180e8e6eb8f958fa6b20b8cba389f7945d508247
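A hedged sketch of the correction step in isolation, to make the units explicit: estimate_tuning returns the deviation as a fraction of a chroma bin (a semitone at the default 12 bins per octave), which is why the script can pass -tuning directly as the n_steps argument of pitch_shift. The audio path below is a placeholder:

>>> y, sr = librosa.load('vocals.wav')  # placeholder input file
>>> tuning = librosa.estimate_tuning(y=librosa.effects.harmonic(y), sr=sr)
>>> print('{:+.1f} cents'.format(100 * tuning))
>>> y_tuned = librosa.effects.pitch_shift(y, sr, n_steps=-tuning)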
test
frames_to_samples
Converts frame indices to audio sample indices. Parameters ---------- frames : number or np.ndarray [shape=(n,)] frame index or vector of frame indices hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `n_fft / 2` to counteract windowing effects when using a non-centered STFT. Returns ------- times : number or np.ndarray time (in samples) of each given frame number: `times[i] = frames[i] * hop_length` See Also -------- frames_to_time : convert frame indices to time values samples_to_frames : convert sample indices to frame indices Examples -------- >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> tempo, beats = librosa.beat.beat_track(y, sr=sr) >>> beat_samples = librosa.frames_to_samples(beats)
librosa/core/time_frequency.py
def frames_to_samples(frames, hop_length=512, n_fft=None): """Converts frame indices to audio sample indices. Parameters ---------- frames : number or np.ndarray [shape=(n,)] frame index or vector of frame indices hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `n_fft / 2` to counteract windowing effects when using a non-centered STFT. Returns ------- times : number or np.ndarray time (in samples) of each given frame number: `times[i] = frames[i] * hop_length` See Also -------- frames_to_time : convert frame indices to time values samples_to_frames : convert sample indices to frame indices Examples -------- >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> tempo, beats = librosa.beat.beat_track(y, sr=sr) >>> beat_samples = librosa.frames_to_samples(beats) """ offset = 0 if n_fft is not None: offset = int(n_fft // 2) return (np.asanyarray(frames) * hop_length + offset).astype(int)
def frames_to_samples(frames, hop_length=512, n_fft=None): """Converts frame indices to audio sample indices. Parameters ---------- frames : number or np.ndarray [shape=(n,)] frame index or vector of frame indices hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `n_fft / 2` to counteract windowing effects when using a non-centered STFT. Returns ------- times : number or np.ndarray time (in samples) of each given frame number: `times[i] = frames[i] * hop_length` See Also -------- frames_to_time : convert frame indices to time values samples_to_frames : convert sample indices to frame indices Examples -------- >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> tempo, beats = librosa.beat.beat_track(y, sr=sr) >>> beat_samples = librosa.frames_to_samples(beats) """ offset = 0 if n_fft is not None: offset = int(n_fft // 2) return (np.asanyarray(frames) * hop_length + offset).astype(int)
[ "Converts", "frame", "indices", "to", "audio", "sample", "indices", "." ]
librosa/librosa
python
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L30-L68
[ "def", "frames_to_samples", "(", "frames", ",", "hop_length", "=", "512", ",", "n_fft", "=", "None", ")", ":", "offset", "=", "0", "if", "n_fft", "is", "not", "None", ":", "offset", "=", "int", "(", "n_fft", "//", "2", ")", "return", "(", "np", ".", "asanyarray", "(", "frames", ")", "*", "hop_length", "+", "offset", ")", ".", "astype", "(", "int", ")" ]
180e8e6eb8f958fa6b20b8cba389f7945d508247
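The n_fft offset documented above only matters for non-centered STFTs; a short doctest-style sketch of both cases, following the formula frames * hop_length + n_fft // 2 with the default hop of 512:

>>> librosa.frames_to_samples([0, 1, 2], hop_length=512)
array([   0,  512, 1024])
>>> # with a non-centered STFT, half the window is added so indices point at frame centers
>>> librosa.frames_to_samples([0, 1, 2], hop_length=512, n_fft=2048)
array([1024, 1536, 2048])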
test
samples_to_frames
Converts sample indices into STFT frames. Examples -------- >>> # Get the frame numbers for every 256 samples >>> librosa.samples_to_frames(np.arange(0, 22050, 256)) array([ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43]) Parameters ---------- samples : int or np.ndarray [shape=(n,)] sample index or vector of sample indices hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `- n_fft / 2` to counteract windowing effects in STFT. .. note:: This may result in negative frame indices. Returns ------- frames : int or np.ndarray [shape=(n,), dtype=int] Frame numbers corresponding to the given times: `frames[i] = floor( samples[i] / hop_length )` See Also -------- samples_to_time : convert sample indices to time values frames_to_samples : convert frame indices to sample indices
librosa/core/time_frequency.py
def samples_to_frames(samples, hop_length=512, n_fft=None): """Converts sample indices into STFT frames. Examples -------- >>> # Get the frame numbers for every 256 samples >>> librosa.samples_to_frames(np.arange(0, 22050, 256)) array([ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43]) Parameters ---------- samples : int or np.ndarray [shape=(n,)] sample index or vector of sample indices hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `- n_fft / 2` to counteract windowing effects in STFT. .. note:: This may result in negative frame indices. Returns ------- frames : int or np.ndarray [shape=(n,), dtype=int] Frame numbers corresponding to the given times: `frames[i] = floor( samples[i] / hop_length )` See Also -------- samples_to_time : convert sample indices to time values frames_to_samples : convert frame indices to sample indices """ offset = 0 if n_fft is not None: offset = int(n_fft // 2) samples = np.asanyarray(samples) return np.floor((samples - offset) // hop_length).astype(int)
def samples_to_frames(samples, hop_length=512, n_fft=None): """Converts sample indices into STFT frames. Examples -------- >>> # Get the frame numbers for every 256 samples >>> librosa.samples_to_frames(np.arange(0, 22050, 256)) array([ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43]) Parameters ---------- samples : int or np.ndarray [shape=(n,)] sample index or vector of sample indices hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `- n_fft / 2` to counteract windowing effects in STFT. .. note:: This may result in negative frame indices. Returns ------- frames : int or np.ndarray [shape=(n,), dtype=int] Frame numbers corresponding to the given times: `frames[i] = floor( samples[i] / hop_length )` See Also -------- samples_to_time : convert sample indices to time values frames_to_samples : convert frame indices to sample indices """ offset = 0 if n_fft is not None: offset = int(n_fft // 2) samples = np.asanyarray(samples) return np.floor((samples - offset) // hop_length).astype(int)
[ "Converts", "sample", "indices", "into", "STFT", "frames", "." ]
librosa/librosa
python
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L71-L118
[ "def", "samples_to_frames", "(", "samples", ",", "hop_length", "=", "512", ",", "n_fft", "=", "None", ")", ":", "offset", "=", "0", "if", "n_fft", "is", "not", "None", ":", "offset", "=", "int", "(", "n_fft", "//", "2", ")", "samples", "=", "np", ".", "asanyarray", "(", "samples", ")", "return", "np", ".", "floor", "(", "(", "samples", "-", "offset", ")", "//", "hop_length", ")", ".", "astype", "(", "int", ")" ]
180e8e6eb8f958fa6b20b8cba389f7945d508247
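Because the conversion floors, samples_to_frames and frames_to_samples are not exact inverses; a quick doctest-style check of the round trip under the documented default hop_length=512:

>>> librosa.samples_to_frames([0, 511, 512, 1024])
array([0, 0, 1, 2])
>>> # round-tripping snaps a sample index down to the first sample of its frame
>>> librosa.frames_to_samples(librosa.samples_to_frames([700]))
array([512])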
test
frames_to_time
Converts frame counts to time (seconds). Parameters ---------- frames : np.ndarray [shape=(n,)] frame index or vector of frame indices sr : number > 0 [scalar] audio sampling rate hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `n_fft / 2` to counteract windowing effects when using a non-centered STFT. Returns ------- times : np.ndarray [shape=(n,)] time (in seconds) of each given frame number: `times[i] = frames[i] * hop_length / sr` See Also -------- time_to_frames : convert time values to frame indices frames_to_samples : convert frame indices to sample indices Examples -------- >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> tempo, beats = librosa.beat.beat_track(y, sr=sr) >>> beat_times = librosa.frames_to_time(beats, sr=sr)
librosa/core/time_frequency.py
def frames_to_time(frames, sr=22050, hop_length=512, n_fft=None): """Converts frame counts to time (seconds). Parameters ---------- frames : np.ndarray [shape=(n,)] frame index or vector of frame indices sr : number > 0 [scalar] audio sampling rate hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `n_fft / 2` to counteract windowing effects when using a non-centered STFT. Returns ------- times : np.ndarray [shape=(n,)] time (in seconds) of each given frame number: `times[i] = frames[i] * hop_length / sr` See Also -------- time_to_frames : convert time values to frame indices frames_to_samples : convert frame indices to sample indices Examples -------- >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> tempo, beats = librosa.beat.beat_track(y, sr=sr) >>> beat_times = librosa.frames_to_time(beats, sr=sr) """ samples = frames_to_samples(frames, hop_length=hop_length, n_fft=n_fft) return samples_to_time(samples, sr=sr)
def frames_to_time(frames, sr=22050, hop_length=512, n_fft=None): """Converts frame counts to time (seconds). Parameters ---------- frames : np.ndarray [shape=(n,)] frame index or vector of frame indices sr : number > 0 [scalar] audio sampling rate hop_length : int > 0 [scalar] number of samples between successive frames n_fft : None or int > 0 [scalar] Optional: length of the FFT window. If given, time conversion will include an offset of `n_fft / 2` to counteract windowing effects when using a non-centered STFT. Returns ------- times : np.ndarray [shape=(n,)] time (in seconds) of each given frame number: `times[i] = frames[i] * hop_length / sr` See Also -------- time_to_frames : convert time values to frame indices frames_to_samples : convert frame indices to sample indices Examples -------- >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> tempo, beats = librosa.beat.beat_track(y, sr=sr) >>> beat_times = librosa.frames_to_time(beats, sr=sr) """ samples = frames_to_samples(frames, hop_length=hop_length, n_fft=n_fft) return samples_to_time(samples, sr=sr)
[ "Converts", "frame", "counts", "to", "time", "(", "seconds", ")", "." ]
librosa/librosa
python
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L121-L162
[ "def", "frames_to_time", "(", "frames", ",", "sr", "=", "22050", ",", "hop_length", "=", "512", ",", "n_fft", "=", "None", ")", ":", "samples", "=", "frames_to_samples", "(", "frames", ",", "hop_length", "=", "hop_length", ",", "n_fft", "=", "n_fft", ")", "return", "samples_to_time", "(", "samples", ",", "sr", "=", "sr", ")" ]
180e8e6eb8f958fa6b20b8cba389f7945d508247
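frames_to_time is simply frames_to_samples followed by samples_to_time, so under the defaults each frame index i maps to i * hop_length / sr seconds; a short sketch with the default sr=22050 and hop_length=512:

>>> times = librosa.frames_to_time([0, 43, 86])
>>> # times is approximately [0.0, 0.998, 1.997]: 43 frames * 512 samples is just under one second of audio at 22050 Hz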