Open a dataset and add it to the UI
def open(self, path):
    """Open a dataset and add it to the UI."""
    logger.debug("open dataset: %r", path)
    if path.startswith("http") or path.startswith("ws"):
        dataset = vaex.open(path, thread_mover=self.call_in_main_thread)
    else:
        dataset = vaex.open(path)
    self.add_recently_opened(path)
    self.dataset_selector.add(dataset)
    return dataset
Basic support for evaluate on the server, at least enough to run some unit tests; do not expect this to work for strings
def evaluate(self, expression, i1=None, i2=None, out=None, selection=None, delay=False):
    """Basic support for evaluate on the server, at least enough to run some unit tests; do not expect this to work for strings."""
    expression = _ensure_strings_from_expressions(expression)
    result = self.server._call_dataset("evaluate", self, expression=expression, i1=i1, i2=i2, selection=selection, delay=delay)
    # TODO: we ignore out
    return result
Decorator to transparently accept delayed computation.
def delayed(f):
    '''Decorator to transparently accept delayed computation.

    Example:

    >>> delayed_sum = ds.sum(ds.E, binby=ds.x, limits=limits,
    >>>                      shape=4, delay=True)
    >>> @vaex.delayed
    >>> def total_sum(sums):
    >>>     return sums.sum()
    >>> sum_of_sums = total_sum(delayed_sum)
    >>> ds.execute()
    >>> sum_of_sums.get()

    See the tutorial for a more complete example https://docs.vaex.io/en/latest/tutorial.html#Parallel-computations
    '''
    def wrapped(*args, **kwargs):
        # turn every positional and keyword argument into a promise
        key_promise = [(key, promisify(value)) for key, value in kwargs.items()]
        arg_promises = [promisify(value) for value in args]
        kwarg_promises = [promise for key, promise in key_promise]
        promises = arg_promises + kwarg_promises
        allarguments = aplus.listPromise(*promises)

        def call(_):
            # all promises are fulfilled: unwrap them and call the real function
            kwargs_real = {key: promise.get() for key, promise in key_promise}
            args_real = [promise.get() for promise in arg_promises]
            return f(*args_real, **kwargs_real)

        def error(exc):
            print("error", exc)
            raise exc
        return allarguments.then(call, error)
    return wrapped
Find all columns that this selection depends on, for df ds
def _depending_columns(self, ds):
    '''Find all columns that this selection depends on, for df ds.'''
    depending = set()
    for expression in self.expressions:
        expression = ds._expr(expression)  # make sure it is an expression
        depending |= expression.variables()
    if self.previous_selection:
        depending |= self.previous_selection._depending_columns(ds)
    return depending
TODO: doc + server side implementation
def limits(self, value, square=False):
    """TODO: doc + server side implementation"""
    if isinstance(value, six.string_types):
        import re
        match = re.match(r"(\d*)(\D*)", value)
        if match is None:
            raise ValueError("do not understand limit specifier %r, examples are 90%%, 3sigma" % value)
        else:
            value, type = match.groups()
            import ast
            value = ast.literal_eval(value)
            type = type.strip()
            if type in ["s", "sigma"]:
                return self.limits_sigma(value)
            elif type in ["ss", "sigmasquare"]:
                return self.limits_sigma(value, square=True)
            elif type in ["%", "percent"]:
                return self.limits_percentage(value)
            elif type in ["%s", "%square", "percentsquare"]:
                return self.limits_percentage(value, square=True)
    if value is None:
        return self.limits_percentage(square=square)
    else:
        return value
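For reference, the specifier grammar accepted above is a number followed by a unit word; a minimal standalone sketch of the same regex parsing (parse_limit_spec is a hypothetical helper, not part of vaex):

import ast
import re

def parse_limit_spec(spec):
    # Split "90%" -> (90, "%") and "3sigma" -> (3, "sigma"),
    # mirroring the regex used in limits() above.
    number, kind = re.match(r"(\d*)(\D*)", spec).groups()
    return ast.literal_eval(number), kind.strip()

print(parse_limit_spec("90%"))     # (90, '%')
print(parse_limit_spec("3sigma"))  # (3, 'sigma')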
Plot the subspace using sane defaults to get a quick look at the data.
def plot(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, weight_stat="mean",
         figsize=None, aspect="auto", f="identity", axes=None, xlabel=None, ylabel=None,
         group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=None,
         vmin=None, vmax=None, cmap="afmhot", **kwargs):
    """Plot the subspace using sane defaults to get a quick look at the data.

    :param grid: A 2d numpy array with the counts, if None it will be calculated using limits provided and Subspace.histogram
    :param size: Passed to Subspace.histogram
    :param limits: Limits for the subspace in the form [[xmin, xmax], [ymin, ymax]], if None it will be calculated using Subspace.limits_sigma
    :param square: argument passed to Subspace.limits_sigma
    :param Executor executor: responsible for executing the tasks
    :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size
    :param aspect: Passed to matplotlib's axes.set_aspect
    :param xlabel: String for label on x axis (may contain latex)
    :param ylabel: Same for y axis
    :param kwargs: extra argument passed to axes.imshow, useful for setting the colormap for instance, e.g. cmap='afmhot'
    :return: matplotlib.image.AxesImage
    """
    import pylab
    f = _parse_f(f)
    limits = self.limits(limits)
    if limits is None:
        limits = self.limits_sigma()
    if group_limits is None and group_by:
        group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)
    if figsize is not None:
        pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
    if axes is None:
        axes = pylab.gca()
    fig = pylab.gcf()
    pylab.xlabel(xlabel or self.expressions[0])
    pylab.ylabel(ylabel or self.expressions[1])
    rgba8 = self.image_rgba(grid=grid, size=size, limits=limits, square=square, center=center, weight=weight,
                            weight_stat=weight_stat, f=f, axes=axes,
                            group_by=group_by, group_limits=group_limits, group_colors=group_colors,
                            group_count=group_count, vmin=vmin, vmax=vmax, cmap=cmap)
    import matplotlib
    if group_by:
        if isinstance(group_colors, six.string_types):
            group_colors = matplotlib.cm.get_cmap(group_colors)
        if isinstance(group_colors, matplotlib.colors.Colormap):
            group_count = group_limits[2]
            colors = [group_colors(k / float(group_count - 1.)) for k in range(group_count)]
        else:
            colors = [matplotlib.colors.colorConverter.to_rgba(k) for k in group_colors]
        colormap = matplotlib.colors.ListedColormap(colors)
        gmin, gmax, group_count = group_limits
        delta = (gmax - gmin) / (group_count - 1.)
        norm = matplotlib.colors.Normalize(gmin - delta / 2, gmax + delta / 2)
        sm = matplotlib.cm.ScalarMappable(norm, colormap)
        sm.set_array(1)  # make matplotlib happy (strange behaviour)
        colorbar = fig.colorbar(sm)
        if group_labels:
            colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))
            colorbar.set_ticklabels(group_labels)
        else:
            colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))
            colorbar.set_ticklabels(["%f" % x for x in np.arange(gmin, gmax + delta / 2, delta)])
        colorbar.ax.set_ylabel(group_by)
        im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin="lower", aspect=aspect, **kwargs)
    else:
        norm = matplotlib.colors.Normalize(0, 23)
        sm = matplotlib.cm.ScalarMappable(norm, cmap)
        sm.set_array(1)  # make matplotlib happy (strange behaviour)
        colorbar = fig.colorbar(sm)
        im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin="lower", aspect=aspect, **kwargs)
        colorbar = None
    return im, colorbar
Plot the subspace using sane defaults to get a quick look at the data.
def plot1d(self, grid=None, size=64, limits=None, weight=None, figsize=None, f="identity", axes=None, xlabel=None, ylabel=None, **kwargs):
    """Plot the subspace using sane defaults to get a quick look at the data.

    :param grid: A 1d numpy array with the counts, if None it will be calculated using limits provided and Subspace.histogram
    :param size: Passed to Subspace.histogram
    :param limits: Limits for the subspace in the form [[xmin, xmax]], if None it will be calculated using Subspace.limits_sigma
    :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size
    :param xlabel: String for label on x axis (may contain latex)
    :param ylabel: Same for y axis
    :param kwargs: extra arguments passed to pylab.plot
    """
    import pylab
    f = _parse_f(f)
    limits = self.limits(limits)
    assert self.dimension == 1, "can only plot 1d, not %s" % self.dimension
    if limits is None:
        limits = self.limits_sigma()
    if grid is None:
        grid = self.histogram(limits=limits, size=size, weight=weight)
    if figsize is not None:
        pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
    if axes is None:
        axes = pylab.gca()
    pylab.xlabel(xlabel or self.expressions[0])
    pylab.ylabel(ylabel or "counts")
    N = len(grid)
    xmin, xmax = limits[0]
    return pylab.plot(np.arange(N) / (N - 1.0) * (xmax - xmin) + xmin, f(grid), drawstyle="steps", **kwargs)
Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.limits_sigma()
def bounded_by_sigmas(self, sigmas=3, square=False):
    """Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.limits_sigma()

    :rtype: SubspaceBounded
    """
    bounds = self.limits_sigma(sigmas=sigmas, square=square)
    return SubspaceBounded(self, bounds)
Helper function for returning task results: the result when immediate is True, otherwise the task itself, which is a promise
def _task(self, task, progressbar=False):
    """Helper function for returning task results: the result when immediate is True, otherwise the task itself, which is a promise."""
    if self.delay:
        # should return a task or a promise nesting it
        return self.executor.schedule(task)
    else:
        import vaex.utils
        callback = None
        try:
            if progressbar is True:
                def update(fraction):
                    bar.update(fraction)
                    return True
                bar = vaex.utils.progressbar(task.name)
                callback = self.executor.signal_progress.connect(update)
            elif progressbar:
                callback = self.executor.signal_progress.connect(progressbar)
            result = self.executor.run(task)
            if progressbar is True:
                bar.finish()
                sys.stdout.write('\n')
            return result
        finally:
            if callback:
                self.executor.signal_progress.disconnect(callback)
Sort table by given column number.
def sort(self, Ncol, order):
    """Sort table by given column number."""
    self.emit(QtCore.SIGNAL("layoutAboutToBeChanged()"))
    if Ncol == 0:
        # get indices, sorted by pair name
        sortlist = list(zip(self.pairs, range(len(self.pairs))))
        sortlist.sort(key=operator.itemgetter(0))
        self.indices = list(map(operator.itemgetter(1), sortlist))
    if Ncol == 1:
        # get indices, sorted by ranking, or no sorting when a ranking is missing
        if None not in self.ranking:
            sortlist = list(zip(self.ranking, range(len(self.pairs))))
            sortlist.sort(key=operator.itemgetter(0))
            self.indices = list(map(operator.itemgetter(1), sortlist))
        else:
            self.indices = list(range(len(self.pairs)))
    if order == QtCore.Qt.DescendingOrder:
        self.indices.reverse()
    self.emit(QtCore.SIGNAL("layoutChanged()"))
:param DatasetLocal dataset: dataset to export
:param str path: path for file
:param list[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True
:param bool virtual: When True, export virtual columns
:return:
def export_hdf5_v1(dataset, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True):
    """
    :param DatasetLocal dataset: dataset to export
    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True
    :param bool virtual: When True, export virtual columns
    :return:
    """
    if selection:
        if selection is True:  # easier to work with the name
            selection = "default"
    # first open file using the h5py api
    with h5py.File(path, "w") as h5file_output:
        h5data_output = h5file_output.require_group("data")
        N = len(dataset) if not selection else dataset.selected_length(selection)
        if N == 0:
            raise ValueError("Cannot export empty table")
        logger.debug("virtual=%r", virtual)
        logger.debug("exporting %d rows to file %s" % (N, path))
        column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True)
        logger.debug("exporting columns(hdf5): %r" % column_names)
        for column_name in column_names:
            if column_name in dataset.get_column_names(strings=True):
                column = dataset.columns[column_name]
                shape = (N,) + column.shape[1:]
                dtype = column.dtype
            else:
                dtype = np.float64().dtype
                shape = (N,)
            if dtype.type == np.datetime64:
                array = h5file_output.require_dataset("/data/%s" % column_name, shape=shape, dtype=np.int64)
                array.attrs["dtype"] = dtype.name
            else:
                try:
                    array = h5file_output.require_dataset("/data/%s" % column_name, shape=shape, dtype=dtype.newbyteorder(byteorder))
                except Exception:
                    logging.exception("error creating dataset for %r, with type %r " % (column_name, dtype))
            array[0] = array[0]  # make sure the array really exists
        random_index_name = None
        column_order = list(column_names)  # copy
        if shuffle:
            random_index_name = "random_index"
            while random_index_name in dataset.get_column_names():
                random_index_name += "_new"
            shuffle_array = h5file_output.require_dataset("/data/" + random_index_name, shape=(N,), dtype=byteorder + "i8")
            shuffle_array[0] = shuffle_array[0]
            column_order.append(random_index_name)  # last item
        h5data_output.attrs["column_order"] = ",".join(column_order)  # keep track of the ordering of columns
    # after this the file is closed, and we reopen it using our class
    dataset_output = vaex.hdf5.dataset.Hdf5MemoryMapped(path, write=True)
    column_names = vaex.export._export(dataset_input=dataset, dataset_output=dataset_output, path=path,
                                       random_index_column=random_index_name,
                                       column_names=column_names, selection=selection, shuffle=shuffle,
                                       byteorder=byteorder, progress=progress)
    import getpass
    import datetime
    user = getpass.getuser()
    date = str(datetime.datetime.now())
    source = dataset.path
    description = "file exported by vaex, by user %s, on date %s, from source %s" % (user, date, source)
    if dataset.description:
        description += "previous description:\n" + dataset.description
    dataset_output.copy_metadata(dataset)
    dataset_output.description = description
    logger.debug("writing meta information")
    dataset_output.write_meta()
    dataset_output.close_files()
    return
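The on-disk layout written above (a "data" group holding one dataset per column, plus a column_order attribute) can be reproduced with plain h5py; a minimal sketch, with "example.hdf5" as an arbitrary file name:

import h5py
import numpy as np

with h5py.File("example.hdf5", "w") as f:
    data = f.require_group("data")            # the /data group vaex reads back
    x = f.require_dataset("/data/x", shape=(5,), dtype=np.float64)
    x[:] = np.arange(5.0)                     # one dataset per column
    data.attrs["column_order"] = "x"          # bookkeeping attribute, comma-separated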
Read header data from Gadget data file 'filename' with Gadget file type 'gtype'. Returns offsets of positions and velocities.
def getinfo(filename, seek=None):
    """Read header data from Gadget data file 'filename' with Gadget file type 'gtype'.
    Returns offsets of positions and velocities."""
    DESC = '=I4sII'  # struct formatting string
    HEAD = '=I6I6dddii6iiiddddii6ii60xI'  # struct formatting string
    keys = ('Npart', 'Massarr', 'Time', 'Redshift', 'FlagSfr', 'FlagFeedback', 'Nall', 'FlagCooling',
            'NumFiles', 'BoxSize', 'Omega0', 'OmegaLambda', 'HubbleParam', 'FlagAge', 'FlagMetals',
            'NallHW', 'flag_entr_ics', 'filename')
    f = open(filename, 'rb')
    # Detect Gadget file type (type 1 or 2; resp. without or with the 16 byte block headers).
    firstbytes = struct.unpack('I', f.read(4))
    if firstbytes[0] == 8:
        gtype = 2
    else:
        gtype = 1
    if gtype == 2:
        f.seek(16)
    else:
        f.seek(0)
    if seek is not None:
        f.seek(seek)
    raw = struct.unpack(HEAD, f.read(264))[1:-1]
    values = (raw[:6], raw[6:12]) + raw[12:16] + (raw[16:22],) + raw[22:30] + (raw[30:36], raw[36], filename)
    header = dict(zip(keys, values))
    f.close()
    if gtype == 2:
        posoffset = 2 * 16 + (8 + 256)
    else:
        posoffset = 8 + 256
    Npart = sum(header['Npart'])
    if gtype == 2:
        veloffset = 3 * 16 + (8 + 256) + (8 + 3 * 4 * Npart)
    else:
        veloffset = (8 + 256) + (8 + 3 * 4 * Npart)
    return Npart, posoffset + 4, veloffset + 4, header
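As a sanity check on the header layout: the HEAD format is a 4-byte Fortran record marker, the 256-byte Gadget header (60 bytes of which is padding), and a 4-byte closing marker, which is exactly the 264 bytes read above:

import struct

HEAD = '=I6I6dddii6iiiddddii6ii60xI'
# 4 (record marker) + 256 (header incl. 60x padding) + 4 (closing marker)
assert struct.calcsize(HEAD) == 264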
:param DatasetLocal dataset: dataset to export
:param str path: path for file
:param list[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue
:return:
def _export(dataset_input, dataset_output, random_index_column, path, column_names=None, byteorder="=",
            shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
    """
    :param DatasetLocal dataset: dataset to export
    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue
    :return:
    """
    if selection:
        if selection is True:  # easier to work with the name
            selection = "default"
    N = len(dataset_input) if not selection else dataset_input.selected_length(selection)
    if N == 0:
        raise ValueError("Cannot export empty table")
    if shuffle and sort:
        raise ValueError("Cannot shuffle and sort at the same time")
    if shuffle:
        shuffle_array = dataset_output.columns[random_index_column]
    partial_shuffle = shuffle and len(dataset_input) != N
    order_array = None
    order_array_inverse = None
    # for strings we also need the inverse order_array, keep track of that
    has_strings = any([dataset_input.dtype(k) == str_type for k in column_names])
    if partial_shuffle:
        # if we only export a portion, we need to create the full length random_index array
        shuffle_array_full = np.random.choice(len(dataset_input), len(dataset_input), replace=False)
        # then take a section of it
        shuffle_array[:] = shuffle_array_full[shuffle_array_full < N]
        del shuffle_array_full
        order_array = shuffle_array
    elif shuffle:
        # better to do this in memory
        shuffle_array_memory = np.random.choice(N, N, replace=False)
        shuffle_array[:] = shuffle_array_memory
        order_array = shuffle_array
    if order_array is not None:
        indices_r = np.zeros_like(order_array)
        indices_r[order_array] = np.arange(len(order_array))
        order_array_inverse = indices_r
        del indices_r
    if sort:
        if selection:
            raise ValueError("sorting selections not yet supported")
        # these indices sort the input array, but we evaluate the input in sequential order
        # and write it out in sorted order
        # e.g., not b[:] = a[indices]
        # but b[indices_r] = a
        logger.info("sorting...")
        indices = np.argsort(dataset_input.evaluate(sort))
        indices_r = np.zeros_like(indices)
        indices_r[indices] = np.arange(len(indices))
        if has_strings:
            # in this case we already have the inverse ready
            order_array_inverse = indices if ascending else indices[::-1]
        else:
            del indices
        order_array = indices_r if ascending else indices_r[::-1]
        logger.info("sorting done")
    if progress is True:
        progress = vaex.utils.progressbar_callable(title="exporting")
    progress = progress or (lambda value: True)
    progress_total = len(column_names) * len(dataset_input)
    progress_status = ProgressStatus()
    progress_status.cancelled = False
    progress_status.value = 0
    if selection:
        full_mask = dataset_input.evaluate_selection_mask(selection)
    else:
        full_mask = None
    sparse_groups = collections.defaultdict(list)
    sparse_matrices = {}  # alternative to a set of matrices, since they are not hashable
    string_columns = []
    futures = []
    thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    for column_name in column_names:
        sparse_matrix = dataset_output._sparse_matrix(column_name)
        if sparse_matrix is not None:
            # sparse columns are written differently
            sparse_groups[id(sparse_matrix)].append(column_name)
            sparse_matrices[id(sparse_matrix)] = sparse_matrix
            continue
        logger.debug("  exporting column: %s " % column_name)
        future = thread_pool.submit(_export_column, dataset_input, dataset_output, column_name, full_mask,
                                    shuffle, sort, selection, N, order_array, order_array_inverse, progress_status)
        futures.append(future)
    done = False
    while not done:
        done = True
        for future in futures:
            try:
                future.result(0.1 / 4)
            except concurrent.futures.TimeoutError:
                done = False
                break
        if not done:
            if not progress(progress_status.value / float(progress_total)):
                progress_status.cancelled = True
    for sparse_matrix_id, column_names in sparse_groups.items():
        sparse_matrix = sparse_matrices[sparse_matrix_id]
        for column_name in column_names:
            assert not shuffle
            assert selection in [None, False]
            column = dataset_output.columns[column_name]
            column.matrix.data[:] = dataset_input.columns[column_name].matrix.data
            column.matrix.indptr[:] = dataset_input.columns[column_name].matrix.indptr
            column.matrix.indices[:] = dataset_input.columns[column_name].matrix.indices
    return column_names
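The sort branch above relies on the inverse-permutation trick: instead of gathering with b = a[indices], it scatters with b[indices_r] = a, so the input can be read sequentially. A small numpy illustration that the two are equivalent:

import numpy as np

a = np.array([30, 10, 20])
indices = np.argsort(a)                       # order that would sort a
indices_r = np.zeros_like(indices)
indices_r[indices] = np.arange(len(indices))  # inverse permutation

b1 = a[indices]                               # gather: random reads, sequential writes
b2 = np.empty_like(a)
b2[indices_r] = a                             # scatter: sequential reads, random writes
assert (b1 == b2).all()                       # both give [10, 20, 30]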
:param DatasetLocal dataset: dataset to export
:param str path: path for file
:param list[str] column_names: list of column names to export or None for all columns
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True
:param bool virtual: When True, export virtual columns
:return:
def export_fits(dataset, path, column_names=None, shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
    """
    :param DatasetLocal dataset: dataset to export
    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True
    :param bool virtual: When True, export virtual columns
    :return:
    """
    if shuffle:
        random_index_name = "random_index"
        while random_index_name in dataset.get_column_names():
            random_index_name += "_new"
    column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True)
    logger.debug("exporting columns(fits): %r" % column_names)
    N = len(dataset) if not selection else dataset.selected_length(selection)
    data_types = []
    data_shapes = []
    ucds = []
    units = []
    for column_name in column_names:
        if column_name in dataset.get_column_names(strings=True):
            column = dataset.columns[column_name]
            shape = (N,) + column.shape[1:]
            dtype = column.dtype
            if dataset.dtype(column_name) == str_type:
                max_length = dataset[column_name].apply(lambda x: len(x)).max(selection=selection)
                dtype = np.dtype('S' + str(int(max_length)))
        else:
            dtype = np.float64().dtype
            shape = (N,)
        ucds.append(dataset.ucds.get(column_name))
        units.append(dataset.units.get(column_name))
        data_types.append(dtype)
        data_shapes.append(shape)
    if shuffle:
        column_names.append(random_index_name)
        data_types.append(np.int64().dtype)
        data_shapes.append((N,))
        ucds.append(None)
        units.append(None)
    else:
        random_index_name = None
    # TODO: all expressions can have missing values.. how to support that?
    null_values = {key: dataset.columns[key].fill_value for key in dataset.get_column_names()
                   if dataset.is_masked(key) and dataset.dtype(key).kind != "f"}
    vaex.file.colfits.empty(path, N, column_names, data_types, data_shapes, ucds, units, null_values=null_values)
    if shuffle:
        del column_names[-1]
        del data_types[-1]
        del data_shapes[-1]
    dataset_output = vaex.file.other.FitsBinTable(path, write=True)
    _export(dataset_input=dataset, dataset_output=dataset_output, path=path, random_index_column=random_index_name,
            column_names=column_names, selection=selection, shuffle=shuffle, progress=progress, sort=sort, ascending=ascending)
    dataset_output.close_files()
clear the cursor
def clear(self, event):
    """clear the cursor"""
    if self.useblit:
        self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)
    for line in self.vlines + self.hlines:
        line.set_visible(False)
    self.ellipse.set_visible(False)
Used for unit testing to make sure the plots are all done
def _wait(self):
    """Used for unit testing to make sure the plots are all done."""
    logger.debug("will wait for last plot to finish")
    self._plot_event = threading.Event()
    self.queue_update._wait()
    self.queue_replot._wait()
    self.queue_redraw._wait()
    qt_app = QtCore.QCoreApplication.instance()
    sleep = 10
    while not self._plot_event.is_set():
        logger.debug("waiting for last plot to finish")
        qt_app.processEvents()
        QtTest.QTest.qSleep(sleep)
    logger.debug("waiting for plot finished")
Each layer has its own ranges_grid computed now, unless something went wrong. But all layers are shown with the same ranges (self.state.ranges_viewport). If any of the ranges is None, take the min/max over the layers.
def _update_step2(self, layers):
    """Each layer has its own ranges_grid computed now, unless something went wrong.
    But all layers are shown with the same ranges (self.state.ranges_viewport).
    If any of the ranges is None, take the min/max over the layers.
    """
    logger.info("done with ranges, now update step2 for layers: %r", layers)
    for dimension in range(self.dimensions):
        if self.state.ranges_viewport[dimension] is None:
            vmin = min([layer.state.ranges_grid[dimension][0] for layer in layers])
            vmax = max([layer.state.ranges_grid[dimension][1] for layer in layers])
            self.state.ranges_viewport[dimension] = [vmin, vmax]
    logger.debug("ranges before aspect check: %r", self.state.ranges_viewport)
    self.check_aspect(0)
    logger.debug("ranges after aspect check: %r", self.state.ranges_viewport)
    # now make sure the layers all have the same ranges_grid
    for layer in layers:
        for d in range(layer.dimensions):
            layer.set_range(self.state.ranges_viewport[d][0], self.state.ranges_viewport[d][1], d)
    # now we are ready to calculate histograms
    promises = [layer.add_tasks_histograms() for layer in layers]
    executors = list(set([layer.dataset.executor for layer in layers]))
    for executor in executors:
        executor.execute()
    promises_histograms_done = vaex.promise.listPromise(promises)
    promises_histograms_done.then(self._update_step3, self.on_error_or_cancel).end()
Generates a list with start and stop indices of length parts: [(0, length/parts), ..., (..., length)]
def subdivide(length, parts=None, max_length=None):
    """Generates a list with start and stop indices of length parts: [(0, length/parts), ..., (..., length)]"""
    if max_length:
        i1 = 0
        done = False
        while not done:
            i2 = min(length, i1 + max_length)
            yield i1, i2
            i1 = i2
            if i1 == length:
                done = True
    else:
        part_length = int(math.ceil(float(length) / parts))
        for index in range(parts):
            i1, i2 = index * part_length, min(length, (index + 1) * part_length)
            yield i1, i2
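A quick illustration of both modes (plain Python, assuming subdivide is in scope):

# chunk 10 rows into 3 roughly equal parts
print(list(subdivide(10, parts=3)))       # [(0, 4), (4, 8), (8, 10)]

# or cap every chunk at a maximum length
print(list(subdivide(10, max_length=4)))  # [(0, 4), (4, 8), (8, 10)]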
Open a document with the default handler of the OS; could be a URL opened by a browser, a text file by an editor, etc.
def os_open(document):
    """Open a document with the default handler of the OS; could be a URL opened by a browser, a text file by an editor, etc."""
    osname = platform.system().lower()
    if osname == "darwin":
        os.system("open \"" + document + "\"")
    if osname == "linux":
        cmd = "xdg-open \"" + document + "\"&"
        os.system(cmd)
    if osname == "windows":
        os.system("start \"" + document + "\"")
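Building shell commands by string concatenation, as above, breaks on documents containing quotes and is prone to shell injection; a sketch of a safer variant using subprocess and os.startfile (same intent, not the original implementation):

import platform
import subprocess

def os_open_safe(document):
    """Open a document with the OS default handler, without going through a shell."""
    osname = platform.system().lower()
    if osname == "darwin":
        subprocess.Popen(["open", document])      # argument list: no shell quoting issues
    elif osname == "linux":
        subprocess.Popen(["xdg-open", document])
    elif osname == "windows":
        import os
        os.startfile(document)                    # Windows-native, needs no shell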
Flexible writing, where f can be a filename or file object; if a filename, it is closed after writing
def write_to(f, mode):
    """Flexible writing, where f can be a filename or file object; if a filename, it is closed after writing."""
    if hasattr(f, 'write'):
        yield f
    else:
        f = open(f, mode)
        yield f
        f.close()
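write_to is a generator, so it is presumably wrapped with contextlib.contextmanager where it is defined; under that assumption, usage looks like:

import contextlib
import io

write_to = contextlib.contextmanager(write_to)  # assumption: decorated at its definition site

with write_to("out.txt", "w") as f:   # filename: opened and closed for you
    f.write("hello\n")

buf = io.StringIO()
with write_to(buf, "w") as f:         # file object: passed through untouched
    f.write("hello\n")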
Combines all masks from a list of arrays, logically OR-ing them into a single mask
def _split_and_combine_mask(arrays):
    '''Combines all masks from a list of arrays, logically OR-ing them into a single mask.'''
    masks = [np.ma.getmaskarray(block) for block in arrays if np.ma.isMaskedArray(block)]
    arrays = [block.data if np.ma.isMaskedArray(block) else block for block in arrays]
    mask = None
    if masks:
        mask = masks[0].copy()
        for other in masks[1:]:
            mask |= other
    return arrays, mask
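A small self-contained numpy example of what this helper returns (assuming _split_and_combine_mask is in scope):

import numpy as np

a = np.ma.masked_array([1, 2, 3], mask=[True, False, False])
b = np.ma.masked_array([4, 5, 6], mask=[False, False, True])
c = np.array([7, 8, 9])  # plain array, contributes no mask

arrays, mask = _split_and_combine_mask([a, b, c])
print(arrays)  # plain data arrays: [array([1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
print(mask)    # [ True False  True] -- element-wise OR of all masks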
Plot counting contours on a 2D grid.
def plot2d_contour(self, x=None, y=None, what="count(*)", limits=None, shape=256, selection=None, f="identity",
                   figsize=None, xlabel=None, ylabel=None, aspect="auto",
                   levels=None, fill=False, colorbar=False, colorbar_label=None,
                   colormap=None, colors=None, linewidths=None, linestyles=None,
                   vmin=None, vmax=None, grid=None, show=None, **kwargs):
    """Plot counting contours on a 2D grid.

    :param x: {expression}
    :param y: {expression}
    :param what: What to plot, count(*) will show a N-d histogram, mean('x'), the mean of the x column, sum('x') the sum, std('x') the standard deviation, correlation('vx', 'vy') the correlation coefficient. Can also be a list of values, like ['count(x)', std('vx')], (by default maps to column)
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param f: transform values by: 'identity' does nothing, 'log' or 'log10' will show the log of the value
    :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size
    :param xlabel: label of the x-axis (defaults to param x)
    :param ylabel: label of the y-axis (defaults to param y)
    :param aspect: the aspect ratio of the figure
    :param levels: the contour levels to be passed on to pylab.contour or pylab.contourf
    :param colorbar: plot a colorbar or not
    :param colorbar_label: the label of the colourbar (defaults to param what)
    :param colormap: matplotlib colormap to pass on to pylab.contour or pylab.contourf
    :param colors: the colours of the contours
    :param linewidths: the widths of the contours
    :param linestyles: the style of the contour lines
    :param vmin: instead of automatic normalization, scale the data between vmin and vmax
    :param vmax: see vmin
    :param grid: {grid}
    :param show:
    """
    # Get the function out of the string
    f = vaex.dataset._parse_f(f)
    # Internals on what to bin
    binby = []
    x = vaex.dataset._ensure_strings_from_expressions(x)
    y = vaex.dataset._ensure_strings_from_expressions(y)
    for expression in [y, x]:
        if expression is not None:
            binby = [expression] + binby
    # The shape
    shape = vaex.dataset._expand_shape(shape, 2)
    # The limits
    limits = self.limits(binby, limits)
    # Constructing the 2d histogram
    if grid is None:
        if what:
            if isinstance(what, vaex.stat.Expression):
                grid = what.calculate(self, binby=binby, limits=limits, shape=shape, selection=selection)
            else:
                what = what.strip()
                groups = re.match(r"(.*)\((.*)\)", what).groups()
                if groups and len(groups) == 2:
                    function = groups[0]
                    arguments = groups[1].strip()
                    functions = ["mean", "sum", "std", "count"]
                    if function in functions:
                        grid = getattr(vaex.stat, function)(arguments).calculate(self, binby=binby, limits=limits, shape=shape, selection=selection)
                    elif function == "count" and arguments == "*":
                        grid = self.count(binby=binby, shape=shape, limits=limits, selection=selection)
                    elif function == "cumulative" and arguments == "*":
                        # TODO: cumulative should also include the tails outside limits
                        grid = self.count(binby=binby, shape=shape, limits=limits, selection=selection)
                        grid = np.cumsum(grid)
                    else:
                        raise ValueError("Could not understand method: %s, expected one of %r" % (function, functions))
                else:
                    raise ValueError("Could not understand 'what' argument %r, expected something in form: 'count(*)', 'mean(x)'" % what)
        else:
            grid = self.histogram(binby, size=shape, limits=limits, selection=selection)
    # Apply the function on the grid
    fgrid = f(grid)
    # Figure creation
    if figsize is not None:
        fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
    fig = plt.gcf()
    # labels
    plt.xlabel(xlabel or x)
    plt.ylabel(ylabel or y)
    # The master contour plot
    if not fill:
        value = plt.contour(fgrid.T, origin="lower", extent=np.array(limits).ravel().tolist(),
                            linestyles=linestyles, linewidths=linewidths, levels=levels,
                            colors=colors, cmap=colormap, vmin=vmin, vmax=vmax, **kwargs)
    else:
        value = plt.contourf(fgrid.T, origin="lower", extent=np.array(limits).ravel().tolist(),
                             linestyles=linestyles, levels=levels, colors=colors, cmap=colormap,
                             vmin=vmin, vmax=vmax, **kwargs)
    if colorbar:
        plt.colorbar(label=colorbar_label or what)
    # Wrap things up
    if show:
        plt.show()
    return value
:param DatasetLocal dataset: dataset to export
:param str path: path for file
:param list[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True
:param bool virtual: When True, export virtual columns
:return:
def _export_table(dataset, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
    """
    :param DatasetLocal dataset: dataset to export
    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True
    :param bool virtual: When True, export virtual columns
    :return:
    """
    column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True)
    for name in column_names:
        if name not in dataset.columns:
            warnings.warn('Exporting to arrow with virtual columns is not efficient')
    N = len(dataset) if not selection else dataset.selected_length(selection)
    if N == 0:
        raise ValueError("Cannot export empty table")
    if shuffle and sort:
        raise ValueError("Cannot shuffle and sort at the same time")
    if shuffle:
        random_index_column = "random_index"
        while random_index_column in dataset.get_column_names():
            random_index_column += "_new"
    partial_shuffle = shuffle and len(dataset) != N
    order_array = None
    if partial_shuffle:
        # if we only export a portion, we need to create the full length random_index array
        shuffle_array_full = np.random.choice(len(dataset), len(dataset), replace=False)
        # then take a section of it
        shuffle_array = shuffle_array_full[shuffle_array_full < N]
        del shuffle_array_full
        order_array = shuffle_array
    elif shuffle:
        shuffle_array = np.random.choice(N, N, replace=False)
        order_array = shuffle_array
    if sort:
        if selection:
            raise ValueError("sorting selections not yet supported")
        logger.info("sorting...")
        indices = np.argsort(dataset.evaluate(sort))
        order_array = indices if ascending else indices[::-1]
        logger.info("sorting done")
    if selection:
        full_mask = dataset.evaluate_selection_mask(selection)
    else:
        full_mask = None
    arrow_arrays = []
    for column_name in column_names:
        mask = full_mask
        if selection:
            values = dataset.evaluate(column_name, filtered=False)
            values = values[mask]
        else:
            values = dataset.evaluate(column_name)
        if shuffle or sort:
            indices = order_array
            values = values[indices]
        arrow_arrays.append(arrow_array_from_numpy_array(values))
    if shuffle:
        arrow_arrays.append(arrow_array_from_numpy_array(order_array))
        column_names = column_names + [random_index_column]
    table = pa.Table.from_arrays(arrow_arrays, column_names)
    return table
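For reference, the final pa.Table.from_arrays call simply pairs one arrow array with each column name; a minimal standalone pyarrow example:

import numpy as np
import pyarrow as pa

arrays = [pa.array(np.arange(3)), pa.array(["a", "b", "c"])]
table = pa.Table.from_arrays(arrays, ["x", "name"])
print(table.num_rows, table.column_names)  # 3 ['x', 'name']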
Evaluates the expression and drops the result; useful for benchmarking, since vaex is usually lazy
def nop(self, expression, progress=False, delay=False):
    """Evaluates the expression and drops the result; useful for benchmarking, since vaex is usually lazy."""
    expression = _ensure_string_from_expression(expression)

    def map(ar):
        pass

    def reduce(a, b):
        pass
    return self.map_reduce(map, reduce, [expression], delay=delay, progress=progress, name='nop', to_numpy=False)
Estimate the mutual information between x and y on a grid with shape mi_shape and mi_limits, possibly on a grid defined by binby.
def mutual_information(self, x, y=None, mi_limits=None, mi_shape=256, binby=[], limits=None, shape=default_shape, sort=False, selection=False, delay=False):
    """Estimate the mutual information between x and y on a grid with shape mi_shape and mi_limits, possibly on a grid defined by binby.

    If sort is True, the mutual information is returned in sorted (descending) order and the list of expressions is returned in the same order.

    Example:

    >>> df.mutual_information("x", "y")
    array(0.1511814526380327)
    >>> df.mutual_information([["x", "y"], ["x", "z"], ["E", "Lz"]])
    array([ 0.15118145,  0.18439181,  1.07067379])
    >>> df.mutual_information([["x", "y"], ["x", "z"], ["E", "Lz"]], sort=True)
    (array([ 1.07067379,  0.18439181,  0.15118145]),
     [['E', 'Lz'], ['x', 'z'], ['x', 'y']])

    :param x: {expression}
    :param y: {expression}
    :param limits: {limits}
    :param shape: {shape}
    :param binby: {binby}
    :param sort: return mutual information in sorted (descending) order, and also return the corresponding list of expressions when sorted is True
    :param selection: {selection}
    :param delay: {delay}
    :return: {return_stat_scalar},
    """
    if y is None:
        waslist, [x, ] = vaex.utils.listify(x)
    else:
        waslist, [x, y] = vaex.utils.listify(x, y)
        x = list(zip(x, y))
        if mi_limits:
            mi_limits = [mi_limits]
    limits = self.limits(binby, limits, delay=True)
    mi_limits = self.limits(x, mi_limits, delay=True)

    @delayed
    def calculate(counts):
        # TODO: mutual information doesn't take axis arguments, so ugly solution for now
        counts = counts.astype(np.float64)
        fullshape = _expand_shape(shape, len(binby))
        out = np.zeros((fullshape), dtype=float)
        if len(fullshape) == 0:
            out = vaex.kld.mutual_information(counts)
        elif len(fullshape) == 1:
            for i in range(fullshape[0]):
                out[i] = vaex.kld.mutual_information(counts[..., i])
        elif len(fullshape) == 2:
            for i in range(fullshape[0]):
                for j in range(fullshape[1]):
                    out[i, j] = vaex.kld.mutual_information(counts[..., i, j])
        elif len(fullshape) == 3:
            for i in range(fullshape[0]):
                for j in range(fullshape[1]):
                    for k in range(fullshape[2]):
                        out[i, j, k] = vaex.kld.mutual_information(counts[..., i, j, k])
        else:
            raise ValueError("binby with dim > 3 is not yet supported")
        return out

    @delayed
    def has_limits(limits, mi_limits):
        if not _issequence(binby):
            limits = [list(limits)]
        values = []
        for expressions, expression_limits in zip(x, mi_limits):
            total_shape = _expand_shape(mi_shape, len(expressions)) + _expand_shape(shape, len(binby))
            counts = self.count(binby=list(expressions) + list(binby), limits=list(expression_limits) + list(limits),
                                shape=total_shape, delay=True, selection=selection)
            values.append(calculate(counts))
        return values

    @delayed
    def finish(mi_list):
        if sort:
            mi_list = np.array(mi_list)
            indices = np.argsort(mi_list)[::-1]
            sorted_x = list([x[k] for k in indices])
            return mi_list[indices], sorted_x
        else:
            return np.array(vaex.utils.unlistify(waslist, mi_list))
    values = finish(delayed_list(has_limits(limits, mi_limits)))
    return self._delay(delay, values)
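What vaex.kld.mutual_information computes from a counts grid is, by the standard definition, the sum over p(x,y) log(p(x,y)/(p(x)p(y))); a numpy sketch of that textbook formula (not vaex's optimized implementation):

import numpy as np

def mutual_information_from_counts(counts):
    """MI = sum_xy p(x,y) * log(p(x,y) / (p(x) p(y))), taking 0*log(0) = 0."""
    p_xy = counts / counts.sum()
    p_x = p_xy.sum(axis=1, keepdims=True)
    p_y = p_xy.sum(axis=0, keepdims=True)
    with np.errstate(divide='ignore', invalid='ignore'):
        terms = p_xy * np.log(p_xy / (p_x * p_y))
    return np.nansum(terms)  # nansum drops the 0*log(0) bins

# independent variables -> counts factorize -> MI is 0
counts = np.outer([10, 20, 30], [5, 5]).astype(float)
print(mutual_information_from_counts(counts))  # ~0.0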
Count the number of non-NaN values (or all rows, if expression is None or "*").
def count(self, expression=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None):
    """Count the number of non-NaN values (or all rows, if expression is None or "*").

    Example:

    >>> df.count()
    330000
    >>> df.count("*")
    330000.0
    >>> df.count("*", binby=["x"], shape=4)
    array([  10925.,  155427.,  152007.,   10748.])

    :param expression: Expression or column for which to count non-missing values, or None or '*' for counting the rows
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param edges: {edges}
    :return: {return_stat_scalar}
    """
    return self._compute_agg('count', expression, binby, limits, shape, selection, delay, edges, progress)
Return the first element of a binned expression, where the values in each bin are sorted by order_expression.
def first(self, expression, order_expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None):
    """Return the first element of a binned expression, where the values in each bin are sorted by order_expression.

    Example:

    >>> import vaex
    >>> df = vaex.example()
    >>> df.first(df.x, df.y, shape=8)
    >>> df.first(df.x, df.y, shape=8, binby=[df.y])
    >>> df.first(df.x, df.y, shape=8, binby=[df.y])
    array([-4.81883764, 11.65378   ,  9.70084476, -7.3025589 ,  4.84954977,
            8.47446537, -5.73602629, 10.18783   ])

    :param expression: The value to be placed in the bin.
    :param order_expression: Order the values in the bins by this expression.
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param edges: {edges}
    :return: Ndarray containing the first elements.
    :rtype: numpy.array
    """
    return self._compute_agg('first', expression, binby, limits, shape, selection, delay, edges, progress,
                             extra_expressions=[order_expression])
Calculate the mean for expression, possibly on a grid defined by binby.
def mean(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False):
    """Calculate the mean for expression, possibly on a grid defined by binby.

    Example:

    >>> df.mean("x")
    -0.067131491264005971
    >>> df.mean("(x**2+y**2)**0.5", binby="E", shape=4)
    array([  2.43483742,   4.41840721,   8.26742458,  15.53846476])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}
    """
    return self._compute_agg('mean', expression, binby, limits, shape, selection, delay, edges, progress)
Calculate the sum for the given expression, possibly on a grid defined by binby
def sum(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False):
    """Calculate the sum for the given expression, possibly on a grid defined by binby.

    Example:

    >>> df.sum("L")
    304054882.49378014
    >>> df.sum("L", binby="E", shape=4)
    array([  8.83517994e+06,   5.92217598e+07,   9.55218726e+07,
             1.40008776e+08])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}
    """
    return self._compute_agg('sum', expression, binby, limits, shape, selection, delay, edges, progress)
Calculate the standard deviation for the given expression, possibly on a grid defined by binby
def std(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Calculate the standard deviation for the given expression, possibly on a grid defined by binby.

    >>> df.std("vz")
    110.31773397535071
    >>> df.std("vz", binby=["(x**2+y**2)**0.5"], shape=4)
    array([ 123.57954851,   85.35190177,   61.14345748,   38.0740619 ])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}
    """
    @delayed
    def finish(var):
        return var**0.5
    return self._delay(delay, finish(self.var(expression, binby=binby, limits=limits, shape=shape,
                                              selection=selection, delay=True, progress=progress)))
Calculate the covariance cov[x,y] between x and y, possibly on a grid defined by binby.
def covar(self, x, y, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Calculate the covariance cov[x,y] between x and y, possibly on a grid defined by binby.

    Example:

    >>> df.covar("x**2+y**2+z**2", "-log(-E+1)")
    array(52.69461456005138)
    >>> df.covar("x**2+y**2+z**2", "-log(-E+1)")/(df.std("x**2+y**2+z**2") * df.std("-log(-E+1)"))
    0.63666373822156686
    >>> df.covar("x**2+y**2+z**2", "-log(-E+1)", binby="Lz", shape=4)
    array([ 10.17387143,  51.94954078,  51.24902796,  20.2163929 ])

    :param x: {expression}
    :param y: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}
    """
    @delayed
    def cov(mean_x, mean_y, mean_xy):
        return mean_xy - mean_x * mean_y

    waslist, [xlist, ylist] = vaex.utils.listify(x, y)
    limits = self.limits(binby, limits, selection=selection, delay=True)

    @delayed
    def calculate(limits):
        results = []
        for x, y in zip(xlist, ylist):
            mx = self.mean(x, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)
            my = self.mean(y, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)
            cxy = self.mean("(%s)*(%s)" % (x, y), binby=binby, limits=limits, shape=shape, selection=selection,
                            delay=True, progress=progressbar)
            results.append(cov(mx, my, cxy))
        return results

    progressbar = vaex.utils.progressbars(progress)
    covars = calculate(limits)

    @delayed
    def finish(covars):
        value = np.array(vaex.utils.unlistify(waslist, covars))
        return value
    return self._delay(delay, finish(delayed_list(covars)))
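The cov helper above uses the identity cov[x,y] = E[xy] - E[x]E[y]; a quick numpy check against the population covariance:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
y = 0.5 * x + rng.normal(size=1000)

lhs = np.mean(x * y) - np.mean(x) * np.mean(y)  # E[xy] - E[x]E[y]
rhs = np.cov(x, y, bias=True)[0, 1]             # population covariance
assert np.isclose(lhs, rhs)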
Calculate the correlation coefficient cov[x,y]/(std[x]*std[y]) between x and y, possibly on a grid defined by binby.
def correlation(self, x, y=None, binby=[], limits=None, shape=default_shape, sort=False, sort_key=np.abs, selection=False, delay=False, progress=None):
    """Calculate the correlation coefficient cov[x,y]/(std[x]*std[y]) between x and y, possibly on a grid defined by binby.

    Example:

    >>> df.correlation("x**2+y**2+z**2", "-log(-E+1)")
    array(0.6366637382215669)
    >>> df.correlation("x**2+y**2+z**2", "-log(-E+1)", binby="Lz", shape=4)
    array([ 0.40594394,  0.69868851,  0.61394099,  0.65266318])

    :param x: {expression}
    :param y: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}
    """
    @delayed
    def corr(cov):
        with np.errstate(divide='ignore', invalid='ignore'):  # these are fine, we are ok with nan's in vaex
            return cov[..., 0, 1] / (cov[..., 0, 0] * cov[..., 1, 1])**0.5

    if y is None:
        if not isinstance(x, (tuple, list)):
            raise ValueError("if y not given, x is expected to be a list or tuple, not %r" % x)
        if _issequence(x) and not _issequence(x[0]) and len(x) == 2:
            x = [x]
        if not (_issequence(x) and all([_issequence(k) and len(k) == 2 for k in x])):
            raise ValueError("if y not given, x is expected to be a list of lists with length 2, not %r" % x)
        waslist = True
        xlist, ylist = zip(*x)
    else:
        waslist, [xlist, ylist] = vaex.utils.listify(x, y)
    limits = self.limits(binby, limits, selection=selection, delay=True)

    @delayed
    def calculate(limits):
        results = []
        for x, y in zip(xlist, ylist):
            task = self.cov(x, y, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)
            results.append(corr(task))
        return results

    progressbar = vaex.utils.progressbars(progress)
    correlations = calculate(limits)

    @delayed
    def finish(correlations):
        if sort:
            correlations = np.array(correlations)
            indices = np.argsort(sort_key(correlations) if sort_key else correlations)[::-1]
            sorted_x = list([x[k] for k in indices])
            return correlations[indices], sorted_x
        value = np.array(vaex.utils.unlistify(waslist, correlations))
        return value
    return self._delay(delay, finish(delayed_list(correlations)))
Calculate the covariance matrix for x and y or more expressions, possibly on a grid defined by binby.
def cov(self, x, y=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Calculate the covariance matrix for x and y or more expressions, possibly on a grid defined by binby.

    Either x and y are expressions, e.g.:

    >>> df.cov("x", "y")

    Or only the x argument is given with a list of expressions, e.g.:

    >>> df.cov(["x", "y", "z"])

    Example:

    >>> df.cov("x", "y")
    array([[ 53.54521742,  -3.8123135 ],
           [ -3.8123135 ,  60.62257881]])
    >>> df.cov(["x", "y", "z"])
    array([[ 53.54521742,  -3.8123135 ,  -0.98260511],
           [ -3.8123135 ,  60.62257881,   1.21381057],
           [ -0.98260511,   1.21381057,  25.55517638]])
    >>> df.cov("x", "y", binby="E", shape=2)
    array([[[  9.74852878e+00,  -3.02004780e-02],
            [ -3.02004780e-02,   9.99288215e+00]],
           [[  8.43996546e+01,  -6.51984181e+00],
            [ -6.51984181e+00,   9.68938284e+01]]])

    :param x: {expression}
    :param y: {expression_single}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :return: {return_stat_scalar}, the last dimensions are of shape (2,2)
    """
    selection = _ensure_strings_from_expressions(selection)
    if y is None:
        if not _issequence(x):
            raise ValueError("if y argument is not given, x is expected to be sequence, not %r" % x)
        expressions = x
    else:
        expressions = [x, y]
    N = len(expressions)
    binby = _ensure_list(binby)
    shape = _expand_shape(shape, len(binby))
    progressbar = vaex.utils.progressbars(progress)
    limits = self.limits(binby, limits, selection=selection, delay=True)

    @delayed
    def calculate(expressions, limits):
        task = tasks.TaskStatistic(self, binby, shape, limits, weights=expressions, op=tasks.OP_COV, selection=selection)
        self.executor.schedule(task)
        progressbar.add_task(task, "covariance values for %r" % expressions)
        return task

    @delayed
    def finish(values):
        N = len(expressions)
        counts = values[..., :N]
        sums = values[..., N:2 * N]
        with np.errstate(divide='ignore', invalid='ignore'):
            means = sums / counts
        # matrix of means * means.T
        meansxy = means[..., None] * means[..., None, :]
        counts = values[..., 2 * N:2 * N + N**2]
        sums = values[..., 2 * N + N**2:]
        shape = counts.shape[:-1] + (N, N)
        counts = counts.reshape(shape)
        sums = sums.reshape(shape)
        with np.errstate(divide='ignore', invalid='ignore'):
            moments2 = sums / counts
        cov_matrix = moments2 - meansxy
        return cov_matrix

    values = calculate(expressions, limits)
    cov_matrix = finish(values)
    return self._delay(delay, cov_matrix)
Calculate the minimum and maximum for expressions, possibly on a grid defined by binby.
def minmax(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Calculate the minimum and maximum for expressions, possibly on a grid defined by binby.

    Example:

    >>> df.minmax("x")
    array([-128.293991,  271.365997])
    >>> df.minmax(["x", "y"])
    array([[-128.293991 ,  271.365997 ],
           [ -71.5523682,  146.465836 ]])
    >>> df.minmax("x", binby="x", shape=5, limits=[-10, 10])
    array([[-9.99919128, -6.00010443],
           [-5.99972439, -2.00002384],
           [-1.99991322,  1.99998057],
           [ 2.0000093 ,  5.99983597],
           [ 6.0004878 ,  9.99984646]])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}, the last dimension is of shape (2)
    """
    @delayed
    def calculate(expression, limits):
        task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_MIN_MAX, selection=selection)
        self.executor.schedule(task)
        progressbar.add_task(task, "minmax for %s" % expression)
        return task

    @delayed
    def finish(*minmax_list):
        value = vaex.utils.unlistify(waslist, np.array(minmax_list))
        value = value.astype(dtype0)
        return value

    expression = _ensure_strings_from_expressions(expression)
    binby = _ensure_strings_from_expressions(binby)
    waslist, [expressions, ] = vaex.utils.listify(expression)
    dtypes = [self.dtype(expr) for expr in expressions]
    dtype0 = dtypes[0]
    if not all([k.kind == dtype0.kind for k in dtypes]):
        raise ValueError("cannot mix datetime and non-datetime expressions")
    progressbar = vaex.utils.progressbars(progress, name="minmaxes")
    limits = self.limits(binby, limits, selection=selection, delay=True)
    all_tasks = [calculate(expression, limits) for expression in expressions]
    result = finish(*all_tasks)
    return self._delay(delay, result)
Calculate the minimum for given expressions, possibly on a grid defined by binby.
def min(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False):
    """Calculate the minimum for given expressions, possibly on a grid defined by binby.

    Example:

    >>> df.min("x")
    array(-128.293991)
    >>> df.min(["x", "y"])
    array([-128.293991 ,  -71.5523682])
    >>> df.min("x", binby="x", shape=5, limits=[-10, 10])
    array([-9.99919128, -5.99972439, -1.99991322,  2.0000093 ,  6.0004878 ])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}
    """
    return self._compute_agg('min', expression, binby, limits, shape, selection, delay, edges, progress)
Calculate the median, possibly on a grid defined by binby.
def median_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=256, percentile_limits="minmax", selection=False, delay=False):
        """Calculate the median, possibly on a grid defined by binby.

        NOTE: this value is approximated by calculating the cumulative distribution on a grid defined
        by percentile_shape and percentile_limits

        :param expression: {expression}
        :param binby: {binby}
        :param limits: {limits}
        :param shape: {shape}
        :param percentile_limits: {percentile_limits}
        :param percentile_shape: {percentile_shape}
        :param selection: {selection}
        :param delay: {delay}
        :return: {return_stat_scalar}
        """
        return self.percentile_approx(expression, 50, binby=binby, limits=limits, shape=shape, percentile_shape=percentile_shape, percentile_limits=percentile_limits, selection=selection, delay=delay)
Calculate the percentile given by percentage, possibly on a grid defined by binby.
def percentile_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=1024, percentile_limits="minmax", selection=False, delay=False):
        """Calculate the percentile given by percentage, possibly on a grid defined by binby.

        NOTE: this value is approximated by calculating the cumulative distribution on a grid defined
        by percentile_shape and percentile_limits.


        Example:

        >>> df.percentile_approx("x", 10), df.percentile_approx("x", 90)
        (array([-8.3220355]), array([ 7.92080358]))
        >>> df.percentile_approx("x", 50, binby="x", shape=5, limits=[-10, 10])
        array([[-7.56462982],
               [-3.61036641],
               [-0.01296306],
               [ 3.56697863],
               [ 7.45838367]])

        :param expression: {expression}
        :param binby: {binby}
        :param limits: {limits}
        :param shape: {shape}
        :param percentile_limits: {percentile_limits}
        :param percentile_shape: {percentile_shape}
        :param selection: {selection}
        :param delay: {delay}
        :return: {return_stat_scalar}
        """
        waslist, [expressions, ] = vaex.utils.listify(expression)
        if not isinstance(binby, (tuple, list)):
            binby = [binby]

        @delayed
        def calculate(expression, shape, limits):
            return self.count(binby=list(binby) + [expression], shape=shape, limits=limits, selection=selection, delay=True, edges=True)

        @delayed
        def finish(percentile_limits, counts_list):
            results = []
            for i, counts in enumerate(counts_list):
                counts = counts.astype(np.float)
                # remove the nan and boundary edges from the first dimension,
                nonnans = list([slice(2, -1, None) for k in range(len(counts.shape) - 1)])
                nonnans.append(slice(1, None, None))  # we only get rid of the nan's, and keep the overflow edges
                nonnans = tuple(nonnans)
                cumulative_grid = np.cumsum(counts.__getitem__(nonnans), -1)  # convert to cumulative grid

                totalcounts = np.sum(counts.__getitem__(nonnans), -1)
                empty = totalcounts == 0

                original_shape = counts.shape
                shape = cumulative_grid.shape

                edges_floor = np.zeros(shape[:-1] + (2,), dtype=np.int64)
                edges_ceil = np.zeros(shape[:-1] + (2,), dtype=np.int64)
                # if we have an odd # of elements, say, N=3, the center is at i=1=(N-1)/2
                # if we have an even # of elements, say, N=4, the center is between i=1=(N-2)/2 and i=2=(N/2)
                values = np.array((totalcounts + 1) * percentage / 100.)  # make sure it's an ndarray
                values[empty] = 0
                floor_values = np.array(np.floor(values))
                ceil_values = np.array(np.ceil(values))
                vaex.vaexfast.grid_find_edges(cumulative_grid, floor_values, edges_floor)
                vaex.vaexfast.grid_find_edges(cumulative_grid, ceil_values, edges_ceil)

                def index_choose(a, indices):
                    # alternative to np.choose, which doesn't like the last dim to be >= 32
                    out = np.zeros(a.shape[:-1])
                    for i in np.ndindex(out.shape):
                        out[i] = a[i + (indices[i],)]
                    return out

                def calculate_x(edges, values):
                    left, right = edges[..., 0], edges[..., 1]
                    left_value = index_choose(cumulative_grid, left)
                    right_value = index_choose(cumulative_grid, right)
                    u = np.array((values - left_value) / (right_value - left_value))
                    # TODO: should it really be -3? not -2
                    xleft, xright = percentile_limits[i][0] + (left - 0.5) * (percentile_limits[i][1] - percentile_limits[i][0]) / (shape[-1] - 3),\
                        percentile_limits[i][0] + (right - 0.5) * (percentile_limits[i][1] - percentile_limits[i][0]) / (shape[-1] - 3)
                    x = xleft + (xright - xleft) * u
                    return x

                x1 = calculate_x(edges_floor, floor_values)
                x2 = calculate_x(edges_ceil, ceil_values)
                u = values - floor_values
                x = x1 + (x2 - x1) * u
                results.append(x)

            return results

        shape = _expand_shape(shape, len(binby))
        percentile_shapes = _expand_shape(percentile_shape, len(expressions))
        if percentile_limits:
            percentile_limits = _expand_limits(percentile_limits, len(expressions))
        limits = self.limits(binby, limits, selection=selection, delay=True)
        percentile_limits = self.limits(expressions, percentile_limits, selection=selection, delay=True)

        @delayed
        def calculation(limits, percentile_limits):
            tasks = [calculate(expression, tuple(shape) + (percentile_shape, ), list(limits) + [list(percentile_limit)])
                     for percentile_shape, percentile_limit, expression
                     in zip(percentile_shapes, percentile_limits, expressions)]
            return finish(percentile_limits, delayed_args(*tasks))
        result = calculation(limits, percentile_limits)

        @delayed
        def finish2(grid):
            value = vaex.utils.unlistify(waslist, np.array(grid))
            return value
        return self._delay(delay, finish2(result))
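Stripped of the grid bookkeeping, the core of finish above is inverting an empirical CDF built from a histogram, with linear interpolation between bin edges. A self-contained numpy sketch of that step (not vaex API, just the idea):

import numpy as np

def percentile_from_histogram(counts, edges, percentage):
    # cumulative counts on the bin edges form an empirical CDF
    cdf = np.concatenate([[0], np.cumsum(counts)]).astype(float)
    cdf /= cdf[-1]
    # invert the CDF by linear interpolation, like percentile_approx does per bin
    return np.interp(percentage / 100., cdf, edges)

data = np.random.normal(size=100_000)
counts, edges = np.histogram(data, bins=1024)
approx = percentile_from_histogram(counts, edges, 50)
assert abs(approx - np.percentile(data, 50)) < 0.05  # accuracy is limited by the bin width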
Calculate the [min, max] range for expression, containing approximately a percentage of the data as defined by percentage.
def limits_percentage(self, expression, percentage=99.73, square=False, delay=False):
        """Calculate the [min, max] range for expression, containing approximately a percentage of the data as defined by percentage.

        The range is symmetric around the median, i.e., for a percentage of 90, this gives the same results as:

        Example:

        >>> df.limits_percentage("x", 90)
        array([-12.35081376,  12.14858052])
        >>> df.percentile_approx("x", 5), df.percentile_approx("x", 95)
        (array([-12.36813152]), array([ 12.13275818]))

        NOTE: this value is approximated by calculating the cumulative distribution on a grid.
        NOTE 2: The values above are not exactly the same, since percentile and limits_percentage do not share the same code

        :param expression: {expression_limits}
        :param float percentage: Value between 0 and 100
        :param delay: {delay}
        :return: {return_limits}
        """
        import scipy
        logger.info("limits_percentage for %r, with percentage=%r", expression, percentage)
        waslist, [expressions, ] = vaex.utils.listify(expression)
        limits = []
        for expr in expressions:
            subspace = self(expr)
            limits_minmax = subspace.minmax()
            vmin, vmax = limits_minmax[0]
            size = 1024 * 16
            counts = subspace.histogram(size=size, limits=limits_minmax)
            cumcounts = np.concatenate([[0], np.cumsum(counts)])
            cumcounts /= cumcounts.max()
            # TODO: this is crude.. see the details!
            f = (1 - percentage / 100.) / 2
            x = np.linspace(vmin, vmax, size + 1)
            l = scipy.interp([f, 1 - f], cumcounts, x)
            limits.append(l)
        return vaex.utils.unlistify(waslist, limits)
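Since the interval is symmetric around the median, each tail holds (100 - percentage)/2 of the data; for the default 99.73% that is roughly the +/-3 sigma range of a Gaussian column:

>>> df.limits_percentage("x")                          # ~[-3 sigma, +3 sigma] for Gaussian data
>>> df.limits_percentage(["x", "y"], percentage=90)    # per-expression [min, max] pairs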
Calculate the [min, max] range for expression, as described by value, which is '99.73%' by default.
def limits(self, expression, value=None, square=False, selection=None, delay=False, shape=None):
        """Calculate the [min, max] range for expression, as described by value, which is '99.73%' by default.

        If value is a list of the form [minvalue, maxvalue], it is simply returned, this is for convenience when using mixed
        forms.

        Example:

        >>> df.limits("x")
        array([-28.86381927,  28.9261226 ])
        >>> df.limits(["x", "y"])
        (array([-28.86381927,  28.9261226 ]), array([-28.60476934,  28.96535249]))
        >>> df.limits(["x", "y"], "minmax")
        (array([-128.293991,  271.365997]), array([ -71.5523682,  146.465836 ]))
        >>> df.limits(["x", "y"], ["minmax", "90%"])
        (array([-128.293991,  271.365997]), array([-13.37438402,  13.4224423 ]))
        >>> df.limits(["x", "y"], ["minmax", [0, 10]])
        (array([-128.293991,  271.365997]), [0, 10])

        :param expression: {expression_limits}
        :param value: {limits}
        :param selection: {selection}
        :param delay: {delay}
        :return: {return_limits}
        """
        if expression == []:
            return [] if shape is None else ([], [])
        waslist, [expressions, ] = vaex.utils.listify(expression)
        expressions = _ensure_strings_from_expressions(expressions)
        selection = _ensure_strings_from_expressions(selection)
        if value is None:
            value = "99.73%"
        if _is_limit(value) or not _issequence(value):
            values = (value,) * len(expressions)
        else:
            values = value

        initial_expressions, initial_values = expressions, values
        expression_values = dict()
        expression_shapes = dict()
        for i, (expression, value) in enumerate(zip(expressions, values)):
            if _issequence(expression):
                expressions = expression
                nested = True
            else:
                expressions = [expression]
                nested = False
            if _is_limit(value) or not _issequence(value):
                values = (value,) * len(expressions)
            else:
                values = value
            for j, (expression, value) in enumerate(zip(expressions, values)):
                if shape is not None:
                    if _issequence(shape):
                        shapes = shape
                    else:
                        shapes = (shape, ) * (len(expressions) if nested else len(initial_expressions))
                shape_index = j if nested else i

                if not _is_limit(value):
                    expression_values[(expression, value)] = None
                if self.is_category(expression):
                    N = self._categories[_ensure_string_from_expression(expression)]['N']
                    expression_shapes[expression] = min(N, shapes[shape_index] if shape is not None else default_shape)
                else:
                    expression_shapes[expression] = shapes[shape_index] if shape is not None else default_shape

        limits_list = []
        for expression, value in expression_values.keys():
            if self.is_category(expression):
                N = self._categories[_ensure_string_from_expression(expression)]['N']
                limits = [-0.5, N-0.5]
            else:
                if isinstance(value, six.string_types):
                    if value == "minmax":
                        limits = self.minmax(expression, selection=selection, delay=True)
                    else:
                        match = re.match(r"([\d.]*)(\D*)", value)
                        if match is None:
                            raise ValueError("do not understand limit specifier %r, examples are 90%%, 3sigma" % value)
                        else:
                            number, type = match.groups()
                            import ast
                            number = ast.literal_eval(number)
                            type = type.strip()
                            if type in ["s", "sigma"]:
                                limits = self.limits_sigma(number)
                            elif type in ["ss", "sigmasquare"]:
                                limits = self.limits_sigma(number, square=True)
                            elif type in ["%", "percent"]:
                                limits = self.limits_percentage(expression, number, delay=False)
                            elif type in ["%s", "%square", "percentsquare"]:
                                limits = self.limits_percentage(expression, number, square=True, delay=True)
                elif value is None:
                    limits = self.limits_percentage(expression, square=square, delay=True)
                else:
                    limits = value
            limits_list.append(limits)
            if limits is None:
                raise ValueError("limit %r not understood" % value)
            expression_values[(expression, value)] = limits
            logger.debug("!!!!!!!!!! limits: %r %r", limits, np.array(limits).shape)

            @delayed
            def echo(limits):
                logger.debug(">>>>>>>> limits: %r %r", limits, np.array(limits).shape)
            echo(limits)

        limits_list = delayed_args(*limits_list)

        @delayed
        def finish(limits_list):
            limits_outer = []
            shapes_list = []
            for expression, value in zip(initial_expressions, initial_values):
                if _issequence(expression):
                    expressions = expression
                    waslist2 = True
                else:
                    expressions = [expression]
                    waslist2 = False
                if _is_limit(value) or not _issequence(value):
                    values = (value,) * len(expressions)
                else:
                    values = value
                limits = []
                shapes = []
                for expression, value in zip(expressions, values):
                    if not _is_limit(value):
                        value = expression_values[(expression, value)]
                        if not _is_limit(value):
                            value = value.get()
                    limits.append(value)
                    shapes.append(expression_shapes[expression])
                if waslist2:
                    limits_outer.append(limits)
                    shapes_list.append(shapes)
                else:
                    limits_outer.append(limits[0])
                    shapes_list.append(shapes[0])
            if shape:
                return vaex.utils.unlistify(waslist, limits_outer), vaex.utils.unlistify(waslist, shapes_list)
            else:
                return vaex.utils.unlistify(waslist, limits_outer)
        return self._delay(delay, finish(limits_list))
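When shape is passed, limits also returns the per-expression shapes (with categorical columns capped at their category count), which is how plotting code gets both in one call; a usage sketch with hypothetical columns x and y:

>>> df.limits("x", "3s")                                # 3 sigma around the mean (via limits_sigma)
>>> df.limits(["x", "y"], ["minmax", "95%"])            # mixed specifiers
>>> limits, shapes = df.limits(["x", "y"], shape=128)   # limits plus bin counts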
Calculate or estimate the mode.
def mode(self, expression, binby=[], limits=None, shape=256, mode_shape=64, mode_limits=None, progressbar=False, selection=None): """Calculate/estimate the mode.""" if len(binby) == 0: raise ValueError("only supported with binby argument given") else: # todo, fix progressbar into two... try: len(shape) shape = tuple(shape) except: shape = len(binby) * (shape,) shape = (mode_shape,) + shape subspace = self(*(list(binby) + [expression])) if selection: subspace = subspace.selected() limits = self.limits(list(binby), limits) mode_limits = self.limits([expression], mode_limits) limits = list(limits) + list(mode_limits) counts = subspace.histogram(limits=limits, size=shape, progressbar=progressbar) indices = np.argmax(counts, axis=0) pmin, pmax = limits[-1] centers = np.linspace(pmin, pmax, mode_shape + 1)[:-1] # ignore last bin centers += (centers[1] - centers[0]) / 2 # and move half a bin to the right modes = centers[indices] ok = counts.sum(axis=0) > 0 modes[~ok] = np.nan return modes
Viz 1d, 2d or 3d in a Jupyter notebook
def plot_widget(self, x, y, z=None, grid=None, shape=256, limits=None, what="count(*)", figsize=None, f="identity", figure_key=None, fig=None, axes=None, xlabel=None, ylabel=None, title=None, show=True, selection=[None, True], colormap="afmhot", grid_limits=None, normalize="normalize", grid_before=None, what_kwargs={}, type="default", scales=None, tool_select=False, bq_cleanup=True, backend="bqplot", **kwargs): """Viz 1d, 2d or 3d in a Jupyter notebook .. note:: This API is not fully settled and may change in the future Example: >>> df.plot_widget(df.x, df.y, backend='bqplot') >>> df.plot_widget(df.pickup_longitude, df.pickup_latitude, backend='ipyleaflet') :param backend: Widget backend to use: 'bqplot', 'ipyleaflet', 'ipyvolume', 'matplotlib' """ import vaex.jupyter.plot backend = vaex.jupyter.plot.create_backend(backend) cls = vaex.jupyter.plot.get_type(type) x = _ensure_strings_from_expressions(x) y = _ensure_strings_from_expressions(y) z = _ensure_strings_from_expressions(z) for name in 'vx vy vz'.split(): if name in kwargs: kwargs[name] = _ensure_strings_from_expressions(kwargs[name]) plot2d = cls(backend=backend, dataset=self, x=x, y=y, z=z, grid=grid, shape=shape, limits=limits, what=what, f=f, figure_key=figure_key, fig=fig, selection=selection, grid_before=grid_before, grid_limits=grid_limits, normalize=normalize, colormap=colormap, what_kwargs=what_kwargs, **kwargs) if show: plot2d.show() return plot2d
Count non-missing values for expression on an array which represents healpix data.
def healpix_count(self, expression=None, healpix_expression=None, healpix_max_level=12, healpix_level=8, binby=None, limits=None, shape=default_shape, delay=False, progress=None, selection=None):
        """Count non-missing values for expression on an array which represents healpix data.

        :param expression: Expression or column for which to count non-missing values, or None or '*' for counting the rows
        :param healpix_expression: {healpix_expression}
        :param healpix_max_level: {healpix_max_level}
        :param healpix_level: {healpix_level}
        :param binby: {binby}, these dimensions follow the first healpix dimension.
        :param limits: {limits}
        :param shape: {shape}
        :param selection: {selection}
        :param delay: {delay}
        :param progress: {progress}
        :return:
        """
        import healpy as hp
        if healpix_expression is None:
            if self.ucds.get("source_id", None) == 'meta.id;meta.main':  # we now assume we have gaia data
                healpix_expression = "source_id/34359738368"

        if healpix_expression is None:
            raise ValueError("no healpix_expression given, and was unable to guess")

        reduce_level = healpix_max_level - healpix_level
        NSIDE = 2**healpix_level
        nmax = hp.nside2npix(NSIDE)
        scaling = 4**reduce_level
        expr = "%s/%s" % (healpix_expression, scaling)
        binby = [expr] + ([] if binby is None else _ensure_list(binby))
        shape = (nmax,) + _expand_shape(shape, len(binby) - 1)
        epsilon = 1. / scaling / 2
        limits = [[-epsilon, nmax - epsilon]] + ([] if limits is None else limits)
        return self.count(expression, binby=binby, limits=limits, shape=shape, delay=delay, progress=progress, selection=selection)
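The level reduction works because of nested healpix indexing: integer-dividing a nested index by 4**reduce_level maps every pixel at healpix_max_level onto its ancestor pixel at healpix_level. A small standalone check of that arithmetic (requires healpy; the direction is arbitrary):

import healpy as hp
import numpy as np

healpix_max_level, healpix_level = 12, 8
reduce_level = healpix_max_level - healpix_level
theta, phi = np.radians(30.), np.radians(45.)  # (colatitude, longitude)
fine = hp.ang2pix(2**healpix_max_level, theta, phi, nest=True)
coarse = hp.ang2pix(2**healpix_level, theta, phi, nest=True)
assert fine // 4**reduce_level == coarse  # nested parent/child relation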
Viz data in 2d using a healpix column.
def healpix_plot(self, healpix_expression="source_id/34359738368", healpix_max_level=12, healpix_level=8, what="count(*)", selection=None, grid=None, healpix_input="equatorial", healpix_output="galactic", f=None, colormap="afmhot", grid_limits=None, image_size=800, nest=True, figsize=None, interactive=False, title="", smooth=None, show=False, colorbar=True, rotation=(0, 0, 0), **kwargs):
        """Viz data in 2d using a healpix column.

        :param healpix_expression: {healpix_expression}
        :param healpix_max_level: {healpix_max_level}
        :param healpix_level: {healpix_level}
        :param what: {what}
        :param selection: {selection}
        :param grid: {grid}
        :param healpix_input: Specify if the healpix index is in "equatorial", "galactic" or "ecliptic".
        :param healpix_output: Plot in "equatorial", "galactic" or "ecliptic".
        :param f: function to apply to the data
        :param colormap: matplotlib colormap
        :param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the min/max). (default is [min(f(grid)), max(f(grid))])
        :param image_size: size for the image that healpy uses for rendering
        :param nest: If the healpix data is in nested (True) or ring (False)
        :param figsize: If given, modify the matplotlib figure size. Example (14,9)
        :param interactive: (Experimental, uses healpy.mollzoom if True)
        :param title: Title of figure
        :param smooth: apply gaussian smoothing, in degrees
        :param show: Call matplotlib's show (True) or not (False, default)
        :param rotation: Rotate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees.
        :return:
        """
        import healpy as hp
        import pylab as plt
        if grid is None:
            reduce_level = healpix_max_level - healpix_level
            NSIDE = 2**healpix_level
            nmax = hp.nside2npix(NSIDE)
            scaling = 4**reduce_level
            epsilon = 1. / scaling / 2
            grid = self._stat(what=what, binby="%s/%s" % (healpix_expression, scaling), limits=[-epsilon, nmax - epsilon], shape=nmax, selection=selection)
        if grid_limits:
            grid_min, grid_max = grid_limits
        else:
            grid_min = grid_max = None
        f_org = f
        f = _parse_f(f)
        if smooth:
            if nest:
                grid = hp.reorder(grid, inp="NEST", out="RING")
                nest = False
            # grid[np.isnan(grid)] = np.nanmean(grid)
            grid = hp.smoothing(grid, sigma=np.radians(smooth))
        fgrid = f(grid)
        coord_map = dict(equatorial='C', galactic='G', ecliptic="E")
        fig = plt.gcf()
        if figsize is not None:
            fig.set_size_inches(*figsize)
        what_label = what
        if f_org:
            what_label = f_org + " " + what_label
        f = hp.mollzoom if interactive else hp.mollview
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            coord = coord_map[healpix_input], coord_map[healpix_output]
            if coord_map[healpix_input] == coord_map[healpix_output]:
                coord = None
            f(fgrid, unit=what_label, rot=rotation, nest=nest, title=title, coord=coord,
              cmap=colormap, hold=True, xsize=image_size, min=grid_min, max=grid_max,
              cbar=colorbar, **kwargs)
        if show:
            plt.show()
Use at own risk, requires ipyvolume
def plot3d(self, x, y, z, vx=None, vy=None, vz=None, vwhat=None, limits=None, grid=None, what="count(*)", shape=128, selection=[None, True], f=None, vcount_limits=None, smooth_pre=None, smooth_post=None, grid_limits=None, normalize="normalize", colormap="afmhot", figure_key=None, fig=None, lighting=True, level=[0.1, 0.5, 0.9], opacity=[0.01, 0.05, 0.1], level_width=0.1, show=True, **kwargs): """Use at own risk, requires ipyvolume""" import vaex.ext.ipyvolume # vaex.ext.ipyvolume. cls = vaex.ext.ipyvolume.PlotDefault plot3d = cls(df=self, x=x, y=y, z=z, vx=vx, vy=vy, vz=vz, grid=grid, shape=shape, limits=limits, what=what, f=f, figure_key=figure_key, fig=fig, selection=selection, smooth_pre=smooth_pre, smooth_post=smooth_post, grid_limits=grid_limits, vcount_limits=vcount_limits, normalize=normalize, colormap=colormap, **kwargs) if show: plot3d.show() return plot3d
Gives direct access to the columns only (useful for tab completion).
def col(self):
        """Gives direct access to the columns only (useful for tab completion).

        Convenient when working with ipython in combination with small DataFrames, since this gives tab-completion.
        Columns can be accessed by their names, which are attributes. The attributes are currently expressions, so you can
        do computations with them.

        Example

        >>> df = vaex.example()
        >>> df.plot(df.col.x, df.col.y)

        """
        class ColumnList(object):
            pass
        data = ColumnList()
        for name in self.get_column_names():
            expression = getattr(self, name, None)
            if not isinstance(expression, Expression):
                expression = Expression(self, name)
            setattr(data, name, expression)
        return data
Return the size in bytes the whole DataFrame requires (or the selection), respecting the active_fraction.
def byte_size(self, selection=False, virtual=False):
        """Return the size in bytes the whole DataFrame requires (or the selection), respecting the active_fraction."""
        bytes_per_row = 0
        N = self.count(selection=selection)
        extra = 0
        for column in list(self.get_column_names(virtual=virtual)):
            dtype = self.dtype(column)
            dtype_internal = self.dtype(column, internal=True)
            if isinstance(self.columns[column], ColumnString):
                # TODO: document or fix this
                # is it too expensive to calculate this exactly?
                extra += self.columns[column].nbytes
            else:
                bytes_per_row += dtype_internal.itemsize
                if np.ma.isMaskedArray(self.columns[column]):
                    bytes_per_row += 1
        return bytes_per_row * N + extra
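For plain numeric columns the result is just the row count times the summed item sizes; a sketch with hypothetical columns (one float64, one int32):

>>> import numpy as np
>>> df = vaex.from_arrays(x=np.zeros(1000, np.float64), i=np.zeros(1000, np.int32))
>>> df.byte_size()   # 1000 rows * (8 + 4) bytes
12000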
Return the numpy dtype for the given expression; if not a column, the first row will be evaluated to get the dtype.
def dtype(self, expression, internal=False): """Return the numpy dtype for the given expression, if not a column, the first row will be evaluated to get the dtype.""" expression = _ensure_string_from_expression(expression) if expression in self.variables: return np.float64(1).dtype elif expression in self.columns.keys(): column = self.columns[expression] data = column[0:1] dtype = data.dtype else: data = self.evaluate(expression, 0, 1, filtered=False) dtype = data.dtype if not internal: if dtype != str_type: if dtype.kind in 'US': return str_type if dtype.kind == 'O': # we lie about arrays containing strings if isinstance(data[0], six.string_types): return str_type return dtype
Gives a Pandas series object containing all numpy dtypes of all columns (except hidden).
def dtypes(self): """Gives a Pandas series object containing all numpy dtypes of all columns (except hidden).""" from pandas import Series return Series({column_name:self.dtype(column_name) for column_name in self.get_column_names()})
Return if a column is a masked (numpy.ma) column.
def is_masked(self, column): '''Return if a column is a masked (numpy.ma) column.''' column = _ensure_string_from_expression(column) if column in self.columns: return np.ma.isMaskedArray(self.columns[column]) return False
Returns the unit (an astropy.units.Unit object) for the expression.
def unit(self, expression, default=None):
        """Returns the unit (an astropy.units.Unit object) for the expression.

        Example

        >>> import vaex
        >>> df = vaex.example()
        >>> df.unit("x")
        Unit("kpc")
        >>> df.unit("x*L")
        Unit("km kpc2 / s")


        :param expression: Expression, which can be a column name
        :param default: if no unit is known, it will return this
        :return: The resulting unit of the expression
        :rtype: astropy.units.Unit
        """
        expression = _ensure_string_from_expression(expression)
        try:
            # if an expression like pi * <some_expr> it will evaluate to a quantity instead of a unit
            unit_or_quantity = eval(expression, expression_namespace, scopes.UnitScope(self))
            unit = unit_or_quantity.unit if hasattr(unit_or_quantity, "unit") else unit_or_quantity
            return unit if isinstance(unit, astropy.units.Unit) else None
        except:
            # logger.exception("error evaluating unit expression: %s", expression)
            # astropy doesn't add units, so we try with a quantity
            try:
                return eval(expression, expression_namespace, scopes.UnitScope(self, 1.)).unit
            except:
                # logger.exception("error evaluating unit expression: %s", expression)
                return default
Find a set of columns (names) which have the ucd, or part of the ucd.
def ucd_find(self, ucds, exclude=[]):
        """Find a set of columns (names) which have the ucd, or part of the ucd.

        Prefixed with a ^, it will only match the first part of the ucd.

        Example

        >>> df.ucd_find(['pos.eq.ra', 'pos.eq.dec'])
        ['RA', 'DEC']
        >>> df.ucd_find(['pos.eq.ra', 'doesnotexist'])
        >>> df.ucds[df.ucd_find('pos.eq.ra')]
        'pos.eq.ra;meta.main'
        >>> df.ucd_find('meta.main')
        'dec'
        >>> df.ucd_find('^meta.main')

        """
        if isinstance(ucds, six.string_types):
            ucds = [ucds]
        if len(ucds) == 1:
            ucd = ucds[0]
            if ucd[0] == "^":  # we want it to start with
                ucd = ucd[1:]
                columns = [name for name in self.get_column_names() if self.ucds.get(name, "").startswith(ucd) and name not in exclude]
            else:
                columns = [name for name in self.get_column_names() if ucd in self.ucds.get(name, "") and name not in exclude]
            return None if len(columns) == 0 else columns[0]
        else:
            columns = [self.ucd_find([ucd], exclude=exclude) for ucd in ucds]
            return None if None in columns else columns
Each DataFrame has a directory where files are stored for metadata etc.
def get_private_dir(self, create=False):
        """Each DataFrame has a directory where files are stored for metadata etc.

        Example

        >>> import vaex
        >>> ds = vaex.example()
        >>> ds.get_private_dir()
        '/Users/users/breddels/.vaex/dfs/_Users_users_breddels_vaex-testing_data_helmi-dezeeuw-2000-10p.hdf5'

        :param bool create: if True, it will create the directory if it does not exist
        """
        if self.is_local():
            name = os.path.abspath(self.path).replace(os.path.sep, "_")[:250]  # should not be too long for most os'es
            name = name.replace(":", "_")  # for windows drive names
        else:
            server = self.server
            name = "%s_%s_%s_%s" % (server.hostname, server.port, server.base_path.replace("/", "_"), self.name)
        dir = os.path.join(vaex.utils.get_private_dir(), "dfs", name)
        if create and not os.path.exists(dir):
            os.makedirs(dir)
        return dir
Return the internal state of the DataFrame in a dictionary
def state_get(self): """Return the internal state of the DataFrame in a dictionary Example: >>> import vaex >>> df = vaex.from_scalars(x=1, y=2) >>> df['r'] = (df.x**2 + df.y**2)**0.5 >>> df.state_get() {'active_range': [0, 1], 'column_names': ['x', 'y', 'r'], 'description': None, 'descriptions': {}, 'functions': {}, 'renamed_columns': [], 'selections': {'__filter__': None}, 'ucds': {}, 'units': {}, 'variables': {}, 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}} """ virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys()) units = {key: str(value) for key, value in self.units.items()} ucds = {key: value for key, value in self.ucds.items() if key in virtual_names} descriptions = {key: value for key, value in self.descriptions.items()} import vaex.serialize def check(key, value): if not vaex.serialize.can_serialize(value.f): warnings.warn('Cannot serialize function for virtual column {} (use vaex.serialize.register)'.format(key)) return False return True def clean(value): return vaex.serialize.to_dict(value.f) functions = {key: clean(value) for key, value in self.functions.items() if check(key, value)} virtual_columns = {key: value for key, value in self.virtual_columns.items()} selections = {name: self.get_selection(name) for name, history in self.selection_histories.items()} selections = {name: selection.to_dict() if selection is not None else None for name, selection in selections.items()} # if selection is not None} state = dict(virtual_columns=virtual_columns, column_names=self.column_names, renamed_columns=self._renamed_columns, variables=self.variables, functions=functions, selections=selections, ucds=ucds, units=units, descriptions=descriptions, description=self.description, active_range=[self._index_start, self._index_end]) return state
Sets the internal state of the DataFrame
def state_set(self, state, use_active_range=False): """Sets the internal state of the df Example: >>> import vaex >>> df = vaex.from_scalars(x=1, y=2) >>> df # x y r 0 1 2 2.23607 >>> df['r'] = (df.x**2 + df.y**2)**0.5 >>> state = df.state_get() >>> state {'active_range': [0, 1], 'column_names': ['x', 'y', 'r'], 'description': None, 'descriptions': {}, 'functions': {}, 'renamed_columns': [], 'selections': {'__filter__': None}, 'ucds': {}, 'units': {}, 'variables': {}, 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}} >>> df2 = vaex.from_scalars(x=3, y=4) >>> df2.state_set(state) # now the virtual functions are 'copied' >>> df2 # x y r 0 3 4 5 :param state: dict as returned by :meth:`DataFrame.state_get`. :param bool use_active_range: Whether to use the active range or not. """ self.description = state['description'] if use_active_range: self._index_start, self._index_end = state['active_range'] self._length_unfiltered = self._index_end - self._index_start if 'renamed_columns' in state: for old, new in state['renamed_columns']: self._rename(old, new) for name, value in state['functions'].items(): self.add_function(name, vaex.serialize.from_dict(value)) if 'column_names' in state: # we clear all columns, and add them later on, since otherwise self[name] = ... will try # to rename the columns (which is unsupported for remote dfs) self.column_names = [] self.virtual_columns = collections.OrderedDict() for name, value in state['virtual_columns'].items(): self[name] = self._expr(value) # self._save_assign_expression(name) self.column_names = state['column_names'] else: # old behaviour self.virtual_columns = collections.OrderedDict() for name, value in state['virtual_columns'].items(): self[name] = self._expr(value) self.variables = state['variables'] import astropy # TODO: make this dep optional? units = {key: astropy.units.Unit(value) for key, value in state["units"].items()} self.units.update(units) for name, selection_dict in state['selections'].items(): # TODO: make selection use the vaex.serialize framework if selection_dict is None: selection = None else: selection = selections.selection_from_dict(selection_dict) self.set_selection(selection, name=name)
Load a state previously stored by :meth:`DataFrame.state_store`, see also :meth:`DataFrame.state_set`.
def state_load(self, f, use_active_range=False): """Load a state previously stored by :meth:`DataFrame.state_store`, see also :meth:`DataFrame.state_set`.""" state = vaex.utils.read_json_or_yaml(f) self.state_set(state, use_active_range=use_active_range)
Removes the file with the virtual columns etc.; it does not change the current virtual columns.
def remove_virtual_meta(self): """Removes the file with the virtual column etc, it does not change the current virtual columns etc.""" dir = self.get_private_dir(create=True) path = os.path.join(dir, "virtual_meta.yaml") try: if os.path.exists(path): os.remove(path) if not os.listdir(dir): os.rmdir(dir) except: logger.exception("error while trying to remove %s or %s", path, dir)
Writes virtual columns, variables and their ucd, description and units.
def write_virtual_meta(self):
        """Writes virtual columns, variables and their ucd, description and units.

        The default implementation is to write this to a file called virtual_meta.yaml in the directory defined by
        :func:`DataFrame.get_private_dir`. Other implementations may store this in the DataFrame file itself.

        This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_virtual_meta`
        is called, so that the information is not lost between sessions.

        Note: opening a DataFrame twice may result in corruption of this file.

        """
        path = os.path.join(self.get_private_dir(create=True), "virtual_meta.yaml")
        virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())
        units = {key: str(value) for key, value in self.units.items() if key in virtual_names}
        ucds = {key: value for key, value in self.ucds.items() if key in virtual_names}
        descriptions = {key: value for key, value in self.descriptions.items() if key in virtual_names}
        meta_info = dict(virtual_columns=self.virtual_columns,
                         variables=self.variables,
                         ucds=ucds, units=units, descriptions=descriptions)
        vaex.utils.write_json_or_yaml(path, meta_info)
Will read back the virtual columns etc. written by :func:`DataFrame.write_virtual_meta`. This will be done when opening a DataFrame.
def update_virtual_meta(self): """Will read back the virtual column etc, written by :func:`DataFrame.write_virtual_meta`. This will be done when opening a DataFrame.""" import astropy.units try: path = os.path.join(self.get_private_dir(create=False), "virtual_meta.yaml") if os.path.exists(path): meta_info = vaex.utils.read_json_or_yaml(path) if 'virtual_columns' not in meta_info: return self.virtual_columns.update(meta_info["virtual_columns"]) self.variables.update(meta_info["variables"]) self.ucds.update(meta_info["ucds"]) self.descriptions.update(meta_info["descriptions"]) units = {key: astropy.units.Unit(value) for key, value in meta_info["units"].items()} self.units.update(units) except: logger.exception("non fatal error")
Writes all meta data: ucd, description and units
def write_meta(self):
        """Writes all meta data: ucd, description and units.

        The default implementation is to write this to a file called meta.yaml in the directory defined by
        :func:`DataFrame.get_private_dir`. Other implementations may store this in the DataFrame file itself.
        (For instance the vaex hdf5 implementation does this)

        This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_meta`
        is called, so that the information is not lost between sessions.

        Note: opening a DataFrame twice may result in corruption of this file.

        """
        path = os.path.join(self.get_private_dir(create=True), "meta.yaml")
        units = {key: str(value) for key, value in self.units.items()}
        meta_info = dict(description=self.description,
                         ucds=self.ucds, units=units, descriptions=self.descriptions,
                         )
        vaex.utils.write_json_or_yaml(path, meta_info)
Generate a Subspaces object, based on a custom list of expressions or all possible combinations based on dimension.
def subspaces(self, expressions_list=None, dimensions=None, exclude=None, **kwargs):
        """Generate a Subspaces object, based on a custom list of expressions or all possible combinations based on dimension.

        :param expressions_list: list of lists of expressions, where the inner list defines the subspace
        :param dimensions: if given, generates a subspace with all possible combinations for that dimension
        :param exclude: list of expressions (or sequences of expressions) to exclude, or a callable that returns True for combinations to exclude
        """
        if dimensions is not None:
            expressions_list = list(itertools.combinations(self.get_column_names(), dimensions))
            if exclude is not None:
                import six

                def excluded(expressions):
                    if callable(exclude):
                        return exclude(expressions)
                    elif isinstance(exclude, six.string_types):
                        return exclude in expressions
                    elif isinstance(exclude, (list, tuple)):
                        for e in exclude:
                            if isinstance(e, six.string_types):
                                if e in expressions:
                                    return True
                            elif isinstance(e, (list, tuple)):
                                if set(e).issubset(expressions):
                                    return True
                            else:
                                raise ValueError("elements of exclude should contain a string or a sequence of strings")
                    else:
                        raise ValueError("exclude should contain a string, a sequence of strings, or should be a callable")
                    return False
                # test if any of the elements of exclude are a subset of the expression
                expressions_list = [expr for expr in expressions_list if not excluded(expr)]
            logger.debug("expression list generated: %r", expressions_list)
        import vaex.legacy
        return vaex.legacy.Subspaces([self(*expressions, **kwargs) for expressions in expressions_list])
Set the variable to an expression or value defined by expression_or_value.
def set_variable(self, name, expression_or_value, write=True): """Set the variable to an expression or value defined by expression_or_value. Example >>> df.set_variable("a", 2.) >>> df.set_variable("b", "a**2") >>> df.get_variable("b") 'a**2' >>> df.evaluate_variable("b") 4.0 :param name: Name of the variable :param write: write variable to meta file :param expression: value or expression """ self.variables[name] = expression_or_value
Evaluates the variable given by name.
def evaluate_variable(self, name): """Evaluates the variable given by name.""" if isinstance(self.variables[name], six.string_types): # TODO: this does not allow more than one level deep variable, like a depends on b, b on c, c is a const value = eval(self.variables[name], expression_namespace, self.variables) return value else: return self.variables[name]
Internal use, ignores the filter
def _evaluate_selection_mask(self, name="default", i1=None, i2=None, selection=None, cache=False): """Internal use, ignores the filter""" i1 = i1 or 0 i2 = i2 or len(self) scope = scopes._BlockScopeSelection(self, i1, i2, selection, cache=cache) return scope.evaluate(name)
Return a list of [(column_name, ndarray), ...] pairs where the ndarray corresponds to the evaluated data
def to_items(self, column_names=None, selection=None, strings=True, virtual=False): """Return a list of [(column_name, ndarray), ...)] pairs where the ndarray corresponds to the evaluated data :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used :param selection: {selection} :param strings: argument passed to DataFrame.get_column_names when column_names is None :param virtual: argument passed to DataFrame.get_column_names when column_names is None :return: list of (name, ndarray) pairs """ items = [] for name in column_names or self.get_column_names(strings=strings, virtual=virtual): items.append((name, self.evaluate(name, selection=selection))) return items
Return a dict containing the ndarray corresponding to the evaluated data
def to_dict(self, column_names=None, selection=None, strings=True, virtual=False): """Return a dict containing the ndarray corresponding to the evaluated data :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used :param selection: {selection} :param strings: argument passed to DataFrame.get_column_names when column_names is None :param virtual: argument passed to DataFrame.get_column_names when column_names is None :return: dict """ return dict(self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual))
Return a copy of the DataFrame; if selection is None, it does not copy the data, it just keeps a reference.
def to_copy(self, column_names=None, selection=None, strings=True, virtual=False, selections=True):
        """Return a copy of the DataFrame, if selection is None, it does not copy the data, it just has a reference

        :param column_names: list of column names, to copy, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
        :param selection: {selection}
        :param strings: argument passed to DataFrame.get_column_names when column_names is None
        :param virtual: argument passed to DataFrame.get_column_names when column_names is None
        :param selections: copy selections to a new DataFrame
        :return: DataFrame
        """
        if column_names:
            column_names = _ensure_strings_from_expressions(column_names)
        df = vaex.from_items(*self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=False))
        if virtual:
            for name, value in self.virtual_columns.items():
                df.add_virtual_column(name, value)
        if selections:
            # the filter selection does not need copying
            for key, value in self.selection_histories.items():
                if key != FILTER_SELECTION_NAME:
                    df.selection_histories[key] = list(value)
            for key, value in self.selection_history_indices.items():
                if key != FILTER_SELECTION_NAME:
                    df.selection_history_indices[key] = value
        df.functions.update(self.functions)
        df.copy_metadata(self)
        return df
Return a pandas DataFrame containing the ndarray corresponding to the evaluated data
def to_pandas_df(self, column_names=None, selection=None, strings=True, virtual=False, index_name=None):
        """Return a pandas DataFrame containing the ndarray corresponding to the evaluated data

        If index is given, that column is used for the index of the dataframe.

        Example

        >>> df_pandas = df.to_pandas_df(["x", "y", "z"])
        >>> df_copy = vaex.from_pandas(df_pandas)

        :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
        :param selection: {selection}
        :param strings: argument passed to DataFrame.get_column_names when column_names is None
        :param virtual: argument passed to DataFrame.get_column_names when column_names is None
        :param index_name: if this column is given it is used for the index of the DataFrame
        :return: pandas.DataFrame object
        """
        import pandas as pd
        data = self.to_dict(column_names=column_names, selection=selection, strings=strings, virtual=virtual)
        if index_name is not None:
            if index_name in data:
                index = data.pop(index_name)
            else:
                index = self.evaluate(index_name, selection=selection)
        else:
            index = None
        df = pd.DataFrame(data=data, index=index)
        if index is not None:
            df.index.name = index_name
        return df
Returns an arrow Table object containing the arrays corresponding to the evaluated data
def to_arrow_table(self, column_names=None, selection=None, strings=True, virtual=False): """Returns an arrow Table object containing the arrays corresponding to the evaluated data :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used :param selection: {selection} :param strings: argument passed to DataFrame.get_column_names when column_names is None :param virtual: argument passed to DataFrame.get_column_names when column_names is None :return: pyarrow.Table object """ from vaex_arrow.convert import arrow_table_from_vaex_df return arrow_table_from_vaex_df(self, column_names, selection, strings, virtual)
Returns an astropy table object containing the ndarrays corresponding to the evaluated data
def to_astropy_table(self, column_names=None, selection=None, strings=True, virtual=False, index=None):
        """Returns an astropy table object containing the ndarrays corresponding to the evaluated data

        :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
        :param selection: {selection}
        :param strings: argument passed to DataFrame.get_column_names when column_names is None
        :param virtual: argument passed to DataFrame.get_column_names when column_names is None
        :param index: if this column is given it is used for the index of the DataFrame
        :return: astropy.table.Table object
        """
        from astropy.table import Table, Column, MaskedColumn
        meta = dict()
        meta["name"] = self.name
        meta["description"] = self.description

        table = Table(meta=meta)
        for name, data in self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual):
            if self.dtype(name) == str_type:  # for astropy we convert it to unicode, it seems to ignore object type
                data = np.array(data).astype('U')
            meta = dict()
            if name in self.ucds:
                meta["ucd"] = self.ucds[name]
            if np.ma.isMaskedArray(data):
                cls = MaskedColumn
            else:
                cls = Column
            table[name] = cls(data, unit=self.unit(name), description=self.descriptions.get(name), meta=meta)
        return table
Validate an expression (may throw Exceptions)
def validate_expression(self, expression): """Validate an expression (may throw Exceptions)""" # return self.evaluate(expression, 0, 2) vars = set(self.get_column_names()) | set(self.variables.keys()) funcs = set(expression_namespace.keys()) return vaex.expresso.validate_expression(expression, vars, funcs)
Add an in memory array as a column.
def add_column(self, name, f_or_array): """Add an in memory array as a column.""" if isinstance(f_or_array, (np.ndarray, Column)): data = ar = f_or_array # it can be None when we have an 'empty' DataFrameArrays if self._length_original is None: self._length_unfiltered = _len(data) self._length_original = _len(data) self._index_end = self._length_unfiltered if _len(ar) != self.length_original(): if self.filtered: # give a better warning to avoid confusion if len(self) == len(ar): raise ValueError("Array is of length %s, while the length of the DataFrame is %s due to the filtering, the (unfiltered) length is %s." % (len(ar), len(self), self.length_unfiltered())) raise ValueError("array is of length %s, while the length of the DataFrame is %s" % (len(ar), self.length_original())) # assert self.length_unfiltered() == len(data), "columns should be of equal length, length should be %d, while it is %d" % ( self.length_unfiltered(), len(data)) self.columns[name] = f_or_array if name not in self.column_names: self.column_names.append(name) else: raise ValueError("functions not yet implemented") self._save_assign_expression(name, Expression(self, name))
Renames a column; note this is only the in-memory name, this will not be reflected on disk
def rename_column(self, name, new_name, unique=False, store_in_state=True):
        """Renames a column; note this is only the in-memory name, this will not be reflected on disk."""
        new_name = vaex.utils.find_valid_name(new_name, used=[] if not unique else list(self))
        data = self.columns.get(name)
        if data is not None:
            del self.columns[name]
            self.column_names[self.column_names.index(name)] = new_name
            self.columns[new_name] = data
        else:
            expression = self.virtual_columns[name]
            del self.virtual_columns[name]
            self.virtual_columns[new_name] = expression
        if store_in_state:
            self._renamed_columns.append((name, new_name))
        for d in [self.ucds, self.units, self.descriptions]:
            if name in d:
                d[new_name] = d[name]
                del d[name]
        return new_name
Add a healpix (in memory) column based on a longitude and latitude
def add_column_healpix(self, name="healpix", longitude="ra", latitude="dec", degrees=True, healpix_order=12, nest=True):
        """Add a healpix (in memory) column based on a longitude and latitude

        :param name: Name of column
        :param longitude: longitude expression
        :param latitude: latitude expression  (astronomical convention latitude=90 is north pole)
        :param degrees: If lon/lat are in degrees (default) or radians.
        :param healpix_order: healpix order, >= 0
        :param nest: Nested healpix (default) or ring.
        """
        import healpy as hp
        if degrees:
            scale = "*pi/180"
        else:
            scale = ""
        # TODO: multithread this
        phi = self.evaluate("(%s)%s" % (longitude, scale))
        theta = self.evaluate("pi/2-(%s)%s" % (latitude, scale))
        hp_index = hp.ang2pix(hp.order2nside(healpix_order), theta, phi, nest=nest)
        self.add_column(name, hp_index)
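The pi/2 - latitude conversion above follows healpy's convention, where ang2pix takes a colatitude theta (0 at the north pole) rather than a latitude. The same indexing, standalone:

import healpy as hp
import numpy as np

ra, dec = np.array([45.0]), np.array([30.0])   # degrees
theta = np.pi / 2 - np.radians(dec)            # colatitude, 0 at the north pole
phi = np.radians(ra)
hp_index = hp.ang2pix(hp.order2nside(12), theta, phi, nest=True)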
Propagates uncertainties (full covariance matrix) for a set of virtual columns.
def propagate_uncertainties(self, columns, depending_variables=None, cov_matrix='auto', covariance_format="{}_{}_covariance", uncertainty_format="{}_uncertainty"): """Propagates uncertainties (full covariance matrix) for a set of virtual columns. Covariance matrix of the depending variables is guessed by finding columns prefixed by "e" or `"e_"` or postfixed by "_error", "_uncertainty", "e" and `"_e"`. Off diagonals (covariance or correlation) by postfixes with "_correlation" or "_corr" for correlation or "_covariance" or "_cov" for covariances. (Note that x_y_cov = x_e * y_e * x_y_correlation.) Example >>> df = vaex.from_scalars(x=1, y=2, e_x=0.1, e_y=0.2) >>> df["u"] = df.x + df.y >>> df["v"] = np.log10(df.x) >>> df.propagate_uncertainties([df.u, df.v]) >>> df.u_uncertainty, df.v_uncertainty :param columns: list of columns for which to calculate the covariance matrix. :param depending_variables: If not given, it is found out automatically, otherwise a list of columns which have uncertainties. :param cov_matrix: List of list with expressions giving the covariance matrix, in the same order as depending_variables. If 'full' or 'auto', the covariance matrix for the depending_variables will be guessed, where 'full' gives an error if an entry was not found. """ names = _ensure_strings_from_expressions(columns) virtual_columns = self._expr(*columns, always_list=True) if depending_variables is None: depending_variables = set() for expression in virtual_columns: depending_variables |= expression.variables() depending_variables = list(sorted(list(depending_variables))) fs = [self[self.virtual_columns[name]] for name in names] jacobian = self._jacobian(fs, depending_variables) m = len(fs) n = len(depending_variables) # n x n matrix cov_matrix = self._covariance_matrix_guess(depending_variables, full=cov_matrix == "full", as_expression=True) # empty m x m matrix cov_matrix_out = [[self['0'] for __ in range(m)] for __ in range(m)] for i in range(m): for j in range(m): for k in range(n): for l in range(n): if jacobian[i][k].expression == '0' or jacobian[j][l].expression == '0' or cov_matrix[k][l].expression == '0': pass else: cov_matrix_out[i][j] = cov_matrix_out[i][j] + jacobian[i][k] * cov_matrix[k][l] * jacobian[j][l] for i in range(m): for j in range(i + 1): sigma = cov_matrix_out[i][j] sigma = self._expr(vaex.expresso.simplify(_ensure_string_from_expression(sigma))) if i != j: self.add_virtual_column(covariance_format.format(names[i], names[j]), sigma) else: self.add_virtual_column(uncertainty_format.format(names[i]), np.sqrt(sigma))
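The quadruple loop builds, symbolically, the standard first-order propagation Sigma_out = J Sigma J^T. The same result numerically for the docstring's u = x + y with independent errors (plain numpy, not vaex API):

import numpy as np

J = np.array([[1.0, 1.0]])              # du/dx, du/dy for u = x + y
cov = np.diag([0.1**2, 0.2**2])         # independent e_x, e_y
u_var = J @ cov @ J.T                   # first-order error propagation
assert np.isclose(np.sqrt(u_var[0, 0]), np.hypot(0.1, 0.2))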
Convert cartesian to polar coordinates
def add_virtual_columns_cartesian_to_polar(self, x="x", y="y", radius_out="r_polar", azimuth_out="phi_polar", propagate_uncertainties=False, radians=False): """Convert cartesian to polar coordinates :param x: expression for x :param y: expression for y :param radius_out: name for the virtual column for the radius :param azimuth_out: name for the virtual column for the azimuth angle :param propagate_uncertainties: {propagate_uncertainties} :param radians: if True, azimuth is in radians, defaults to degrees :return: """ x = self[x] y = self[y] if radians: to_degrees = "" else: to_degrees = "*180/pi" r = np.sqrt(x**2 + y**2) self[radius_out] = r phi = np.arctan2(y, x) if not radians: phi = phi * 180/np.pi self[azimuth_out] = phi if propagate_uncertainties: self.propagate_uncertainties([self[radius_out], self[azimuth_out]])
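The expressions built above are the usual r = sqrt(x^2 + y^2), phi = atan2(y, x) pair; a quick numeric round-trip of the same formulas:

import numpy as np

x, y = 3.0, 4.0
r = np.sqrt(x**2 + y**2)              # 5.0
phi = np.degrees(np.arctan2(y, x))    # ~53.13, the default degrees output
assert np.isclose(r * np.cos(np.radians(phi)), x)
assert np.isclose(r * np.sin(np.radians(phi)), y)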
Convert velocities from a cartesian to a spherical coordinate system
def add_virtual_columns_cartesian_velocities_to_spherical(self, x="x", y="y", z="z", vx="vx", vy="vy", vz="vz", vr="vr", vlong="vlong", vlat="vlat", distance=None):
        """Convert velocities from a cartesian to a spherical coordinate system

        TODO: errors

        :param x: name of x column (input)
        :param y: y
        :param z: z
        :param vx: vx
        :param vy: vy
        :param vz: vz
        :param vr: name of the column for the radial velocity in the r direction (output)
        :param vlong: name of the column for the velocity component in the longitude direction (output)
        :param vlat: name of the column for the velocity component in the latitude direction, positive points to the north pole (output)
        :param distance: Expression for distance, if not given defaults to sqrt(x**2+y**2+z**2), but if this column already exists, passing this expression may lead to a better performance
        :return:
        """
        # see http://www.astrosurf.com/jephem/library/li110spherCart_en.htm
        if distance is None:
            distance = "sqrt({x}**2+{y}**2+{z}**2)".format(**locals())
        self.add_virtual_column(vr, "({x}*{vx}+{y}*{vy}+{z}*{vz})/{distance}".format(**locals()))
        self.add_virtual_column(vlong, "-({vx}*{y}-{x}*{vy})/sqrt({x}**2+{y}**2)".format(**locals()))
        self.add_virtual_column(vlat, "-({z}*({x}*{vx}+{y}*{vy}) - ({x}**2+{y}**2)*{vz})/( {distance}*sqrt({x}**2+{y}**2) )".format(**locals()))
Convert cartesian to polar velocities.
def add_virtual_columns_cartesian_velocities_to_polar(self, x="x", y="y", vx="vx", radius_polar=None, vy="vy", vr_out="vr_polar", vazimuth_out="vphi_polar", propagate_uncertainties=False,): """Convert cartesian to polar velocities. :param x: :param y: :param vx: :param radius_polar: Optional expression for the radius, may lead to a better performance when given. :param vy: :param vr_out: :param vazimuth_out: :param propagate_uncertainties: {propagate_uncertainties} :return: """ x = self._expr(x) y = self._expr(y) vx = self._expr(vx) vy = self._expr(vy) if radius_polar is None: radius_polar = np.sqrt(x**2 + y**2) radius_polar = self._expr(radius_polar) self[vr_out] = (x*vx + y*vy) / radius_polar self[vazimuth_out] = (x*vy - y*vx) / radius_polar if propagate_uncertainties: self.propagate_uncertainties([self[vr_out], self[vazimuth_out]])
Convert cylindrical polar velocities to Cartesian.
def add_virtual_columns_polar_velocities_to_cartesian(self, x='x', y='y', azimuth=None, vr='vr_polar', vazimuth='vphi_polar', vx_out='vx', vy_out='vy', propagate_uncertainties=False):
        """Convert cylindrical polar velocities to Cartesian.

        :param x:
        :param y:
        :param azimuth: Optional expression for the azimuth in degrees, may lead to a better performance when given.
        :param vr:
        :param vazimuth:
        :param vx_out:
        :param vy_out:
        :param propagate_uncertainties: {propagate_uncertainties}
        """
        x = self._expr(x)
        y = self._expr(y)
        vr = self._expr(vr)
        vazimuth = self._expr(vazimuth)
        if azimuth is not None:
            azimuth = self._expr(azimuth)
            azimuth = np.deg2rad(azimuth)
        else:
            azimuth = np.arctan2(y, x)
        azimuth = self._expr(azimuth)
        self[vx_out] = vr * np.cos(azimuth) - vazimuth * np.sin(azimuth)
        self[vy_out] = vr * np.sin(azimuth) + vazimuth * np.cos(azimuth)
        if propagate_uncertainties:
            self.propagate_uncertainties([self[vx_out], self[vy_out]])
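The two output expressions are just the rotation of the local (radial, azimuthal) frame into (x, y); a numeric spot check of the same formulas:

import numpy as np

azimuth = np.radians(90.0)    # a point on the +y axis
vr, vazimuth = 1.0, 0.0       # purely radial motion
vx = vr * np.cos(azimuth) - vazimuth * np.sin(azimuth)
vy = vr * np.sin(azimuth) + vazimuth * np.cos(azimuth)
assert np.allclose([vx, vy], [0.0, 1.0])  # radial motion at +y points along +y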
Rotation in 2d.
def add_virtual_columns_rotation(self, x, y, xnew, ynew, angle_degrees, propagate_uncertainties=False):
        """Rotation in 2d.

        :param str x: Name/expression of x column
        :param str y: idem for y
        :param str xnew: name of transformed x column
        :param str ynew: idem for y
        :param float angle_degrees: rotation in degrees, anti-clockwise
        :return:
        """
        x = _ensure_string_from_expression(x)
        y = _ensure_string_from_expression(y)
        theta = np.radians(angle_degrees)
        matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
        m = matrix_name = x + "_" + y + "_rot"
        for i in range(2):
            for j in range(2):
                self.set_variable(matrix_name + "_%d%d" % (i, j), matrix[i, j].item())
        self[xnew] = self._expr("{m}_00 * {x} + {m}_01 * {y}".format(**locals()))
        self[ynew] = self._expr("{m}_10 * {x} + {m}_11 * {y}".format(**locals()))
        if propagate_uncertainties:
            self.propagate_uncertainties([self[xnew], self[ynew]])
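The stored matrix is the standard anti-clockwise rotation; since its entries become scalar variables, the virtual columns stay simple linear expressions. A spot check of the matrix itself:

import numpy as np

theta = np.radians(90.0)
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
# anti-clockwise by 90 degrees: the x unit vector maps onto y
assert np.allclose(R @ np.array([1.0, 0.0]), [0.0, 1.0])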
Convert spherical to cartesian coordinates.
def add_virtual_columns_spherical_to_cartesian(self, alpha, delta, distance, xname="x", yname="y", zname="z", propagate_uncertainties=False, center=[0, 0, 0], center_name="solar_position", radians=False): """Convert spherical to cartesian coordinates. :param alpha: :param delta: polar angle, ranging from the -90 (south pole) to 90 (north pole) :param distance: radial distance, determines the units of x, y and z :param xname: :param yname: :param zname: :param propagate_uncertainties: {propagate_uncertainties} :param center: :param center_name: :param radians: :return: """ alpha = self._expr(alpha) delta = self._expr(delta) distance = self._expr(distance) if not radians: alpha = alpha * self._expr('pi')/180 delta = delta * self._expr('pi')/180 # TODO: use sth like .optimize by default to get rid of the +0 ? if center[0]: self[xname] = np.cos(alpha) * np.cos(delta) * distance + center[0] else: self[xname] = np.cos(alpha) * np.cos(delta) * distance if center[1]: self[yname] = np.sin(alpha) * np.cos(delta) * distance + center[1] else: self[yname] = np.sin(alpha) * np.cos(delta) * distance if center[2]: self[zname] = np.sin(delta) * distance + center[2] else: self[zname] = np.sin(delta) * distance if propagate_uncertainties: self.propagate_uncertainties([self[xname], self[yname], self[zname]])
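The three expressions are the textbook spherical-to-cartesian transform; a numeric check that the generated formulas preserve the radius:

import numpy as np

alpha, delta, distance = np.radians(45.), np.radians(30.), 2.0
x = np.cos(alpha) * np.cos(delta) * distance
y = np.sin(alpha) * np.cos(delta) * distance
z = np.sin(delta) * distance
assert np.isclose(np.sqrt(x**2 + y**2 + z**2), distance)  # radius preserved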
Convert cartesian to spherical coordinates.
def add_virtual_columns_cartesian_to_spherical(self, x="x", y="y", z="z", alpha="l", delta="b", distance="distance", radians=False, center=None, center_name="solar_position"):
    """Convert cartesian to spherical coordinates.

    :param x: expression for the x coordinate
    :param y: expression for the y coordinate
    :param z: expression for the z coordinate
    :param alpha: name for the azimuthal angle
    :param delta: name for polar angle, ranges from -90 to 90 (or -pi/2 to pi/2 when radians is True).
    :param distance: name for the radial distance
    :param radians: output angles in radians (True) or degrees (False)
    :param center: cartesian offset subtracted before the conversion
    :param center_name: name for the variable that holds the center
    :return:
    """
    transform = "" if radians else "*180./pi"

    if center is not None:
        self.add_variable(center_name, center)
    if center is not None and center[0] != 0:
        x = "({x} - {center_name}[0])".format(**locals())
    if center is not None and center[1] != 0:
        y = "({y} - {center_name}[1])".format(**locals())
    if center is not None and center[2] != 0:
        z = "({z} - {center_name}[2])".format(**locals())
    self.add_virtual_column(distance, "sqrt({x}**2 + {y}**2 + {z}**2)".format(**locals()))
    # self.add_virtual_column(alpha, "((arctan2({y}, {x}) + 2*pi) % (2*pi)){transform}".format(**locals()))
    self.add_virtual_column(alpha, "arctan2({y}, {x}){transform}".format(**locals()))
    self.add_virtual_column(delta, "(-arccos({z}/{distance})+pi/2){transform}".format(**locals()))
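The inverse transform, sketched on a single point (toy data is illustrative; l, b and distance are the default output names): the point (0, 1, 0) sits at distance 1, azimuth 90 degrees, and zero polar angle.

import numpy as np
import vaex

df = vaex.from_arrays(x=np.array([0.0]), y=np.array([1.0]), z=np.array([0.0]))
df.add_virtual_columns_cartesian_to_spherical()
print(df.evaluate('distance'))  # [1.]
print(df.evaluate('l'))         # [90.]  (degrees, since radians defaults to False)
print(df.evaluate('b'))         # [0.]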
Add aitoff (https://en.wikipedia.org/wiki/Aitoff_projection) projection
def add_virtual_columns_aitoff(self, alpha, delta, x, y, radians=True):
    """Add aitoff (https://en.wikipedia.org/wiki/Aitoff_projection) projection

    :param alpha: azimuth angle
    :param delta: polar angle
    :param x: output name for x coordinate
    :param y: output name for y coordinate
    :param radians: input and output in radians (True), or degrees (False)
    :return:
    """
    transform = "" if radians else "*pi/180."
    aitoff_alpha = "__aitoff_alpha_%s_%s" % (alpha, delta)
    # sanitize
    aitoff_alpha = re.sub("[^a-zA-Z_]", "_", aitoff_alpha)

    self.add_virtual_column(aitoff_alpha, "arccos(cos({delta}{transform})*cos({alpha}{transform}/2))".format(**locals()))
    self.add_virtual_column(x, "2*cos({delta}{transform})*sin({alpha}{transform}/2)/sinc({aitoff_alpha}/pi)/pi".format(**locals()))
    self.add_virtual_column(y, "sin({delta}{transform})/sinc({aitoff_alpha}/pi)/pi".format(**locals()))
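A minimal sketch (toy data is illustrative): the center of the projection, alpha=delta=0, should map to the origin of the aitoff plane, since sinc(0) is 1.

import numpy as np
import vaex

df = vaex.from_arrays(alpha=np.array([0.0]), delta=np.array([0.0]))
df.add_virtual_columns_aitoff('alpha', 'delta', 'x_aitoff', 'y_aitoff', radians=True)
print(df.evaluate('x_aitoff'), df.evaluate('y_aitoff'))  # ~[0.] [0.]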
Add a virtual column to the DataFrame.
def add_virtual_column(self, name, expression, unique=False):
    """Add a virtual column to the DataFrame.

    Example:

    >>> df.add_virtual_column("r", "sqrt(x**2 + y**2 + z**2)")
    >>> df.select("r < 10")

    :param str name: name of virtual column
    :param expression: expression for the column
    :param bool unique: if name is already used, make it unique by adding a postfix, e.g. _1, or _2
    """
    type = "change" if name in self.virtual_columns else "add"
    expression = _ensure_string_from_expression(expression)
    if name in self.get_column_names(virtual=False):
        renamed = '__' + vaex.utils.find_valid_name(name, used=self.get_column_names())
        expression = self._rename(name, renamed, expression)[0].expression

    name = vaex.utils.find_valid_name(name, used=[] if not unique else self.get_column_names())
    self.virtual_columns[name] = expression
    self.column_names.append(name)
    self._save_assign_expression(name)
    self.signal_column_changed.emit(self, name, type)
Deletes a virtual column from a DataFrame.
def delete_virtual_column(self, name): """Deletes a virtual column from a DataFrame.""" del self.virtual_columns[name] self.signal_column_changed.emit(self, name, "delete")
Add a variable to a DataFrame.
def add_variable(self, name, expression, overwrite=True, unique=True):
    """Add a variable to a DataFrame.

    A variable may refer to other variables, and virtual columns and expression may refer to variables.

    Example

    >>> df.add_variable('center', 0)
    >>> df.add_virtual_column('x_prime', 'x-center')
    >>> df.select('x_prime < 0')

    :param str name: name of the variable
    :param expression: expression for the variable
    """
    if unique or overwrite or name not in self.variables:
        existing_names = self.get_column_names(virtual=False) + list(self.variables.keys())
        name = vaex.utils.find_valid_name(name, used=[] if not unique else existing_names)
        self.variables[name] = expression
        self.signal_variable_changed.emit(self, name, "add")
        if unique:
            return name
Deletes a variable from a DataFrame.
def delete_variable(self, name): """Deletes a variable from a DataFrame.""" del self.variables[name] self.signal_variable_changed.emit(self, name, "delete")
Return a shallow copy of a DataFrame with the last n rows.
def tail(self, n=10):
    """Return a shallow copy of a DataFrame with the last n rows."""
    N = len(self)
    return self[max(0, N - n):min(len(self), N)]
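A short usage sketch (toy data is illustrative):

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(10))
print(df.tail(3))  # shallow copy holding only rows 7, 8 and 9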
Display the first and last n elements of a DataFrame.
def head_and_tail_print(self, n=5): """Display the first and last n elements of a DataFrame.""" from IPython import display display.display(display.HTML(self._head_and_tail_table(n)))
Give a description of the DataFrame.
def describe(self, strings=True, virtual=True, selection=None): """Give a description of the DataFrame. >>> import vaex >>> df = vaex.example()[['x', 'y', 'z']] >>> df.describe() x y z dtype float64 float64 float64 count 330000 330000 330000 missing 0 0 0 mean -0.0671315 -0.0535899 0.0169582 std 7.31746 7.78605 5.05521 min -128.294 -71.5524 -44.3342 max 271.366 146.466 50.7185 >>> df.describe(selection=df.x > 0) x y z dtype float64 float64 float64 count 164060 164060 164060 missing 165940 165940 165940 mean 5.13572 -0.486786 -0.0868073 std 5.18701 7.61621 5.02831 min 1.51635e-05 -71.5524 -44.3342 max 271.366 78.0724 40.2191 :param bool strings: Describe string columns or not :param bool virtual: Describe virtual columns or not :param selection: Optional selection to use. :return: Pandas dataframe """ import pandas as pd N = len(self) columns = {} for feature in self.get_column_names(strings=strings, virtual=virtual)[:]: dtype = str(self.dtype(feature)) if self.dtype(feature) != str else 'str' if self.dtype(feature) == str_type or self.dtype(feature).kind in ['S', 'U', 'O']: count = self.count(feature, selection=selection, delay=True) self.execute() count = count.get() columns[feature] = ((dtype, count, N-count, '--', '--', '--', '--')) else: count = self.count(feature, selection=selection, delay=True) mean = self.mean(feature, selection=selection, delay=True) std = self.std(feature, selection=selection, delay=True) minmax = self.minmax(feature, selection=selection, delay=True) self.execute() count, mean, std, minmax = count.get(), mean.get(), std.get(), minmax.get() count = int(count) columns[feature] = ((dtype, count, N-count, mean, std, minmax[0], minmax[1])) return pd.DataFrame(data=columns, index=['dtype', 'count', 'missing', 'mean', 'std', 'min', 'max'])
Display the DataFrame from row i1 till i2
def cat(self, i1, i2, format='html'): """Display the DataFrame from row i1 till i2 For format, see https://pypi.org/project/tabulate/ :param int i1: Start row :param int i2: End row. :param str format: Format to use, e.g. 'html', 'plain', 'latex' """ from IPython import display if format == 'html': output = self._as_html_table(i1, i2) display.display(display.HTML(output)) else: output = self._as_table(i1, i2, format=format) print(output)
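A usage sketch (toy data is illustrative); the 'plain' format avoids the IPython HTML display path:

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(10))
df.cat(0, 5, format='plain')  # prints the first rows as a plain-text table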
Set the current row and emit the signal signal_pick.
def set_current_row(self, value): """Set the current row, and emit the signal signal_pick.""" if (value is not None) and ((value < 0) or (value >= len(self))): raise IndexError("index %d out of range [0,%d]" % (value, len(self))) self._current_row = value self.signal_pick.emit(self, value)
Return a list of column names
def get_column_names(self, virtual=True, strings=True, hidden=False, regex=None):
    """Return a list of column names

    Example:

    >>> import vaex
    >>> df = vaex.from_scalars(x=1, x2=2, y=3, s='string')
    >>> df['r'] = (df.x**2 + df.y**2)**2
    >>> df.get_column_names()
    ['x', 'x2', 'y', 's', 'r']
    >>> df.get_column_names(virtual=False)
    ['x', 'x2', 'y', 's']
    >>> df.get_column_names(regex='x.*')
    ['x', 'x2']

    :param virtual: If False, skip virtual columns
    :param hidden: If False, skip hidden columns
    :param strings: If False, skip string columns
    :param regex: Only return column names matching the (optional) regular expression
    :rtype: list of str
    """
    def column_filter(name):
        '''Return True if column with specified name should be returned'''
        if regex and not re.match(regex, name):
            return False
        if not virtual and name in self.virtual_columns:
            return False
        if not strings and (self.dtype(name) == str_type or self.dtype(name).type == np.string_):
            return False
        if not hidden and name.startswith('__'):
            return False
        return True
    return [name for name in self.column_names if column_filter(name)]
Sets the active_fraction, sets the picked row to None, and removes the selection.
def set_active_fraction(self, value):
    """Sets the active_fraction, sets the picked row to None, and removes the selection.

    TODO: we may be able to keep the selection, if we keep the expression, and also the picked row
    """
    if value != self._active_fraction:
        self._active_fraction = value
        # self._fraction_length = int(self._length * self._active_fraction)
        self.select(None)
        self.set_current_row(None)
        self._length_unfiltered = int(round(self._length_original * self._active_fraction))
        self._index_start = 0
        self._index_end = self._length_unfiltered
        self.signal_active_fraction_changed.emit(self, value)
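A sketch of the effect (toy data is illustrative; assuming len() reflects the active rows, as suggested by _length_unfiltered above):

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(100))
df.set_active_fraction(0.5)
print(len(df))  # 50: only the first half of the rows is now active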
Sets the active range of rows, sets the picked row to None, and removes the selection.
def set_active_range(self, i1, i2):
    """Sets the active range of rows (i1 inclusive, i2 exclusive), sets the picked row to None, and removes the selection.

    TODO: we may be able to keep the selection, if we keep the expression, and also the picked row
    """
    logger.debug("set active range to: %r", (i1, i2))
    self._active_fraction = (i2 - i1) / float(self.length_original())
    # self._fraction_length = int(self._length * self._active_fraction)
    self._index_start = i1
    self._index_end = i2
    self.select(None)
    self.set_current_row(None)
    self._length_unfiltered = i2 - i1
    self.signal_active_fraction_changed.emit(self, self._active_fraction)
Return a DataFrame where all columns are trimmed by the active range.
def trim(self, inplace=False): '''Return a DataFrame, where all columns are 'trimmed' by the active range. For the returned DataFrame, df.get_active_range() returns (0, df.length_original()). {note_copy} :param inplace: {inplace} :rtype: DataFrame ''' df = self if inplace else self.copy() for name in df: column = df.columns.get(name) if column is not None: if self._index_start == 0 and len(column) == self._index_end: pass # we already assigned it in .copy else: if isinstance(column, np.ndarray): # real array df.columns[name] = column[self._index_start:self._index_end] else: df.columns[name] = column.trim(self._index_start, self._index_end) df._length_original = self.length_unfiltered() df._length_unfiltered = df._length_original df._index_start = 0 df._index_end = df._length_original df._active_fraction = 1 return df
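A sketch combining set_active_range and trim (toy data is illustrative); per the docstring above, the trimmed copy starts counting at 0 again:

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(10))
df.set_active_range(2, 8)
dft = df.trim()
print(dft.length_original())   # 6: only the active rows remain
print(dft.get_active_range())  # (0, 6)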
Returns a DataFrame containing only rows indexed by indices
def take(self, indices):
    '''Returns a DataFrame containing only rows indexed by indices

    {note_copy}

    Example:

    >>> import vaex, numpy as np
    >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))
    >>> df.take([0,2])
      #  s      x
      0  a      1
      1  c      3

    :param indices: sequence (list or numpy array) with row numbers
    :return: DataFrame which is a shallow copy of the original data.
    :rtype: DataFrame
    '''
    df = self.copy()
    # if the columns in ds already have a ColumnIndex
    # we could do, direct_indices = df.column['bla'].indices[indices]
    # which should be shared among multiple ColumnIndex'es, so we store
    # them in this dict
    direct_indices_map = {}
    indices = np.array(indices)
    for name in df:
        column = df.columns.get(name)
        if column is not None:
            # we optimize this somewhere, so we don't do multiple
            # levels of indirection
            if isinstance(column, ColumnIndexed):
                # TODO: think about what happens when the indices are masked.. ?
                if id(column.indices) not in direct_indices_map:
                    direct_indices = column.indices[indices]
                    direct_indices_map[id(column.indices)] = direct_indices
                else:
                    direct_indices = direct_indices_map[id(column.indices)]
                df.columns[name] = ColumnIndexed(column.df, direct_indices, column.name)
            else:
                df.columns[name] = ColumnIndexed(self, indices, name)
    df._length_original = len(indices)
    df._length_unfiltered = df._length_original
    df.set_selection(None, name=FILTER_SELECTION_NAME)
    return df
Return a DataFrame containing only the filtered rows.
def extract(self): '''Return a DataFrame containing only the filtered rows. {note_copy} The resulting DataFrame may be more efficient to work with when the original DataFrame is heavily filtered (contains just a small number of rows). If no filtering is applied, it returns a trimmed view. For the returned df, len(df) == df.length_original() == df.length_unfiltered() :rtype: DataFrame ''' trimmed = self.trim() if trimmed.filtered: indices = trimmed._filtered_range_to_unfiltered_indices(0, len(trimmed)) return trimmed.take(indices) else: return trimmed
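A sketch of when extract pays off (toy data is illustrative): after a heavy filter, extract materializes the matching row indices so later scans touch only those rows.

import numpy as np
import vaex

df = vaex.from_arrays(x=np.arange(10))
dff = df[df.x > 6]   # filtered view; still carries all 10 rows internally
dfe = dff.extract()  # only the 3 matching rows remain
print(len(dfe), dfe.length_unfiltered())  # 3 3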
Returns a DataFrame with a random set of rows
def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None):
    '''Returns a DataFrame with a random set of rows

    {note_copy}

    Provide either n or frac.

    Example:

    >>> import vaex, numpy as np
    >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))
    >>> df
      #  s      x
      0  a      1
      1  b      2
      2  c      3
      3  d      4
    >>> df.sample(n=2, random_state=42) # 2 random rows, fixed seed
      #  s      x
      0  b      2
      1  d      4
    >>> df.sample(frac=1, random_state=42) # 'shuffling'
      #  s      x
      0  c      3
      1  a      1
      2  d      4
      3  b      2
    >>> df.sample(frac=1, replace=True, random_state=42) # useful for bootstrap (may contain repeated samples)
      #  s      x
      0  d      4
      1  a      1
      2  a      1
      3  d      4

    :param int n: number of samples to take (default 1 if frac is None)
    :param float frac: fraction of rows to take
    :param bool replace: If true, a row may be drawn multiple times
    :param weights: (unnormalized) probability that a row can be drawn (str or expression)
    :param random_state: seed (int) or RandomState for reproducibility; when None a random seed is chosen
    :return: {return_shallow_copy}
    :rtype: DataFrame
    '''
    self = self.extract()
    if type(random_state) == int or random_state is None:
        random_state = np.random.RandomState(seed=random_state)
    if n is None and frac is None:
        n = 1
    elif frac is not None:
        n = int(round(frac * len(self)))
    weights_values = None
    if weights is not None:
        weights_values = self.evaluate(weights)
        weights_values = weights_values / self.sum(weights)
    indices = random_state.choice(len(self), n, replace=replace, p=weights_values)
    return self.take(indices)
Returns a generator yielding random portions of the DataFrame.
def split_random(self, frac, random_state=None):
    '''Returns a generator yielding random portions of the DataFrame.

    {note_copy}

    Example:

    >>> import vaex, numpy as np
    >>> np.random.seed(111)
    >>> df = vaex.from_arrays(x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> for dfs in df.split_random(frac=0.3, random_state=42):
    ...     print(dfs.x.values)
    ...
    [8 1 5]
    [0 7 2 9 4 3 6]
    >>> for dfs in df.split_random(frac=[0.2, 0.3, 0.5], random_state=42):
    ...     print(dfs.x.values)
    [8 1]
    [5 0 7]
    [2 9 4 3 6]

    :param float/list frac: If a float, will split the DataFrame in two portions, the first of which will have a size as specified by this parameter. If a list, the generator will generate as many portions as elements in the list, where each element defines the relative fraction of that portion.
    :param int random_state: (default None) Random number seed for reproducibility.
    :return: A generator yielding DataFrames.
    :rtype: generator
    '''
    self = self.extract()
    if type(random_state) == int or random_state is None:
        random_state = np.random.RandomState(seed=random_state)
    indices = random_state.choice(len(self), len(self), replace=False)
    return self.take(indices).split(frac)
Returns a generator yielding ordered subsets of the DataFrame.
def split(self, frac):
    '''Returns a generator yielding ordered subsets of the DataFrame.

    {note_copy}

    Example:

    >>> import vaex
    >>> df = vaex.from_arrays(x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> for dfs in df.split(frac=0.3):
    ...     print(dfs.x.values)
    ...
    [0 1 2]
    [3 4 5 6 7 8 9]
    >>> for dfs in df.split(frac=[0.2, 0.3, 0.5]):
    ...     print(dfs.x.values)
    [0 1]
    [2 3 4]
    [5 6 7 8 9]

    :param float/list frac: If a float, will split the DataFrame in two portions, the first of which will have a size as specified by this parameter. If a list, the generator will generate as many portions as elements in the list, where each element defines the relative fraction of that portion.
    :return: A generator yielding DataFrames.
    :rtype: generator
    '''
    self = self.extract()
    if _issequence(frac):
        # make sure it is normalized
        total = sum(frac)
        frac = [k / total for k in frac]
    else:
        assert frac <= 1, "fraction should be <= 1"
        frac = [frac, 1 - frac]
    offsets = np.round(np.cumsum(frac) * len(self)).astype(np.int64)
    start = 0
    for offset in offsets:
        yield self[start:offset]
        start = offset