hexsha (string, length 40) | size (int64, 1 to 1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, length 3 to 239) | max_stars_repo_name (string, length 5 to 130) | max_stars_repo_head_hexsha (string, length 40 to 78) | max_stars_repo_licenses (list, length 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 3 to 239) | max_issues_repo_name (string, length 5 to 130) | max_issues_repo_head_hexsha (string, length 40 to 78) | max_issues_repo_licenses (list, length 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 3 to 239) | max_forks_repo_name (string, length 5 to 130) | max_forks_repo_head_hexsha (string, length 40 to 78) | max_forks_repo_licenses (list, length 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 1 to 1.03M) | avg_line_length (float64, 1 to 958k) | max_line_length (int64, 1 to 1.03M) | alphanum_fraction (float64, 0 to 1)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a16e423d68f797d4114932597c712fd4a3f1133 | 510 | py | Python | script_for_creating_gif.py | grifguitar/ml-advanced | db31344ddc0ec3870adbddb3d558c6b977b890a9 | ["Apache-2.0"] | 1 | 2021-11-04T14:29:10.000Z | 2021-11-04T14:29:10.000Z | script_for_creating_gif.py | grifguitar/ml-advanced | db31344ddc0ec3870adbddb3d558c6b977b890a9 | ["Apache-2.0"] | null | null | null | script_for_creating_gif.py | grifguitar/ml-advanced | db31344ddc0ec3870adbddb3d558c6b977b890a9 | ["Apache-2.0"] | null | null | null |
import imageio
import os


def item_to_int(item):
    lst = item.split('_')
    lst = lst[2].split('.')
    return int(lst[0])


def solve():
    # Build GIF
    with imageio.get_writer('new_gif.gif', mode='I') as writer:
        files = [filename for filename in os.listdir('images1/')]
        files.sort(key=item_to_int)
        for file in files:
            image = imageio.imread('images1/' + file)
            writer.append_data(image)
    print('GIF created')


if __name__ == '__main__':
    solve()
| 20.4 | 65 | 0.598039 |
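The `item_to_int` sort key in the row above only orders frames correctly for one specific naming scheme. A minimal sketch, assuming hypothetical filenames of the form `img_frame_<N>.png` (the real contents of `images1/` are not part of this row), showing why sorting by the extracted integer beats plain lexicographic sorting:

```python
# Hypothetical filenames; the actual frames in images1/ are not shown in the dataset row.
def item_to_int(item):
    lst = item.split('_')      # e.g. ['img', 'frame', '7.png']
    lst = lst[2].split('.')    # ['7', 'png']
    return int(lst[0])         # 7

files = ['img_frame_10.png', 'img_frame_2.png', 'img_frame_1.png']
files.sort(key=item_to_int)
print(files)   # ['img_frame_1.png', 'img_frame_2.png', 'img_frame_10.png']

sorted(files)  # plain lexicographic order would put 'img_frame_10.png' before 'img_frame_2.png'
```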
4a16e43f408e80ba517e72e20d5758f4d0f3cade | 22,119 | py | Python | joypy/joyplot.py | sbebo/joypy | fb74cb6c725199cca4f5260fdbc0a90ff579c7f3 | ["MIT"] | 342 | 2017-08-02T05:59:57.000Z | 2021-05-29T13:10:54.000Z | joypy/joyplot.py | sbebo/joypy | fb74cb6c725199cca4f5260fdbc0a90ff579c7f3 | ["MIT"] | 48 | 2017-08-04T19:50:18.000Z | 2021-04-08T11:30:37.000Z | joypy/joyplot.py | sbebo/joypy | fb74cb6c725199cca4f5260fdbc0a90ff579c7f3 | ["MIT"] | 39 | 2017-09-01T06:33:59.000Z | 2021-05-06T08:28:39.000Z |
import numpy as np
from scipy.stats import gaussian_kde
import scipy.stats as stats
import warnings
try:
# pandas < 0.25
from pandas.plotting._tools import (_subplots, _flatten)
except ImportError:
try:
#pandas >= 0.25, <1.2.0
from pandas.plotting._matplotlib.tools import (_subplots, _flatten)
except ImportError:
#pandas >= 1.2.0
from pandas.plotting._matplotlib.tools import create_subplots as _subplots
from pandas.plotting._matplotlib.tools import flatten_axes as _flatten
from pandas import (DataFrame, Series)
from pandas.core.dtypes.common import is_number
from pandas.core.groupby import DataFrameGroupBy
from matplotlib import pyplot as plt
from warnings import warn
_DEBUG = False
def _x_range(data, extra=0.2):
""" Compute the x_range, i.e., the values for which the
density will be computed. It should be slightly larger than
the max and min so that the plot actually reaches 0, and
also has a bit of a tail on both sides.
"""
try:
sample_range = np.nanmax(data) - np.nanmin(data)
except ValueError:
return []
if sample_range < 1e-6:
return [np.nanmin(data), np.nanmax(data)]
return np.linspace(np.nanmin(data) - extra*sample_range,
np.nanmax(data) + extra*sample_range, 1000)
def _setup_axis(ax, x_range, col_name=None, grid=False, ylabelsize=None, yrot=None):
""" Setup the axis for the joyplot:
- add the y label if required (as an ytick)
- add y grid if required
- make the background transparent
- set the xlim according to the x_range
- hide the xaxis and the spines
"""
if col_name is not None:
ax.set_yticks([0])
ax.set_yticklabels([col_name], fontsize=ylabelsize, rotation=yrot)
ax.yaxis.grid(grid)
else:
ax.yaxis.set_visible(False)
ax.patch.set_alpha(0)
ax.set_xlim([min(x_range), max(x_range)])
ax.tick_params(axis='both', which='both', length=0, pad=10)
ax.xaxis.set_visible(_DEBUG)
ax.set_frame_on(_DEBUG)
def _is_numeric(x):
""" Whether the array x is numeric. """
return all(is_number(i) for i in x)
def _get_alpha(i, n, start=0.4, end=1.0):
""" Compute alpha value at position i out of n """
return start + (1 + i)*(end - start)/n
def _remove_na(l):
""" Remove NA values. Should work for lists, arrays, series. """
return Series(l).dropna().values
def _moving_average(a, n=3, zero_padded=False):
""" Moving average of order n.
If zero padded, returns an array of the same size as
the input: the values before a[0] are considered to be 0.
Otherwise, returns an array of length len(a) - n + 1 """
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
if zero_padded:
return ret / n
else:
return ret[n - 1:] / n
def joyplot(data, column=None, by=None, grid=False,
xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
ax=None, figsize=None,
hist=False, bins=10,
fade=False, ylim='max',
fill=True, linecolor=None,
overlap=1, background=None,
labels=None, xlabels=True, ylabels=True,
range_style='all',
x_range=None,
title=None,
colormap=None,
color=None,
normalize=True,
floc=None,
**kwds):
"""
Draw joyplot of a DataFrame, or appropriately nested collection,
using matplotlib and pandas.
A joyplot is a stack of vertically aligned density plots / histograms.
By default, if 'data' is a DataFrame,
this function will plot a density plot for each column.
This wrapper method tries to convert whatever structure is given
to a nested collection of lists with additional information
on labels, and use the private _joyplot function to actually
draw the plot.
Parameters
----------
data : DataFrame, Series or nested collection
column : string or sequence
If passed, will be used to limit data to a subset of columns
by : object, optional
If passed, used to form separate plot groups
grid : boolean, default True
Whether to show axis grid lines
labels : boolean or list, default True.
If list, must be the same size of the de
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
figsize : tuple
The size of the figure to create in inches by default
hist : boolean, default False
bins : integer, default 10
Number of histogram bins to be used
color : color or colors to be used in the plots. It can be:
a string or anything interpretable as color by matplotib;
a list of colors. See docs / examples for more details.
kwds : other plotting keyword arguments
To be passed to hist/kde plot function
"""
if column is not None:
if not isinstance(column, (list, np.ndarray)):
column = [column]
def _grouped_df_to_standard(grouped, column):
converted = []
labels = []
for i, (key, group) in enumerate(grouped):
if column is not None:
group = group[column]
labels.append(key)
converted.append([_remove_na(group[c]) for c in group.columns if _is_numeric(group[c])])
if i == 0:
sublabels = [col for col in group.columns if _is_numeric(group[col])]
return converted, labels, sublabels
#################################################################
# GROUPED
# - given a grouped DataFrame, a group by key, or a dict of dicts of Series/lists/arrays
# - select the required columns/Series/lists/arrays
# - convert to standard format: list of lists of non-null arrays
# + extra parameters (labels and sublabels)
#################################################################
if isinstance(data, DataFrameGroupBy):
grouped = data
converted, _labels, sublabels = _grouped_df_to_standard(grouped, column)
if labels is None:
labels = _labels
elif by is not None and isinstance(data, DataFrame):
grouped = data.groupby(by)
if column is None:
# Remove the groupby key. It's not automatically removed by pandas.
column = list(data.columns)
column.remove(by)
converted, _labels, sublabels = _grouped_df_to_standard(grouped, column)
if labels is None:
labels = _labels
# If there is at least an element which is not a list of lists.. go on.
elif isinstance(data, dict) and all(isinstance(g, dict) for g in data.values()):
grouped = data
if labels is None:
labels = list(grouped.keys())
converted = []
for i, (key, group) in enumerate(grouped.items()):
if column is not None:
converted.append([_remove_na(g) for k,g in group.items() if _is_numeric(g) and k in column])
if i == 0:
sublabels = [k for k,g in group.items() if _is_numeric(g)]
else:
converted.append([_remove_na(g) for k,g in group.items() if _is_numeric(g)])
if i == 0:
sublabels = [k for k,g in group.items() if _is_numeric(g)]
#################################################################
# PLAIN:
# - given a DataFrame or list/dict of Series/lists/arrays
# - select the required columns/Series/lists/arrays
# - convert to standard format: list of lists of non-null arrays + extra parameter (labels)
#################################################################
elif isinstance(data, DataFrame):
if column is not None:
data = data[column]
converted = [[_remove_na(data[col])] for col in data.columns if _is_numeric(data[col])]
labels = [col for col in data.columns if _is_numeric(data[col])]
sublabels = None
elif isinstance(data, dict):
if column is not None:
converted = [[_remove_na(g)] for k,g in data.items() if _is_numeric(g) and k in column]
labels = [k for k,g in data.items() if _is_numeric(g) and k in column]
else:
converted = [[_remove_na(g)] for k,g in data.items() if _is_numeric(g)]
labels = [k for k,g in data.items() if _is_numeric(g)]
sublabels = None
elif isinstance(data, list):
if column is not None:
converted = [[_remove_na(g)] for g in data if _is_numeric(g) and i in column]
else:
converted = [[_remove_na(g)] for g in data if _is_numeric(g)]
if labels and len(labels) != len(converted):
raise ValueError("The number of labels does not match the length of the list.")
sublabels = None
else:
raise TypeError("Unknown type for 'data': {!r}".format(type(data)))
if ylabels is False:
labels = None
if all(len(subg)==0 for g in converted for subg in g):
raise ValueError("No numeric values found. Joyplot requires at least a numeric column/group.")
if any(len(subg)==0 for g in converted for subg in g):
warn("At least a column/group has no numeric values.")
return _joyplot(converted, labels=labels, sublabels=sublabels,
grid=grid,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
ax=ax, figsize=figsize,
hist=hist, bins=bins,
fade=fade, ylim=ylim,
fill=fill, linecolor=linecolor,
overlap=overlap, background=background,
xlabels=xlabels,
range_style=range_style, x_range=x_range,
title=title,
colormap=colormap,
color=color,
normalize=normalize,
floc=floc,
**kwds)
###########################################
def plot_density(ax, x_range, v, kind="kde", bw_method=None,
bins=50,
fill=False, linecolor=None, clip_on=True,
normalize=True, floc=None,**kwargs):
""" Draw a density plot given an axis, an array of values v and an array
of x positions where to return the estimated density.
"""
v = _remove_na(v)
if len(v) == 0 or len(x_range) == 0:
return
if kind == "kde":
try:
gkde = gaussian_kde(v, bw_method=bw_method)
y = gkde.evaluate(x_range)
except ValueError:
# Handle cases where there is no data in a group.
y = np.zeros_like(x_range)
except np.linalg.LinAlgError as e:
# Handle singular matrix in kde computation.
distinct_values = np.unique(v)
if len(distinct_values) == 1:
# In case of a group with a single value val,
# that should have infinite density,
# return a δ(val)
val = distinct_values[0]
warnings.warn("The data contains a group with a single distinct value ({}) "
"having infinite probability density. "
"Consider using a different visualization.".format(val))
# Find index i of x_range
# such that x_range[i-1] < val ≤ x_range[i]
i = np.searchsorted(x_range, val)
y = np.zeros_like(x_range)
y[i] = 1
else:
raise e
elif kind == "lognorm":
if floc is not None:
lnparam = stats.lognorm.fit(v,loc=floc)
else:
lnparam = stats.lognorm.fit(v)
lpdf = stats.lognorm.pdf(x_range,lnparam[0],lnparam[1],lnparam[2])
if normalize:
y = lpdf/lpdf.sum()
else:
y = lpdf
elif kind == "counts":
y, bin_edges = np.histogram(v, bins=bins, range=(min(x_range), max(x_range)))
# np.histogram returns the edges of the bins.
# We compute here the middle of the bins.
x_range = _moving_average(bin_edges, 2)
elif kind == "normalized_counts":
y, bin_edges = np.histogram(v, bins=bins, density=False,
range=(min(x_range), max(x_range)))
# np.histogram returns the edges of the bins.
# We compute here the middle of the bins.
y = y / len(v)
x_range = _moving_average(bin_edges, 2)
elif kind == "values":
# Warning: to use values and get a meaningful visualization,
# x_range must also be manually set in the main function.
y = v
x_range = list(range(len(y)))
else:
raise NotImplementedError
if fill:
ax.fill_between(x_range, 0.0, y, clip_on=clip_on, **kwargs)
# Hack to have a border at the bottom at the fill patch
# (of the same color of the fill patch)
# so that the fill reaches the same bottom margin as the edge lines
# with y value = 0.0
kw = kwargs
kw["label"] = None
ax.plot(x_range, [0.0]*len(x_range), clip_on=clip_on, **kw)
if linecolor is not None:
kwargs["color"] = linecolor
# Remove the legend labels if we are plotting filled curve:
# we only want one entry per group in the legend (if shown).
if fill:
kwargs["label"] = None
ax.plot(x_range, y, clip_on=clip_on, **kwargs)
###########################################
def _joyplot(data,
grid=False,
labels=None, sublabels=None,
xlabels=True,
xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None,
ax=None, figsize=None,
hist=False, bins=10,
fade=False,
xlim=None, ylim='max',
fill=True, linecolor=None,
overlap=1, background=None,
range_style='all', x_range=None, tails=0.2,
title=None,
legend=False, loc="upper right",
colormap=None, color=None,
normalize=True,
floc=None,
**kwargs):
"""
Internal method.
Draw a joyplot from an appropriately nested collection of lists
using matplotlib and pandas.
Parameters
----------
data : DataFrame, Series or nested collection
grid : boolean, default True
Whether to show axis grid lines
labels : boolean or list, default True.
If list, must be the same size of the de
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
figsize : tuple
The size of the figure to create in inches by default
hist : boolean, default False
bins : integer, default 10
Number of histogram bins to be used
kwarg : other plotting keyword arguments
To be passed to hist/kde plot function
"""
if fill is True and linecolor is None:
linecolor = "k"
if sublabels is None:
legend = False
def _get_color(i, num_axes, j, num_subgroups):
if isinstance(color, list):
return color[j] if num_subgroups > 1 else color[i]
elif color is not None:
return color
elif isinstance(colormap, list):
return colormap[j](i/num_axes)
elif color is None and colormap is None:
num_cycle_colors = len(plt.rcParams['axes.prop_cycle'].by_key()['color'])
return plt.rcParams['axes.prop_cycle'].by_key()['color'][j % num_cycle_colors]
else:
return colormap(i/num_axes)
ygrid = (grid is True or grid == 'y' or grid == 'both')
xgrid = (grid is True or grid == 'x' or grid == 'both')
num_axes = len(data)
if x_range is None:
global_x_range = _x_range([v for g in data for sg in g for v in sg])
else:
global_x_range = _x_range(x_range, 0.0)
global_x_min, global_x_max = min(global_x_range), max(global_x_range)
# Each plot will have its own axis
fig, axes = _subplots(naxes=num_axes, ax=ax, squeeze=False,
sharex=True, sharey=False, figsize=figsize,
layout_type='vertical')
_axes = _flatten(axes)
# The legend must be drawn in the last axis if we want it at the bottom.
if loc in (3, 4, 8) or 'lower' in str(loc):
legend_axis = num_axes - 1
else:
legend_axis = 0
# A couple of simple checks.
if labels is not None:
assert len(labels) == num_axes
if sublabels is not None:
assert all(len(g) == len(sublabels) for g in data)
if isinstance(color, list):
assert all(len(g) <= len(color) for g in data)
if isinstance(colormap, list):
assert all(len(g) == len(colormap) for g in data)
for i, group in enumerate(data):
a = _axes[i]
group_zorder = i
if fade:
kwargs['alpha'] = _get_alpha(i, num_axes)
num_subgroups = len(group)
if hist:
# matplotlib hist() already handles multiple subgroups in a histogram
a.hist(group, label=sublabels, bins=bins, color=color,
range=[min(global_x_range), max(global_x_range)],
edgecolor=linecolor, zorder=group_zorder, **kwargs)
else:
for j, subgroup in enumerate(group):
# Compute the x_range of the current plot
if range_style == 'all':
# All plots have the same range
x_range = global_x_range
elif range_style == 'own':
# Each plot has its own range
x_range = _x_range(subgroup, tails)
elif range_style == 'group':
# Each plot has a range that covers the whole group
x_range = _x_range(group, tails)
elif isinstance(range_style, (list, np.ndarray)):
# All plots have exactly the range passed as argument
x_range = _x_range(range_style, 0.0)
else:
raise NotImplementedError("Unrecognized range style.")
if sublabels is None:
sublabel = None
else:
sublabel = sublabels[j]
element_zorder = group_zorder + j/(num_subgroups+1)
element_color = _get_color(i, num_axes, j, num_subgroups)
plot_density(a, x_range, subgroup,
fill=fill, linecolor=linecolor, label=sublabel,
zorder=element_zorder, color=element_color,
bins=bins, **kwargs)
# Setup the current axis: transparency, labels, spines.
col_name = None if labels is None else labels[i]
_setup_axis(a, global_x_range, col_name=col_name, grid=ygrid,
ylabelsize=ylabelsize, yrot=yrot)
# When needed, draw the legend
if legend and i == legend_axis:
a.legend(loc=loc)
# Bypass alpha values, in case
for p in a.get_legend().get_patches():
p.set_facecolor(p.get_facecolor())
p.set_alpha(1.0)
for l in a.get_legend().get_lines():
l.set_alpha(1.0)
# Final adjustments
# Set the y limit for the density plots.
# Since the y range in the subplots can vary significantly,
# different options are available.
if ylim == 'max':
# Set all yaxis limit to the same value (max range among all)
max_ylim = max(a.get_ylim()[1] for a in _axes)
min_ylim = min(a.get_ylim()[0] for a in _axes)
for a in _axes:
a.set_ylim([min_ylim - 0.1*(max_ylim-min_ylim), max_ylim])
elif ylim == 'own':
# Do nothing, each axis keeps its own ylim
pass
else:
# Set all yaxis lim to the argument value ylim
try:
for a in _axes:
a.set_ylim(ylim)
except:
print("Warning: the value of ylim must be either 'max', 'own', or a tuple of length 2. The value you provided has no effect.")
# Compute a final axis, used to apply global settings
last_axis = fig.add_subplot(1, 1, 1)
# Background color
if background is not None:
last_axis.patch.set_facecolor(background)
for side in ['top', 'bottom', 'left', 'right']:
last_axis.spines[side].set_visible(_DEBUG)
# This looks hacky, but all the axes share the x-axis,
# so they have the same lims and ticks
last_axis.set_xlim(_axes[0].get_xlim())
if xlabels is True:
last_axis.set_xticks(np.array(_axes[0].get_xticks()[1:-1]))
for t in last_axis.get_xticklabels():
t.set_visible(True)
t.set_fontsize(xlabelsize)
t.set_rotation(xrot)
# If grid is enabled, do not allow xticks (they are ugly)
if xgrid:
last_axis.tick_params(axis='both', which='both',length=0)
else:
last_axis.xaxis.set_visible(False)
last_axis.yaxis.set_visible(False)
last_axis.grid(xgrid)
# Last axis on the back
last_axis.zorder = min(a.zorder for a in _axes) - 1
_axes = list(_axes) + [last_axis]
if title is not None:
plt.title(title)
# The magic overlap happens here.
h_pad = 5 + (- 5*(1 + overlap))
fig.tight_layout(h_pad=h_pad)
return fig, _axes
| 37.745734 | 138 | 0.584656 |
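The `joyplot` wrapper above accepts a DataFrame plus an optional `by` grouping and returns the figure and the list of axes it created. A minimal usage sketch, assuming the joypy package is installed; the DataFrame, group labels and parameter values here are made up for illustration:

```python
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import joypy  # assumes the package containing the joyplot() above is installed

# Synthetic data: three groups with shifted means.
rng = np.random.default_rng(0)
df = pd.DataFrame({
    "value": np.concatenate([rng.normal(loc, 1.0, 200) for loc in (0, 2, 4)]),
    "group": ["a"] * 200 + ["b"] * 200 + ["c"] * 200,
})

# One stacked density per group; joyplot() returns (figure, list of axes).
fig, axes = joypy.joyplot(df, by="group", column="value", overlap=1, fill=True)
plt.show()
```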
4a16e48913cd68bcf417c169036ec9c94784c054 | 238 | py | Python | src/start_signal.py | bandrewss/pi-atem-tallies | 0fccc061be418adb0e78d582c3d18e9278e30400 | ["Unlicense"] | null | null | null | src/start_signal.py | bandrewss/pi-atem-tallies | 0fccc061be418adb0e78d582c3d18e9278e30400 | ["Unlicense"] | null | null | null | src/start_signal.py | bandrewss/pi-atem-tallies | 0fccc061be418adb0e78d582c3d18e9278e30400 | ["Unlicense"] | null | null | null |
#!/usr/bin/python3

import time

import tally_common as tc


def main():
    tc.init_leds()

    for i in range(3):
        tc.PROGRAM_LED.on()
        time.sleep(.5)
        tc.PROGRAM_LED.off()
        time.sleep(.5)
    #FOR
#DEF


if __name__ == "__main__":
    main()
#IF
| 11.9 | 26 | 0.655462 |
4a16e5076d4752b92292d0684af67de921484b73 | 1,970 | py | Python | REDSI_1160929_1161573/boost_1_67_0/libs/python/test/opaque.py | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8 | ["MIT"] | 32 | 2019-02-27T06:57:07.000Z | 2021-08-29T10:56:19.000Z | REDSI_1160929_1161573/boost_1_67_0/libs/python/test/opaque.py | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8 | ["MIT"] | 1 | 2019-03-04T11:21:00.000Z | 2019-05-24T01:36:31.000Z | REDSI_1160929_1161573/boost_1_67_0/libs/python/test/opaque.py | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8 | ["MIT"] | 5 | 2019-08-20T13:45:04.000Z | 2022-03-01T18:23:49.000Z |
# -*- coding: utf-8 -*-
# Copyright Gottfried Ganßauge 2003..2006. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from opaque_ext import *
Check for correct conversion
>>> use(get())
Check that None is converted to a NULL opaque pointer
>>> useany(get())
1
>>> useany(None)
0
Check that we don't lose type information by converting NULL
opaque pointers to None
>>> assert getnull() is None
>>> useany(getnull())
0
>>> failuse(get())
Traceback (most recent call last):
...
RuntimeError: success
Check that there is no conversion from integers ...
>>> try: use(0)
... except TypeError: pass
... else: print('expected a TypeError')
... and from strings to opaque objects
>>> try: use("")
... except TypeError: pass
... else: print('expected a TypeError')
Now check the same for another opaque pointer type
>>> use2(get2())
>>> failuse2(get2())
Traceback (most recent call last):
...
RuntimeError: success
>>> try: use2(0)
... except TypeError: pass
... else: print('expected a TypeError')
>>> try: use2("")
... except TypeError: pass
... else: print('expected a TypeError')
Check that opaque types are distinct
>>> try: use(get2())
... except TypeError: pass
... else: print('expected a TypeError')
>>> try: use2(get())
... except TypeError: pass
... else: print('expected a TypeError')
This used to result in a segmentation violation
>>> type(get()) != type (get2())
1
"""
def run(args = None):
    import sys
    import doctest

    if args is not None:
        sys.argv = args
    return doctest.testmod(sys.modules.get(__name__))

if __name__ == '__main__':
    print("running...")
    import sys
    status = run()[0]
    if (status == 0): print("Done.")
    sys.exit(status)
| 22.386364 | 72 | 0.613198 |
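In the `run()` helper above, `doctest.testmod(...)` returns a `(failure_count, test_count)` pair, so `run()[0]` is the number of failed doctests and doubles as the process exit status. A standalone sketch of the same harness pattern, with a made-up example function instead of the `opaque_ext` extension module:

```python
"""
>>> add_one(1)
2
"""
import doctest
import sys

def add_one(x):
    # Made-up example function exercised by the module doctest above.
    return x + 1

if __name__ == '__main__':
    # testmod() returns (failure_count, test_count); exiting with the failure
    # count means a non-zero status signals at least one failing doctest.
    failures, _tests = doctest.testmod()
    sys.exit(failures)
```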
4a16e5478f3d102091954d7db7f434a6f2b956b0 | 857 | py | Python | corsheaders/defaults.py | imtapps/django-cors-headers | fc6726ceab125aee77d74845aee54ec92e94c576 | ["MIT"] | null | null | null | corsheaders/defaults.py | imtapps/django-cors-headers | fc6726ceab125aee77d74845aee54ec92e94c576 | ["MIT"] | null | null | null | corsheaders/defaults.py | imtapps/django-cors-headers | fc6726ceab125aee77d74845aee54ec92e94c576 | ["MIT"] | null | null | null |
from django.conf import settings

default_headers = (
    'x-requested-with',
    'content-type',
    'accept',
    'origin',
    'authorization',
)
CORS_ALLOW_HEADERS = getattr(settings, 'CORS_ALLOW_HEADERS', default_headers)

default_methods = (
    'GET',
    'POST',
    'PUT',
    'PATCH',
    'DELETE',
    'OPTIONS',
)
CORS_ALLOW_METHODS = getattr(settings, 'CORS_ALLOW_METHODS', default_methods)

CORS_ALLOW_CREDENTIALS = getattr(settings, 'CORS_ALLOW_CREDENTIALS', False)
CORS_PREFLIGHT_MAX_AGE = getattr(settings, 'CORS_PREFLIGHT_MAX_AGE', 86400)
CORS_ORIGIN_ALLOW_ALL = getattr(settings, 'CORS_ORIGIN_ALLOW_ALL', False)
CORS_ORIGIN_WHITELIST = getattr(settings, 'CORS_ORIGIN_WHITELIST', ())
CORS_ORIGIN_REGEX_WHITELIST = getattr(settings, 'CORS_ORIGIN_REGEX_WHITELIST', ())
CORS_EXPOSE_HEADERS = getattr(settings, 'CORS_EXPOSE_HEADERS', ())
| 25.969697 | 82 | 0.743291 |
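Because every name in the `defaults.py` above is read with `getattr(settings, NAME, default)`, a project overrides a default simply by defining the same name in its own Django settings module. A hypothetical `settings.py` fragment (the domains and the extra header are placeholders, not taken from the dataset row):

```python
# Hypothetical project settings.py overrides; these names shadow the defaults
# that corsheaders/defaults.py falls back to via getattr(settings, ...).
CORS_ORIGIN_WHITELIST = (
    'example.com',        # placeholder domain
    'app.example.com',    # placeholder domain
)
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_HEADERS = (
    'x-requested-with',
    'content-type',
    'accept',
    'origin',
    'authorization',
    'x-custom-header',    # an extra header added on top of the default list
)
```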
4a16e669a548105c8bf6870655f028ae644f33e9 | 21,356 | py | Python | dfirtrack_main/exporter/spreadsheet/xls.py | cclauss/dfirtrack | 2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a | ["Apache-2.0"] | null | null | null | dfirtrack_main/exporter/spreadsheet/xls.py | cclauss/dfirtrack | 2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a | ["Apache-2.0"] | null | null | null | dfirtrack_main/exporter/spreadsheet/xls.py | cclauss/dfirtrack | 2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a | ["Apache-2.0"] | null | null | null |
from time import strftime
from urllib.parse import urlencode, urlunparse
import xlwt
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
from dfirtrack_config.models import (
MainConfigModel,
SystemExporterSpreadsheetXlsConfigModel,
)
from dfirtrack_main.exporter.spreadsheet.checks import check_content_file_system
from dfirtrack_main.logger.default_logger import debug_logger, info_logger
from dfirtrack_main.models import (
Analysisstatus,
Reason,
Recommendation,
System,
Systemstatus,
Tag,
)
def write_row(worksheet, content, row_num, style):
""" write single row to worksheet """
# write row depending on column number
for col_num in range(len(content)):
worksheet.write(row_num, col_num, content[col_num], style)
# return worksheet object
return worksheet
def style_headline():
""" change style to headline """
# define styling for headline
style = xlwt.XFStyle()
style = xlwt.easyxf(
'font: bold on; alignment: horizontal center'
)
# return style object
return style
def style_default():
""" change style to default """
# clear styling to default
style = xlwt.XFStyle()
style = xlwt.easyxf(
'alignment: vertical top, horizontal left'
)
# return style object
return style
def write_xls(username):
""" write spreadsheet """
# create workbook object with UTF-8 encoding
workbook = xlwt.Workbook(encoding='utf-8')
# define name of worksheet within file
worksheet_system = workbook.add_sheet('systems')
# define styling for headline
style = style_headline()
# get config model
model = SystemExporterSpreadsheetXlsConfigModel.objects.get(system_exporter_spreadsheet_xls_config_name = 'SystemExporterSpreadsheetXlsConfig')
""" start with headline """
# set counter
row_num = 0
# create empty list
headline = []
# check for attribute id
if model.spread_xls_system_id:
headline.append('ID')
# append mandatory attribute
headline.append('System')
# check for remaining attributes
if model.spread_xls_dnsname:
headline.append('DNS name')
if model.spread_xls_domain:
headline.append('Domain')
if model.spread_xls_systemstatus:
headline.append('Systemstatus')
if model.spread_xls_analysisstatus:
headline.append('Analysisstatus')
if model.spread_xls_reason:
headline.append('Reason')
if model.spread_xls_recommendation:
headline.append('Recommendation')
if model.spread_xls_systemtype:
headline.append('Systemtype')
if model.spread_xls_ip:
headline.append('IP')
if model.spread_xls_os:
headline.append('OS')
if model.spread_xls_company:
headline.append('Company')
if model.spread_xls_location:
headline.append('Location')
if model.spread_xls_serviceprovider:
headline.append('Serviceprovider')
if model.spread_xls_tag:
headline.append('Tag')
if model.spread_xls_case:
headline.append('Case')
if model.spread_xls_system_create_time:
headline.append('Created')
if model.spread_xls_system_modify_time:
headline.append('Modified')
# write headline
worksheet_system = write_row(worksheet_system, headline, row_num, style)
# clear styling to default
style = style_default()
""" append systems """
# get all System objects ordered by system_name
systems = System.objects.all().order_by("system_name")
# iterate over systems
for system in systems:
# skip system depending on export variable
if system.system_export_spreadsheet == False:
continue
# autoincrement row counter
row_num += 1
# set column counter
col_num = 1
# create empty list for line
entryline = []
""" check for attribute """
# system id
if model.spread_xls_system_id:
entryline.append(system.system_id)
""" append mandatory attribute """
# system name
entryline.append(system.system_name)
""" check for remaining attributes """
# dnsname
if model.spread_xls_dnsname:
if system.dnsname == None:
dnsname = ''
else:
dnsname = system.dnsname.dnsname_name
entryline.append(dnsname)
# domain
if model.spread_xls_domain:
if system.domain == None:
domain = ''
else:
domain = system.domain.domain_name
entryline.append(domain)
# systemstatus
if model.spread_xls_systemstatus:
entryline.append(system.systemstatus.systemstatus_name)
# analysisstatus
if model.spread_xls_analysisstatus:
if system.analysisstatus == None:
analysisstatus = ''
else:
analysisstatus = system.analysisstatus.analysisstatus_name
entryline.append(analysisstatus)
# reason
if model.spread_xls_reason:
if system.reason == None:
reason = ''
else:
reason = system.reason.reason_name
entryline.append(reason)
# recommendation
if model.spread_xls_recommendation:
if system.recommendation== None:
recommendation = ''
else:
recommendation = system.recommendation.recommendation_name
entryline.append(recommendation)
# systemtype
if model.spread_xls_systemtype:
if system.systemtype == None:
systemtype = ''
else:
systemtype = system.systemtype.systemtype_name
entryline.append(systemtype)
# ip
if model.spread_xls_ip:
# get all ips of system
ips_all = system.ip.all().order_by('ip_ip')
# count ips
n = system.ip.count()
# create empty ip string
ip = ''
# set counter
i = 1
# iterate over ip objects in ip list
for ip_obj in ips_all:
# add actual ip to ip string
ip = ip + ip_obj.ip_ip
# add newline except for last ip
if i < n:
ip = ip + '\n'
i = i + 1
entryline.append(ip)
# os
if model.spread_xls_os:
if system.os == None:
os = ''
else:
os = system.os.os_name
entryline.append(os)
# company
if model.spread_xls_company:
companys_all = system.company.all().order_by('company_name')
# count companies
n = system.company.count()
# create empty company string
company = ''
# set counter
i = 1
# iterate over company objects in company list
for company_obj in companys_all:
# add actual company to company string
company = company + company_obj.company_name
# add newline except for last company
if i < n:
company = company + '\n'
i = i + 1
entryline.append(company)
# location
if model.spread_xls_location:
if system.location == None:
location = ''
else:
location = system.location.location_name
entryline.append(location)
# serviceprovider
if model.spread_xls_serviceprovider:
if system.serviceprovider == None:
serviceprovider = ''
else:
serviceprovider = system.serviceprovider.serviceprovider_name
entryline.append(serviceprovider)
# tag
if model.spread_xls_tag:
tags_all = system.tag.all().order_by('tag_name')
# count tags
n = system.tag.count()
# create empty tag string
tag = ''
# set counter
i = 1
# iterate over tag objects in tag list
for tag_obj in tags_all:
# add actual tag to tag string
tag = tag + tag_obj.tag_name
# add newline except for last tag
if i < n:
tag = tag + '\n'
i = i + 1
entryline.append(tag)
# case
if model.spread_xls_case:
cases_all = system.case.all().order_by('case_name')
# count cases
n = system.case.count()
# create empty case string
case = ''
# set counter
i = 1
# iterate over case objects in case list
for case_obj in cases_all:
# add actual case to case string
case = case + case_obj.case_name
# add newline except for last case
if i < n:
case = case + '\n'
i = i + 1
entryline.append(case)
# system create time
if model.spread_xls_system_create_time:
system_create_time = system.system_create_time.strftime('%Y-%m-%d %H:%M')
entryline.append(system_create_time)
# system modify time
if model.spread_xls_system_modify_time:
system_modify_time = system.system_modify_time.strftime('%Y-%m-%d %H:%M')
entryline.append(system_modify_time)
# write line for system
worksheet_system = write_row(worksheet_system, entryline, row_num, style)
# call logger
debug_logger(username, ' SYSTEM_XLS_SYSTEM_EXPORTED ' + 'system_id:' + str(system.system_id) + '|system_name:' + system.system_name)
# write an empty row
row_num += 2
# write meta information for file creation
actualtime = timezone.now().strftime('%Y-%m-%d %H:%M')
worksheet_system.write(row_num, 0, 'Created:', style)
worksheet_system.write(row_num, 1, actualtime, style)
row_num += 1
creator = username
worksheet_system.write(row_num, 0, 'Created by:', style)
worksheet_system.write(row_num, 1, creator, style)
""" add worksheet for systemstatus """
# check all conditions
if model.spread_xls_worksheet_systemstatus and model.spread_xls_systemstatus and Systemstatus.objects.count() != 0:
# define name of worksheet within file
worksheet_systemstatus = workbook.add_sheet('systemstatus')
# create empty list
headline_systemstatus = []
# append attributes
headline_systemstatus.append('ID')
headline_systemstatus.append('Systemstatus')
headline_systemstatus.append('Note')
# define styling for headline
style = style_headline()
# set counter
row_num = 0
# write headline
worksheet_systemstatus = write_row(worksheet_systemstatus, headline_systemstatus, row_num, style)
# clear styling to default
style = style_default()
""" append systemstatus """
# get all Systemstatus objects ordered by systemstatus_id
systemstatuss = Systemstatus.objects.all().order_by("systemstatus_name")
# iterate over systemstatus
for systemstatus in systemstatuss:
# autoincrement row counter
row_num += 1
# set column counter
col_num = 1
# create empty list for line
entryline_systemstatus = []
entryline_systemstatus.append(systemstatus.systemstatus_id)
entryline_systemstatus.append(systemstatus.systemstatus_name)
entryline_systemstatus.append(systemstatus.systemstatus_note)
# write line for systemstatus
worksheet_systemstatus = write_row(worksheet_systemstatus, entryline_systemstatus, row_num, style)
""" add worksheet for analysisstatus """
# check all conditions
if model.spread_xls_worksheet_analysisstatus and model.spread_xls_analysisstatus and Analysisstatus.objects.count() != 0:
# define name of worksheet within file
worksheet_analysisstatus = workbook.add_sheet('analysisstatus')
# create empty list
headline_analysisstatus = []
# append attributes
headline_analysisstatus.append('ID')
headline_analysisstatus.append('Analysisstatus')
headline_analysisstatus.append('Note')
# define styling for headline
style = style_headline()
# set counter
row_num = 0
# write headline
worksheet_analysisstatus = write_row(worksheet_analysisstatus, headline_analysisstatus, row_num, style)
# clear styling to default
style = style_default()
""" append analysisstatus """
# get all Analysisstatus objects ordered by analysisstatus_id
analysisstatuss = Analysisstatus.objects.all().order_by("analysisstatus_name")
# iterate over analysisstatus
for analysisstatus in analysisstatuss:
# autoincrement row counter
row_num += 1
# set column counter
col_num = 1
# create empty list for line
entryline_analysisstatus = []
entryline_analysisstatus.append(analysisstatus.analysisstatus_id)
entryline_analysisstatus.append(analysisstatus.analysisstatus_name)
entryline_analysisstatus.append(analysisstatus.analysisstatus_note)
# write line for analysisstatus
worksheet_analysisstatus = write_row(worksheet_analysisstatus, entryline_analysisstatus, row_num, style)
""" add worksheet for reason """
# check all conditions
if model.spread_xls_worksheet_reason and model.spread_xls_reason and Reason.objects.count() != 0:
# define name of worksheet within file
worksheet_reason = workbook.add_sheet('reasons')
# create empty list
headline_reason = []
# append attributes
headline_reason.append('ID')
headline_reason.append('Reason')
headline_reason.append('Note')
# define styling for headline
style = style_headline()
# set counter
row_num = 0
# write headline
worksheet_reason = write_row(worksheet_reason, headline_reason, row_num, style)
# clear styling to default
style = style_default()
""" append reasons """
# get all Reason objects ordered by reason_name
reasons = Reason.objects.all().order_by("reason_name")
# iterate over reasons
for reason in reasons:
# autoincrement row counter
row_num += 1
# set column counter
col_num = 1
# create empty list for line
entryline_reason = []
entryline_reason.append(reason.reason_id)
entryline_reason.append(reason.reason_name)
entryline_reason.append(reason.reason_note)
# write line for reason
worksheet_reason = write_row(worksheet_reason, entryline_reason, row_num, style)
""" add worksheet for recommendation """
# check all conditions
if model.spread_xls_worksheet_recommendation and model.spread_xls_recommendation and Recommendation.objects.count() != 0:
# define name of worksheet within file
worksheet_recommendation = workbook.add_sheet('recommendations')
# create empty list
headline_recommendation = []
# append attributes
headline_recommendation.append('ID')
headline_recommendation.append('Recommendation')
headline_recommendation.append('Note')
# define styling for headline
style = style_headline()
# set counter
row_num = 0
# write headline
worksheet_recommendation = write_row(worksheet_recommendation, headline_recommendation, row_num, style)
# clear styling to default
style = style_default()
""" append recommendations """
# get all Recommendation objects ordered by recommendation_name
recommendations = Recommendation.objects.all().order_by("recommendation_name")
# iterate over recommendations
for recommendation in recommendations:
# autoincrement row counter
row_num += 1
# set column counter
col_num = 1
# create empty list for line
entryline_recommendation = []
entryline_recommendation.append(recommendation.recommendation_id)
entryline_recommendation.append(recommendation.recommendation_name)
entryline_recommendation.append(recommendation.recommendation_note)
# write line for recommendation
worksheet_recommendation = write_row(worksheet_recommendation, entryline_recommendation, row_num, style)
""" add worksheet for tag """
# check all conditions
if model.spread_xls_worksheet_tag and model.spread_xls_tag and Tag.objects.count() != 0:
# define name of worksheet within file
worksheet_tag = workbook.add_sheet('tags')
# create empty list
headline_tag = []
# append attributes
headline_tag.append('ID')
headline_tag.append('Tag')
headline_tag.append('Note')
# define styling for headline
style = style_headline()
# set counter
row_num = 0
# write headline
worksheet_tag = write_row(worksheet_tag, headline_tag, row_num, style)
# clear styling to default
style = style_default()
""" append tags """
# get all Tag objects ordered by tag_name
tags = Tag.objects.all().order_by("tag_name")
# iterate over tags
for tag in tags:
# autoincrement row counter
row_num += 1
# set column counter
col_num = 1
# create empty list for line
entryline_tag = []
entryline_tag.append(tag.tag_id)
entryline_tag.append(tag.tag_name)
entryline_tag.append(tag.tag_note)
# write line for tag
worksheet_tag = write_row(worksheet_tag, entryline_tag, row_num, style)
# call logger
info_logger(username, " SYSTEM_XLS_CREATED")
# return xls object
return workbook
@login_required(login_url="/login")
def system_create_cron(request):
""" helper function to check config before creating scheduled task """
# get config
main_config_model = MainConfigModel.objects.get(main_config_name = 'MainConfig')
# check file system
stop_cron_exporter = check_content_file_system(main_config_model, 'SYSTEM_XLS', request)
# check stop condition
if stop_cron_exporter:
# return to 'system_list'
return redirect(reverse('system_list'))
else:
# create parameter dict
params = {}
# prepare parameter dict
params['name'] = 'system_spreadsheet_exporter_xls'
params['func'] = 'dfirtrack_main.exporter.spreadsheet.xls.system_cron'
# build url
urlpath = '/admin/django_q/schedule/add/'
urlquery = urlencode(params)
admin_url_create_cron = urlunparse(('','',urlpath,'',urlquery,''))
# open django admin with pre-filled form for scheduled task
return redirect(admin_url_create_cron)
@login_required(login_url="/login")
def system(request):
""" instant spreadsheet export via button for direct download via browser """
# create xls MIME type object
xls_browser = HttpResponse(content_type='application/ms-excel')
# prepare interactive file including filename
xls_browser['Content-Disposition'] = 'attachment; filename="systems.xls"'
# get username from request object
username = str(request.user)
# call main function
xls_workbook = write_xls(username)
# save workbook to interactive file
xls_workbook.save(xls_browser)
# return spreadsheet object to browser
return xls_browser
def system_cron():
""" spreadsheet export via scheduled task to server file system """
# prepare time for output file
filetime = timezone.now().strftime('%Y%m%d_%H%M')
# get config
main_config_model = MainConfigModel.objects.get(main_config_name = 'MainConfig')
# check file system
stop_cron_exporter = check_content_file_system(main_config_model, 'SYSTEM_XLS')
# leave if config caused errors
if stop_cron_exporter:
# return to scheduled task
return
# prepare output file path
output_file_path = main_config_model.cron_export_path + '/' + filetime + '_systems.xls'
# get username from config
username = main_config_model.cron_username
# call main function
xls_disk = write_xls(username)
# save spreadsheet to disk
xls_disk.save(output_file_path)
# call logger
info_logger(username, ' SYSTEM_XLS_FILE_WRITTEN ' + output_file_path)
| 31.359765 | 147 | 0.628676 |
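The exporter above builds its worksheets entirely from the small `write_row`, `style_headline` and `style_default` helpers. A standalone sketch of that same xlwt pattern with the Django models stripped out; the sheet name, example rows and output path are made up:

```python
import xlwt  # assumes the xlwt package used by the exporter above

def write_row(worksheet, content, row_num, style):
    # Write one value per column on the given row, as in the exporter above.
    for col_num in range(len(content)):
        worksheet.write(row_num, col_num, content[col_num], style)
    return worksheet

workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet('systems')

headline_style = xlwt.easyxf('font: bold on; alignment: horizontal center')
default_style = xlwt.easyxf('alignment: vertical top, horizontal left')

write_row(worksheet, ['ID', 'System', 'Systemstatus'], 0, headline_style)
write_row(worksheet, [1, 'system_1', 'compromised'], 1, default_style)
write_row(worksheet, [2, 'system_2', 'clean'], 2, default_style)

workbook.save('systems_example.xls')
```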
4a16e7ca783ce5c1dc52e1170bb94d4fa749f561 | 2,838 | py | Python | ibis/backends/impala/tests/test_parquet_ddl.py | arielrossanigo/ibis | 18e967cac961285b05d8df560f40148bac1a2571 | ["Apache-2.0"] | 1 | 2021-07-14T12:27:34.000Z | 2021-07-14T12:27:34.000Z | ibis/backends/impala/tests/test_parquet_ddl.py | arielrossanigo/ibis | 18e967cac961285b05d8df560f40148bac1a2571 | ["Apache-2.0"] | 1 | 2021-03-25T14:07:29.000Z | 2021-03-25T14:07:29.000Z | ibis/backends/impala/tests/test_parquet_ddl.py | arielrossanigo/ibis | 18e967cac961285b05d8df560f40148bac1a2571 | ["Apache-2.0"] | 1 | 2017-11-30T13:32:23.000Z | 2017-11-30T13:32:23.000Z |
from posixpath import join as pjoin

import pytest

import ibis
from ibis.backends.impala.compat import HS2Error
from ibis.tests.util import assert_equal

pytestmark = pytest.mark.impala


def test_cleanup_tmp_table_on_gc(con, test_data_dir):
    import gc

    hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
    table = con.parquet_file(hdfs_path)
    name = table.op().name
    table = None
    gc.collect()
    assert not con.exists_table(name)


def test_persist_parquet_file_with_name(con, test_data_dir, temp_table_db):
    import gc

    hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
    tmp_db, name = temp_table_db
    schema = ibis.schema(
        [
            ('r_regionkey', 'int16'),
            ('r_name', 'string'),
            ('r_comment', 'string'),
        ]
    )
    con.parquet_file(
        hdfs_path, schema=schema, name=name, database=tmp_db, persist=True
    )
    gc.collect()

    # table still exists
    con.table(name, database=tmp_db)


def test_query_parquet_file_with_schema(con, test_data_dir):
    hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
    schema = ibis.schema(
        [
            ('r_regionkey', 'int16'),
            ('r_name', 'string'),
            ('r_comment', 'string'),
        ]
    )
    table = con.parquet_file(hdfs_path, schema=schema)
    name = table.op().name

    # table exists
    con.table(name)

    expr = table.r_name.value_counts()
    expr.execute()

    assert table.count().execute() == 5


def test_query_parquet_file_like_table(con, test_data_dir):
    hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
    ex_schema = ibis.schema(
        [
            ('r_regionkey', 'int16'),
            ('r_name', 'string'),
            ('r_comment', 'string'),
        ]
    )
    table = con.parquet_file(hdfs_path, like_table='tpch_region')
    assert_equal(table.schema(), ex_schema)


def test_query_parquet_infer_schema(con, test_data_dir):
    hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
    table = con.parquet_file(hdfs_path)

    # NOTE: the actual schema should have an int16, but bc this is being
    # inferred from a parquet file, which has no notion of int16, the
    # inferred schema will have an int32 instead.
    ex_schema = ibis.schema(
        [
            ('r_regionkey', 'int32'),
            ('r_name', 'string'),
            ('r_comment', 'string'),
        ]
    )
    assert_equal(table.schema(), ex_schema)


def test_create_table_persist_fails_if_called_twice(
    con, temp_table_db, test_data_dir
):
    tmp_db, tname = temp_table_db
    hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
    con.parquet_file(hdfs_path, name=tname, persist=True, database=tmp_db)

    with pytest.raises(HS2Error):
        con.parquet_file(hdfs_path, name=tname, persist=True, database=tmp_db)
| 25.115044 | 78 | 0.652572 |
4a16e83e44d33f0335fd97e5e01e10846316d79f | 6,720 | py | Python | venv/lib/python3.7/site-packages/openpyxl/descriptors/serialisable.py | Jarzan/KennedyJumpstart | 8174ecd30de034b7d242f364fd18f8d9ccc53ab2 | ["MIT"] | 1 | 2019-12-15T01:44:17.000Z | 2019-12-15T01:44:17.000Z | venv/lib/python3.7/site-packages/openpyxl/descriptors/serialisable.py | Jarzan/KennedyJumpstart | 8174ecd30de034b7d242f364fd18f8d9ccc53ab2 | ["MIT"] | null | null | null | venv/lib/python3.7/site-packages/openpyxl/descriptors/serialisable.py | Jarzan/KennedyJumpstart | 8174ecd30de034b7d242f364fd18f8d9ccc53ab2 | ["MIT"] | null | null | null |
from __future__ import absolute_import
# copyright openpyxl 2010-2015
from copy import copy
from keyword import kwlist
KEYWORDS = frozenset(kwlist)
from . import Descriptor
from . import _Serialiasable
from .sequence import Sequence, NestedSequence
from .namespace import namespaced
from openpyxl.compat import safe_string
from openpyxl.xml.functions import (
Element,
localname,
)
seq_types = (list, tuple)
class Serialisable(_Serialiasable):
"""
Objects can serialise to XML their attributes and child objects.
The following class attributes are created by the metaclass at runtime:
__attrs__ = attributes
__nested__ = single-valued child treated as an attribute
__elements__ = child elements
"""
__attrs__ = None
__nested__ = None
__elements__ = None
__namespaced__ = None
idx_base = 0
@property
def tagname(self):
raise(NotImplementedError)
namespace = None
@classmethod
def from_tree(cls, node):
"""
Create object from XML
"""
# strip known namespaces from attributes
attrib = dict(node.attrib)
for key, ns in cls.__namespaced__:
if ns in attrib:
attrib[key] = attrib[ns]
del attrib[ns]
# strip attributes with unknown namespaces
for key in list(attrib):
if key.startswith('{'):
del attrib[key]
elif key in KEYWORDS:
attrib["_" + key] = attrib[key]
del attrib[key]
if node.text and "attr_text" in cls.__attrs__:
attrib["attr_text"] = node.text
for el in node:
tag = localname(el)
if tag in KEYWORDS:
tag = "_" + tag
desc = getattr(cls, tag, None)
if desc is None or isinstance(desc, property):
continue
if hasattr(desc, 'from_tree'):
#descriptor manages conversion
obj = desc.from_tree(el)
else:
if hasattr(desc.expected_type, "from_tree"):
#complex type
obj = desc.expected_type.from_tree(el)
else:
#primitive
obj = el.text
if isinstance(desc, NestedSequence):
attrib[tag] = obj
elif isinstance(desc, Sequence):
attrib.setdefault(tag, [])
attrib[tag].append(obj)
else:
attrib[tag] = obj
return cls(**attrib)
def to_tree(self, tagname=None, idx=None, namespace=None):
if tagname is None:
tagname = self.tagname
# keywords have to be masked
if tagname.startswith("_"):
tagname = tagname[1:]
tagname = namespaced(self, tagname, namespace)
namespace = getattr(self, "namespace", namespace)
attrs = dict(self)
for key, ns in self.__namespaced__:
if key in attrs:
attrs[ns] = attrs[key]
del attrs[key]
el = Element(tagname, attrs)
if "attr_text" in self.__attrs__:
el.text = safe_string(getattr(self, "attr_text"))
for child_tag in self.__elements__:
desc = getattr(self.__class__, child_tag, None)
obj = getattr(self, child_tag)
if isinstance(obj, seq_types):
if isinstance(desc, NestedSequence):
# wrap sequence in container
if not obj:
continue
nodes = [desc.to_tree(child_tag, obj, namespace)]
elif isinstance(desc, Sequence):
# sequence
desc.idx_base = self.idx_base
nodes = (desc.to_tree(child_tag, obj, namespace))
else: # property
nodes = (v.to_tree(child_tag, namespace) for v in obj)
for node in nodes:
el.append(node)
else:
if child_tag in self.__nested__:
node = desc.to_tree(child_tag, obj, namespace)
elif obj is None:
continue
else:
node = obj.to_tree(child_tag)
if node is not None:
el.append(node)
return el
def __iter__(self):
for attr in self.__attrs__:
value = getattr(self, attr)
if attr.startswith("_"):
attr = attr[1:]
if attr != "attr_text" and value is not None:
yield attr, safe_string(value)
def __eq__(self, other):
if not self.__class__ == other.__class__:
return False
elif not dict(self) == dict(other):
return False
for el in self.__elements__:
if getattr(self, el) != getattr(other, el):
return False
return True
def __ne__(self, other):
return not self == other
def __repr__(self):
s = u"<{0}.{1} object>\nParameters:".format(
self.__module__,
self.__class__.__name__
)
args = []
for k in self.__attrs__ + self.__elements__:
v = getattr(self, k)
if isinstance(v, Descriptor):
v = None
args.append(u"{0}={1}".format(k, repr(v)))
args = u", ".join(args)
return u"\n".join([s, args])
def __hash__(self):
fields = []
for attr in self.__attrs__ + self.__elements__:
val = getattr(self, attr)
if isinstance(val, list):
val = tuple(val)
fields.append(val)
return hash(tuple(fields))
def __add__(self, other):
if type(self) != type(other):
raise TypeError("Cannot combine instances of different types")
vals = {}
for attr in self.__attrs__:
vals[attr] = getattr(self, attr) or getattr(other, attr)
for el in self.__elements__:
a = getattr(self, el)
b = getattr(other, el)
if a and b:
vals[el] = a + b
else:
vals[el] = a or b
return self.__class__(**vals)
def __copy__(self):
# serialise to xml and back to avoid shallow copies
xml = self.to_tree(tagname="dummy")
cp = self.__class__.from_tree(xml)
# copy any non-persisted attributed
for k in self.__dict__:
if k not in self.__attrs__ + self.__elements__:
v = copy(getattr(self, k))
setattr(cp, k, v)
return cp
| 29.866667 | 75 | 0.533185 |
4a16e89fcb83d61bef88e159403d4ac6f6606de7 | 11,947 | py | Python | fixture/contact.py | spirit-87/python_training | f2e2389ba4e96139d666365abecf16a2db89cd6e | ["Apache-2.0"] | null | null | null | fixture/contact.py | spirit-87/python_training | f2e2389ba4e96139d666365abecf16a2db89cd6e | ["Apache-2.0"] | null | null | null | fixture/contact.py | spirit-87/python_training | f2e2389ba4e96139d666365abecf16a2db89cd6e | ["Apache-2.0"] | null | null | null |
from selenium.webdriver.support.ui import Select
from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def open_newcontact_page(self):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
def create(self, contact):
wd = self.app.wd
self.open_newcontact_page()
# fill in contact form
self.change_contact_info(contact)
# submit contact creation
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
self.app.return_to_home_page()
self.contact_cashe = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
self.contact_cashe = None
def delete_contact_by_index(self, index):
wd = self.app.wd
# select first contact = click first checkbox
self.open_contacts_page()
self.select_contact_by_index(index)
# submit contact deletion
wd.find_element_by_css_selector("input[value='Delete']").click()
# accept dialog window
wd.switch_to_alert().accept()
self.app.return_to_home_page()
self.contact_cashe = None
def delete_contact_by_id(self, id):
wd = self.app.wd
# select first contact = click first checkbox
self.open_contacts_page()
self.select_contact_by_id(id)
# submit contact deletion
wd.find_element_by_css_selector("input[value='Delete']").click()
# accept dialog window
wd.switch_to_alert().accept()
self.app.return_to_home_page()
self.contact_cashe = None
def open_contacts_page(self):
wd = self.app.wd
# select first group = click first checkbox
if not wd.current_url.endswith("/index.php") > 0:
wd.find_element_by_link_text("home").click()
def select_first_contact(self):
self.select_contact_by_index(0)
def select_contact_by_index(self, index):
wd = self.app.wd
# select first contact = click first checkbox
wd.find_elements_by_name("selected[]")[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
# select first contact = click first checkbox
wd.find_element_by_css_selector("input[value='%s']" % id ).click()
def change_contact_info(self, contact):
wd = self.app.wd
# fill in names of new contact
self.change_field_value("firstname", contact.firstname)
self.change_field_value("middlename", contact.middlename)
self.change_field_value("lastname", contact.lastname)
self.change_field_value("nickname", contact.nickname)
# fill in job information of new contact
self.change_field_value("title", contact.title)
self.change_field_value("company", contact.company)
self.change_field_value("address", contact.address)
# fill in phones of new contact
self.change_field_value("home", contact.phone_home)
self.change_field_value("mobile", contact.phone_mobile)
self.change_field_value("work", contact.phone_work)
self.change_field_value("fax", contact.phone_fax)
# fill in emails of new contact
self.change_field_value("email", contact.email1)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
# fill in webpage of new contact
self.change_field_value("homepage", contact.webpage)
# fill in birth dates of new contact
self.change_date_value("bday", contact.bday)
self.change_date_value("bmonth", contact.bmonth)
self.change_field_value("byear", contact.byear)
# fill in anniversary dates of new contact
self.change_date_value("aday", contact.aday)
self.change_date_value("amonth", contact.amonth)
self.change_field_value("ayear", contact.ayear)
# fill in secondary info of new contact
self.change_field_value("address2", contact.address2)
self.change_field_value("phone2", contact.phone2)
self.change_field_value("notes", contact.notes2)
self.contact_cashe = None
def edit_first_contact(self, new_contact_data):
self.edit_contact_by_index(0)
def edit_contact_by_index(self, index, new_contact_data):
wd = self.app.wd
self.select_contact_edit_by_index(index)
# edit contact
self.change_contact_info(new_contact_data)
# submit contact edition
wd.find_element_by_xpath("//input[@value='Update']").click()
self.app.return_to_home_page()
self.contact_cashe = None
def edit_contact_by_id(self, id, new_contact_data):
wd = self.app.wd
self.select_contact_edit_by_id(id)
# edit contact
self.change_contact_info(new_contact_data)
# submit contact edition
wd.find_element_by_xpath("//input[@value='Update']").click()
self.app.return_to_home_page()
self.contact_cashe = None
def select_first_contact_edit(self):
self.select_contact_by_index(0)
def select_contact_edit_by_index(self, index):
wd = self.app.wd
self.app.return_to_home_page()
# init contact edition of first edit link
wd.find_elements_by_css_selector("img[src='icons/pencil.png']")[index].click()
def select_contact_edit_by_id(self, id):
wd = self.app.wd
self.app.return_to_home_page()
# init contact edition of first edit link
wd.find_element_by_css_selector("a[href='edit.php?id=%s']" % id).click()
def select_contact_view_by_index(self, index):
wd = self.app.wd
self.app.return_to_home_page()
# init contact view of first edit link
wd.find_elements_by_css_selector("img[src='icons/status_online.png']")[index].click()
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def change_date_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
Select(wd.find_element_by_name(field_name)).select_by_visible_text(text)
def count(self):
wd = self.app.wd
self.open_contacts_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cashe = None
def get_contact_list(self):
if self.contact_cashe is None:
wd = self.app.wd
self.open_contacts_page()
self.contact_cashe = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
lastname = cells[1].text
firstname = cells[2].text
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
self.contact_cashe.append(Contact(firstname=self.clear_extra_spaces(firstname), lastname=self.clear_extra_spaces(lastname), id=id,
address=self.clear_extra_spaces(address),
all_phones_from_home_page = all_phones,all_emails_from_home_page = all_emails))
            # alternative approach (kept for reference):
# inputs = wd.find_elements_by_css_selector("#maintable .center input")
# first_names = wd.find_elements_by_css_selector("#maintable td:nth-child(3)")
# last_names = wd.find_elements_by_css_selector("#maintable td:nth-child(2)")
# for i in range(0, len(inputs)):
# id = inputs[i].get_attribute("value")
# first_name = first_names[i].text
# last_name = last_names[i].text
# self.contact_cashe.append(Contact(firstname=first_name, lastname=last_name, id=id))
return list(self.contact_cashe)
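    # Note on caching: get_contact_list() stores its result in contact_cashe and every
    # mutating helper above resets the cache to None, forcing the next call to re-read
    # the home page. Minimal usage sketch (the "app.contact" attribute name and the
    # "changed" contact object are assumptions, not part of this class):
    #   contacts = app.contact.get_contact_list()      # scrapes the page, fills the cache
    #   app.contact.edit_contact_by_index(0, changed)  # invalidates the cache
    #   contacts = app.contact.get_contact_list()      # scrapes the page again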
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.select_contact_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
phone_home = wd.find_element_by_name("home").get_attribute("value")
phone_mobile = wd.find_element_by_name("mobile").get_attribute("value")
phone_work = wd.find_element_by_name("work").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
email1 = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
address = wd.find_element_by_name("address").text
contact_edit = Contact(firstname=firstname, lastname=lastname, id=id, phone_home=phone_home, phone_mobile=phone_mobile,
phone_work=phone_work, phone2=phone2, email1 = email1, email2 = email2, email3 = email3, address = address)
contact_edit.all_phones_from_home_page = self.merge_phones_like_on_home_page(contact_edit)
contact_edit.all_emails_from_home_page = self.merge_emails_like_on_home_page(contact_edit)
return contact_edit
def get_contact_info_from_view_page(self, index):
wd = self.app.wd
self.select_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
phone_home = re.search("H: (.*)", text).group(1)
phone_mobile = re.search("M: (.*)", text).group(1)
phone_work = re.search("W: (.*)", text).group(1)
phone2 = re.search("P: (.*)", text).group(1)
return Contact(phone_home=phone_home, phone_mobile=phone_mobile,
phone_work=phone_work, phone2=phone2)
    def clear_extra_spaces(self, s):
        return re.sub(" +", " ", s.strip())
def clear(self, s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(self, contact):
        # filter drops None values, map strips formatting characters, filter keeps only non-empty strings
return "\n".join(filter(lambda x: x != "",
map(lambda x: self.clear(x),
filter(lambda x: x is not None,
[contact.phone_home, contact.phone_mobile, contact.phone_work, contact.phone2]))))
def merge_emails_like_on_home_page(self, contact):
        # filter drops None values, map strips formatting characters, filter keeps only non-empty strings
return "\n".join(filter(lambda x: x != "",
map(lambda x:self.clear(x),
filter(lambda x: x is not None,
[contact.email1, contact.email2, contact.email3]))))
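    # Illustrative example of the normalization above (hypothetical values):
    #   contact.phone_home = "+1 (555) 111-22-33", phone_mobile = None, phone_work = "55 66-77"
    #   merge_phones_like_on_home_page(contact) -> "+15551112233\n556677"
    # i.e. the same single-column formatting that the home page table uses.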
def add_contact_to_group(self, contact, group):
wd = self.app.wd
self.select_contact_by_id(contact.id)
wd.find_element_by_name("to_group").click()
Select(wd.find_element_by_name("to_group")).select_by_value(group.id)
wd.find_element_by_name("add").click()
self.app.return_to_home_page()
def remove_contact_from_group(self, contact, group):
wd = self.app.wd
wd.find_element_by_name("group").click()
Select(wd.find_element_by_name("group")).select_by_value(group.id)
wd.find_element_by_id(contact.id).click()
wd.find_element_by_name("remove").click()
self.app.return_to_home_page()
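# Illustrative end-to-end usage of the two group helpers above from a test
# (the "app" fixture and the way contact/group objects are obtained are assumptions,
# not defined in this module):
#   app.contact.add_contact_to_group(contact, group)
#   # ... assert via the DB/ORM layer that the contact is now in the group ...
#   app.contact.remove_contact_from_group(contact, group)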
| 40.225589
| 146
| 0.646522
|
4a16e8fbb875b76cb5d3cebb01d8705872c77f49
| 235
|
py
|
Python
|
tools/cross_check_tool/cross_check_tool.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 1
|
2021-10-21T03:04:16.000Z
|
2021-10-21T03:04:16.000Z
|
tools/cross_check_tool/cross_check_tool.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 58
|
2020-11-06T12:13:45.000Z
|
2022-03-28T13:20:11.000Z
|
tools/cross_check_tool/cross_check_tool.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 4
|
2021-09-29T20:44:49.000Z
|
2021-10-20T13:02:12.000Z
|
#!/usr/bin/python3
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import sys
from openvino.tools.cross_check_tool.cross_check_tool import main
if __name__ == "__main__":
sys.exit(main() or 0)
| 19.583333
| 65
| 0.748936
|
4a16e986e164f6c09aed6b6aa6b8ebd46d6b9c62
| 1,361
|
py
|
Python
|
Lib/site-packages/braintree/sub_merchant_account/sub_merchant_account.py
|
shashank7991/eBuy
|
2e65572967b33e7205b38c048b7be2d9943173b6
|
[
"MIT"
] | null | null | null |
Lib/site-packages/braintree/sub_merchant_account/sub_merchant_account.py
|
shashank7991/eBuy
|
2e65572967b33e7205b38c048b7be2d9943173b6
|
[
"MIT"
] | null | null | null |
Lib/site-packages/braintree/sub_merchant_account/sub_merchant_account.py
|
shashank7991/eBuy
|
2e65572967b33e7205b38c048b7be2d9943173b6
|
[
"MIT"
] | null | null | null |
from braintree.configuration import Configuration
from braintree.resource import Resource
from braintree.sub_merchant_account import ContactDetails, BusinessDetails, FundingDetails
class SubMerchantAccount(Resource):
class Status(object):
Pending = "pending"
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
self.business_details = BusinessDetails(attributes.get("business", {}))
self.funding_details = FundingDetails(attributes.get("funding", {}))
self.contacts = self._build_contacts(attributes)
def __repr__(self):
detail_list = [
"id",
"tos_accepted",
"contacts",
"business_details",
"funding_details",
]
return super(SubMerchantAccount, self).__repr__(detail_list)
def _build_contacts(self, attributes):
contacts = []
for contact_attributes in attributes.get("contacts", []):
contacts.append(ContactDetails(contact_attributes))
return contacts
@staticmethod
def create(params={}):
return Configuration.gateway().sub_merchant_account.create(params)
@staticmethod
def update(sub_merchant_account_id, params={}):
return Configuration.gateway().sub_merchant_account.update(sub_merchant_account_id, params)
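# Minimal usage sketch (assumes the Braintree gateway is already configured via
# braintree.Configuration; the payload keys shown are illustrative, not a documented schema):
#   result = SubMerchantAccount.create({"tos_accepted": True, "contacts": [...]})
#   result = SubMerchantAccount.update("a_sub_merchant_account_id", {"funding": {...}})
# Both calls return the SDK's usual Result object (check result.is_success).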
| 34.025
| 99
| 0.68626
|
4a16ea1cc9cdaf4ae02bedd5e69369238283c8af
| 23,091
|
py
|
Python
|
pgAdmin4/pgAdmin4/lib/python2.7/site-packages/pgadmin4/pgadmin/__init__.py
|
Anillab/One-Minute-Pitch
|
123f7b2010d3ae0f031066db1bcfe6eda7a41e84
|
[
"MIT"
] | null | null | null |
pgAdmin4/pgAdmin4/lib/python2.7/site-packages/pgadmin4/pgadmin/__init__.py
|
Anillab/One-Minute-Pitch
|
123f7b2010d3ae0f031066db1bcfe6eda7a41e84
|
[
"MIT"
] | null | null | null |
pgAdmin4/pgAdmin4/lib/python2.7/site-packages/pgadmin4/pgadmin/__init__.py
|
Anillab/One-Minute-Pitch
|
123f7b2010d3ae0f031066db1bcfe6eda7a41e84
|
[
"MIT"
] | null | null | null |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""The main pgAdmin module. This handles the application initialisation tasks,
such as setup of logging, dynamic loading of modules etc."""
import logging
import os, sys
from collections import defaultdict
from importlib import import_module
from flask import Flask, abort, request, current_app, session, url_for
from flask_babel import Babel, gettext
from flask_login import user_logged_in, user_logged_out
from flask_security import Security, SQLAlchemyUserDatastore, current_user
from flask_mail import Mail
from flask_security.utils import login_user
from werkzeug.datastructures import ImmutableDict
from flask_paranoid import Paranoid
from pgadmin.utils import PgAdminModule, driver
from pgadmin.utils.versioned_template_loader import VersionedTemplateLoader
from pgadmin.utils.session import create_session_interface
from werkzeug.local import LocalProxy
from werkzeug.utils import find_modules
from pgadmin.utils.preferences import Preferences
from pgadmin.model import db, Role, Server, ServerGroup, \
User, Keys, Version, SCHEMA_VERSION as CURRENT_SCHEMA_VERSION
# If script is running under python3, it will not have the xrange function
# defined
winreg = None
if sys.version_info[0] >= 3:
xrange = range
if os.name == 'nt':
import winreg
elif os.name == 'nt':
import _winreg as winreg
class PgAdmin(Flask):
def __init__(self, *args, **kwargs):
# Set the template loader to a postgres-version-aware loader
self.jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_'],
loader=VersionedTemplateLoader(self)
)
super(PgAdmin, self).__init__(*args, **kwargs)
def find_submodules(self, basemodule):
for module_name in find_modules(basemodule, True):
if module_name in self.config['MODULE_BLACKLIST']:
self.logger.info(
'Skipping blacklisted module: %s' % module_name
)
continue
self.logger.info('Examining potential module: %s' % module_name)
module = import_module(module_name)
for key in list(module.__dict__.keys()):
if isinstance(module.__dict__[key], PgAdminModule):
yield module.__dict__[key]
@property
def submodules(self):
for blueprint in self.blueprints.values():
if isinstance(blueprint, PgAdminModule):
yield blueprint
@property
def stylesheets(self):
stylesheets = []
for module in self.submodules:
stylesheets.extend(getattr(module, "stylesheets", []))
return set(stylesheets)
@property
def messages(self):
messages = dict()
for module in self.submodules:
messages.update(getattr(module, "messages", dict()))
return messages
@property
def exposed_endpoint_url_map(self):
#############################################################
# To handle WSGI paths
# If user has setup application under WSGI alias
# like 'localhost/pgadmin4' then we have to append '/pgadmin4'
# into endpoints
#############################################################
import config
is_wsgi_root_present = False
if config.SERVER_MODE:
pgadmin_root_path = url_for('browser.index')
if pgadmin_root_path != '/browser/':
is_wsgi_root_present = True
wsgi_root_path = pgadmin_root_path.replace(
'/browser/', ''
)
def get_full_url_path(url):
"""
Generate endpoint URL at per WSGI alias
"""
if is_wsgi_root_present and url:
return wsgi_root_path + url
else:
return url
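        # Example of the remapping above (illustrative endpoint, not one registered here):
        # with pgAdmin mounted under the WSGI alias /pgadmin4, url_for('browser.index')
        # returns '/pgadmin4/browser/', so wsgi_root_path is '/pgadmin4' and a rule like
        # '/misc/ping' is exposed to the client as '/pgadmin4/misc/ping'.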
# Fetch all endpoints and their respective url
for rule in current_app.url_map.iter_rules('static'):
yield rule.endpoint, get_full_url_path(rule.rule)
for module in self.submodules:
for endpoint in module.exposed_endpoints:
for rule in current_app.url_map.iter_rules(endpoint):
yield rule.endpoint, get_full_url_path(rule.rule)
@property
def javascripts(self):
scripts = []
scripts_names = []
# Remove duplicate javascripts from the list
for module in self.submodules:
module_scripts = getattr(module, "javascripts", [])
for s in module_scripts:
if s['name'] not in scripts_names:
scripts.append(s)
scripts_names.append(s['name'])
return scripts
@property
def panels(self):
panels = []
for module in self.submodules:
panels.extend(module.get_panels())
return panels
@property
def menu_items(self):
from operator import attrgetter
menu_items = defaultdict(list)
for module in self.submodules:
for key, value in module.menu_items.items():
menu_items[key].extend(value)
menu_items = dict((key, sorted(value, key=attrgetter('priority')))
for key, value in menu_items.items())
return menu_items
def _find_blueprint():
if request.blueprint:
return current_app.blueprints[request.blueprint]
current_blueprint = LocalProxy(_find_blueprint)
def create_app(app_name=None):
# Configuration settings
import config
if not app_name:
app_name = config.APP_NAME
# Only enable password related functionality in server mode.
if config.SERVER_MODE is True:
# Some times we need to access these config params where application
# context is not available (we can't use current_app.config in those
# cases even with current_app.app_context())
# So update these params in config itself.
# And also these updated config values will picked up by application
# since we are updating config before the application instance is
# created.
config.SECURITY_RECOVERABLE = True
config.SECURITY_CHANGEABLE = True
# Now we'll open change password page in alertify dialog
# we don't want it to redirect to main page after password
# change operation so we will open the same password change page again.
config.SECURITY_POST_CHANGE_VIEW = 'browser.change_password'
"""Create the Flask application, startup logging and dynamically load
additional modules (blueprints) that are found in this directory."""
app = PgAdmin(__name__, static_url_path='/static')
# Removes unwanted whitespace from render_template function
app.jinja_env.trim_blocks = True
app.config.from_object(config)
app.config.update(dict(PROPAGATE_EXCEPTIONS=True))
##########################################################################
# Setup logging and log the application startup
##########################################################################
# Add SQL level logging, and set the base logging level
logging.addLevelName(25, 'SQL')
app.logger.setLevel(logging.DEBUG)
app.logger.handlers = []
    # We also need to update the handler on the webserver in order to see
    # requests. Setting the level prevents werkzeug from setting up its own
# stream handler thus ensuring all the logging goes through the pgAdmin
# logger.
logger = logging.getLogger('werkzeug')
logger.setLevel(logging.INFO)
# Set SQLITE_PATH to TEST_SQLITE_PATH while running test cases
if "PGADMIN_TESTING_MODE" in os. environ and \
os.environ["PGADMIN_TESTING_MODE"] == "1":
config.SQLITE_PATH = config.TEST_SQLITE_PATH
# Ensure the various working directories exist
from pgadmin.setup import create_app_data_directory, db_upgrade
create_app_data_directory(config)
# File logging
fh = logging.FileHandler(config.LOG_FILE, encoding='utf-8')
fh.setLevel(config.FILE_LOG_LEVEL)
fh.setFormatter(logging.Formatter(config.FILE_LOG_FORMAT))
app.logger.addHandler(fh)
logger.addHandler(fh)
# Console logging
ch = logging.StreamHandler()
ch.setLevel(config.CONSOLE_LOG_LEVEL)
ch.setFormatter(logging.Formatter(config.CONSOLE_LOG_FORMAT))
app.logger.addHandler(ch)
logger.addHandler(ch)
# Log the startup
app.logger.info('########################################################')
app.logger.info('Starting %s v%s...', config.APP_NAME, config.APP_VERSION)
app.logger.info('########################################################')
app.logger.debug("Python syspath: %s", sys.path)
##########################################################################
# Setup i18n
##########################################################################
# Initialise i18n
babel = Babel(app)
app.logger.debug('Available translations: %s' % babel.list_translations())
@babel.localeselector
def get_locale():
"""Get the language for the user."""
language = 'en'
if config.SERVER_MODE is False:
# Get the user language preference from the miscellaneous module
if current_user.is_authenticated:
user_id = current_user.id
else:
user = user_datastore.get_user(config.DESKTOP_USER)
if user is not None:
user_id = user.id
user_language = Preferences.raw_value(
'miscellaneous', 'user_language', None, user_id
)
if user_language is not None:
language = user_language
else:
# If language is available in get request then return the same
# otherwise check the session or cookie
data = request.form
if 'language' in data:
language = data['language'] or language
setattr(session, 'PGADMIN_LANGUAGE', language)
elif hasattr(session, 'PGADMIN_LANGUAGE'):
language = getattr(session, 'PGADMIN_LANGUAGE', language)
elif hasattr(request.cookies, 'PGADMIN_LANGUAGE'):
language = getattr(request.cookies, 'PGADMIN_LANGUAGE', language)
return language
##########################################################################
# Setup authentication
##########################################################################
app.config['SQLALCHEMY_DATABASE_URI'] = u'sqlite:///{0}?timeout={1}'.format(
config.SQLITE_PATH.replace(u'\\', u'/'),
getattr(config, 'SQLITE_TIMEOUT', 500)
)
# Create database connection object and mailer
db.init_app(app)
##########################################################################
# Upgrade the schema (if required)
##########################################################################
with app.app_context():
# Run migration for the first time i.e. create database
from config import SQLITE_PATH
if not os.path.exists(SQLITE_PATH):
db_upgrade(app)
else:
version = Version.query.filter_by(name='ConfigDB').first()
schema_version = version.value
            # Run migration if the current schema version is greater than or
            # equal to the schema version stored in the version table
if CURRENT_SCHEMA_VERSION >= schema_version:
db_upgrade(app)
# Update schema version to the latest
if CURRENT_SCHEMA_VERSION > schema_version:
version = Version.query.filter_by(name='ConfigDB').first()
version.value = CURRENT_SCHEMA_VERSION
db.session.commit()
Mail(app)
import pgadmin.utils.paths as paths
paths.init_app(app)
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(None, user_datastore)
##########################################################################
# Setup security
##########################################################################
with app.app_context():
config.CSRF_SESSION_KEY = Keys.query.filter_by(name = 'CSRF_SESSION_KEY').first().value
config.SECRET_KEY = Keys.query.filter_by(name = 'SECRET_KEY').first().value
config.SECURITY_PASSWORD_SALT = Keys.query.filter_by(name = 'SECURITY_PASSWORD_SALT').first().value
    # Update the app.config with proper security keys for signing CSRF data,
# signing cookies, and the SALT for hashing the passwords.
app.config.update(dict(CSRF_SESSION_KEY=config.CSRF_SESSION_KEY))
app.config.update(dict(SECRET_KEY=config.SECRET_KEY))
app.config.update(dict(SECURITY_PASSWORD_SALT=config.SECURITY_PASSWORD_SALT))
security.init_app(app, user_datastore)
app.session_interface = create_session_interface(app)
# Make the Session more secure against XSS & CSRF when running in web mode
if config.SERVER_MODE:
paranoid = Paranoid(app)
paranoid.redirect_view = 'browser.index'
##########################################################################
# Load all available server drivers
##########################################################################
driver.init_app(app)
##########################################################################
# Register language to the preferences after login
##########################################################################
@user_logged_in.connect_via(app)
def register_language(sender, user):
# After logged in, set the language in the preferences if we get from
# the login page
data = request.form
if 'language' in data:
language = data['language']
# Set the user language preference
misc_preference = Preferences.module('miscellaneous')
user_languages = misc_preference.preference(
'user_language'
)
if user_languages and language:
language = user_languages.set(language)
##########################################################################
# Register any local servers we can discover
##########################################################################
@user_logged_in.connect_via(app)
def on_user_logged_in(sender, user):
# Keep hold of the user ID
user_id = user.id
# Get the first server group for the user
servergroup_id = 1
servergroups = ServerGroup.query.filter_by(
user_id=user_id
).order_by("id")
if servergroups.count() > 0:
servergroup = servergroups.first()
servergroup_id = servergroup.id
'''Add a server to the config database'''
def add_server(user_id, servergroup_id, name, superuser, port, discovery_id, comment):
# Create a server object if needed, and store it.
servers = Server.query.filter_by(
user_id=user_id,
                discovery_id=discovery_id
).order_by("id")
if servers.count() > 0:
                return
svr = Server(user_id=user_id,
servergroup_id=servergroup_id,
name=name,
host='localhost',
port=port,
maintenance_db='postgres',
username=superuser,
ssl_mode='prefer',
                         comment=comment,
discovery_id=discovery_id)
db.session.add(svr)
db.session.commit()
# Figure out what servers are present
if winreg is not None:
arch_keys = set()
proc_arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()
try:
proc_arch64 = os.environ['PROCESSOR_ARCHITEW6432'].lower()
except:
proc_arch64 = None
if proc_arch == 'x86' and not proc_arch64:
arch_keys.add(0)
elif proc_arch == 'x86' or proc_arch == 'amd64':
arch_keys.add(winreg.KEY_WOW64_32KEY)
arch_keys.add(winreg.KEY_WOW64_64KEY)
for arch_key in arch_keys:
for server_type in ('PostgreSQL', 'EnterpriseDB'):
try:
root_key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
"SOFTWARE\\" + server_type + "\Services", 0,
winreg.KEY_READ | arch_key
)
for i in xrange(0, winreg.QueryInfoKey(root_key)[0]):
inst_id = winreg.EnumKey(root_key, i)
inst_key = winreg.OpenKey(root_key, inst_id)
svr_name = winreg.QueryValueEx(
inst_key, 'Display Name'
)[0]
svr_superuser = winreg.QueryValueEx(
inst_key, 'Database Superuser'
)[0]
svr_port = winreg.QueryValueEx(inst_key, 'Port')[0]
svr_discovery_id = inst_id
svr_comment = gettext(
"Auto-detected %s installation with the data directory at %s" % (
winreg.QueryValueEx(
inst_key, 'Display Name'
)[0],
winreg.QueryValueEx(
inst_key, 'Data Directory'
)[0]
)
)
add_server(
user_id, servergroup_id, svr_name,
svr_superuser, svr_port,
svr_discovery_id, svr_comment
)
inst_key.Close()
except:
pass
else:
# We use the postgres-winreg.ini file on non-Windows
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser # Python 2
registry = ConfigParser()
try:
registry.read('/etc/postgres-reg.ini')
sections = registry.sections()
# Loop the sections, and get the data from any that are PG or PPAS
for section in sections:
if section.startswith('PostgreSQL/') or section.startswith('EnterpriseDB/'):
svr_name = registry.get(section, 'Description')
svr_superuser = registry.get(section, 'Superuser')
svr_port = registry.getint(section, 'Port')
svr_discovery_id = section
description = registry.get(section, 'Description')
data_directory = registry.get(section, 'DataDirectory')
if hasattr(str, 'decode'):
description = description.decode('utf-8')
data_directory = data_directory.decode('utf-8')
svr_comment = gettext(u"Auto-detected %s installation with the data directory at %s" % (
description,
data_directory
))
add_server(user_id, servergroup_id, svr_name, svr_superuser, svr_port, svr_discovery_id, svr_comment)
except:
pass
@user_logged_in.connect_via(app)
@user_logged_out.connect_via(app)
def force_session_write(app, user):
session.force_write = True
##########################################################################
# Load plugin modules
##########################################################################
for module in app.find_submodules('pgadmin'):
app.logger.info('Registering blueprint module: %s' % module)
app.register_blueprint(module)
##########################################################################
# Handle the desktop login
##########################################################################
@app.before_request
def before_request():
"""Login the default user if running in desktop mode"""
# Check the auth key is valid, if it's set, and we're not in server
# mode, and it's not a help file request.
if not config.SERVER_MODE and app.PGADMIN_KEY != '':
if (
                ('key' not in request.args or request.args['key'] != app.PGADMIN_KEY) and
request.cookies.get('PGADMIN_KEY') != app.PGADMIN_KEY and
request.endpoint != 'help.static'
):
abort(401)
if not config.SERVER_MODE and not current_user.is_authenticated:
user = user_datastore.get_user(config.DESKTOP_USER)
# Throw an error if we failed to find the desktop user, to give
# the sysadmin a hint. We'll continue to try to login anyway as
            # that'll throw a nice 500 error for us.
if user is None:
app.logger.error(
'The desktop user %s was not found in the configuration database.'
% config.DESKTOP_USER
)
abort(401)
login_user(user)
@app.after_request
def after_request(response):
if 'key' in request.args:
response.set_cookie('PGADMIN_KEY', value=request.args['key'])
return response
##########################################################################
# Minify output
##########################################################################
# HTMLMIN doesn't work with Python 2.6.
if not config.DEBUG and sys.version_info >= (2,7):
from flask_htmlmin import HTMLMIN
HTMLMIN(app)
@app.context_processor
def inject_blueprint():
"""Inject a reference to the current blueprint, if any."""
return {
'current_app': current_app,
'current_blueprint': current_blueprint
}
##########################################################################
# All done!
##########################################################################
return app
| 39.539384
| 121
| 0.54177
|
4a16eb03edd3392ecf17460d78de98d9e1ff2dbd
| 3,087
|
py
|
Python
|
src/helpers.py
|
sul-dlss-labs/biology-fast-etds
|
a928d74fd6d2533a9ee4fa7bb8d8d3c33f0966e5
|
[
"Apache-2.0"
] | null | null | null |
src/helpers.py
|
sul-dlss-labs/biology-fast-etds
|
a928d74fd6d2533a9ee4fa7bb8d8d3c33f0966e5
|
[
"Apache-2.0"
] | null | null | null |
src/helpers.py
|
sul-dlss-labs/biology-fast-etds
|
a928d74fd6d2533a9ee4fa7bb8d8d3c33f0966e5
|
[
"Apache-2.0"
] | null | null | null |
__license__ = "Apache 2"
from streamlit.report_thread import get_report_ctx
from streamlit.hashing import _CodeHasher
from streamlit.server.server import Server
import requests
import datetime
import json
import socket
import uuid
import sys
class _SessionState:
def __init__(self, session, hash_funcs):
"""Initialize SessionState instance."""
self.__dict__["_state"] = {
"data": {},
"hash": None,
"hasher": _CodeHasher(hash_funcs),
"is_rerun": False,
"session": session,
}
def __call__(self, **kwargs):
"""Initialize state data once."""
for item, value in kwargs.items():
if item not in self._state["data"]:
self._state["data"][item] = value
def __getitem__(self, item):
"""Return a saved state value, None if item is undefined."""
return self._state["data"].get(item, None)
def __getattr__(self, item):
"""Return a saved state value, None if item is undefined."""
return self._state["data"].get(item, None)
def __setitem__(self, item, value):
"""Set state value."""
self._state["data"][item] = value
def __setattr__(self, item, value):
"""Set state value."""
self._state["data"][item] = value
def clear(self):
"""Clear session state and request a rerun."""
self._state["data"].clear()
self._state["session"].request_rerun()
def sync(self):
"""Rerun the app with all state values up to date from the beginning to fix rollbacks."""
# Ensure to rerun only once to avoid infinite loops
# caused by a constantly changing state value at each run.
#
# Example: state.value += 1
if self._state["is_rerun"]:
self._state["is_rerun"] = False
elif self._state["hash"] is not None:
if self._state["hash"] != self._state["hasher"].to_bytes(self._state["data"], None):
self._state["is_rerun"] = True
self._state["session"].request_rerun()
self._state["hash"] = self._state["hasher"].to_bytes(self._state["data"], None)
def _get_session():
session_id = get_report_ctx().session_id
session_info = Server.get_current()._get_session_info(session_id)
if session_info is None:
raise RuntimeError("Couldn't get your Streamlit Session object.")
return session_info.session
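# A common way to combine _get_session() with _SessionState is a small accessor like the
# sketch below; the function name and the attribute used for storage are assumptions,
# not part of this module:
#
#   def get_state(hash_funcs=None):
#       session = _get_session()
#       if not hasattr(session, "_custom_session_state"):
#           session._custom_session_state = _SessionState(session, hash_funcs)
#       return session._custom_session_state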
def save_fast_to_druid(druid: str, fast_uris: list):
if len(fast_uris) < 1:
return
host_name = socket.gethostname()
firebase_url = f'https://bio-etd-fast.firebaseio.com/{uuid.uuid4()}.json'
data = {
"druid": druid,
"timestamp": datetime.datetime.utcnow().isoformat(),
"fast_uris": fast_uris,
"ip": socket.gethostbyname(host_name)
}
result = requests.post(firebase_url, data=json.dumps(data))
if result.status_code < 400:
return True
else:
print(f"Error saving code: {result.status_code} error: {result.text}")
sys.stdout.flush()
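# Illustrative call (the druid and FAST URI below are made-up values):
#   save_fast_to_druid("druid:ab123cd4567", ["http://id.worldcat.org/fast/1234567"])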
| 32.15625
| 97
| 0.620991
|
4a16eb3e2eaee5152fad526986d290aede1d23ad
| 4,228
|
py
|
Python
|
tests/test_tasklist.py
|
threefoldtech/0-robot
|
80b9912b77782a0d2277cbfa98cc672a5ef1e88e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_tasklist.py
|
threefoldtech/0-robot
|
80b9912b77782a0d2277cbfa98cc672a5ef1e88e
|
[
"Apache-2.0"
] | 63
|
2018-08-01T13:38:44.000Z
|
2019-12-09T09:31:31.000Z
|
tests/test_tasklist.py
|
threefoldtech/0-robot
|
80b9912b77782a0d2277cbfa98cc672a5ef1e88e
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
import tempfile
import unittest
from zerorobot import service_collection as scol
from zerorobot import config
from zerorobot.task import (PRIORITY_NORMAL, PRIORITY_SYSTEM, Task, TaskList,
TaskNotFoundError)
from zerorobot.template_collection import _load_template
class FakeService:
def __init__(self, guid):
self.guid = guid
self.name = guid
def foo(self):
pass
def bar(self):
pass
class TestTaskList(unittest.TestCase):
def setUp(self):
config.data_repo = config.DataRepo(tempfile.mkdtemp(prefix='0robottest'))
scol.drop_all()
tmpl = self._load_template('node')
s = tmpl(name='test')
self.tl = TaskList(s)
# ensure we have a clean start
self.tl._done.drop()
def tearDown(self):
try:
self.tl._done.close()
finally:
if os.path.exists(config.data_repo.path):
shutil.rmtree(config.data_repo.path)
def _load_template(self, name):
"""
name of the template to load from the
fixtures/templates folder
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
tmpl = _load_template("https://github.com/threefoldtech/0-robot",
os.path.join(dir_path, 'fixtures', 'templates', name))
return tmpl
def _get_tasks(self, nr):
tasks = []
for i in range(nr):
s = FakeService("s%d" % i)
t = Task(s.foo, {})
tasks.append(t)
return tasks
def test_create_list(self):
self.assertIsNotNone(self.tl._queue)
self.assertIsNotNone(self.tl._done)
self.assertTrue(self.tl.empty())
def test_put(self):
with self.assertRaises(ValueError, msg="should raise when trying to put an object that is not a Task"):
self.tl.put('string')
self.assertTrue(self.tl.empty())
srv = FakeService('s1')
t = Task(srv.foo, {})
self.tl.put(t)
self.assertFalse(self.tl.empty())
t2 = self.tl.get()
self.tl.done(t2)
self.assertEqual(t, t2, "task extracted from the list should be the same as the task added")
self.assertEqual(self.tl._done.list()[0].guid, t.guid, "task extracted from the list should be kept in the _done list")
def test_get(self):
tasks = self._get_tasks(3)
for t in tasks:
self.tl.put(t)
returned_tasks = []
while not self.tl.empty():
returned_tasks.append(self.tl.get())
self.assertEqual(returned_tasks, tasks)
def test_get_by_guid(self):
tasks = self._get_tasks(3)
for t in tasks:
self.tl.put(t)
t1 = tasks[0]
task = self.tl.get_task_by_guid(t1.guid)
self.assertEqual(task, t1)
with self.assertRaises(TaskNotFoundError):
self.tl.get_task_by_guid('1111')
def test_list(self):
tasks = self._get_tasks(2)
for t in tasks:
self.tl.put(t)
returned_tasks = self.tl.list_tasks()
self.assertEqual(returned_tasks, tasks, "listing of tasks should return all enqueued tasks")
task = self.tl.get()
self.tl.done(task)
returned_tasks = self.tl.list_tasks()
        self.assertEqual(returned_tasks, tasks[1:], "listing of tasks should return only the tasks still enqueued")
all_tasks = [t.guid for t in self.tl.list_tasks(all=True)]
self.assertEqual(all_tasks, [t.guid for t in reversed(tasks)], "listing of all tasks should return all enqueued tasks and all done tasks")
def test_priority(self):
s1 = FakeService("s1")
s2 = FakeService("s2")
s3 = FakeService("s3")
t1 = Task(s1.foo, {})
t2 = Task(s1.foo, {})
t3 = Task(s1.foo, {})
self.tl.put(t1, priority=PRIORITY_NORMAL)
self.tl.put(t2, priority=PRIORITY_NORMAL)
self.tl.put(t3, priority=PRIORITY_SYSTEM)
tasks = []
while not self.tl.empty():
tasks.append(self.tl.get())
self.assertEqual(tasks, [t3, t1, t2], "task with higher priority should be extracted first")
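# These tests can be run directly with pytest from the repository root, e.g.:
#   python -m pytest tests/test_tasklist.py -v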
| 30.417266
| 146
| 0.603122
|
4a16ed02617a828b378707c3355b14ee529d97b9
| 665
|
py
|
Python
|
tests/modules/events/retrieve/integration/test_sql_events_repository_retrieve.py
|
alice-biometrics/petisco-task-manager
|
2bad52013ab122f8c3e5dce740dcd154883e6940
|
[
"MIT"
] | 1
|
2020-04-14T18:12:11.000Z
|
2020-04-14T18:12:11.000Z
|
tests/modules/events/retrieve/integration/test_sql_events_repository_retrieve.py
|
alice-biometrics/petisco-task-manager
|
2bad52013ab122f8c3e5dce740dcd154883e6940
|
[
"MIT"
] | 3
|
2020-04-20T10:35:26.000Z
|
2020-06-15T07:45:59.000Z
|
tests/modules/events/retrieve/integration/test_sql_events_repository_retrieve.py
|
alice-biometrics/petisco-task-manager
|
2bad52013ab122f8c3e5dce740dcd154883e6940
|
[
"MIT"
] | 1
|
2021-03-12T13:48:01.000Z
|
2021-03-12T13:48:01.000Z
|
import pytest
from meiga.assertions import assert_success
@pytest.mark.integration
def test_should_retrieve_events_successfully(
taskmanager_sql_database, given_a_sql_event_repository_with_some_events
):
repository = given_a_sql_event_repository_with_some_events
result = repository.retrieve_all()
assert_success(result)
assert len(result.value) == 3
@pytest.mark.integration
def test_should_retrieve_events_successfully_when_empty(
taskmanager_sql_database, given_empty_sql_event_repository
):
repository = given_empty_sql_event_repository
result = repository.retrieve_all()
assert_success(result, value_is_equal_to=[])
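# The tests above carry the "integration" marker, so they can be selected with pytest's
# standard marker filter (the fixtures are expected to come from a conftest.py):
#   python -m pytest -m integration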
| 25.576923
| 75
| 0.821053
|
4a16ed129d82417c63a9ece1240eb56c0e0d0b53
| 126,670
|
py
|
Python
|
scripts/rpc.py
|
changpe1/spdk
|
20e9265e81708bddfc8cc58009e47538cb159889
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/rpc.py
|
changpe1/spdk
|
20e9265e81708bddfc8cc58009e47538cb159889
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/rpc.py
|
changpe1/spdk
|
20e9265e81708bddfc8cc58009e47538cb159889
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
from rpc.client import print_dict, print_json, JSONRPCException
from rpc.helpers import deprecated_aliases
import logging
import argparse
import rpc
import sys
import shlex
import json
try:
from shlex import quote
except ImportError:
from pipes import quote
def print_array(a):
print(" ".join((quote(v) for v in a)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='SPDK RPC command line interface')
parser.add_argument('-s', dest='server_addr',
help='RPC domain socket path or IP address', default='/var/tmp/spdk.sock')
parser.add_argument('-p', dest='port',
help='RPC port number (if server_addr is IP address)',
default=5260, type=int)
parser.add_argument('-t', dest='timeout',
help='Timeout as a floating point number expressed in seconds waiting for response. Default: 60.0',
default=60.0, type=float)
parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
help='Set verbose mode to INFO', default="ERROR")
parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
help="""Set verbose level. """)
parser.add_argument('--dry_run', dest='dry_run', action='store_true', help="Display request and exit")
parser.set_defaults(dry_run=False)
subparsers = parser.add_subparsers(help='RPC methods', dest='called_rpc_name')
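    # Illustrative invocations of this CLI (socket path and argument values are examples only):
    #   ./scripts/rpc.py bdev_malloc_create 64 512 -b Malloc0
    #   ./scripts/rpc.py -s /var/tmp/spdk.sock bdev_nvme_attach_controller -b Nvme0 -t pcie -a 0000:00:04.0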
def framework_start_init(args):
rpc.framework_start_init(args.client)
p = subparsers.add_parser('framework_start_init', aliases=['start_subsystem_init'],
help='Start initialization of subsystems')
p.set_defaults(func=framework_start_init)
def framework_wait_init(args):
rpc.framework_wait_init(args.client)
p = subparsers.add_parser('framework_wait_init', aliases=['wait_subsystem_init'],
help='Block until subsystems have been initialized')
p.set_defaults(func=framework_wait_init)
def rpc_get_methods(args):
print_dict(rpc.rpc_get_methods(args.client,
current=args.current,
include_aliases=args.include_aliases))
p = subparsers.add_parser('rpc_get_methods', aliases=['get_rpc_methods'],
help='Get list of supported RPC methods')
p.add_argument('-c', '--current', help='Get list of RPC methods only callable in the current state.', action='store_true')
p.add_argument('-i', '--include-aliases', help='include RPC aliases', action='store_true')
p.set_defaults(func=rpc_get_methods)
def spdk_get_version(args):
print_json(rpc.spdk_get_version(args.client))
p = subparsers.add_parser('spdk_get_version', aliases=['get_spdk_version'],
help='Get SPDK version')
p.set_defaults(func=spdk_get_version)
def save_config(args):
rpc.save_config(args.client,
sys.stdout,
indent=args.indent)
p = subparsers.add_parser('save_config', help="""Write current (live) configuration of SPDK subsystems and targets to stdout.
""")
    p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 means compact mode. Default indent level is 2.
""", type=int, default=2)
p.set_defaults(func=save_config)
def load_config(args):
rpc.load_config(args.client, sys.stdin,
include_aliases=args.include_aliases)
p = subparsers.add_parser('load_config', help="""Configure SPDK subsystems and targets using JSON RPC read from stdin.""")
p.add_argument('-i', '--include-aliases', help='include RPC aliases', action='store_true')
p.set_defaults(func=load_config)
def save_subsystem_config(args):
rpc.save_subsystem_config(args.client,
sys.stdout,
indent=args.indent,
name=args.name)
p = subparsers.add_parser('save_subsystem_config', help="""Write current (live) configuration of SPDK subsystem to stdout.
""")
    p.add_argument('-i', '--indent', help="""Indent level. Value less than 0 means compact mode. Default indent level is 2.
""", type=int, default=2)
p.add_argument('-n', '--name', help='Name of subsystem', required=True)
p.set_defaults(func=save_subsystem_config)
def load_subsystem_config(args):
rpc.load_subsystem_config(args.client,
sys.stdin)
p = subparsers.add_parser('load_subsystem_config', help="""Configure SPDK subsystem using JSON RPC read from stdin.""")
p.set_defaults(func=load_subsystem_config)
# app
def spdk_kill_instance(args):
rpc.app.spdk_kill_instance(args.client,
sig_name=args.sig_name)
p = subparsers.add_parser('spdk_kill_instance', aliases=['kill_instance'],
help='Send signal to instance')
p.add_argument('sig_name', help='signal will be sent to server.')
p.set_defaults(func=spdk_kill_instance)
def framework_monitor_context_switch(args):
enabled = None
if args.enable:
enabled = True
if args.disable:
enabled = False
print_dict(rpc.app.framework_monitor_context_switch(args.client,
enabled=enabled))
p = subparsers.add_parser('framework_monitor_context_switch', aliases=['context_switch_monitor'],
help='Control whether the context switch monitor is enabled')
p.add_argument('-e', '--enable', action='store_true', help='Enable context switch monitoring')
p.add_argument('-d', '--disable', action='store_true', help='Disable context switch monitoring')
p.set_defaults(func=framework_monitor_context_switch)
def framework_get_reactors(args):
print_dict(rpc.app.framework_get_reactors(args.client))
p = subparsers.add_parser(
'framework_get_reactors', help='Display list of all reactors')
p.set_defaults(func=framework_get_reactors)
# bdev
def bdev_set_options(args):
rpc.bdev.bdev_set_options(args.client,
bdev_io_pool_size=args.bdev_io_pool_size,
bdev_io_cache_size=args.bdev_io_cache_size)
p = subparsers.add_parser('bdev_set_options', aliases=['set_bdev_options'],
help="""Set options of bdev subsystem""")
p.add_argument('-p', '--bdev-io-pool-size', help='Number of bdev_io structures in shared buffer pool', type=int)
p.add_argument('-c', '--bdev-io-cache-size', help='Maximum number of bdev_io structures cached per thread', type=int)
p.set_defaults(func=bdev_set_options)
def bdev_compress_create(args):
print_json(rpc.bdev.bdev_compress_create(args.client,
base_bdev_name=args.base_bdev_name,
pm_path=args.pm_path))
p = subparsers.add_parser('bdev_compress_create', aliases=['construct_compress_bdev'],
help='Add a compress vbdev')
p.add_argument('-b', '--base_bdev_name', help="Name of the base bdev")
p.add_argument('-p', '--pm_path', help="Path to persistent memory")
p.set_defaults(func=bdev_compress_create)
def bdev_compress_delete(args):
rpc.bdev.bdev_compress_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_compress_delete', aliases=['delete_compress_bdev'],
help='Delete a compress disk')
p.add_argument('name', help='compress bdev name')
p.set_defaults(func=bdev_compress_delete)
def compress_set_pmd(args):
rpc.bdev.compress_set_pmd(args.client,
pmd=args.pmd)
p = subparsers.add_parser('compress_set_pmd', aliases=['set_compress_pmd'],
help='Set pmd option for a compress disk')
p.add_argument('-p', '--pmd', type=int, help='0 = auto-select, 1= QAT only, 2 = ISAL only')
p.set_defaults(func=compress_set_pmd)
def bdev_compress_get_orphans(args):
print_dict(rpc.bdev.bdev_compress_get_orphans(args.client,
name=args.name))
p = subparsers.add_parser(
'bdev_compress_get_orphans', help='Display list of orphaned compress bdevs.')
p.add_argument('-b', '--name', help="Name of a comp bdev. Example: COMP_Nvme0n1", required=False)
p.set_defaults(func=bdev_compress_get_orphans)
def bdev_crypto_create(args):
print_json(rpc.bdev.bdev_crypto_create(args.client,
base_bdev_name=args.base_bdev_name,
name=args.name,
crypto_pmd=args.crypto_pmd,
key=args.key))
p = subparsers.add_parser('bdev_crypto_create', aliases=['construct_crypto_bdev'],
help='Add a crypto vbdev')
p.add_argument('base_bdev_name', help="Name of the base bdev")
p.add_argument('name', help="Name of the crypto vbdev")
p.add_argument('crypto_pmd', help="Name of the crypto device driver")
p.add_argument('key', help="Key")
p.set_defaults(func=bdev_crypto_create)
def bdev_crypto_delete(args):
rpc.bdev.bdev_crypto_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_crypto_delete', aliases=['delete_crypto_bdev'],
help='Delete a crypto disk')
p.add_argument('name', help='crypto bdev name')
p.set_defaults(func=bdev_crypto_delete)
def bdev_ocf_create(args):
print_json(rpc.bdev.bdev_ocf_create(args.client,
name=args.name,
mode=args.mode,
cache_bdev_name=args.cache_bdev_name,
core_bdev_name=args.core_bdev_name))
p = subparsers.add_parser('bdev_ocf_create', aliases=['construct_ocf_bdev'],
help='Add an OCF block device')
p.add_argument('name', help='Name of resulting OCF bdev')
p.add_argument('mode', help='OCF cache mode', choices=['wb', 'wt', 'pt', 'wa', 'wi', 'wo'])
p.add_argument('cache_bdev_name', help='Name of underlying cache bdev')
    p.add_argument('core_bdev_name', help='Name of underlying core bdev')
p.set_defaults(func=bdev_ocf_create)
def bdev_ocf_delete(args):
rpc.bdev.bdev_ocf_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_ocf_delete', aliases=['delete_ocf_bdev'],
help='Delete an OCF block device')
p.add_argument('name', help='Name of OCF bdev')
p.set_defaults(func=bdev_ocf_delete)
def bdev_ocf_get_stats(args):
print_dict(rpc.bdev.bdev_ocf_get_stats(args.client,
name=args.name))
p = subparsers.add_parser('bdev_ocf_get_stats', aliases=['get_ocf_stats'],
help='Get statistics of chosen OCF block device')
p.add_argument('name', help='Name of OCF bdev')
p.set_defaults(func=bdev_ocf_get_stats)
def bdev_ocf_get_bdevs(args):
print_dict(rpc.bdev.bdev_ocf_get_bdevs(args.client,
name=args.name))
p = subparsers.add_parser('bdev_ocf_get_bdevs', aliases=['get_ocf_bdevs'],
help='Get list of OCF devices including unregistered ones')
p.add_argument('name', nargs='?', default=None, help='name of OCF vbdev or name of cache device or name of core device (optional)')
p.set_defaults(func=bdev_ocf_get_bdevs)
def bdev_malloc_create(args):
num_blocks = (args.total_size * 1024 * 1024) // args.block_size
print_json(rpc.bdev.bdev_malloc_create(args.client,
num_blocks=int(num_blocks),
block_size=args.block_size,
name=args.name,
uuid=args.uuid))
p = subparsers.add_parser('bdev_malloc_create', aliases=['construct_malloc_bdev'],
help='Create a bdev with malloc backend')
p.add_argument('-b', '--name', help="Name of the bdev")
p.add_argument('-u', '--uuid', help="UUID of the bdev")
p.add_argument(
'total_size', help='Size of malloc bdev in MB (float > 0)', type=float)
p.add_argument('block_size', help='Block size for this bdev', type=int)
p.set_defaults(func=bdev_malloc_create)
def bdev_malloc_delete(args):
rpc.bdev.bdev_malloc_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_malloc_delete', aliases=['delete_malloc_bdev'],
help='Delete a malloc disk')
p.add_argument('name', help='malloc bdev name')
p.set_defaults(func=bdev_malloc_delete)
def bdev_null_create(args):
num_blocks = (args.total_size * 1024 * 1024) // args.block_size
print_json(rpc.bdev.bdev_null_create(args.client,
num_blocks=num_blocks,
block_size=args.block_size,
name=args.name,
uuid=args.uuid,
md_size=args.md_size,
dif_type=args.dif_type,
dif_is_head_of_md=args.dif_is_head_of_md))
p = subparsers.add_parser('bdev_null_create', aliases=['construct_null_bdev'],
help='Add a bdev with null backend')
p.add_argument('name', help='Block device name')
p.add_argument('-u', '--uuid', help='UUID of the bdev')
p.add_argument(
'total_size', help='Size of null bdev in MB (int > 0)', type=int)
p.add_argument('block_size', help='Block size for this bdev', type=int)
p.add_argument('-m', '--md-size', type=int,
help='Metadata size for this bdev. Default 0')
p.add_argument('-t', '--dif-type', type=int, choices=[0, 1, 2, 3],
help='Protection information type. Default: 0 - no protection')
p.add_argument('-d', '--dif-is-head-of-md', action='store_true',
help='Protection information is in the first 8 bytes of metadata. Default: in the last 8 bytes')
p.set_defaults(func=bdev_null_create)
def bdev_null_delete(args):
rpc.bdev.bdev_null_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_null_delete', aliases=['delete_null_bdev'],
help='Delete a null bdev')
p.add_argument('name', help='null bdev name')
p.set_defaults(func=bdev_null_delete)
def bdev_aio_create(args):
print_json(rpc.bdev.bdev_aio_create(args.client,
filename=args.filename,
name=args.name,
block_size=args.block_size))
p = subparsers.add_parser('bdev_aio_create', aliases=['construct_aio_bdev'],
help='Add a bdev with aio backend')
p.add_argument('filename', help='Path to device or file (ex: /dev/sda)')
p.add_argument('name', help='Block device name')
p.add_argument('block_size', help='Block size for this bdev', type=int, nargs='?', default=0)
p.set_defaults(func=bdev_aio_create)
def bdev_aio_delete(args):
rpc.bdev.bdev_aio_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_aio_delete', aliases=['delete_aio_bdev'],
help='Delete an aio disk')
p.add_argument('name', help='aio bdev name')
p.set_defaults(func=bdev_aio_delete)
def bdev_uring_create(args):
print_json(rpc.bdev.bdev_uring_create(args.client,
filename=args.filename,
name=args.name,
block_size=args.block_size))
p = subparsers.add_parser('bdev_uring_create', help='Create a bdev with io_uring backend')
p.add_argument('filename', help='Path to device or file (ex: /dev/nvme0n1)')
p.add_argument('name', help='bdev name')
p.add_argument('block_size', help='Block size for this bdev', type=int, nargs='?', default=0)
p.set_defaults(func=bdev_uring_create)
def bdev_uring_delete(args):
rpc.bdev.bdev_uring_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_uring_delete', help='Delete a uring bdev')
p.add_argument('name', help='uring bdev name')
p.set_defaults(func=bdev_uring_delete)
def bdev_nvme_set_options(args):
rpc.bdev.bdev_nvme_set_options(args.client,
action_on_timeout=args.action_on_timeout,
timeout_us=args.timeout_us,
retry_count=args.retry_count,
arbitration_burst=args.arbitration_burst,
low_priority_weight=args.low_priority_weight,
medium_priority_weight=args.medium_priority_weight,
high_priority_weight=args.high_priority_weight,
nvme_adminq_poll_period_us=args.nvme_adminq_poll_period_us,
nvme_ioq_poll_period_us=args.nvme_ioq_poll_period_us,
io_queue_requests=args.io_queue_requests,
delay_cmd_submit=args.delay_cmd_submit)
p = subparsers.add_parser('bdev_nvme_set_options', aliases=['set_bdev_nvme_options'],
help='Set options for the bdev nvme type. This is startup command.')
p.add_argument('-a', '--action-on-timeout',
help="Action to take on command time out. Valid valies are: none, reset, abort")
p.add_argument('-t', '--timeout-us',
help="Timeout for each command, in microseconds. If 0, don't track timeouts.", type=int)
p.add_argument('-n', '--retry-count',
help='the number of attempts per I/O when an I/O fails', type=int)
p.add_argument('--arbitration-burst',
help='the value is expressed as a power of two', type=int)
p.add_argument('--low-priority-weight',
help='the maximum number of commands that the controller may launch at one time from a low priority queue', type=int)
p.add_argument('--medium-priority-weight',
help='the maximum number of commands that the controller may launch at one time from a medium priority queue', type=int)
p.add_argument('--high-priority-weight',
help='the maximum number of commands that the controller may launch at one time from a high priority queue', type=int)
p.add_argument('-p', '--nvme-adminq-poll-period-us',
help='How often the admin queue is polled for asynchronous events', type=int)
p.add_argument('-i', '--nvme-ioq-poll-period-us',
help='How often to poll I/O queues for completions', type=int)
p.add_argument('-s', '--io-queue-requests',
help='The number of requests allocated for each NVMe I/O queue. Default: 512', type=int)
p.add_argument('-d', '--disable-delay-cmd-submit',
help='Disable delaying NVMe command submission, i.e. no batching of multiple commands',
action='store_false', dest='delay_cmd_submit', default=True)
p.set_defaults(func=bdev_nvme_set_options)
def bdev_nvme_set_hotplug(args):
rpc.bdev.bdev_nvme_set_hotplug(args.client, enable=args.enable, period_us=args.period_us)
p = subparsers.add_parser('bdev_nvme_set_hotplug', aliases=['set_bdev_nvme_hotplug'],
help='Set hotplug options for bdev nvme type.')
p.add_argument('-d', '--disable', dest='enable', default=False, action='store_false', help="Disable hotplug (default)")
p.add_argument('-e', '--enable', dest='enable', action='store_true', help="Enable hotplug")
p.add_argument('-r', '--period-us',
help='How often the hotplug is processed for insert and remove events', type=int)
p.set_defaults(func=bdev_nvme_set_hotplug)
def bdev_nvme_attach_controller(args):
print_array(rpc.bdev.bdev_nvme_attach_controller(args.client,
name=args.name,
trtype=args.trtype,
traddr=args.traddr,
adrfam=args.adrfam,
trsvcid=args.trsvcid,
subnqn=args.subnqn,
hostnqn=args.hostnqn,
hostaddr=args.hostaddr,
hostsvcid=args.hostsvcid,
prchk_reftag=args.prchk_reftag,
prchk_guard=args.prchk_guard))
p = subparsers.add_parser('bdev_nvme_attach_controller', aliases=['construct_nvme_bdev'],
help='Add bdevs with nvme backend')
p.add_argument('-b', '--name', help="Name of the NVMe controller, prefix for each bdev name", required=True)
p.add_argument('-t', '--trtype',
help='NVMe-oF target trtype: e.g., rdma, pcie', required=True)
p.add_argument('-a', '--traddr',
help='NVMe-oF target address: e.g., an ip address or BDF', required=True)
p.add_argument('-f', '--adrfam',
help='NVMe-oF target adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
p.add_argument('-s', '--trsvcid',
help='NVMe-oF target trsvcid: e.g., a port number')
p.add_argument('-n', '--subnqn', help='NVMe-oF target subnqn')
p.add_argument('-q', '--hostnqn', help='NVMe-oF host subnqn')
p.add_argument('-i', '--hostaddr',
help='NVMe-oF host address: e.g., an ip address')
p.add_argument('-c', '--hostsvcid',
help='NVMe-oF host svcid: e.g., a port number')
p.add_argument('-r', '--prchk-reftag',
help='Enable checking of PI reference tag for I/O processing.', action='store_true')
p.add_argument('-g', '--prchk-guard',
help='Enable checking of PI guard for I/O processing.', action='store_true')
p.set_defaults(func=bdev_nvme_attach_controller)
def bdev_nvme_get_controllers(args):
print_dict(rpc.nvme.bdev_nvme_get_controllers(args.client,
name=args.name))
p = subparsers.add_parser(
'bdev_nvme_get_controllers', aliases=['get_nvme_controllers'],
help='Display current NVMe controllers list or required NVMe controller')
p.add_argument('-n', '--name', help="Name of the NVMe controller. Example: Nvme0", required=False)
p.set_defaults(func=bdev_nvme_get_controllers)
def bdev_nvme_detach_controller(args):
rpc.bdev.bdev_nvme_detach_controller(args.client,
name=args.name)
p = subparsers.add_parser('bdev_nvme_detach_controller', aliases=['delete_nvme_controller'],
help='Detach an NVMe controller and delete any associated bdevs')
p.add_argument('name', help="Name of the controller")
p.set_defaults(func=bdev_nvme_detach_controller)
def bdev_nvme_cuse_register(args):
rpc.bdev.bdev_nvme_cuse_register(args.client,
name=args.name)
p = subparsers.add_parser('bdev_nvme_cuse_register',
help='Register CUSE devices on NVMe controller')
p.add_argument('-n', '--name',
help='Name of the NVMe controller. Example: Nvme0', required=True)
p.set_defaults(func=bdev_nvme_cuse_register)
def bdev_nvme_cuse_unregister(args):
rpc.bdev.bdev_nvme_cuse_unregister(args.client,
name=args.name)
p = subparsers.add_parser('bdev_nvme_cuse_unregister',
help='Unregister CUSE devices on NVMe controller')
p.add_argument('-n', '--name',
help='Name of the NVMe controller. Example: Nvme0', required=True)
p.set_defaults(func=bdev_nvme_cuse_unregister)
def bdev_zone_block_create(args):
print_json(rpc.bdev.bdev_zone_block_create(args.client,
name=args.name,
base_bdev=args.base_bdev,
zone_capacity=args.zone_capacity,
optimal_open_zones=args.optimal_open_zones))
p = subparsers.add_parser('bdev_zone_block_create',
help='Create virtual zone namespace device with block device backend')
p.add_argument('-b', '--name', help="Name of the zone device", required=True)
p.add_argument('-n', '--base-bdev', help='Name of underlying, non-zoned bdev', required=True)
p.add_argument('-z', '--zone-capacity', help='Surfaced zone capacity in blocks', type=int, required=True)
p.add_argument('-o', '--optimal-open-zones', help='Number of zones required to reach optimal write speed', type=int, required=True)
p.set_defaults(func=bdev_zone_block_create)
def bdev_zone_block_delete(args):
rpc.bdev.bdev_zone_block_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_zone_block_delete', help='Delete a virtual zone namespace device')
p.add_argument('name', help='Virtual zone bdev name')
p.set_defaults(func=bdev_zone_block_delete)
def bdev_rbd_create(args):
config = None
if args.config:
config = {}
for entry in args.config:
parts = entry.split('=', 1)
if len(parts) != 2:
raise Exception('--config %s not in key=value form' % entry)
config[parts[0]] = parts[1]
print_json(rpc.bdev.bdev_rbd_create(args.client,
name=args.name,
user=args.user,
config=config,
pool_name=args.pool_name,
rbd_name=args.rbd_name,
block_size=args.block_size))
p = subparsers.add_parser('bdev_rbd_create', aliases=['construct_rbd_bdev'],
help='Add a bdev with ceph rbd backend')
p.add_argument('-b', '--name', help="Name of the bdev", required=False)
p.add_argument('--user', help="Ceph user name (i.e. admin, not client.admin)", required=False)
p.add_argument('--config', action='append', metavar='key=value',
help="adds a key=value configuration option for rados_conf_set (default: rely on config file)")
p.add_argument('pool_name', help='rbd pool name')
p.add_argument('rbd_name', help='rbd image name')
p.add_argument('block_size', help='rbd block size', type=int)
p.set_defaults(func=bdev_rbd_create)
def bdev_rbd_delete(args):
rpc.bdev.bdev_rbd_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_rbd_delete', aliases=['delete_rbd_bdev'],
help='Delete a rbd bdev')
p.add_argument('name', help='rbd bdev name')
p.set_defaults(func=bdev_rbd_delete)
def bdev_delay_create(args):
print_json(rpc.bdev.bdev_delay_create(args.client,
base_bdev_name=args.base_bdev_name,
name=args.name,
avg_read_latency=args.avg_read_latency,
p99_read_latency=args.nine_nine_read_latency,
avg_write_latency=args.avg_write_latency,
p99_write_latency=args.nine_nine_write_latency))
p = subparsers.add_parser('bdev_delay_create',
help='Add a delay bdev on existing bdev')
p.add_argument('-b', '--base-bdev-name', help="Name of the existing bdev", required=True)
p.add_argument('-d', '--name', help="Name of the delay bdev", required=True)
p.add_argument('-r', '--avg-read-latency',
help="Average latency to apply before completing read ops (in microseconds)", required=True, type=int)
p.add_argument('-t', '--nine-nine-read-latency',
help="latency to apply to 1 in 100 read ops (in microseconds)", required=True, type=int)
p.add_argument('-w', '--avg-write-latency',
help="Average latency to apply before completing write ops (in microseconds)", required=True, type=int)
p.add_argument('-n', '--nine-nine-write-latency',
help="latency to apply to 1 in 100 write ops (in microseconds)", required=True, type=int)
p.set_defaults(func=bdev_delay_create)
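# Illustrative invocation (bdev names and latencies are hypothetical):
#   rpc.py bdev_delay_create -b Malloc0 -d delay0 -r 10 -t 100 -w 30 -n 300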
def bdev_delay_delete(args):
rpc.bdev.bdev_delay_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_delay_delete', help='Delete a delay bdev')
p.add_argument('name', help='delay bdev name')
p.set_defaults(func=bdev_delay_delete)
def bdev_delay_update_latency(args):
print_json(rpc.bdev.bdev_delay_update_latency(args.client,
delay_bdev_name=args.delay_bdev_name,
latency_type=args.latency_type,
latency_us=args.latency_us))
p = subparsers.add_parser('bdev_delay_update_latency',
help='Update one of the latency values for a given delay bdev')
p.add_argument('delay_bdev_name', help='The name of the given delay bdev')
p.add_argument('latency_type', help='one of: avg_read, avg_write, p99_read, p99_write. No other values accepted.')
p.add_argument('latency_us', help='new latency value in microseconds.', type=int)
p.set_defaults(func=bdev_delay_update_latency)
def bdev_error_create(args):
print_json(rpc.bdev.bdev_error_create(args.client,
base_name=args.base_name))
p = subparsers.add_parser('bdev_error_create', aliases=['construct_error_bdev'],
help='Add bdev with error injection backend')
p.add_argument('base_name', help='base bdev name')
p.set_defaults(func=bdev_error_create)
def bdev_error_delete(args):
rpc.bdev.bdev_error_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_error_delete', aliases=['delete_error_bdev'],
help='Delete an error bdev')
p.add_argument('name', help='error bdev name')
p.set_defaults(func=bdev_error_delete)
def bdev_iscsi_create(args):
print_json(rpc.bdev.bdev_iscsi_create(args.client,
name=args.name,
url=args.url,
initiator_iqn=args.initiator_iqn))
p = subparsers.add_parser('bdev_iscsi_create', aliases=['construct_iscsi_bdev'],
help='Add bdev with iSCSI initiator backend')
p.add_argument('-b', '--name', help="Name of the bdev", required=True)
p.add_argument('-i', '--initiator-iqn', help="Initiator IQN", required=True)
p.add_argument('--url', help="iSCSI Lun URL", required=True)
p.set_defaults(func=bdev_iscsi_create)
def bdev_iscsi_delete(args):
rpc.bdev.bdev_iscsi_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_iscsi_delete', aliases=['delete_iscsi_bdev'],
help='Delete an iSCSI bdev')
p.add_argument('name', help='iSCSI bdev name')
p.set_defaults(func=bdev_iscsi_delete)
def bdev_pmem_create(args):
print_json(rpc.bdev.bdev_pmem_create(args.client,
pmem_file=args.pmem_file,
name=args.name))
p = subparsers.add_parser('bdev_pmem_create', aliases=['construct_pmem_bdev'],
help='Add a bdev with pmem backend')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
p.add_argument('-n', '--name', help='Block device name', required=True)
p.set_defaults(func=bdev_pmem_create)
def bdev_pmem_delete(args):
rpc.bdev.bdev_pmem_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_pmem_delete', aliases=['delete_pmem_bdev'],
help='Delete a pmem bdev')
p.add_argument('name', help='pmem bdev name')
p.set_defaults(func=bdev_pmem_delete)
def bdev_passthru_create(args):
print_json(rpc.bdev.bdev_passthru_create(args.client,
base_bdev_name=args.base_bdev_name,
name=args.name))
p = subparsers.add_parser('bdev_passthru_create', aliases=['construct_passthru_bdev'],
help='Add a pass through bdev on existing bdev')
p.add_argument('-b', '--base-bdev-name', help="Name of the existing bdev", required=True)
p.add_argument('-p', '--name', help="Name of the pass through bdev", required=True)
p.set_defaults(func=bdev_passthru_create)
def bdev_passthru_delete(args):
rpc.bdev.bdev_passthru_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_passthru_delete', aliases=['delete_passthru_bdev'],
help='Delete a pass through bdev')
p.add_argument('name', help='pass through bdev name')
p.set_defaults(func=bdev_passthru_delete)
def bdev_get_bdevs(args):
print_dict(rpc.bdev.bdev_get_bdevs(args.client,
name=args.name))
p = subparsers.add_parser('bdev_get_bdevs', aliases=['get_bdevs'],
help='Display the current blockdev list or the specified blockdev')
p.add_argument('-b', '--name', help="Name of the Blockdev. Example: Nvme0n1", required=False)
p.set_defaults(func=bdev_get_bdevs)
def bdev_get_iostat(args):
print_dict(rpc.bdev.bdev_get_iostat(args.client,
name=args.name))
p = subparsers.add_parser('bdev_get_iostat', aliases=['get_bdevs_iostat'],
help='Display current I/O statistics of all blockdevs or the specified blockdev.')
p.add_argument('-b', '--name', help="Name of the Blockdev. Example: Nvme0n1", required=False)
p.set_defaults(func=bdev_get_iostat)
def bdev_enable_histogram(args):
rpc.bdev.bdev_enable_histogram(args.client, name=args.name, enable=args.enable)
p = subparsers.add_parser('bdev_enable_histogram', aliases=['enable_bdev_histogram'],
help='Enable or disable histogram for specified bdev')
p.add_argument('-e', '--enable', default=True, dest='enable', action='store_true', help='Enable histograms on specified device')
p.add_argument('-d', '--disable', dest='enable', action='store_false', help='Disable histograms on specified device')
p.add_argument('name', help='bdev name')
p.set_defaults(func=bdev_enable_histogram)
def bdev_get_histogram(args):
print_dict(rpc.bdev.bdev_get_histogram(args.client, name=args.name))
p = subparsers.add_parser('bdev_get_histogram', aliases=['get_bdev_histogram'],
help='Get histogram for specified bdev')
p.add_argument('name', help='bdev name')
p.set_defaults(func=bdev_get_histogram)
def bdev_set_qd_sampling_period(args):
rpc.bdev.bdev_set_qd_sampling_period(args.client,
name=args.name,
period=args.period)
p = subparsers.add_parser('bdev_set_qd_sampling_period', aliases=['set_bdev_qd_sampling_period'],
help="Enable or disable tracking of a bdev's queue depth.")
p.add_argument('name', help='Blockdev name. Example: Malloc0')
p.add_argument('period', help='Period with which to poll the block device queue depth in microseconds.'
' If set to 0, polling will be disabled.',
type=int)
p.set_defaults(func=bdev_set_qd_sampling_period)
def bdev_set_qos_limit(args):
rpc.bdev.bdev_set_qos_limit(args.client,
name=args.name,
rw_ios_per_sec=args.rw_ios_per_sec,
rw_mbytes_per_sec=args.rw_mbytes_per_sec,
r_mbytes_per_sec=args.r_mbytes_per_sec,
w_mbytes_per_sec=args.w_mbytes_per_sec)
p = subparsers.add_parser('bdev_set_qos_limit', aliases=['set_bdev_qos_limit'],
help='Set QoS rate limit on a blockdev')
p.add_argument('name', help='Blockdev name to set QoS. Example: Malloc0')
p.add_argument('--rw_ios_per_sec',
help='R/W IOs per second limit (>=10000, example: 20000). 0 means unlimited.',
type=int, required=False)
p.add_argument('--rw_mbytes_per_sec',
help="R/W megabytes per second limit (>=10, example: 100). 0 means unlimited.",
type=int, required=False)
p.add_argument('--r_mbytes_per_sec',
help="Read megabytes per second limit (>=10, example: 100). 0 means unlimited.",
type=int, required=False)
p.add_argument('--w_mbytes_per_sec',
help="Write megabytes per second limit (>=10, example: 100). 0 means unlimited.",
type=int, required=False)
p.set_defaults(func=bdev_set_qos_limit)
def bdev_error_inject_error(args):
rpc.bdev.bdev_error_inject_error(args.client,
name=args.name,
io_type=args.io_type,
error_type=args.error_type,
num=args.num)
p = subparsers.add_parser('bdev_error_inject_error', aliases=['bdev_inject_error'],
help='bdev inject error')
p.add_argument('name', help="""the name of the error injection bdev""")
p.add_argument('io_type', help="""io_type: 'clear' 'read' 'write' 'unmap' 'flush' 'all'""")
p.add_argument('error_type', help="""error_type: 'failure' 'pending'""")
p.add_argument(
'-n', '--num', help='the number of commands you want to fail', type=int, default=1)
p.set_defaults(func=bdev_error_inject_error)
def bdev_nvme_apply_firmware(args):
print_dict(rpc.bdev.bdev_nvme_apply_firmware(args.client,
bdev_name=args.bdev_name,
filename=args.filename))
p = subparsers.add_parser('bdev_nvme_apply_firmware', aliases=['apply_firmware'],
help='Download and commit firmware to NVMe device')
p.add_argument('filename', help='filename of the firmware to download')
p.add_argument('bdev_name', help='name of the NVMe device')
p.set_defaults(func=bdev_nvme_apply_firmware)
# iSCSI
def iscsi_set_options(args):
rpc.iscsi.iscsi_set_options(
args.client,
auth_file=args.auth_file,
node_base=args.node_base,
nop_timeout=args.nop_timeout,
nop_in_interval=args.nop_in_interval,
disable_chap=args.disable_chap,
require_chap=args.require_chap,
mutual_chap=args.mutual_chap,
chap_group=args.chap_group,
max_sessions=args.max_sessions,
max_queue_depth=args.max_queue_depth,
max_connections_per_session=args.max_connections_per_session,
default_time2wait=args.default_time2wait,
default_time2retain=args.default_time2retain,
first_burst_length=args.first_burst_length,
immediate_data=args.immediate_data,
error_recovery_level=args.error_recovery_level,
allow_duplicated_isid=args.allow_duplicated_isid)
p = subparsers.add_parser('iscsi_set_options', aliases=['set_iscsi_options'],
help="""Set options of iSCSI subsystem""")
p.add_argument('-f', '--auth-file', help='Path to CHAP shared secret file')
p.add_argument('-b', '--node-base', help='Prefix of the name of iSCSI target node')
p.add_argument('-o', '--nop-timeout', help='Timeout in seconds to nop-in request to the initiator', type=int)
p.add_argument('-n', '--nop-in-interval', help='Time interval in secs between nop-in requests by the target', type=int)
p.add_argument('-d', '--disable-chap', help="""CHAP for discovery session should be disabled.
*** Mutually exclusive with --require-chap""", action='store_true')
p.add_argument('-r', '--require-chap', help="""CHAP for discovery session should be required.
*** Mutually exclusive with --disable-chap""", action='store_true')
p.add_argument('-m', '--mutual-chap', help='CHAP for discovery session should be mutual', action='store_true')
p.add_argument('-g', '--chap-group', help="""Authentication group ID for discovery session.
*** Authentication group must be precreated ***""", type=int)
p.add_argument('-a', '--max-sessions', help='Maximum number of sessions in the host.', type=int)
p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/Os per queue.', type=int)
p.add_argument('-c', '--max-connections-per-session', help='Negotiated parameter, MaxConnections.', type=int)
p.add_argument('-w', '--default-time2wait', help='Negotiated parameter, DefaultTime2Wait.', type=int)
p.add_argument('-v', '--default-time2retain', help='Negotiated parameter, DefaultTime2Retain.', type=int)
p.add_argument('-s', '--first-burst-length', help='Negotiated parameter, FirstBurstLength.', type=int)
p.add_argument('-i', '--immediate-data', help='Negotiated parameter, ImmediateData.', action='store_true')
p.add_argument('-l', '--error-recovery-level', help='Negotiated parameter, ErrorRecoveryLevel', type=int)
p.add_argument('-p', '--allow-duplicated-isid', help='Allow duplicated initiator session ID.', action='store_true')
p.set_defaults(func=iscsi_set_options)
def iscsi_set_discovery_auth(args):
rpc.iscsi.iscsi_set_discovery_auth(
args.client,
disable_chap=args.disable_chap,
require_chap=args.require_chap,
mutual_chap=args.mutual_chap,
chap_group=args.chap_group)
p = subparsers.add_parser('iscsi_set_discovery_auth', aliases=['set_iscsi_discovery_auth'],
help="""Set CHAP authentication for discovery session.""")
p.add_argument('-d', '--disable-chap', help="""CHAP for discovery session should be disabled.
*** Mutually exclusive with --require-chap""", action='store_true')
p.add_argument('-r', '--require-chap', help="""CHAP for discovery session should be required.
*** Mutually exclusive with --disable-chap""", action='store_true')
p.add_argument('-m', '--mutual-chap', help='CHAP for discovery session should be mutual', action='store_true')
p.add_argument('-g', '--chap-group', help="""Authentication group ID for discovery session.
*** Authentication group must be precreated ***""", type=int)
p.set_defaults(func=iscsi_set_discovery_auth)
def iscsi_create_auth_group(args):
secrets = None
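# args.secrets is a comma-separated list of entries; each entry is a space-separated
# list of key:value tokens (user/secret/muser/msecret) that is turned into one dict per entry.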
if args.secrets:
secrets = [dict(u.split(":") for u in a.split(" ")) for a in args.secrets.split(",")]
rpc.iscsi.iscsi_create_auth_group(args.client, tag=args.tag, secrets=secrets)
p = subparsers.add_parser('iscsi_create_auth_group', aliases=['add_iscsi_auth_group'],
help='Create authentication group for CHAP authentication.')
p.add_argument('tag', help='Authentication group tag (unique, integer > 0).', type=int)
p.add_argument('-c', '--secrets', help="""Comma-separated list of CHAP secrets
<user:user_name secret:chap_secret muser:mutual_user_name msecret:mutual_chap_secret> enclosed in quotes.
Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 msecret:ms2'""", required=False)
p.set_defaults(func=iscsi_create_auth_group)
def iscsi_delete_auth_group(args):
rpc.iscsi.iscsi_delete_auth_group(args.client, tag=args.tag)
p = subparsers.add_parser('iscsi_delete_auth_group', aliases=['delete_iscsi_auth_group'],
help='Delete an authentication group.')
p.add_argument('tag', help='Authentication group tag', type=int)
p.set_defaults(func=iscsi_delete_auth_group)
def iscsi_auth_group_add_secret(args):
rpc.iscsi.iscsi_auth_group_add_secret(
args.client,
tag=args.tag,
user=args.user,
secret=args.secret,
muser=args.muser,
msecret=args.msecret)
p = subparsers.add_parser('iscsi_auth_group_add_secret', aliases=['add_secret_to_iscsi_auth_group'],
help='Add a secret to an authentication group.')
p.add_argument('tag', help='Authentication group tag', type=int)
p.add_argument('-u', '--user', help='User name for one-way CHAP authentication', required=True)
p.add_argument('-s', '--secret', help='Secret for one-way CHAP authentication', required=True)
p.add_argument('-m', '--muser', help='User name for mutual CHAP authentication')
p.add_argument('-r', '--msecret', help='Secret for mutual CHAP authentication')
p.set_defaults(func=iscsi_auth_group_add_secret)
def iscsi_auth_group_remove_secret(args):
rpc.iscsi.iscsi_auth_group_remove_secret(args.client, tag=args.tag, user=args.user)
p = subparsers.add_parser('iscsi_auth_group_remove_secret', aliases=['delete_secret_from_iscsi_auth_group'],
help='Remove a secret from an authentication group.')
p.add_argument('tag', help='Authentication group tag', type=int)
p.add_argument('-u', '--user', help='User name for one-way CHAP authentication', required=True)
p.set_defaults(func=iscsi_auth_group_remove_secret)
def iscsi_get_auth_groups(args):
print_dict(rpc.iscsi.iscsi_get_auth_groups(args.client))
p = subparsers.add_parser('iscsi_get_auth_groups', aliases=['get_iscsi_auth_groups'],
help='Display current authentication group configuration')
p.set_defaults(func=iscsi_get_auth_groups)
def iscsi_get_portal_groups(args):
print_dict(rpc.iscsi.iscsi_get_portal_groups(args.client))
p = subparsers.add_parser(
'iscsi_get_portal_groups', aliases=['get_portal_groups'],
help='Display current portal group configuration')
p.set_defaults(func=iscsi_get_portal_groups)
def iscsi_get_initiator_groups(args):
print_dict(rpc.iscsi.iscsi_get_initiator_groups(args.client))
p = subparsers.add_parser('iscsi_get_initiator_groups',
aliases=['get_initiator_groups'],
help='Display current initiator group configuration')
p.set_defaults(func=iscsi_get_initiator_groups)
def iscsi_get_target_nodes(args):
print_dict(rpc.iscsi.iscsi_get_target_nodes(args.client))
p = subparsers.add_parser('iscsi_get_target_nodes', aliases=['get_target_nodes'],
help='Display target nodes')
p.set_defaults(func=iscsi_get_target_nodes)
def iscsi_create_target_node(args):
luns = []
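# args.bdev_name_id_pairs is a whitespace-separated list of 'bdev_name:lun_id' pairs;
# build one {"bdev_name", "lun_id"} dict per pair for the RPC call.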
for u in args.bdev_name_id_pairs.strip().split(" "):
bdev_name, lun_id = u.split(":")
luns.append({"bdev_name": bdev_name, "lun_id": int(lun_id)})
pg_ig_maps = []
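# args.pg_ig_mappings is a whitespace-separated list of 'pg_tag:ig_tag' pairs that map
# portal group tags to initiator group tags.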
for u in args.pg_ig_mappings.strip().split(" "):
pg, ig = u.split(":")
pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
rpc.iscsi.iscsi_create_target_node(
args.client,
luns=luns,
pg_ig_maps=pg_ig_maps,
name=args.name,
alias_name=args.alias_name,
queue_depth=args.queue_depth,
chap_group=args.chap_group,
disable_chap=args.disable_chap,
require_chap=args.require_chap,
mutual_chap=args.mutual_chap,
header_digest=args.header_digest,
data_digest=args.data_digest)
p = subparsers.add_parser('iscsi_create_target_node', aliases=['construct_target_node'],
help='Add a target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('alias_name', help='Target node alias name (ASCII)')
p.add_argument('bdev_name_id_pairs', help="""Whitespace-separated list of <bdev name:LUN ID> pairs enclosed
in quotes. Format: 'bdev_name0:id0 bdev_name1:id1' etc
Example: 'Malloc0:0 Malloc1:1 Malloc5:2'
*** The bdevs must pre-exist ***
*** LUN0 (id = 0) is required ***
*** bdev names cannot contain space or colon characters ***""")
p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
Whitespace separated, quoted, mapping defined with colon
separated list of "tags" (int > 0)
Example: '1:1 2:2 2:1'
*** The Portal/Initiator Groups must be precreated ***""")
p.add_argument('queue_depth', help='Desired target queue depth', type=int)
p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
*** Authentication group must be precreated ***""", type=int, default=0)
p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
*** Mutually exclusive with --require-chap ***""", action='store_true')
p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
*** Mutually exclusive with --disable-chap ***""", action='store_true')
p.add_argument(
'-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.', action='store_true')
p.add_argument('-H', '--header-digest',
help='Header Digest should be required for this target node.', action='store_true')
p.add_argument('-D', '--data-digest',
help='Data Digest should be required for this target node.', action='store_true')
p.set_defaults(func=iscsi_create_target_node)
def iscsi_target_node_add_lun(args):
rpc.iscsi.iscsi_target_node_add_lun(
args.client,
name=args.name,
bdev_name=args.bdev_name,
lun_id=args.lun_id)
p = subparsers.add_parser('iscsi_target_node_add_lun', aliases=['target_node_add_lun'],
help='Add LUN to the target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('bdev_name', help="""bdev name enclosed in quotes.
*** bdev name cannot contain space or colon characters ***""")
p.add_argument('-i', dest='lun_id', help="""LUN ID (integer >= 0)
*** If LUN ID is omitted or -1, the lowest free one is assigned ***""", type=int, required=False)
p.set_defaults(func=iscsi_target_node_add_lun)
def iscsi_target_node_set_auth(args):
rpc.iscsi.iscsi_target_node_set_auth(
args.client,
name=args.name,
chap_group=args.chap_group,
disable_chap=args.disable_chap,
require_chap=args.require_chap,
mutual_chap=args.mutual_chap)
p = subparsers.add_parser('iscsi_target_node_set_auth', aliases=['set_iscsi_target_node_auth'],
help='Set CHAP authentication for the target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('-g', '--chap-group', help="""Authentication group ID for this target node.
*** Authentication group must be precreated ***""", type=int, default=0)
p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this target node.
*** Mutually exclusive with --require-chap ***""", action='store_true')
p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this target node.
*** Mutually exclusive with --disable-chap ***""", action='store_true')
p.add_argument('-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.',
action='store_true')
p.set_defaults(func=iscsi_target_node_set_auth)
def iscsi_target_node_add_pg_ig_maps(args):
pg_ig_maps = []
for u in args.pg_ig_mappings.strip().split(" "):
pg, ig = u.split(":")
pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
rpc.iscsi.iscsi_target_node_add_pg_ig_maps(
args.client,
pg_ig_maps=pg_ig_maps,
name=args.name)
p = subparsers.add_parser('iscsi_target_node_add_pg_ig_maps',
aliases=['add_pg_ig_maps'],
help='Add PG-IG maps to the target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
Whitespace separated, quoted, mapping defined with colon
separated list of "tags" (int > 0)
Example: '1:1 2:2 2:1'
*** The Portal/Initiator Groups must be precreated ***""")
p.set_defaults(func=iscsi_target_node_add_pg_ig_maps)
def iscsi_target_node_remove_pg_ig_maps(args):
pg_ig_maps = []
for u in args.pg_ig_mappings.strip().split(" "):
pg, ig = u.split(":")
pg_ig_maps.append({"pg_tag": int(pg), "ig_tag": int(ig)})
rpc.iscsi.iscsi_target_node_remove_pg_ig_maps(
args.client, pg_ig_maps=pg_ig_maps, name=args.name)
p = subparsers.add_parser('iscsi_target_node_remove_pg_ig_maps',
aliases=['delete_pg_ig_maps'],
help='Delete PG-IG maps from the target node')
p.add_argument('name', help='Target node name (ASCII)')
p.add_argument('pg_ig_mappings', help="""List of (Portal_Group_Tag:Initiator_Group_Tag) mappings
Whitespace separated, quoted, mapping defined with colon
separated list of "tags" (int > 0)
Example: '1:1 2:2 2:1'
*** The Portal/Initiator Groups must be precreated ***""")
p.set_defaults(func=iscsi_target_node_remove_pg_ig_maps)
def iscsi_create_portal_group(args):
portals = []
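# Each portal is given as 'host:port' with an optional '@cpumask' suffix; the cpumask is
# parsed but ignored (with a warning). rpartition on ':' keeps colons in the host part intact.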
for p in args.portal_list.strip().split(' '):
ip, separator, port_cpumask = p.rpartition(':')
split_port_cpumask = port_cpumask.split('@')
if len(split_port_cpumask) == 1:
port = port_cpumask
portals.append({'host': ip, 'port': port})
else:
port = split_port_cpumask[0]
cpumask = split_port_cpumask[1]
portals.append({'host': ip, 'port': port})
print("WARNING: Specifying a portal group with a CPU mask is no longer supported. Ignoring it.")
rpc.iscsi.iscsi_create_portal_group(
args.client,
portals=portals,
tag=args.tag)
p = subparsers.add_parser('iscsi_create_portal_group', aliases=['add_portal_group'],
help='Add a portal group')
p.add_argument(
'tag', help='Portal group tag (unique, integer > 0)', type=int)
p.add_argument('portal_list', help="""List of portals in host:port format, separated by whitespace
Example: '192.168.100.100:3260 192.168.100.100:3261 192.168.100.100:3262'""")
p.set_defaults(func=iscsi_create_portal_group)
def iscsi_create_initiator_group(args):
initiators = []
netmasks = []
for i in args.initiator_list.strip().split(' '):
initiators.append(i)
for n in args.netmask_list.strip().split(' '):
netmasks.append(n)
rpc.iscsi.iscsi_create_initiator_group(
args.client,
tag=args.tag,
initiators=initiators,
netmasks=netmasks)
p = subparsers.add_parser('iscsi_create_initiator_group', aliases=['add_initiator_group'],
help='Add an initiator group')
p.add_argument(
'tag', help='Initiator group tag (unique, integer > 0)', type=int)
p.add_argument('initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
enclosed in quotes. Example: 'ANY' or '127.0.0.1 192.168.200.100'""")
p.add_argument('netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
Example: '255.255.0.0 255.248.0.0' etc""")
p.set_defaults(func=iscsi_create_initiator_group)
def iscsi_initiator_group_add_initiators(args):
initiators = None
netmasks = None
if args.initiator_list:
initiators = []
for i in args.initiator_list.strip().split(' '):
initiators.append(i)
if args.netmask_list:
netmasks = []
for n in args.netmask_list.strip().split(' '):
netmasks.append(n)
rpc.iscsi.iscsi_initiator_group_add_initiators(
args.client,
tag=args.tag,
initiators=initiators,
netmasks=netmasks)
p = subparsers.add_parser('iscsi_initiator_group_add_initiators',
aliases=['add_initiators_to_initiator_group'],
help='Add initiators to an existing initiator group')
p.add_argument(
'tag', help='Initiator group tag (unique, integer > 0)', type=int)
p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
enclosed in quotes. This parameter can be omitted. Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
This parameter can be omitted. Example: '255.255.0.0 255.248.0.0' etc""", required=False)
p.set_defaults(func=iscsi_initiator_group_add_initiators)
def iscsi_initiator_group_remove_initiators(args):
initiators = None
netmasks = None
if args.initiator_list:
initiators = []
for i in args.initiator_list.strip().split(' '):
initiators.append(i)
if args.netmask_list:
netmasks = []
for n in args.netmask_list.strip().split(' '):
netmasks.append(n)
rpc.iscsi.iscsi_initiator_group_remove_initiators(
args.client,
tag=args.tag,
initiators=initiators,
netmasks=netmasks)
p = subparsers.add_parser('iscsi_initiator_group_remove_initiators',
aliases=['delete_initiators_from_initiator_group'],
help='Delete initiators from an existing initiator group')
p.add_argument(
'tag', help='Initiator group tag (unique, integer > 0)', type=int)
p.add_argument('-n', dest='initiator_list', help="""Whitespace-separated list of initiator hostnames or IP addresses,
enclosed in quotes. This parameter can be omitted. Example: 'ANY' or '127.0.0.1 192.168.200.100'""", required=False)
p.add_argument('-m', dest='netmask_list', help="""Whitespace-separated list of initiator netmasks enclosed in quotes.
This parameter can be omitted. Example: '255.255.0.0 255.248.0.0' etc""", required=False)
p.set_defaults(func=iscsi_initiator_group_remove_initiators)
def iscsi_delete_target_node(args):
rpc.iscsi.iscsi_delete_target_node(
args.client, target_node_name=args.target_node_name)
p = subparsers.add_parser('iscsi_delete_target_node', aliases=['delete_target_node'],
help='Delete a target node')
p.add_argument('target_node_name',
help='Target node name to be deleted. Example: iqn.2016-06.io.spdk:disk1.')
p.set_defaults(func=iscsi_delete_target_node)
def iscsi_delete_portal_group(args):
rpc.iscsi.iscsi_delete_portal_group(args.client, tag=args.tag)
p = subparsers.add_parser('iscsi_delete_portal_group',
aliases=['delete_portal_group'],
help='Delete a portal group')
p.add_argument(
'tag', help='Portal group tag (unique, integer > 0)', type=int)
p.set_defaults(func=iscsi_delete_portal_group)
def iscsi_delete_initiator_group(args):
rpc.iscsi.iscsi_delete_initiator_group(args.client, tag=args.tag)
p = subparsers.add_parser('iscsi_delete_initiator_group',
aliases=['delete_initiator_group'],
help='Delete an initiator group')
p.add_argument(
'tag', help='Initiator group tag (unique, integer > 0)', type=int)
p.set_defaults(func=iscsi_delete_initiator_group)
def iscsi_portal_group_set_auth(args):
rpc.iscsi.iscsi_portal_group_set_auth(
args.client,
tag=args.tag,
chap_group=args.chap_group,
disable_chap=args.disable_chap,
require_chap=args.require_chap,
mutual_chap=args.mutual_chap)
p = subparsers.add_parser('iscsi_portal_group_set_auth',
help='Set CHAP authentication for discovery sessions specific for the portal group')
p.add_argument('tag', help='Portal group tag (unique, integer > 0)', type=int)
p.add_argument('-g', '--chap-group', help="""Authentication group ID for this portal group.
*** Authentication group must be precreated ***""", type=int, default=0)
p.add_argument('-d', '--disable-chap', help="""CHAP authentication should be disabled for this portal group.
*** Mutually exclusive with --require-chap ***""", action='store_true')
p.add_argument('-r', '--require-chap', help="""CHAP authentication should be required for this portal group.
*** Mutually exclusive with --disable-chap ***""", action='store_true')
p.add_argument('-m', '--mutual-chap', help='CHAP authentication should be mutual/bidirectional.',
action='store_true')
p.set_defaults(func=iscsi_portal_group_set_auth)
def iscsi_get_connections(args):
print_dict(rpc.iscsi.iscsi_get_connections(args.client))
p = subparsers.add_parser('iscsi_get_connections', aliases=['get_iscsi_connections'],
help='Display iSCSI connections')
p.set_defaults(func=iscsi_get_connections)
def iscsi_get_options(args):
print_dict(rpc.iscsi.iscsi_get_options(args.client))
p = subparsers.add_parser('iscsi_get_options', aliases=['get_iscsi_global_params'],
help='Display iSCSI global parameters')
p.set_defaults(func=iscsi_get_options)
def scsi_get_devices(args):
print_dict(rpc.iscsi.scsi_get_devices(args.client))
p = subparsers.add_parser('scsi_get_devices', aliases=['get_scsi_devices'],
help='Display SCSI devices')
p.set_defaults(func=scsi_get_devices)
# trace
def trace_enable_tpoint_group(args):
rpc.trace.trace_enable_tpoint_group(args.client, name=args.name)
p = subparsers.add_parser('trace_enable_tpoint_group', aliases=['enable_tpoint_group'],
help='enable trace on a specific tpoint group')
p.add_argument(
'name', help="""trace group name we want to enable in tpoint_group_mask.
(for example "bdev" for bdev trace group, "all" for all trace groups).""")
p.set_defaults(func=trace_enable_tpoint_group)
def trace_disable_tpoint_group(args):
rpc.trace.trace_disable_tpoint_group(args.client, name=args.name)
p = subparsers.add_parser('trace_disable_tpoint_group', aliases=['disable_tpoint_group'],
help='disable trace on a specific tpoint group')
p.add_argument(
'name', help="""trace group name we want to disable in tpoint_group_mask.
(for example "bdev" for bdev trace group, "all" for all trace groups).""")
p.set_defaults(func=trace_disable_tpoint_group)
def trace_get_tpoint_group_mask(args):
print_dict(rpc.trace.trace_get_tpoint_group_mask(args.client))
p = subparsers.add_parser('trace_get_tpoint_group_mask', aliases=['get_tpoint_group_mask'],
help='get trace point group mask')
p.set_defaults(func=trace_get_tpoint_group_mask)
# log
def log_set_flag(args):
rpc.log.log_set_flag(args.client, flag=args.flag)
p = subparsers.add_parser('log_set_flag', help='set log flag', aliases=['set_log_flag'])
p.add_argument(
'flag', help='log flag we want to set. (for example "nvme").')
p.set_defaults(func=log_set_flag)
def log_clear_flag(args):
rpc.log.log_clear_flag(args.client, flag=args.flag)
p = subparsers.add_parser('log_clear_flag', help='clear log flag', aliases=['clear_log_flag'])
p.add_argument(
'flag', help='log flag we want to clear. (for example "nvme").')
p.set_defaults(func=log_clear_flag)
def log_get_flags(args):
print_dict(rpc.log.log_get_flags(args.client))
p = subparsers.add_parser('log_get_flags', help='get log flags', aliases=['get_log_flags'])
p.set_defaults(func=log_get_flags)
def log_set_level(args):
rpc.log.log_set_level(args.client, level=args.level)
p = subparsers.add_parser('log_set_level', aliases=['set_log_level'],
help='set log level')
p.add_argument('level', help='log level we want to set. (for example "DEBUG").')
p.set_defaults(func=log_set_level)
def log_get_level(args):
print_dict(rpc.log.log_get_level(args.client))
p = subparsers.add_parser('log_get_level', aliases=['get_log_level'],
help='get log level')
p.set_defaults(func=log_get_level)
def log_set_print_level(args):
rpc.log.log_set_print_level(args.client, level=args.level)
p = subparsers.add_parser('log_set_print_level', aliases=['set_log_print_level'],
help='set log print level')
p.add_argument('level', help='log print level we want to set. (for example "DEBUG").')
p.set_defaults(func=log_set_print_level)
def log_get_print_level(args):
print_dict(rpc.log.log_get_print_level(args.client))
p = subparsers.add_parser('log_get_print_level', aliases=['get_log_print_level'],
help='get log print level')
p.set_defaults(func=log_get_print_level)
# lvol
def bdev_lvol_create_lvstore(args):
print_json(rpc.lvol.bdev_lvol_create_lvstore(args.client,
bdev_name=args.bdev_name,
lvs_name=args.lvs_name,
cluster_sz=args.cluster_sz,
clear_method=args.clear_method))
p = subparsers.add_parser('bdev_lvol_create_lvstore', aliases=['construct_lvol_store'],
help='Add logical volume store on base bdev')
p.add_argument('bdev_name', help='base bdev name')
p.add_argument('lvs_name', help='name for lvol store')
p.add_argument('-c', '--cluster-sz', help='size of cluster (in bytes)', type=int, required=False)
p.add_argument('--clear-method', help="""Change clear method for data region.
Available: none, unmap, write_zeroes""", required=False)
p.set_defaults(func=bdev_lvol_create_lvstore)
def bdev_lvol_rename_lvstore(args):
rpc.lvol.bdev_lvol_rename_lvstore(args.client,
old_name=args.old_name,
new_name=args.new_name)
p = subparsers.add_parser('bdev_lvol_rename_lvstore', aliases=['rename_lvol_store'],
help='Change logical volume store name')
p.add_argument('old_name', help='old name')
p.add_argument('new_name', help='new name')
p.set_defaults(func=bdev_lvol_rename_lvstore)
def bdev_lvol_create(args):
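# The CLI takes the lvol size in MiB; it is converted to bytes before issuing the RPC.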
print_json(rpc.lvol.bdev_lvol_create(args.client,
lvol_name=args.lvol_name,
size=args.size * 1024 * 1024,
thin_provision=args.thin_provision,
clear_method=args.clear_method,
uuid=args.uuid,
lvs_name=args.lvs_name))
p = subparsers.add_parser('bdev_lvol_create', aliases=['construct_lvol_bdev'],
help='Add a bdev with a logical volume backend')
p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
p.add_argument('-t', '--thin-provision', action='store_true', help='create lvol bdev as thin provisioned')
p.add_argument('-c', '--clear-method', help="""Change default data clusters clear method.
Available: none, unmap, write_zeroes""", required=False)
p.add_argument('lvol_name', help='name for this lvol')
p.add_argument('size', help='size in MiB for this bdev', type=int)
p.set_defaults(func=bdev_lvol_create)
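# Illustrative invocation (store and lvol names are hypothetical):
#   rpc.py bdev_lvol_create -l lvs0 lvol0 64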
def bdev_lvol_snapshot(args):
print_json(rpc.lvol.bdev_lvol_snapshot(args.client,
lvol_name=args.lvol_name,
snapshot_name=args.snapshot_name))
p = subparsers.add_parser('bdev_lvol_snapshot', aliases=['snapshot_lvol_bdev'],
help='Create a snapshot of an lvol bdev')
p.add_argument('lvol_name', help='lvol bdev name')
p.add_argument('snapshot_name', help='lvol snapshot name')
p.set_defaults(func=bdev_lvol_snapshot)
def bdev_lvol_clone(args):
print_json(rpc.lvol.bdev_lvol_clone(args.client,
snapshot_name=args.snapshot_name,
clone_name=args.clone_name))
p = subparsers.add_parser('bdev_lvol_clone', aliases=['clone_lvol_bdev'],
help='Create a clone of an lvol snapshot')
p.add_argument('snapshot_name', help='lvol snapshot name')
p.add_argument('clone_name', help='lvol clone name')
p.set_defaults(func=bdev_lvol_clone)
def bdev_lvol_rename(args):
rpc.lvol.bdev_lvol_rename(args.client,
old_name=args.old_name,
new_name=args.new_name)
p = subparsers.add_parser('bdev_lvol_rename', aliases=['rename_lvol_bdev'],
help='Change lvol bdev name')
p.add_argument('old_name', help='lvol bdev name')
p.add_argument('new_name', help='new lvol name')
p.set_defaults(func=bdev_lvol_rename)
def bdev_lvol_inflate(args):
rpc.lvol.bdev_lvol_inflate(args.client,
name=args.name)
p = subparsers.add_parser('bdev_lvol_inflate', aliases=['inflate_lvol_bdev'],
help='Make thin provisioned lvol a thick provisioned lvol')
p.add_argument('name', help='lvol bdev name')
p.set_defaults(func=bdev_lvol_inflate)
def bdev_lvol_decouple_parent(args):
rpc.lvol.bdev_lvol_decouple_parent(args.client,
name=args.name)
p = subparsers.add_parser('bdev_lvol_decouple_parent', aliases=['decouple_parent_lvol_bdev'],
help='Decouple parent of lvol')
p.add_argument('name', help='lvol bdev name')
p.set_defaults(func=bdev_lvol_decouple_parent)
def bdev_lvol_resize(args):
rpc.lvol.bdev_lvol_resize(args.client,
name=args.name,
size=args.size * 1024 * 1024)
p = subparsers.add_parser('bdev_lvol_resize', aliases=['resize_lvol_bdev'],
help='Resize existing lvol bdev')
p.add_argument('name', help='lvol bdev name')
p.add_argument('size', help='new size in MiB for this bdev', type=int)
p.set_defaults(func=bdev_lvol_resize)
def bdev_lvol_set_read_only(args):
rpc.lvol.bdev_lvol_set_read_only(args.client,
name=args.name)
p = subparsers.add_parser('bdev_lvol_set_read_only', aliases=['set_read_only_lvol_bdev'],
help='Mark lvol bdev as read only')
p.add_argument('name', help='lvol bdev name')
p.set_defaults(func=bdev_lvol_set_read_only)
def bdev_lvol_delete(args):
rpc.lvol.bdev_lvol_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_lvol_delete', aliases=['destroy_lvol_bdev'],
help='Destroy a logical volume')
p.add_argument('name', help='lvol bdev name')
p.set_defaults(func=bdev_lvol_delete)
def bdev_lvol_delete_lvstore(args):
rpc.lvol.bdev_lvol_delete_lvstore(args.client,
uuid=args.uuid,
lvs_name=args.lvs_name)
p = subparsers.add_parser('bdev_lvol_delete_lvstore', aliases=['destroy_lvol_store'],
help='Destroy a logical volume store')
p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
p.set_defaults(func=bdev_lvol_delete_lvstore)
def bdev_lvol_get_lvstores(args):
print_dict(rpc.lvol.bdev_lvol_get_lvstores(args.client,
uuid=args.uuid,
lvs_name=args.lvs_name))
p = subparsers.add_parser('bdev_lvol_get_lvstores', aliases=['get_lvol_stores'],
help='Display current logical volume store list')
p.add_argument('-u', '--uuid', help='lvol store UUID', required=False)
p.add_argument('-l', '--lvs-name', help='lvol store name', required=False)
p.set_defaults(func=bdev_lvol_get_lvstores)
def bdev_raid_get_bdevs(args):
print_array(rpc.bdev.bdev_raid_get_bdevs(args.client,
category=args.category))
p = subparsers.add_parser('bdev_raid_get_bdevs', aliases=['get_raid_bdevs'],
help="""This is used to list all the raid bdev names based on the input category
requested. Category should be one of 'all', 'online', 'configuring' or 'offline'. 'all' means all the raid bdevs whether
they are online or configuring or offline. 'online' is the raid bdev which is registered with bdev layer. 'configuring'
is the raid bdev which does not have full configuration discovered yet. 'offline' is the raid bdev which is not registered
with bdev as of now and it has encountered any error or user has requested to offline the raid bdev""")
p.add_argument('category', help='all or online or configuring or offline')
p.set_defaults(func=bdev_raid_get_bdevs)
def bdev_raid_create(args):
base_bdevs = []
for u in args.base_bdevs.strip().split(" "):
base_bdevs.append(u)
rpc.bdev.bdev_raid_create(args.client,
name=args.name,
strip_size=args.strip_size,
strip_size_kb=args.strip_size_kb,
raid_level=args.raid_level,
base_bdevs=base_bdevs)
p = subparsers.add_parser('bdev_raid_create', aliases=['construct_raid_bdev'],
help='Create new raid bdev')
p.add_argument('-n', '--name', help='raid bdev name', required=True)
p.add_argument('-s', '--strip-size', help='strip size in KB (deprecated)', type=int)
p.add_argument('-z', '--strip-size_kb', help='strip size in KB', type=int)
p.add_argument('-r', '--raid-level', help='raid level, only raid level 0 is supported', required=True)
p.add_argument('-b', '--base-bdevs', help='base bdevs name, whitespace separated list in quotes', required=True)
p.set_defaults(func=bdev_raid_create)
def bdev_raid_delete(args):
rpc.bdev.bdev_raid_delete(args.client,
name=args.name)
p = subparsers.add_parser('bdev_raid_delete', aliases=['destroy_raid_bdev'],
help='Delete existing raid bdev')
p.add_argument('name', help='raid bdev name')
p.set_defaults(func=bdev_raid_delete)
# split
def bdev_split_create(args):
print_array(rpc.bdev.bdev_split_create(args.client,
base_bdev=args.base_bdev,
split_count=args.split_count,
split_size_mb=args.split_size_mb))
p = subparsers.add_parser('bdev_split_create', aliases=['construct_split_vbdev'],
help="""Add given disk name to split config. If bdev with base_name
name exist the split bdevs will be created right away, if not split bdevs will be created when base bdev became
available (during examination process).""")
p.add_argument('base_bdev', help='base bdev name')
p.add_argument('-s', '--split-size-mb', help='size in MiB for each bdev', type=int, default=0)
p.add_argument('split_count', help="""Number of split bdevs to create. Split size * split_count must not
exceed the base bdev size.""", type=int)
p.set_defaults(func=bdev_split_create)
def bdev_split_delete(args):
rpc.bdev.bdev_split_delete(args.client,
base_bdev=args.base_bdev)
p = subparsers.add_parser('bdev_split_delete', aliases=['destruct_split_vbdev'],
help="""Delete split config with all created splits.""")
p.add_argument('base_bdev', help='base bdev name')
p.set_defaults(func=bdev_split_delete)
# ftl
ftl_valid_limits = ('crit', 'high', 'low', 'start')
def bdev_ftl_create(args):
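# Defrag limits are passed as comma-separated 'level:value' pairs, where level is one of
# ftl_valid_limits; parse_limits() expands them into limit_<level>[<suffix>] keyword arguments.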
def parse_limits(limits, arg_dict, key_suffix=''):
for limit in limits.split(','):
key, value = limit.split(':', 1)
if key in ftl_valid_limits:
arg_dict['limit_' + key + key_suffix] = int(value)
else:
raise ValueError('Limit {} is not supported'.format(key))
arg_limits = {}
if args.limit_threshold:
parse_limits(args.limit_threshold, arg_limits, '_threshold')
if args.limit:
parse_limits(args.limit, arg_limits)
print_dict(rpc.bdev.bdev_ftl_create(args.client,
name=args.name,
base_bdev=args.base_bdev,
uuid=args.uuid,
cache=args.cache,
allow_open_bands=args.allow_open_bands,
overprovisioning=args.overprovisioning,
**arg_limits))
p = subparsers.add_parser('bdev_ftl_create', aliases=['construct_ftl_bdev'], help='Add FTL bdev')
p.add_argument('-b', '--name', help="Name of the bdev", required=True)
p.add_argument('-d', '--base_bdev', help='Name of zoned bdev used as underlying device',
required=True)
p.add_argument('-u', '--uuid', help='UUID of restored bdev (not applicable when creating new '
'instance): e.g. b286d19a-0059-4709-abcd-9f7732b1567d (optional)')
p.add_argument('-c', '--cache', help='Name of the bdev to be used as a write buffer cache (optional)')
p.add_argument('-o', '--allow_open_bands', help='Restoring after dirty shutdown without cache will'
' result in partial data recovery, instead of error', action='store_true')
p.add_argument('--overprovisioning', help='Percentage of device used for relocation, not exposed'
' to user (optional)', type=int)
limits = p.add_argument_group('Defrag limits', 'Configures defrag limits and thresholds for'
' levels ' + str(ftl_valid_limits)[1:-1])
limits.add_argument('--limit', help='Percentage of allowed user versus internal writes at given'
' levels, e.g. crit:0,high:20,low:80')
limits.add_argument('--limit-threshold', help='Number of free bands triggering a given level of'
' write limiting e.g. crit:1,high:2,low:3,start:4')
p.set_defaults(func=bdev_ftl_create)
def bdev_ftl_delete(args):
print_dict(rpc.bdev.bdev_ftl_delete(args.client, name=args.name))
p = subparsers.add_parser('bdev_ftl_delete', aliases=['delete_ftl_bdev'],
help='Delete FTL bdev')
p.add_argument('-b', '--name', help="Name of the bdev", required=True)
p.set_defaults(func=bdev_ftl_delete)
# vmd
def enable_vmd(args):
print_dict(rpc.vmd.enable_vmd(args.client))
p = subparsers.add_parser('enable_vmd', help='Enable VMD enumeration')
p.set_defaults(func=enable_vmd)
# nbd
def nbd_start_disk(args):
print(rpc.nbd.nbd_start_disk(args.client,
bdev_name=args.bdev_name,
nbd_device=args.nbd_device))
p = subparsers.add_parser('nbd_start_disk', aliases=['start_nbd_disk'],
help='Export a bdev as an nbd disk')
p.add_argument('bdev_name', help='Blockdev name to be exported. Example: Malloc0.')
p.add_argument('nbd_device', help='Nbd device name to be assigned. Example: /dev/nbd0.', nargs='?')
p.set_defaults(func=nbd_start_disk)
def nbd_stop_disk(args):
rpc.nbd.nbd_stop_disk(args.client,
nbd_device=args.nbd_device)
p = subparsers.add_parser('nbd_stop_disk', aliases=['stop_nbd_disk'],
help='Stop an nbd disk')
p.add_argument('nbd_device', help='Nbd device name to be stopped. Example: /dev/nbd0.')
p.set_defaults(func=nbd_stop_disk)
def nbd_get_disks(args):
print_dict(rpc.nbd.nbd_get_disks(args.client,
nbd_device=args.nbd_device))
p = subparsers.add_parser('nbd_get_disks', aliases=['get_nbd_disks'],
help='Display full or specified nbd device list')
p.add_argument('-n', '--nbd-device', help="Path of the nbd device. Example: /dev/nbd0", required=False)
p.set_defaults(func=nbd_get_disks)
# net
def net_interface_add_ip_address(args):
rpc.net.net_interface_add_ip_address(args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)
p = subparsers.add_parser('net_interface_add_ip_address', aliases=['add_ip_address'],
help='Add IP address')
p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
p.add_argument('ip_addr', help='ip address will be added.')
p.set_defaults(func=net_interface_add_ip_address)
def net_interface_delete_ip_address(args):
rpc.net.net_interface_delete_ip_address(args.client, ifc_index=args.ifc_index, ip_addr=args.ip_addr)
p = subparsers.add_parser('net_interface_delete_ip_address', aliases=['delete_ip_address'],
help='Delete IP address')
p.add_argument('ifc_index', help='ifc index of the nic device.', type=int)
p.add_argument('ip_addr', help='ip address will be deleted.')
p.set_defaults(func=net_interface_delete_ip_address)
def net_get_interfaces(args):
print_dict(rpc.net.net_get_interfaces(args.client))
p = subparsers.add_parser(
'net_get_interfaces', aliases=['get_interfaces'], help='Display current interface list')
p.set_defaults(func=net_get_interfaces)
# NVMe-oF
def nvmf_set_max_subsystems(args):
rpc.nvmf.nvmf_set_max_subsystems(args.client,
max_subsystems=args.max_subsystems)
p = subparsers.add_parser('nvmf_set_max_subsystems', aliases=['set_nvmf_target_max_subsystems'],
help='Set the maximum number of NVMf target subsystems')
p.add_argument('-x', '--max-subsystems', help='Max number of NVMf subsystems', type=int, required=True)
p.set_defaults(func=nvmf_set_max_subsystems)
def nvmf_set_config(args):
rpc.nvmf.nvmf_set_config(args.client,
acceptor_poll_rate=args.acceptor_poll_rate,
conn_sched=args.conn_sched,
passthru_identify_ctrlr=args.passthru_identify_ctrlr)
p = subparsers.add_parser('nvmf_set_config', aliases=['set_nvmf_target_config'],
help='Set NVMf target config')
p.add_argument('-r', '--acceptor-poll-rate', help='Polling interval of the acceptor for incoming connections (usec)', type=int)
p.add_argument('-s', '--conn-sched', help="""'roundrobin' - Schedule the incoming connections from any host
on the cores in a round robin manner (Default). 'hostip' - Schedule all the incoming connections from a
specific host IP on to the same core. Connections from different IP will be assigned to cores in a round
robin manner. 'transport' - Schedule the connection according to the transport characteristics.""")
p.add_argument('-i', '--passthru-identify-ctrlr', help="""Passthrough fields like serial number and model number
when the controller has a single namespace that is an NVMe bdev""", action='store_true')
p.set_defaults(func=nvmf_set_config)
def nvmf_create_transport(args):
rpc.nvmf.nvmf_create_transport(args.client,
trtype=args.trtype,
tgt_name=args.tgt_name,
max_queue_depth=args.max_queue_depth,
max_qpairs_per_ctrlr=args.max_qpairs_per_ctrlr,
in_capsule_data_size=args.in_capsule_data_size,
max_io_size=args.max_io_size,
io_unit_size=args.io_unit_size,
max_aq_depth=args.max_aq_depth,
num_shared_buffers=args.num_shared_buffers,
buf_cache_size=args.buf_cache_size,
max_srq_depth=args.max_srq_depth,
no_srq=args.no_srq,
c2h_success=args.c2h_success,
dif_insert_or_strip=args.dif_insert_or_strip,
sock_priority=args.sock_priority)
p = subparsers.add_parser('nvmf_create_transport', help='Create NVMf transport')
p.add_argument('-t', '--trtype', help='Transport type (ex. RDMA)', type=str, required=True)
p.add_argument('-g', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-q', '--max-queue-depth', help='Max number of outstanding I/O per queue', type=int)
p.add_argument('-p', '--max-qpairs-per-ctrlr', help='Max number of SQ and CQ per controller', type=int)
p.add_argument('-c', '--in-capsule-data-size', help='Max number of in-capsule data size', type=int)
p.add_argument('-i', '--max-io-size', help='Max I/O size (bytes)', type=int)
p.add_argument('-u', '--io-unit-size', help='I/O unit size (bytes)', type=int)
p.add_argument('-a', '--max-aq-depth', help='Max number of admin cmds per AQ', type=int)
p.add_argument('-n', '--num-shared-buffers', help='The number of pooled data buffers available to the transport', type=int)
p.add_argument('-b', '--buf-cache-size', help='The number of shared buffers to reserve for each poll group', type=int)
p.add_argument('-s', '--max-srq-depth', help='Max number of outstanding I/O per SRQ. Relevant only for RDMA transport', type=int)
p.add_argument('-r', '--no-srq', action='store_true', help='Disable per-thread shared receive queue. Relevant only for RDMA transport')
p.add_argument('-o', '--c2h-success', action='store_false', help='Disable C2H success optimization. Relevant only for TCP transport')
p.add_argument('-f', '--dif-insert-or-strip', action='store_true', help='Enable DIF insert/strip. Relevant only for TCP transport')
p.add_argument('-y', '--sock-priority', help='The sock priority of the tcp connection. Relevant only for TCP transport', type=int)
p.set_defaults(func=nvmf_create_transport)
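# Illustrative invocation (values are hypothetical):
#   rpc.py nvmf_create_transport -t RDMA -q 128 -n 4096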
def nvmf_get_transports(args):
print_dict(rpc.nvmf.nvmf_get_transports(args.client, tgt_name=args.tgt_name))
p = subparsers.add_parser('nvmf_get_transports', aliases=['get_nvmf_transports'],
help='Display nvmf transports')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_get_transports)
def nvmf_get_subsystems(args):
print_dict(rpc.nvmf.nvmf_get_subsystems(args.client, tgt_name=args.tgt_name))
p = subparsers.add_parser('nvmf_get_subsystems', aliases=['get_nvmf_subsystems'],
help='Display nvmf subsystems')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_get_subsystems)
def nvmf_create_subsystem(args):
rpc.nvmf.nvmf_create_subsystem(args.client,
nqn=args.nqn,
tgt_name=args.tgt_name,
serial_number=args.serial_number,
model_number=args.model_number,
allow_any_host=args.allow_any_host,
max_namespaces=args.max_namespaces)
p = subparsers.add_parser('nvmf_create_subsystem', aliases=['nvmf_subsystem_create'],
help='Create an NVMe-oF subsystem')
p.add_argument('nqn', help='Subsystem NQN (ASCII)')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument("-s", "--serial-number", help="""
Format: 'sn' etc
Example: 'SPDK00000000000001'""", default='00000000000000000000')
p.add_argument("-d", "--model-number", help="""
Format: 'mn' etc
Example: 'SPDK Controller'""", default='SPDK bdev Controller')
p.add_argument("-a", "--allow-any-host", action='store_true', help="Allow any host to connect (don't enforce host NQN whitelist)")
p.add_argument("-m", "--max-namespaces", help="Maximum number of namespaces allowed",
type=int, default=0)
p.set_defaults(func=nvmf_create_subsystem)
def nvmf_delete_subsystem(args):
rpc.nvmf.nvmf_delete_subsystem(args.client,
nqn=args.subsystem_nqn,
tgt_name=args.tgt_name)
p = subparsers.add_parser('nvmf_delete_subsystem', aliases=['delete_nvmf_subsystem'],
help='Delete a nvmf subsystem')
p.add_argument('subsystem_nqn',
help='subsystem nqn to be deleted. Example: nqn.2016-06.io.spdk:cnode1.')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_delete_subsystem)
def nvmf_subsystem_add_listener(args):
rpc.nvmf.nvmf_subsystem_add_listener(args.client,
nqn=args.nqn,
trtype=args.trtype,
traddr=args.traddr,
tgt_name=args.tgt_name,
adrfam=args.adrfam,
trsvcid=args.trsvcid)
p = subparsers.add_parser('nvmf_subsystem_add_listener', help='Add a listener to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
p.add_argument('-p', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
p.set_defaults(func=nvmf_subsystem_add_listener)
def nvmf_subsystem_remove_listener(args):
rpc.nvmf.nvmf_subsystem_remove_listener(args.client,
nqn=args.nqn,
trtype=args.trtype,
traddr=args.traddr,
tgt_name=args.tgt_name,
adrfam=args.adrfam,
trsvcid=args.trsvcid)
p = subparsers.add_parser('nvmf_subsystem_remove_listener', help='Remove a listener from an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('-t', '--trtype', help='NVMe-oF transport type: e.g., rdma', required=True)
p.add_argument('-a', '--traddr', help='NVMe-oF transport address: e.g., an ip address', required=True)
p.add_argument('-p', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-f', '--adrfam', help='NVMe-oF transport adrfam: e.g., ipv4, ipv6, ib, fc, intra_host')
p.add_argument('-s', '--trsvcid', help='NVMe-oF transport service id: e.g., a port number')
p.set_defaults(func=nvmf_subsystem_remove_listener)
def nvmf_subsystem_add_ns(args):
rpc.nvmf.nvmf_subsystem_add_ns(args.client,
nqn=args.nqn,
bdev_name=args.bdev_name,
tgt_name=args.tgt_name,
ptpl_file=args.ptpl_file,
nsid=args.nsid,
nguid=args.nguid,
eui64=args.eui64,
uuid=args.uuid)
p = subparsers.add_parser('nvmf_subsystem_add_ns', help='Add a namespace to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('bdev_name', help='The name of the bdev that will back this namespace')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.add_argument('-p', '--ptpl-file', help='The persistent reservation storage location (optional)', type=str)
p.add_argument('-n', '--nsid', help='The requested NSID (optional)', type=int)
p.add_argument('-g', '--nguid', help='Namespace globally unique identifier (optional)')
p.add_argument('-e', '--eui64', help='Namespace EUI-64 identifier (optional)')
p.add_argument('-u', '--uuid', help='Namespace UUID (optional)')
p.set_defaults(func=nvmf_subsystem_add_ns)
def nvmf_subsystem_remove_ns(args):
rpc.nvmf.nvmf_subsystem_remove_ns(args.client,
nqn=args.nqn,
nsid=args.nsid,
tgt_name=args.tgt_name)
p = subparsers.add_parser('nvmf_subsystem_remove_ns', help='Remove a namespace from an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('nsid', help='The requested NSID', type=int)
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_remove_ns)
def nvmf_subsystem_add_host(args):
rpc.nvmf.nvmf_subsystem_add_host(args.client,
nqn=args.nqn,
host=args.host,
tgt_name=args.tgt_name)
p = subparsers.add_parser('nvmf_subsystem_add_host', help='Add a host to an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('host', help='Host NQN to allow')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_add_host)
def nvmf_subsystem_remove_host(args):
rpc.nvmf.nvmf_subsystem_remove_host(args.client,
nqn=args.nqn,
host=args.host,
tgt_name=args.tgt_name)
p = subparsers.add_parser('nvmf_subsystem_remove_host', help='Remove a host from an NVMe-oF subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('host', help='Host NQN to remove')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_remove_host)
def nvmf_subsystem_allow_any_host(args):
rpc.nvmf.nvmf_subsystem_allow_any_host(args.client,
nqn=args.nqn,
disable=args.disable,
tgt_name=args.tgt_name)
p = subparsers.add_parser('nvmf_subsystem_allow_any_host', help='Allow any host to connect to the subsystem')
p.add_argument('nqn', help='NVMe-oF subsystem NQN')
p.add_argument('-e', '--enable', action='store_true', help='Enable allowing any host')
p.add_argument('-d', '--disable', action='store_true', help='Disable allowing any host')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_subsystem_allow_any_host)
def nvmf_get_stats(args):
print_dict(rpc.nvmf.nvmf_get_stats(args.client, tgt_name=args.tgt_name))
p = subparsers.add_parser(
'nvmf_get_stats', help='Display current statistics for NVMf subsystem')
p.add_argument('-t', '--tgt_name', help='The name of the parent NVMe-oF target (optional)', type=str)
p.set_defaults(func=nvmf_get_stats)
# pmem
def bdev_pmem_create_pool(args):
num_blocks = int((args.total_size * 1024 * 1024) / args.block_size)
rpc.pmem.bdev_pmem_create_pool(args.client,
pmem_file=args.pmem_file,
num_blocks=num_blocks,
block_size=args.block_size)
p = subparsers.add_parser('bdev_pmem_create_pool', aliases=['create_pmem_pool'],
help='Create pmem pool')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
    p.add_argument('total_size', help='Size of pmem pool in MB (int > 0)', type=int)
p.add_argument('block_size', help='Block size for this pmem pool', type=int)
p.set_defaults(func=bdev_pmem_create_pool)
def bdev_pmem_get_pool_info(args):
print_dict(rpc.pmem.bdev_pmem_get_pool_info(args.client,
pmem_file=args.pmem_file))
p = subparsers.add_parser('bdev_pmem_get_pool_info', aliases=['pmem_pool_info'],
help='Display pmem pool info and check consistency')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
p.set_defaults(func=bdev_pmem_get_pool_info)
def bdev_pmem_delete_pool(args):
rpc.pmem.bdev_pmem_delete_pool(args.client,
pmem_file=args.pmem_file)
p = subparsers.add_parser('bdev_pmem_delete_pool', aliases=['delete_pmem_pool'],
help='Delete pmem pool')
p.add_argument('pmem_file', help='Path to pmemblk pool file')
p.set_defaults(func=bdev_pmem_delete_pool)
# subsystem
def framework_get_subsystems(args):
print_dict(rpc.subsystem.framework_get_subsystems(args.client))
p = subparsers.add_parser('framework_get_subsystems', aliases=['get_subsystems'],
help="""Print subsystems array in initialization order. Each subsystem
                                  entry contains an (unsorted) array of subsystems it depends on.""")
p.set_defaults(func=framework_get_subsystems)
def framework_get_config(args):
print_dict(rpc.subsystem.framework_get_config(args.client, args.name))
p = subparsers.add_parser('framework_get_config', aliases=['get_subsystem_config'],
help="""Print subsystem configuration""")
p.add_argument('name', help='Name of subsystem to query')
p.set_defaults(func=framework_get_config)
# vhost
def vhost_controller_set_coalescing(args):
rpc.vhost.vhost_controller_set_coalescing(args.client,
ctrlr=args.ctrlr,
delay_base_us=args.delay_base_us,
iops_threshold=args.iops_threshold)
p = subparsers.add_parser('vhost_controller_set_coalescing', aliases=['set_vhost_controller_coalescing'],
help='Set vhost controller coalescing')
p.add_argument('ctrlr', help='controller name')
p.add_argument('delay_base_us', help='Base delay time', type=int)
p.add_argument('iops_threshold', help='IOPS threshold when coalescing is enabled', type=int)
p.set_defaults(func=vhost_controller_set_coalescing)
def vhost_create_scsi_controller(args):
rpc.vhost.vhost_create_scsi_controller(args.client,
ctrlr=args.ctrlr,
cpumask=args.cpumask)
p = subparsers.add_parser(
'vhost_create_scsi_controller', aliases=['construct_vhost_scsi_controller'],
help='Add new vhost controller')
p.add_argument('ctrlr', help='controller name')
p.add_argument('--cpumask', help='cpu mask for this controller')
p.set_defaults(func=vhost_create_scsi_controller)
def vhost_scsi_controller_add_target(args):
print_json(rpc.vhost.vhost_scsi_controller_add_target(args.client,
ctrlr=args.ctrlr,
scsi_target_num=args.scsi_target_num,
bdev_name=args.bdev_name))
p = subparsers.add_parser('vhost_scsi_controller_add_target',
aliases=['add_vhost_scsi_lun'],
help='Add lun to vhost controller')
    p.add_argument('ctrlr', help='controller name to add lun to')
p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
p.add_argument('bdev_name', help='bdev name')
p.set_defaults(func=vhost_scsi_controller_add_target)
def vhost_scsi_controller_remove_target(args):
rpc.vhost.vhost_scsi_controller_remove_target(args.client,
ctrlr=args.ctrlr,
scsi_target_num=args.scsi_target_num)
p = subparsers.add_parser('vhost_scsi_controller_remove_target',
aliases=['remove_vhost_scsi_target'],
help='Remove target from vhost controller')
p.add_argument('ctrlr', help='controller name to remove target from')
p.add_argument('scsi_target_num', help='scsi_target_num', type=int)
p.set_defaults(func=vhost_scsi_controller_remove_target)
def vhost_create_blk_controller(args):
rpc.vhost.vhost_create_blk_controller(args.client,
ctrlr=args.ctrlr,
dev_name=args.dev_name,
cpumask=args.cpumask,
readonly=args.readonly)
p = subparsers.add_parser('vhost_create_blk_controller',
aliases=['construct_vhost_blk_controller'],
help='Add a new vhost block controller')
p.add_argument('ctrlr', help='controller name')
p.add_argument('dev_name', help='device name')
p.add_argument('--cpumask', help='cpu mask for this controller')
p.add_argument("-r", "--readonly", action='store_true', help='Set controller as read-only')
p.set_defaults(func=vhost_create_blk_controller)
def vhost_create_nvme_controller(args):
rpc.vhost.vhost_create_nvme_controller(args.client,
ctrlr=args.ctrlr,
io_queues=args.io_queues,
cpumask=args.cpumask)
p = subparsers.add_parser('vhost_create_nvme_controller', aliases=['vhost_create_nvme_controller'],
help='Add new vhost controller')
p.add_argument('ctrlr', help='controller name')
p.add_argument('io_queues', help='number of IO queues for the controller', type=int)
p.add_argument('--cpumask', help='cpu mask for this controller')
p.set_defaults(func=vhost_create_nvme_controller)
def vhost_nvme_controller_add_ns(args):
rpc.vhost.vhost_nvme_controller_add_ns(args.client,
ctrlr=args.ctrlr,
bdev_name=args.bdev_name)
p = subparsers.add_parser('vhost_nvme_controller_add_ns', aliases=['add_vhost_nvme_ns'],
help='Add a Namespace to vhost controller')
    p.add_argument('ctrlr', help='controller name to add a Namespace to')
p.add_argument('bdev_name', help='block device name for a new Namespace')
p.set_defaults(func=vhost_nvme_controller_add_ns)
def vhost_get_controllers(args):
print_dict(rpc.vhost.vhost_get_controllers(args.client, args.name))
p = subparsers.add_parser('vhost_get_controllers', aliases=['get_vhost_controllers'],
help='List all or specific vhost controller(s)')
p.add_argument('-n', '--name', help="Name of vhost controller", required=False)
p.set_defaults(func=vhost_get_controllers)
def vhost_delete_controller(args):
rpc.vhost.vhost_delete_controller(args.client,
ctrlr=args.ctrlr)
p = subparsers.add_parser('vhost_delete_controller', aliases=['remove_vhost_controller'],
help='Delete a vhost controller')
p.add_argument('ctrlr', help='controller name')
p.set_defaults(func=vhost_delete_controller)
def bdev_virtio_attach_controller(args):
print_array(rpc.vhost.bdev_virtio_attach_controller(args.client,
name=args.name,
trtype=args.trtype,
traddr=args.traddr,
dev_type=args.dev_type,
vq_count=args.vq_count,
vq_size=args.vq_size))
p = subparsers.add_parser('bdev_virtio_attach_controller', aliases=['construct_virtio_dev'],
help="""Attach virtio controller using provided
transport type and device type. This will also create bdevs for any block devices connected to the
controller (for example, SCSI devices for a virtio-scsi controller).
Result is array of added bdevs.""")
p.add_argument('name', help="Use this name as base for new created bdevs")
p.add_argument('-t', '--trtype',
help='Virtio target transport type: pci or user', required=True)
p.add_argument('-a', '--traddr',
help='Transport type specific target address: e.g. UNIX domain socket path or BDF', required=True)
p.add_argument('-d', '--dev-type',
help='Device type: blk or scsi', required=True)
p.add_argument('--vq-count', help='Number of virtual queues to be used.', type=int)
p.add_argument('--vq-size', help='Size of each queue', type=int)
p.set_defaults(func=bdev_virtio_attach_controller)
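    # Example invocation (the socket path and bdev base name are placeholders):
    #   rpc.py bdev_virtio_attach_controller -t user -a /tmp/vhost.0 -d scsi VirtioScsi0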
def bdev_virtio_scsi_get_devices(args):
print_dict(rpc.vhost.bdev_virtio_scsi_get_devices(args.client))
p = subparsers.add_parser('bdev_virtio_scsi_get_devices', aliases=['get_virtio_scsi_devs'],
help='List all Virtio-SCSI devices.')
p.set_defaults(func=bdev_virtio_scsi_get_devices)
def bdev_virtio_detach_controller(args):
rpc.vhost.bdev_virtio_detach_controller(args.client,
name=args.name)
p = subparsers.add_parser('bdev_virtio_detach_controller', aliases=['remove_virtio_bdev'],
help="""Remove a Virtio device
This will delete all bdevs exposed by this device""")
p.add_argument('name', help='Virtio device name. E.g. VirtioUser0')
p.set_defaults(func=bdev_virtio_detach_controller)
# OCSSD
def bdev_ocssd_create(args):
nsid = int(args.nsid) if args.nsid is not None else None
print_json(rpc.bdev.bdev_ocssd_create(args.client,
ctrlr_name=args.ctrlr_name,
bdev_name=args.name,
nsid=nsid,
range=args.range))
p = subparsers.add_parser('bdev_ocssd_create',
help='Creates zoned bdev on specified Open Channel controller')
p.add_argument('-c', '--ctrlr_name', help='Name of the OC NVMe controller', required=True)
p.add_argument('-b', '--name', help='Name of the bdev to create', required=True)
p.add_argument('-n', '--nsid', help='Namespace ID', required=False)
p.add_argument('-r', '--range', help='Parallel unit range (in the form of BEGIN-END (inclusive))',
required=False)
p.set_defaults(func=bdev_ocssd_create)
def bdev_ocssd_delete(args):
print_json(rpc.bdev.bdev_ocssd_delete(args.client,
name=args.name))
p = subparsers.add_parser('bdev_ocssd_delete',
help='Deletes Open Channel bdev')
p.add_argument('name', help='Name of the Open Channel bdev')
p.set_defaults(func=bdev_ocssd_delete)
# ioat
def ioat_scan_copy_engine(args):
pci_whitelist = []
if args.pci_whitelist:
for w in args.pci_whitelist.strip().split(" "):
pci_whitelist.append(w)
rpc.ioat.ioat_scan_copy_engine(args.client, pci_whitelist)
p = subparsers.add_parser('ioat_scan_copy_engine', aliases=['scan_ioat_copy_engine'],
                              help='Scan and enable IOAT copy engine offload.')
p.add_argument('-w', '--pci-whitelist', help="""Whitespace-separated list of PCI addresses in
domain:bus:device.function format or domain.bus.device.function format""")
p.set_defaults(func=ioat_scan_copy_engine)
# opal
def bdev_nvme_opal_init(args):
rpc.nvme.bdev_nvme_opal_init(args.client,
nvme_ctrlr_name=args.nvme_ctrlr_name,
password=args.password)
p = subparsers.add_parser('bdev_nvme_opal_init', help='take ownership and activate')
p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name')
p.add_argument('-p', '--password', help='password for admin')
p.set_defaults(func=bdev_nvme_opal_init)
def bdev_nvme_opal_revert(args):
rpc.nvme.bdev_nvme_opal_revert(args.client,
nvme_ctrlr_name=args.nvme_ctrlr_name,
password=args.password)
p = subparsers.add_parser('bdev_nvme_opal_revert', help='Revert to default factory settings')
p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name')
p.add_argument('-p', '--password', help='password')
p.set_defaults(func=bdev_nvme_opal_revert)
def bdev_opal_create(args):
print_json(rpc.bdev.bdev_opal_create(args.client,
nvme_ctrlr_name=args.nvme_ctrlr_name,
nsid=args.nsid,
locking_range_id=args.locking_range_id,
range_start=args.range_start,
range_length=args.range_length,
password=args.password))
p = subparsers.add_parser('bdev_opal_create', help="""Create opal bdev on specified NVMe controller""")
p.add_argument('-b', '--nvme-ctrlr-name', help='nvme ctrlr name', required=True)
p.add_argument('-n', '--nsid', help='namespace ID (only support nsid=1 for now)', type=int, required=True)
p.add_argument('-i', '--locking-range-id', help='locking range id', type=int, required=True)
p.add_argument('-s', '--range-start', help='locking range start LBA', type=int, required=True)
p.add_argument('-l', '--range-length', help='locking range length (in blocks)', type=int, required=True)
p.add_argument('-p', '--password', help='admin password', required=True)
p.set_defaults(func=bdev_opal_create)
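    # Example invocation (controller name, LBA range and password are placeholders):
    #   rpc.py bdev_opal_create -b Nvme0 -n 1 -i 1 -s 0 -l 4096 -p <admin_password>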
def bdev_opal_get_info(args):
print_dict(rpc.bdev.bdev_opal_get_info(args.client,
bdev_name=args.bdev_name,
password=args.password))
p = subparsers.add_parser('bdev_opal_get_info', help='get opal locking range info for this bdev')
p.add_argument('-b', '--bdev-name', help='opal bdev')
p.add_argument('-p', '--password', help='password')
p.set_defaults(func=bdev_opal_get_info)
def bdev_opal_delete(args):
rpc.bdev.bdev_opal_delete(args.client,
bdev_name=args.bdev_name,
password=args.password)
p = subparsers.add_parser('bdev_opal_delete', help="""delete a virtual opal bdev""")
p.add_argument('-b', '--bdev-name', help='opal virtual bdev', required=True)
p.add_argument('-p', '--password', help='admin password', required=True)
p.set_defaults(func=bdev_opal_delete)
def bdev_opal_new_user(args):
rpc.bdev.bdev_opal_new_user(args.client,
bdev_name=args.bdev_name,
admin_password=args.admin_password,
user_id=args.user_id,
user_password=args.user_password)
p = subparsers.add_parser('bdev_opal_new_user', help="""Add a user to opal bdev who can set lock state for this bdev""")
p.add_argument('-b', '--bdev-name', help='opal bdev', required=True)
p.add_argument('-p', '--admin-password', help='admin password', required=True)
p.add_argument('-i', '--user-id', help='ID for new user', type=int, required=True)
p.add_argument('-u', '--user-password', help='password set for this user', required=True)
p.set_defaults(func=bdev_opal_new_user)
def bdev_opal_set_lock_state(args):
rpc.bdev.bdev_opal_set_lock_state(args.client,
bdev_name=args.bdev_name,
user_id=args.user_id,
password=args.password,
lock_state=args.lock_state)
p = subparsers.add_parser('bdev_opal_set_lock_state', help="""set lock state for an opal bdev""")
p.add_argument('-b', '--bdev-name', help='opal bdev', required=True)
    p.add_argument('-i', '--user-id', help='ID of the user who wants to set lock state, either admin or a user assigned to this bdev',
type=int, required=True)
p.add_argument('-p', '--password', help='password of this user', required=True)
p.add_argument('-l', '--lock-state', help='lock state to set, choose from {readwrite, readonly, rwlock}', required=True)
p.set_defaults(func=bdev_opal_set_lock_state)
# bdev_nvme_send_cmd
def bdev_nvme_send_cmd(args):
print_dict(rpc.nvme.bdev_nvme_send_cmd(args.client,
name=args.nvme_name,
cmd_type=args.cmd_type,
data_direction=args.data_direction,
cmdbuf=args.cmdbuf,
data=args.data,
metadata=args.metadata,
data_len=args.data_length,
metadata_len=args.metadata_length,
timeout_ms=args.timeout_ms))
p = subparsers.add_parser('bdev_nvme_send_cmd', aliases=['send_nvme_cmd'],
help='NVMe passthrough cmd.')
p.add_argument('-n', '--nvme-name', help="""Name of the operating NVMe controller""")
p.add_argument('-t', '--cmd-type', help="""Type of nvme cmd. Valid values are: admin, io""")
p.add_argument('-r', '--data-direction', help="""Direction of data transfer. Valid values are: c2h, h2c""")
p.add_argument('-c', '--cmdbuf', help="""NVMe command encoded by base64 urlsafe""")
    p.add_argument('-d', '--data', help="""Data to transfer from host to controller, base64 urlsafe encoded""")
    p.add_argument('-m', '--metadata', help="""Metadata to transfer from host to controller, base64 urlsafe encoded""")
p.add_argument('-D', '--data-length', help="""Data length required to transfer from controller to host""", type=int)
p.add_argument('-M', '--metadata-length', help="""Metadata length required to transfer from controller to host""", type=int)
p.add_argument('-T', '--timeout-ms',
help="""Command execution timeout value, in milliseconds, if 0, don't track timeout""", type=int, default=0)
p.set_defaults(func=bdev_nvme_send_cmd)
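    # Example invocation (controller name and base64-urlsafe payload are placeholders):
    #   rpc.py bdev_nvme_send_cmd -n Nvme0 -t admin -r c2h -c <base64_urlsafe_cmd> -D 4096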
# Notifications
def notify_get_types(args):
print_dict(rpc.notify.notify_get_types(args.client))
p = subparsers.add_parser('notify_get_types', aliases=['get_notification_types'],
help='List available notifications that user can subscribe to.')
p.set_defaults(func=notify_get_types)
def notify_get_notifications(args):
ret = rpc.notify.notify_get_notifications(args.client,
id=args.id,
max=args.max)
print_dict(ret)
p = subparsers.add_parser('notify_get_notifications', aliases=['get_notifications'],
help='Get notifications')
p.add_argument('-i', '--id', help="""First ID to start fetching from""", type=int)
p.add_argument('-n', '--max', help="""Maximum number of notifications to return in response""", type=int)
p.set_defaults(func=notify_get_notifications)
def thread_get_stats(args):
print_dict(rpc.app.thread_get_stats(args.client))
p = subparsers.add_parser(
'thread_get_stats', help='Display current statistics of all the threads')
p.set_defaults(func=thread_get_stats)
def env_dpdk_get_mem_stats(args):
print_dict(rpc.env_dpdk.env_dpdk_get_mem_stats(args.client))
p = subparsers.add_parser(
'env_dpdk_get_mem_stats', help='write the dpdk memory stats to a file.')
p.set_defaults(func=env_dpdk_get_mem_stats)
# blobfs
def blobfs_detect(args):
print(rpc.blobfs.blobfs_detect(args.client,
bdev_name=args.bdev_name))
p = subparsers.add_parser('blobfs_detect', help='Detect whether a blobfs exists on bdev')
p.add_argument('bdev_name', help='Blockdev name to detect blobfs. Example: Malloc0.')
p.set_defaults(func=blobfs_detect)
def blobfs_create(args):
print(rpc.blobfs.blobfs_create(args.client,
bdev_name=args.bdev_name,
cluster_sz=args.cluster_sz))
p = subparsers.add_parser('blobfs_create', help='Build a blobfs on bdev')
p.add_argument('bdev_name', help='Blockdev name to build blobfs. Example: Malloc0.')
p.add_argument('-c', '--cluster_sz',
help="""Size of cluster in bytes (Optional). Must be multiple of 4KB page size. Default and minimal value is 1M.""")
p.set_defaults(func=blobfs_create)
def blobfs_mount(args):
print(rpc.blobfs.blobfs_mount(args.client,
bdev_name=args.bdev_name,
mountpoint=args.mountpoint))
p = subparsers.add_parser('blobfs_mount', help='Mount a blobfs on bdev to host path by FUSE')
p.add_argument('bdev_name', help='Blockdev name where the blobfs is. Example: Malloc0.')
p.add_argument('mountpoint', help='Mountpoint path in host to mount blobfs. Example: /mnt/.')
p.set_defaults(func=blobfs_mount)
def blobfs_set_cache_size(args):
print(rpc.blobfs.blobfs_set_cache_size(args.client,
size_in_mb=args.size_in_mb))
p = subparsers.add_parser('blobfs_set_cache_size', help='Set cache size for blobfs')
p.add_argument('size_in_mb', help='Cache size for blobfs in megabytes.', type=int)
p.set_defaults(func=blobfs_set_cache_size)
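    # Example sequence (Malloc0 follows the help-text example; the mountpoint and
    # cache size below are placeholders):
    #   rpc.py blobfs_create Malloc0
    #   rpc.py blobfs_mount Malloc0 /mnt/blobfs
    #   rpc.py blobfs_set_cache_size 512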
def check_called_name(name):
if name in deprecated_aliases:
print("{} is deprecated, use {} instead.".format(name, deprecated_aliases[name]), file=sys.stderr)
class dry_run_client:
def call(self, method, params=None):
print("Request:\n" + json.dumps({"method": method, "params": params}, indent=2))
def null_print(arg):
pass
def call_rpc_func(args):
args.func(args)
check_called_name(args.called_rpc_name)
def execute_script(parser, client, fd):
executed_rpc = ""
for rpc_call in map(str.rstrip, fd):
if not rpc_call.strip():
continue
executed_rpc = "\n".join([executed_rpc, rpc_call])
args = parser.parse_args(shlex.split(rpc_call))
args.client = client
try:
call_rpc_func(args)
except JSONRPCException as ex:
print("Exception:")
print(executed_rpc.strip() + " <<<")
print(ex.message)
exit(1)
args = parser.parse_args()
if args.dry_run:
args.client = dry_run_client()
print_dict = null_print
print_json = null_print
print_array = null_print
else:
args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper()))
if hasattr(args, 'func'):
try:
call_rpc_func(args)
except JSONRPCException as ex:
print(ex.message)
exit(1)
elif sys.stdin.isatty():
# No arguments and no data piped through stdin
parser.print_help()
exit(1)
else:
execute_script(parser, args.client, sys.stdin)
| 53.8792
| 139
| 0.609158
|
4a16ed5eee277d9950d43d4ca67faa59124bb264
| 196
|
py
|
Python
|
lab5/03_date.py
|
outfrost/os-labs
|
f99ebb5c528f916736d225b15b50698cb1e12f4e
|
[
"Unlicense"
] | null | null | null |
lab5/03_date.py
|
outfrost/os-labs
|
f99ebb5c528f916736d225b15b50698cb1e12f4e
|
[
"Unlicense"
] | null | null | null |
lab5/03_date.py
|
outfrost/os-labs
|
f99ebb5c528f916736d225b15b50698cb1e12f4e
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
import sys
import re
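# The pattern below matches strings beginning with dates such as
# 11-06-2018, 11/6/18 or 11.06.18 (separators -, / or .).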
pattern = re.compile(r"11[-/.]0?6[-/.](20)?18")
while True:
line = sys.stdin.readline()
if not line:
break
print(pattern.match(line))
| 16.333333
| 47
| 0.607143
|
4a16ed7224030df965755b9d259e579d047c2f69
| 149
|
py
|
Python
|
tests/unit/mock/config/compliance/config_section_not_parsed/cisco_ios/ios_basic_feature.py
|
vivekvashist/netutils
|
c2d75178d2613a44f070f55ef94e11866eef8f36
|
[
"Apache-2.0"
] | 91
|
2021-05-13T18:14:57.000Z
|
2022-03-22T14:36:38.000Z
|
tests/unit/mock/config/compliance/config_section_not_parsed/cisco_ios/ios_basic_feature.py
|
vivekvashist/netutils
|
c2d75178d2613a44f070f55ef94e11866eef8f36
|
[
"Apache-2.0"
] | 61
|
2021-05-15T00:49:31.000Z
|
2022-03-28T06:08:52.000Z
|
tests/unit/mock/config/compliance/config_section_not_parsed/cisco_ios/ios_basic_feature.py
|
vivekvashist/netutils
|
c2d75178d2613a44f070f55ef94e11866eef8f36
|
[
"Apache-2.0"
] | 26
|
2021-05-13T23:51:40.000Z
|
2022-03-31T12:30:11.000Z
|
features = [
{"name": "bgp", "ordered": True, "section": ["router bgp "]},
{"name": "snmp", "ordered": True, "section": ["snmp-server "]},
]
| 29.8
| 67
| 0.52349
|
4a16ee115f647a94cf68d9c680beaf07b9f3ff6b
| 1,806
|
py
|
Python
|
pyfmodex/structobject.py
|
bbbart/pyfmodex
|
b3f89fdb40ecebe528b229a3c5310ec7d7f66f55
|
[
"MIT"
] | 19
|
2015-02-01T08:26:25.000Z
|
2021-11-18T02:32:29.000Z
|
pyfmodex/structobject.py
|
tyrylu/pyfmodex
|
b3f89fdb40ecebe528b229a3c5310ec7d7f66f55
|
[
"MIT"
] | 32
|
2015-01-10T07:20:59.000Z
|
2021-12-29T20:28:58.000Z
|
pyfmodex/structobject.py
|
bbbart/pyfmodex
|
b3f89fdb40ecebe528b229a3c5310ec7d7f66f55
|
[
"MIT"
] | 12
|
2015-01-05T07:54:42.000Z
|
2021-03-27T12:21:47.000Z
|
"""A dict like object.
Implementation from http://benlast.livejournal.com/12301.html with unnecessary
zope security flag removed.
"""
class Structobject:
"""A 'bag' with keyword initialization, dict-semantics emulation and key
iteration.
"""
def __init__(self, **kw):
"""Initialize, and set attributes from all keyword arguments."""
self.__members = []
for k in list(kw.keys()):
setattr(self, k, kw[k])
self.__remember(k)
def __remember(self, k):
"""Add k to the list of explicitly set values."""
if k not in self.__members:
self.__members.append(k)
def __getitem__(self, key):
"""Equivalent of dict access by key."""
try:
return getattr(self, key)
except AttributeError as attrerr:
raise KeyError(key) from attrerr
def __setitem__(self, key, value):
setattr(self, key, value)
self.__remember(key)
def has_key(self, key):
"""wheter this Structobject contains a value for the given key.
:rtype: bool
"""
return hasattr(self, key)
def keys(self):
"""All keys this Structobject has values for.
:rtype: list
"""
return self.__members
def iterkeys(self):
"""All keys this Structobject has values for.
:rtype: list
"""
return self.__members
def __iter__(self):
return iter(self.__members)
def __str__(self):
"""Describe those attributes explicitly set."""
string = ""
for member in self.__members:
value = getattr(self, member)
if string:
string += ", "
string += "%string: %string" % (member, repr(value))
return string
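
# A minimal usage sketch (not part of the original module): it shows keyword
# initialization, dict-style access and key iteration of Structobject.
if __name__ == "__main__":
    bag = Structobject(name="fmod", version=2)
    bag["channels"] = 32                 # dict-style assignment is remembered as a key
    print(bag["name"], bag.version)      # key access and attribute access are equivalent
    print(list(bag.keys()))              # -> ['name', 'version', 'channels']
    print(bag)                           # -> name: 'fmod', version: 2, channels: 32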
| 26.173913
| 78
| 0.576966
|
4a16ef06d150caea365807fa52073c409cdd41b3
| 35,981
|
py
|
Python
|
scripts/experiments/text_classification/train.py
|
antoilouis/netbert
|
ccd37ef8a1727557de74498132eea24db2135940
|
[
"MIT"
] | 2
|
2021-01-29T01:30:51.000Z
|
2021-07-14T16:47:15.000Z
|
scripts/experiments/text_classification/train.py
|
antoilouis/netbert
|
ccd37ef8a1727557de74498132eea24db2135940
|
[
"MIT"
] | null | null | null |
scripts/experiments/text_classification/train.py
|
antoilouis/netbert
|
ccd37ef8a1727557de74498132eea24db2135940
|
[
"MIT"
] | null | null | null |
import json
import argparse
import sys
import time
import datetime
import random
import os
import itertools
import statistics
from tqdm import tqdm
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, matthews_corrcoef
from sklearn.utils import resample
from transformers import BertTokenizer, BertForSequenceClassification, BertConfig
from transformers import AdamW, get_linear_schedule_with_warmup
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
def parse_arguments():
"""
Parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path",
type=str,
required=True,
help="Path to pre-trained model or shortcut name",
)
parser.add_argument("--do_train",
action='store_true',
help="Whether to launch training.",
)
parser.add_argument("--do_val",
action='store_true',
help="Whether to do validation on the model during training.",
)
parser.add_argument("--do_test",
action='store_true',
help="Whether to do testing on the model after training.",
)
parser.add_argument("--filepath",
default=None,
type=str,
help="Path of the file containing the sentences to encode.",
)
parser.add_argument("--output_dir",
default=None,
type=str,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--cache_dir",
default='/raid/antoloui/Master-thesis/_cache/',
type=str,
help="Where do you want to store the pre-trained models downloaded from s3.",
)
parser.add_argument("--num_labels",
required=True,
type=int,
help="Number of classification labels.",
)
parser.add_argument('--test_percent',
default=0.1,
type=float,
help='Percentage of available data to use for val/test dataset ([0,1]).',
)
parser.add_argument("--seed",
default=42,
type=int,
help="Random seed for initialization.",
)
parser.add_argument("--batch_size",
default=32,
type=int,
help="Total batch size. For fine-tuning BERT on a specific task, the authors recommend a batch size of 16 or 32 per GPU/CPU.",
)
parser.add_argument("--num_epochs",
default=6,
type=int,
help="Total number of training epochs to perform. Authors recommend 2,3 or 4.",
)
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam. The authors recommend 5e-5, 3e-5 or 2e-5."
)
parser.add_argument("--adam_epsilon",
default=1e-6,
type=float,
help="Epsilon for Adam optimizer.",
)
parser.add_argument("--gpu_id",
default=None,
type=int,
help="Id of the GPU to use if multiple GPUs available.",
)
parser.add_argument("--logging_steps",
default=10,
type=int,
help="Log every X updates steps.",
)
parser.add_argument("--balanced",
action='store_true',
help="Should the training dataset be balanced or not.",
)
parser.add_argument("--do_compare",
action='store_true',
help="Whether to evaluate the model on BERT predictions (BERT must have been tested before).",
)
arguments, _ = parser.parse_known_args()
return arguments
def format_time(elapsed):
"""
Takes a time in seconds and returns a string hh:mm:ss
"""
# Round to the nearest second.
elapsed_rounded = int(round((elapsed)))
# Format as hh:mm:ss
return str(datetime.timedelta(seconds=elapsed_rounded))
def set_seed(seed):
"""
Set seed.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def load_data(args, interest_classes=None):
"""
Filepath must be a csv file with 2 columns:
- First column is a set of sentences;
- Second column are the labels (strings) associated to the sentences.
NB:
- The delimiter is a comma;
- The csv file must have a header;
- The first column is the index column;
"""
if args.filepath is not None:
filepath = args.filepath
else:
print("Error: No data file provided.")
sys.exit()
# Load the dataset into a pandas dataframe.
df = pd.read_csv(filepath, delimiter=',', index_col=0)
# Rename columns.
df.columns = ['Sentence', 'Class']
# Keep only rows with class of interest.
if interest_classes is not None:
df = df[df.Class.isin(interest_classes)]
# Deal with duplicates.
df.drop_duplicates(subset=['Sentence', 'Class'], keep='first', inplace=True) # For duplicated queries with same class, keep first instance.
df.drop_duplicates(subset=['Sentence'], keep=False, inplace=True) # For duplicated queries with different classes, remove them.
df.reset_index(drop=True, inplace=True)
# Create a balanced dataset.
if args.balanced:
# Get the maximum number of samples of the smaller class.
# Note that the classes with under 1500 samples are not taken into account.
count = df['Class'].value_counts()
count = count[count > 1500]
nb_samples = min(count)
# Randomly select 'nb_samples' for all classes.
balanced_df = pd.DataFrame(columns=['Sentence', 'Class'])
for i, cat in enumerate(count.index.tolist()):
tmp_df = df[df['Class']==cat].sample(n=nb_samples, replace=False, random_state=2)
balanced_df = pd.concat([balanced_df,tmp_df], ignore_index=True)
df = balanced_df.copy(deep=True)
# Add categories ids column.
categories = df.Class.unique()
df['Class_id'] = df.apply(lambda row: np.where(categories == row.Class)[0][0], axis=1)
# Save mapping between class and id.
mapping = dict(enumerate(categories))
with open(os.path.join(args.output_dir, 'map_classes.json'), 'w') as f:
json.dump(mapping, f)
return df, categories
def tokenize_sentences(tokenizer, df):
"""
Tokenize all sentences in dataset with BertTokenizer.
"""
# Tokenize each sentence of the dataset.
tokenized = df['Sentence'].apply((lambda x: tokenizer.encode(x, add_special_tokens=True)))
lengths = [len(i) for i in tokenized]
max_len = max(lengths) if max(lengths) <= 512 else 512
# Pad and truncate our sequences so that they all have the same length, max_len.
print(' - Max sentence length: {}'.format(max_len))
print(' - Padding/truncating all sentences to {} tokens...'.format(max_len))
tokenized = pad_sequences(tokenized, maxlen=max_len, dtype="long",
value=0, truncating="post", padding="post") # "post" indicates that we want to pad and truncate at the end of the sequence.
return tokenized
def create_masks(tokenized):
"""
Given a list of tokenized sentences, create the corresponding attention masks.
- If a token ID is 0, then it's padding, set the mask to 0.
- If a token ID is > 0, then it's a real token, set the mask to 1.
"""
attention_masks = []
for sent in tokenized:
att_mask = [int(token_id > 0) for token_id in sent]
attention_masks.append(att_mask)
return attention_masks
def split_data(args, dataset):
"""
Split dataset to train/val/test sets.
"""
tokenized, class_ids, attention_masks, sentences = dataset
if args.test_percent < 0.0 or args.test_percent > 1.0:
print("Error: '--test_percent' must be between [0,1].")
sys.exit()
# Split in train/test sets.
Train_inputs, test_inputs, Train_labels, test_labels = train_test_split(tokenized, class_ids, random_state=args.seed, test_size=args.test_percent)
Train_masks, test_masks, _, _ = train_test_split(attention_masks, class_ids, random_state=args.seed, test_size=args.test_percent)
Train_sentences, test_sentences, _, _ = train_test_split(sentences, class_ids, random_state=args.seed, test_size=args.test_percent)
# Further split train set to train/val sets.
val_percent = args.test_percent/(1-args.test_percent)
train_inputs, val_inputs, train_labels, val_labels = train_test_split(Train_inputs, Train_labels, random_state=args.seed, test_size=val_percent)
train_masks, val_masks, _, _ = train_test_split(Train_masks, Train_labels, random_state=args.seed, test_size=val_percent)
train_sentences, val_sentences, _, _ = train_test_split(Train_sentences, Train_labels, random_state=args.seed, test_size=val_percent)
return (train_inputs, train_labels, train_masks, train_sentences), (val_inputs, val_labels, val_masks, val_sentences), (test_inputs, test_labels, test_masks, test_sentences)
def combine_datasets(train_set, val_set):
"""
Combine two datasets in one.
"""
# Extract individual arrays.
train_inputs, train_labels, train_masks, train_sentences = train_set
val_inputs, val_labels, val_masks, val_sentences = val_set
# Combine respective arrays.
combined_inputs = train_inputs + val_inputs
combined_labels = train_labels + val_labels
combined_masks = train_masks + val_masks
combined_sentences = train_sentences + val_sentences
combined_set = (combined_inputs, combined_labels, combined_masks, combined_sentences)
return combined_set
def create_dataloader(dataset, batch_size, training_data=True):
"""
"""
inputs, labels, masks, _ = dataset
# Convert all inputs and labels into torch tensors, the required datatype for our model.
inputs = torch.tensor(inputs)
labels = torch.tensor(labels)
masks = torch.tensor(masks)
# Create the DataLoader.
data = TensorDataset(inputs, masks, labels)
if training_data:
sampler = RandomSampler(data)
else:
sampler = SequentialSampler(data)
dataloader = DataLoader(data, sampler=sampler, batch_size=batch_size)
return data, sampler, dataloader
def compute_metrics(preds, labels, classes):
"""
Compute metrics for the classification task.
"""
# Create dict to store scores.
result = dict()
result['Macro_Average'] = {}
result['Weighted_Average'] = {}
# Averaging methods
#------------------
# - "macro" simply calculates the mean of the binary metrics, giving equal weight to each class.
# - "weighted" accounts for class imbalance by computing the average of binary metrics in which each class’s score is weighted by its presence in the true data sample.
# - "micro" gives each sample-class pair an equal contribution to the overall metric.
result['Macro_Average']['Precision'] = precision_score(y_true=labels, y_pred=preds, average='macro')
result['Macro_Average']['Recall'] = recall_score(y_true=labels, y_pred=preds, average='macro')
result['Macro_Average']['F1'] = f1_score(y_true=labels, y_pred=preds, average='macro')
result['Weighted_Average']['Precision'] = precision_score(y_true=labels, y_pred=preds, average='weighted')
result['Weighted_Average']['Recall'] = recall_score(y_true=labels, y_pred=preds, average='weighted')
result['Weighted_Average']['F1'] = f1_score(y_true=labels, y_pred=preds, average='weighted')
# Accuracy.
result['Accuracy'] = accuracy_score(y_true=labels, y_pred=preds) #accuracy = (preds==labels).mean()
# Matthews correlation coefficient (MCC): used for imbalanced classes.
result['MCC'] = matthews_corrcoef(y_true=labels, y_pred=preds)
# Confusion matrix.
conf_matrix = confusion_matrix(y_true=labels, y_pred=preds, normalize='true', labels=range(len(classes)))
result['conf_matrix'] = conf_matrix.tolist()
return result
def plot_confusion_matrix(cm, classes, outdir):
"""
This function prints and plots the confusion matrix.
"""
cm = np.array(cm)
df_cm = pd.DataFrame(cm, index=classes, columns=classes)
plt.figure(figsize = (10,7))
ax = sn.heatmap(df_cm, annot=True, cmap='coolwarm')
ax.set_xticklabels(ax.get_xticklabels(), fontsize=8, horizontalalignment='right', rotation=45)
ax.set_yticklabels(ax.get_yticklabels(), fontsize=8)
plt.title('Confusion matrix', fontsize=18)
plt.ylabel('True labels', fontsize=12)
plt.xlabel('Predicted labels', fontsize=12)
plt.tight_layout()
plt.savefig(outdir+"confusion_matrix.pdf")
plt.close()
return
def analyze_predictions(preds, labels, sentences):
"""
Analyze more deeply the right and wrong predictions of the model on the dev set.
"""
# Get the wrong predictions.
indices_wrong = np.where(preds!=labels)[0]
sentences_wrong = [sentences[i] for i in indices_wrong]
labels_wrong = [labels[i] for i in indices_wrong]
preds_wrong = [preds[i] for i in indices_wrong]
df_wrong = pd.DataFrame(list(zip(sentences_wrong, labels_wrong, preds_wrong)),
columns =['Sentence', 'Class_id', 'Prediction_id'])
# Get the right predictions.
indices_right = np.where(preds==labels)[0]
sentences_right = [sentences[i] for i in indices_right]
labels_right = [labels[i] for i in indices_right]
preds_right = [preds[i] for i in indices_right]
df_right = pd.DataFrame(list(zip(sentences_right, labels_right, preds_right)),
columns =['Sentence', 'Class_id', 'Prediction_id'])
return df_wrong, df_right
def train(args, model, tokenizer, categories, train_set, val_set):
"""
"""
tb_writer = SummaryWriter() # Create tensorboard summarywriter.
if not args.do_val:
print("Training on train/val sets combined...")
        train_set = combine_datasets(train_set, val_set)
print(" - Total samples: {}".format(len(train_set[0])))
else:
print("Training on train set...")
print(" - Total samples: {}".format(len(train_set[0])))
# Creating training dataloader.
train_data, train_sampler, train_dataloader = create_dataloader(train_set, args.batch_size, training_data=True)
# Setting up Optimizer & Learning Rate Scheduler.
optimizer = AdamW(model.parameters(), lr=args.learning_rate, eps=args.adam_epsilon)
total_steps = len(train_dataloader) * args.num_epochs
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
t = time.time()
for epoch_i in range(0, args.num_epochs):
print('\n======== Epoch {:} / {:} ========'.format(epoch_i + 1, args.num_epochs))
# Measure how long the training epoch takes.
t0 = time.time()
# Put the model into training mode.
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
# Unpack this training batch from our dataloader.
# As we unpack the batch, we'll also copy each tensor to the GPU using the `to` method.
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_input_ids = batch[0].to(args.device)
b_input_mask = batch[1].to(args.device)
b_labels = batch[2].to(args.device)
# Always clear any previously calculated gradients before performing a backward pass.
model.zero_grad()
# Perform a forward pass. This will return the loss (rather than the model output) because we have provided the `labels`.
outputs = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
loss = outputs[0] # The call to `model` always returns a tuple, so we need to pull the loss value out of the tuple. Note that `loss` is a Tensor containing a single value.
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
# Accumulate the training loss over all of the batches so that we can calculate the average loss at the end. The `.item()` function just returns the Python value from the tensor.
tr_loss += loss.item()
# Perform a backward pass to calculate the gradients.
loss.backward()
# Clip the norm of the gradients to 1.0. This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and take a step using the computed gradient.
optimizer.step()
# Update the learning rate.
scheduler.step()
# Update global step.
global_step += 1
# Progress update every 'logging_steps' batches.
if args.logging_steps > 0 and step != 0 and step % args.logging_steps == 0:
# Calculate elapsed time in minutes.
elapsed = format_time(time.time() - t0)
# Compute average training loss over the last 'logging_steps'. Write it to Tensorboard.
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
tb_writer.add_scalar('Train/Loss', loss_scalar, global_step)
logging_loss = tr_loss
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}. Training loss: {:.2f}'.format(step, len(train_dataloader), elapsed, loss_scalar))
print(" Training epoch took: {:}\n".format(format_time(time.time() - t0)))
if args.do_val:
print("Running validation on val set...")
t0 = time.time()
result, df_wrong, df_right = evaluate(args, model, categories, val_set)
# Write results to tensorboard.
tb_writer.add_scalar('Val/Accuracy', result['Accuracy'], epoch_i + 1)
tb_writer.add_scalar('Val/MCC', result['MCC'], epoch_i + 1)
tb_writer.add_scalar('Val/MacroAvg/Recall', result['Macro_Average']['Recall'], epoch_i + 1)
tb_writer.add_scalar('Val/MacroAvg/Precision', result['Macro_Average']['Precision'], epoch_i + 1)
tb_writer.add_scalar('Val/MacroAvg/F1', result['Macro_Average']['F1'], epoch_i + 1)
tb_writer.add_scalar('Val/WeightedAvg/Recall', result['Weighted_Average']['Recall'], epoch_i + 1)
tb_writer.add_scalar('Val/WeightedAvg/Precision', result['Weighted_Average']['Precision'], epoch_i + 1)
tb_writer.add_scalar('Val/WeightedAvg/F1', result['Weighted_Average']['F1'], epoch_i + 1)
# Print results.
print(" * Accuracy: {0:.6f}".format(result['Accuracy']))
print(" * MCC: {0:.6f}".format(result['MCC']))
print(" Macro Average")
print(" * Recall: {0:.6f}".format(result['Macro_Average']['Recall']))
print(" * Precision: {0:.6f}".format(result['Macro_Average']['Precision']))
print(" * F1 score: {0:.6f}".format(result['Macro_Average']['F1']))
print(" Weighted Average")
print(" * Recall: {0:.6f}".format(result['Weighted_Average']['Recall']))
print(" * Precision: {0:.6f}".format(result['Weighted_Average']['Precision']))
print(" * F1 score: {0:.6f}".format(result['Weighted_Average']['F1']))
print(" Validation took: {:}\n".format(format_time(time.time() - t0)))
print("Training complete! Took: {}\n".format(format_time(time.time() - t)))
print("Saving model to {}...\n.".format(args.output_dir))
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
    return model
def evaluate(args, model, categories, evaluation_set):
"""
"""
# Creating evaluation dataloader.
evaluation_data, evaluation_sampler, evaluation_dataloader = create_dataloader(evaluation_set, args.batch_size, training_data=False)
evaluation_sentences = evaluation_set[3]
# Put the model in evaluation mode.
model.eval()
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in evaluation_dataloader:
# Add batch to GPU.
b_input_ids, b_input_mask, b_labels = tuple(t.to(args.device) for t in batch)
# Telling the model not to compute or store gradients (saving memory and speeding up evaluation).
with torch.no_grad():
# Forward pass, calculate logit predictions. This will return the logits rather than the loss because we have not provided labels.
# token_type_ids is the same as the "segment ids", which differentiates sentence 1 and 2 in 2-sentence tasks.
outputs = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask)
# Get the "logits" output by the model. The "logits" are the output values prior to applying an activation function like the softmax.
logits = outputs[0]
# Move logits and labels to CPU and store them.
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = b_labels.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, b_labels.detach().cpu().numpy(), axis=0)
# Track the number of batches
nb_eval_steps += 1
# Take the max predicitions.
preds = np.argmax(preds, axis=1)
# Compute performance.
result = compute_metrics(preds, out_label_ids, categories)
# Get wrong and right predictions.
df_wrong, df_right = analyze_predictions(preds, out_label_ids, evaluation_sentences)
return result, df_wrong, df_right
def create_bootstrap_sample(dataset):
"""
"""
# Extract lists.
tokenized, class_ids, attention_masks, sentences = dataset
# Get a sample.
sample_tokenized, sample_class_ids, sample_attention_masks, sample_sentences = resample(tokenized,
class_ids,
attention_masks,
sentences,
replace=True)
# Concat in tuple.
bootstrapped_sample = (sample_tokenized, sample_class_ids, sample_attention_masks, sample_sentences)
return bootstrapped_sample
def bootstrap_evaluation(args, model, categories, test_set, iters):
"""
"""
macro_recalls = []
macro_precisions = []
macro_f1s = []
weighted_recalls = []
weighted_precisions = []
weighted_f1s = []
mccs = []
accuracies = []
# Run bootstrapping.
for i in tqdm(range(iters)):
# Create bootstrap sample from test set.
bootstrap_sample = create_bootstrap_sample(test_set)
# Evaluate on sample.
result, _, _ = evaluate(args, model, categories, bootstrap_sample)
# Extract results.
macro_recalls.append(result['Macro_Average']['Recall'])
macro_precisions.append(result['Macro_Average']['Precision'])
macro_f1s.append(result['Macro_Average']['F1'])
weighted_recalls.append(result['Weighted_Average']['Recall'])
weighted_precisions.append(result['Weighted_Average']['Precision'])
weighted_f1s.append(result['Weighted_Average']['F1'])
mccs.append(result['MCC'])
accuracies.append(result['Accuracy'])
# Create dictionary to save statistics on metrics.
stats = dict()
stats['macro-recall'] = {}
stats['macro-precision'] = {}
stats['macro-f1'] = {}
stats['weighted-recall'] = {}
stats['weighted-precision'] = {}
stats['weighted-f1'] = {}
stats['mcc'] = {}
stats['accuracy'] = {}
# Compute stats.
stats['macro-recall']['mean'] = statistics.mean(macro_recalls)
stats['macro-recall']['std'] = statistics.pstdev(macro_recalls)
stats['macro-recall']['var'] = statistics.pvariance(macro_recalls)
stats['macro-precision']['mean'] = statistics.mean(macro_precisions)
stats['macro-precision']['std'] = statistics.pstdev(macro_precisions)
stats['macro-precision']['var'] = statistics.pvariance(macro_precisions)
stats['macro-f1']['mean'] = statistics.mean(macro_f1s)
stats['macro-f1']['std'] = statistics.pstdev(macro_f1s)
stats['macro-f1']['var'] = statistics.pvariance(macro_f1s)
stats['weighted-recall']['mean'] = statistics.mean(weighted_recalls)
stats['weighted-recall']['std'] = statistics.pstdev(weighted_recalls)
stats['weighted-recall']['var'] = statistics.pvariance(weighted_recalls)
stats['weighted-precision']['mean'] = statistics.mean(weighted_precisions)
stats['weighted-precision']['std'] = statistics.pstdev(weighted_precisions)
stats['weighted-precision']['var'] = statistics.pvariance(weighted_precisions)
stats['weighted-f1']['mean'] = statistics.mean(weighted_f1s)
stats['weighted-f1']['std'] = statistics.pstdev(weighted_f1s)
stats['weighted-f1']['var'] = statistics.pvariance(weighted_f1s)
stats['mcc']['mean'] = statistics.mean(mccs)
stats['mcc']['std'] = statistics.pstdev(mccs)
stats['mcc']['var'] = statistics.pvariance(mccs)
stats['accuracy']['mean'] = statistics.mean(accuracies)
stats['accuracy']['std'] = statistics.pstdev(accuracies)
stats['accuracy']['var'] = statistics.pvariance(accuracies)
return stats
def evaluate_bert_preds(args, model, tokenizer, categories):
"""
Temporary hard-coded evaluation on predictions from Bert-base.
"""
    # Load queries that Bert-base classified correctly.
df_bert_right_preds = pd.read_csv('./output/bert_base_cased/eval_right_preds.csv', delimiter=',', index_col=0)
df_bert_right_preds['Class_id'] = df_bert_right_preds.apply(lambda row: np.where(categories == row.Class)[0][0], axis=1)
bert_right_preds_tokenized = tokenize_sentences(tokenizer, df_bert_right_preds)
bert_right_preds_attention_masks = create_masks(bert_right_preds_tokenized)
bert_right_preds_dataset = (bert_right_preds_tokenized, df_bert_right_preds.Class_id.values, bert_right_preds_attention_masks, df_bert_right_preds.Sentence.values)
    result, df_wrong, df_right = evaluate(args, model, categories, bert_right_preds_dataset)
df_wrong.to_csv(os.path.join(args.output_dir, 'bert_right_netbert_wrong.csv'))
df_right.to_csv(os.path.join(args.output_dir, 'bert_right_netbert_right.csv'))
with open(os.path.join(args.output_dir, 'scores_bert_right_preds.json'), 'w+') as f:
json.dump(result, f)
# Load queries that Bert-base classified wrongly.
df_bert_wrong_preds = pd.read_csv('./output/bert_base_cased/eval_wrong_preds.csv', delimiter=',', index_col=0)
df_bert_wrong_preds['Class_id'] = df_bert_wrong_preds.apply(lambda row: np.where(categories == row.Class)[0][0], axis=1)
bert_wrong_preds_tokenized = tokenize_sentences(tokenizer, df_bert_wrong_preds)
bert_wrong_preds_attention_masks = create_masks(bert_wrong_preds_tokenized)
bert_wrong_preds_dataset = (bert_wrong_preds_tokenized, df_bert_wrong_preds.Class_id.values, bert_wrong_preds_attention_masks, df_bert_wrong_preds.Sentence.values)
    result, df_wrong, df_right = evaluate(args, model, categories, bert_wrong_preds_dataset)
df_wrong.to_csv(os.path.join(args.output_dir, 'bert_wrong_netbert_wrong.csv'))
df_right.to_csv(os.path.join(args.output_dir, 'bert_wrong_netbert_right.csv'))
with open(os.path.join(args.output_dir, 'scores_bert_wrong_preds.json'), 'w+') as f:
json.dump(result, f)
return
def main(args):
"""
"""
# Create output dir if none mentioned.
if args.output_dir is None:
model_name = os.path.splitext(os.path.basename(args.model_name_or_path))[0]
args.output_dir = "./output/" + model_name + '/'
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
print("\n========================================")
print(' MODEL ')
print("========================================")
print("Loading BertForSequenceClassification model...")
model = BertForSequenceClassification.from_pretrained(
args.model_name_or_path, # Use the 12-layer BERT model, with a cased vocab.
num_labels = args.num_labels, # The number of output labels
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
cache_dir = args.cache_dir,
)
print('Loading BertTokenizer...')
tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path, do_lower_case=False)
print("Setting up CUDA & GPU...")
if torch.cuda.is_available():
if args.gpu_id is not None:
torch.cuda.set_device(args.gpu_id)
args.n_gpu = 1
print(" - GPU {} {} will be used.".format(torch.cuda.get_device_name(args.gpu_id), args.gpu_id))
else:
args.n_gpu = torch.cuda.device_count()
gpu_ids = list(range(0, args.n_gpu))
if args.n_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=gpu_ids, output_device=gpu_ids[-1])
print(" - GPU(s) {} will be used.".format(str(gpu_ids)))
args.device = torch.device("cuda")
else:
args.device = torch.device("cpu")
args.n_gpu = 0
print(" - No GPU available, using the CPU instead.")
model.to(args.device)
# Set the seed value all over the place to make this reproducible.
set_seed(args.seed)
print("\n========================================")
print(' DATA ')
print("========================================")
print("Loading data...")
classes_of_interest = ['Data Sheets',
'Configuration (Guides, Examples & TechNotes)',
'Install & Upgrade Guides',
'Release Notes',
'End User Guides']
df, categories = load_data(args, classes_of_interest)
sentences = df.Sentence.values
classes = df.Class.values
class_ids = df.Class_id.values
print(' - Number of sentences: {:,}'.format(df.shape[0]))
print(' - Number of doc types: {:,}'.format(len(categories)))
for i, cat in enumerate(categories):
print(" * {} : {}".format(cat, i))
print("Tokenizing sentences...")
tokenized = tokenize_sentences(tokenizer, df)
attention_masks = create_masks(tokenized)
print("Splitting dataset...")
dataset = (tokenized, class_ids, attention_masks, sentences)
train_set, val_set, test_set = split_data(args, dataset)
print(" - Samples in train set: {}".format(len(train_set[0])))
train_ids = Counter(train_set[1]).keys()
train_ids_freq = Counter(train_set[1]).values()
for i, freq in zip(train_ids, train_ids_freq):
print(" * {} : {}".format(i, freq))
print(" - Samples in val set: {}".format(len(val_set[0])))
val_ids = Counter(val_set[1]).keys()
val_ids_freq = Counter(val_set[1]).values()
for i, freq in zip(val_ids, val_ids_freq):
print(" * {} : {}".format(i, freq))
print(" - Samples in test set: {}".format(len(test_set[0])))
test_ids = Counter(test_set[1]).keys()
test_ids_freq = Counter(test_set[1]).values()
for i, freq in zip(test_ids, test_ids_freq):
print(" * {} : {}".format(i, freq))
if args.do_train:
print("\n========================================")
print(' TRAINING ')
print("========================================")
model = train(args, model, tokenizer, categories, train_set, val_set)
if args.do_test:
print("\n========================================")
print(' TESTING ')
print("========================================")
print("Evaluation on entire test set...")
result, df_wrong, df_right = evaluate(args, model, categories, test_set)
plot_confusion_matrix(result['conf_matrix'], categories, args.output_dir)
df_wrong.to_csv(os.path.join(args.output_dir, 'preds_wrong.csv'))
df_right.to_csv(os.path.join(args.output_dir, 'preds_right.csv'))
with open(os.path.join(args.output_dir, 'test_set_scores.json'), 'w+') as f:
json.dump(result, f)
print(" * Accuracy: {0:.6f}".format(result['Accuracy']))
print(" * MCC: {0:.6f}".format(result['MCC']))
print(" Macro Average")
print(" * Recall: {0:.6f}".format(result['Macro_Average']['Recall']))
print(" * Precision: {0:.6f}".format(result['Macro_Average']['Precision']))
print(" * F1 score: {0:.6f}".format(result['Macro_Average']['F1']))
print(" Weighted Average")
print(" * Recall: {0:.6f}".format(result['Weighted_Average']['Recall']))
print(" * Precision: {0:.6f}".format(result['Weighted_Average']['Precision']))
print(" * F1 score: {0:.6f}".format(result['Weighted_Average']['F1']))
print("Evaluation on bootstrap samples from test set...")
stats = bootstrap_evaluation(args, model, categories, test_set, 100)
with open(os.path.join(args.output_dir, 'bootstrap_scores.json'), 'w+') as f:
json.dump(stats, f)
if args.do_compare:
print("Evaluation on BERT predictions...")
evaluate_bert_preds(args, model, tokenizer, categories)
if __name__=="__main__":
args = parse_arguments()
main(args)
| 44.366215
| 190
| 0.619577
|
4a16f216eebe66d044227543804c52f98fcd716c
| 18
|
py
|
Python
|
PyBlokusTools/pyblokustools/version.py
|
HenrikThoroe/SWC-2021
|
8e7eee25e3a6fda7e863591b05fa161d8a2ebc78
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
PyBlokusTools/pyblokustools/version.py
|
HenrikThoroe/SWC-2021
|
8e7eee25e3a6fda7e863591b05fa161d8a2ebc78
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
PyBlokusTools/pyblokustools/version.py
|
HenrikThoroe/SWC-2021
|
8e7eee25e3a6fda7e863591b05fa161d8a2ebc78
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
VERSION = "1.3.4"
| 9
| 17
| 0.555556
|
4a16f30b585aa05308a78fa566522aba1b373df7
| 92
|
py
|
Python
|
enthought/scripting/util.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/scripting/util.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/scripting/util.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from apptools.scripting.util import *
| 23
| 38
| 0.836957
|
4a16f354a1b6177485c8d6aa03310c969a1d5206
| 673
|
py
|
Python
|
qwerty-hertz/_engine/command_list.py
|
ismaelithalo/QwertyHertz
|
38b37d1d06fae41962dd3b114acc1cf4e9bb5b28
|
[
"MIT"
] | null | null | null |
qwerty-hertz/_engine/command_list.py
|
ismaelithalo/QwertyHertz
|
38b37d1d06fae41962dd3b114acc1cf4e9bb5b28
|
[
"MIT"
] | null | null | null |
qwerty-hertz/_engine/command_list.py
|
ismaelithalo/QwertyHertz
|
38b37d1d06fae41962dd3b114acc1cf4e9bb5b28
|
[
"MIT"
] | null | null | null |
# coding: iso-8859-1
with open('script/command_atalho_list.txt') as file:
print("\nComandos disponíveis:")
comandos = file.read().split('\n')
for x in range(len(comandos)):
comman = comandos[x].split(';')
print("\n")
for i in range(len(comman)):
if(i > 0):
if (i == 1):
print("Numero de teclas: {}".format(comman[i]))
else:
print("{}: {}".format((i-1),comman[i]))
else:
print("Comando: {}".format(comman[i]))
print("\nAtalhos:")
print("\n")
# print("Teste")
| 37.388889
| 71
| 0.435364
|
4a16f362fb19ab06e52212fecb0699a89dd32e33
| 1,812
|
py
|
Python
|
PythonProjects/01-IntroductionToPython/src/m7_summary.py
|
csse120/csse120-public
|
36a92f0f4300def62ff091a8e70756e24beedd20
|
[
"MIT"
] | null | null | null |
PythonProjects/01-IntroductionToPython/src/m7_summary.py
|
csse120/csse120-public
|
36a92f0f4300def62ff091a8e70756e24beedd20
|
[
"MIT"
] | null | null | null |
PythonProjects/01-IntroductionToPython/src/m7_summary.py
|
csse120/csse120-public
|
36a92f0f4300def62ff091a8e70756e24beedd20
|
[
"MIT"
] | null | null | null |
"""
An exercise that summarizes what you have learned in this Session.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Derek Whitley, their colleagues, and PUT_YOUR_NAME_HERE.
"""
###############################################################################
# TODO: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
###############################################################################
###############################################################################
# TODO: 2.
# Write code that accomplishes the following (and ONLY the following),
# in the order listed:
# _
# - Constructs a SimpleTurtle with a "blue" Pen.
# - Makes the SimpleTurtle go straight UP 200 pixels.
# - Makes the SimpleTurtle lift its pen UP
# (so that the next movements do NOT leave a "trail")
# HINT: Use the "dot trick" to figure out how to do this.
# - Makes the SimpleTurtle go to the Point at (100, -40).
# - Makes the SimpleTurtle put its pen DOWN
# (so that the next movements will return to leaving a "trail").
# - Makes the SimpleTurtle's pen have color "green" and thickness 10.
# - Makes the SimpleTurtle go 150 pixels straight DOWN.
# _
# Don't forget to:
# - import rosegraphics and construct a TurtleWindow
# [remember the required PARENTHESES for constructing an object!]
# at the BEGINNING of your code, and to
# - ask your TurtleWindow to close_on_mouse_click
# as the LAST line of your code.
# See the beginning and end of m5e_loopy_turtles for an example.
# _
# As always, test by running the module.
# As always, COMMIT-and-PUSH when you are done with this module.
###############################################################################
| 46.461538 | 79 | 0.565673 |
4a16f3c8c357183c223f6daea33c80356c112294 | 398 | py | Python |
backup/freq_study/scripts/indexize.py | stefanobellelli/nonce3vec | 50c17cea552afd85e78ebc12f97898995dafc3a8 | ["MIT"] | null | null | null |
backup/freq_study/scripts/indexize.py | stefanobellelli/nonce3vec | 50c17cea552afd85e78ebc12f97898995dafc3a8 | ["MIT"] | 10 | 2018-07-18T03:15:23.000Z | 2018-09-13T16:19:06.000Z |
backup/freq_study/scripts/indexize.py | stefanobellelli/nonce3vec | 50c17cea552afd85e78ebc12f97898995dafc3a8 | ["MIT"] | null | null | null |
import sys, pickle
from lib import vecindex
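# Builds partial word and POS indexes for one source log: the log is expected to hold a
# Python dict literal, which is eval'd and passed to lib.vecindex (semantics assumed from
# its use here: it returns a rendered index for the given key, capped at 50000 entries).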
src = 'source_logs/' + sys.argv[1] + '.log'
out = 'partial_indexes/' + sys.argv[1] + '_index.txt'
with open(src, 'r', errors='replace') as f:
f = f.read().replace('\n', '')
d = eval(f)
_, word = vecindex(d, 'word', 50000)
_, pos = vecindex(d, 'pos', 50000)
with open(out, 'w', errors='replace') as f:
f.write('WORD:\n' + word + '\nPOS:\n' + pos)
| 26.533333 | 53 | 0.600503 |
4a16f40d3633a61b3057ac3b699de9ebe7eadabb | 511 | py | Python |
corehq/apps/data_interfaces/migrations/0013_createscheduleinstanceactiondefinition_scheduler_module_info.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | ["BSD-3-Clause"] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z |
corehq/apps/data_interfaces/migrations/0013_createscheduleinstanceactiondefinition_scheduler_module_info.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | ["BSD-3-Clause"] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z |
corehq/apps/data_interfaces/migrations/0013_createscheduleinstanceactiondefinition_scheduler_module_info.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | ["BSD-3-Clause"] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z |
# Generated by Django 1.10.7 on 2017-07-27 15:40
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0012_createscheduleinstanceactiondefinition_reset_case_property_name'),
]
operations = [
migrations.AddField(
model_name='createscheduleinstanceactiondefinition',
name='scheduler_module_info',
field=jsonfield.fields.JSONField(default=dict),
),
]
| 24.333333 | 100 | 0.690802 |
4a16f4340c0a4d139d9f62d4f6c33b48a2a37c5e | 50,930 | py | Python |
test/run_test.py | dreiss/pytorch | 46d27a53fe900ae53348904f6d4f90546a9d3009 | ["Intel"] | null | null | null |
test/run_test.py | dreiss/pytorch | 46d27a53fe900ae53348904f6d4f90546a9d3009 | ["Intel"] | null | null | null |
test/run_test.py | dreiss/pytorch | 46d27a53fe900ae53348904f6d4f90546a9d3009 | ["Intel"] | null | null | null |
#!/usr/bin/env python3
import argparse
import copy
import csv
from datetime import datetime
import json
import modulefinder
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import FILE_SCHEMA, IS_IN_CI, TEST_WITH_ROCM, shell, set_cwd
import torch.distributed as dist
from typing import Dict, Optional, Tuple, List, Any
from typing_extensions import TypedDict
try:
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from tools.stats.s3_stat_parser import (
get_previous_reports_for_branch,
get_previous_reports_for_pr,
Report,
HAVE_BOTO3)
from tools.testing.test_selections import calculate_shards
except ImportError:
print("Unable to import s3_stat_parser from tools. Running without S3 stats...")
HAVE_BOTO3 = False
TESTS = [
'test_import_time',
'test_public_bindings',
'test_type_hints',
'test_autograd',
'benchmark_utils/test_benchmark_utils',
'test_binary_ufuncs',
'test_bundled_inputs',
'test_complex',
'test_cpp_api_parity',
'test_cpp_extensions_aot_no_ninja',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_jit',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'distributed/test_store',
'distributed/test_pg_wrapper',
'test_cuda',
'test_jit_cuda_fuser',
'test_cuda_primary_ctx',
'test_dataloader',
'test_datapipe',
'distributed/test_data_parallel',
'distributed/test_distributed_fork',
'distributed/test_distributed_spawn',
'distributions/test_constraints',
'distributions/test_distributions',
'test_dispatch',
'test_expecttest',
'test_foreach',
'test_indexing',
'test_jit',
'test_linalg',
'test_logging',
'test_mkldnn',
'test_model_dump',
'test_module_init',
'test_multiprocessing',
'test_multiprocessing_spawn',
'distributed/test_nccl',
'test_native_functions',
'test_numba_integration',
'test_nn',
'test_ops',
'test_optim',
'test_pytree',
'test_mobile_optimizer',
'test_set_default_mobile_cpu_allocator',
'test_xnnpack_integration',
'test_vulkan',
'test_sparse',
'test_sparse_csr',
'test_quantization',
'test_pruning_op',
'test_spectral_ops',
'test_serialization',
'test_shape_ops',
'test_show_pickle',
'test_sort_and_select',
'test_tensor_creation_ops',
'test_testing',
'test_torch',
'test_type_info',
'test_unary_ufuncs',
'test_utils',
'test_view_ops',
'test_vmap',
'test_namedtuple_return_api',
'test_numpy_interop',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
'test_tensorboard',
'test_namedtensor',
'test_reductions',
'test_type_promotion',
'test_jit_disabled',
'test_function_schema',
'test_overrides',
'test_jit_fuser_te',
'test_tensorexpr',
'test_tensorexpr_pybind',
'test_openmp',
'test_profiler',
"distributed/test_launcher",
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_futures',
'test_fx',
'test_fx_experimental',
'test_functional_autograd_benchmark',
'test_package',
'test_license',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
'distributed/elastic/timer/api_test',
'distributed/elastic/timer/local_timer_example',
'distributed/elastic/timer/local_timer_test',
'distributed/elastic/events/lib_test',
'distributed/elastic/metrics/api_test',
'distributed/elastic/utils/logging_test',
'distributed/elastic/utils/util_test',
'distributed/elastic/utils/distributed_test',
'distributed/elastic/multiprocessing/api_test',
'distributed/_sharding_spec/test_sharding_spec',
]
# Tests need to be run with pytest.
USE_PYTEST_LIST = [
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributions/test_constraints',
'distributions/test_transforms',
'distributions/test_utils',
'test_typing',
"distributed/elastic/events/lib_test",
"distributed/elastic/agent/server/test/api_test",
]
WINDOWS_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/test_distributed_fork',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
"distributed/elastic/agent/server/test/api_test",
'distributed/elastic/multiprocessing/api_test',
]
ROCM_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_multiprocessing',
'test_jit_legacy',
'test_type_hints',
'test_openmp',
]
RUN_PARALLEL_BLOCKLIST = [
'test_cpp_extensions_jit',
'test_expecttest',
'test_jit_disabled',
'test_mobile_optimizer',
'test_multiprocessing',
'test_multiprocessing_spawn',
'test_namedtuple_return_api',
'test_overrides',
'test_show_pickle',
'test_tensorexpr',
'test_cuda_primary_ctx',
] + [test for test in TESTS if test.startswith('distributed/')]
WINDOWS_COVERAGE_BLOCKLIST = [
]
# These tests are slow enough that it's worth calculating whether the patch
# touched any related files first. This list was manually generated, but for every
# run with --determine-from, we use another generated list based on this one and the
# previous test stats.
TARGET_DET_LIST = [
'distributions/test_distributions',
'test_nn',
'test_autograd',
'test_cpp_extensions_jit',
'test_jit_legacy',
'test_dataloader',
'test_overrides',
'test_linalg',
'test_jit',
'test_jit_profiling',
'test_torch',
'test_binary_ufuncs',
'test_numpy_interop',
'test_reductions',
'test_shape_ops',
'test_sort_and_select',
'test_testing',
'test_view_ops',
'distributed/nn/jit/test_instantiator',
'distributed/test_distributed_fork',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/algorithms/ddp_comm_hooks/test_ddp_hooks',
'distributed/test_distributed_spawn',
'test_cuda',
'test_cuda_primary_ctx',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_aot_no_ninja',
'test_serialization',
'test_optim',
'test_utils',
'test_multiprocessing',
'test_tensorboard',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'distributed/test_store',
'distributed/test_pg_wrapper',
'test_quantization',
'test_pruning_op',
'test_determination',
'test_futures',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
]
# the JSON file to store the S3 test stats
TEST_TIMES_FILE = '.pytorch-test-times.json'
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
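# Maps a test module name to the set of modules it imports, filled lazily by get_dep_modules().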
_DEP_MODULES_CACHE: Dict[str, set] = {}
DISTRIBUTED_TESTS_CONFIG = {}
if dist.is_available():
DISTRIBUTED_TESTS_CONFIG['test'] = {
'WORLD_SIZE': '1'
}
if not TEST_WITH_ROCM and dist.is_mpi_available():
DISTRIBUTED_TESTS_CONFIG['mpi'] = {
'WORLD_SIZE': '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-mpi'
}
if dist.is_nccl_available():
DISTRIBUTED_TESTS_CONFIG['nccl'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-nccl'
}
if dist.is_gloo_available():
DISTRIBUTED_TESTS_CONFIG['gloo'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-gloo'
}
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)
if n.startswith('SIG') and '_' not in n}
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
ENABLE_PR_HISTORY_REORDERING = bool(os.environ.get("ENABLE_PR_HISTORY_REORDERING", "0") == "1")
JIT_EXECUTOR_TESTS = [
'test_jit_cuda_fuser',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
]
# Dictionary matching test modules (in TESTS) to lists of test cases (within that test_module) that would be run when
# options.run_specified_test_cases is enabled.
# For example:
# {
# "test_nn": ["test_doubletensor_avg_pool3d", "test_share_memory", "test_hook_requires_grad"],
# ...
# }
# then for test_nn.py, we would ONLY run test_doubletensor_avg_pool3d, test_share_memory, and test_hook_requires_grad.
SPECIFIED_TEST_CASES_DICT: Dict[str, List[str]] = {}
# The file from which the SPECIFIED_TEST_CASES_DICT will be filled, a CSV of test cases that would be run when
# options.run_specified_test_cases is enabled.
SPECIFIED_TEST_CASES_FILE: str = '.pytorch_specified_test_cases.csv'
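# Judging from load_specified_test_cases() below and the --run-specified-test-cases help
# text, a (hypothetical) CSV would look like:
#   test_filename,test_case_name
#   test_nn,test_doubletensor_avg_pool3d
#   test_jit,__all__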
def print_to_stderr(message):
print(message, file=sys.stderr)
# Convert something like pytorch_windows_vs2019_py36_cuda10.1_build to pytorch_windows_vs2019_py36_cuda10.1
def get_stripped_CI_job() -> str:
job = os.environ.get("JOB_BASE_NAME", "").rstrip('0123456789')
if job.endswith('_slow_test'):
job = job[:len(job) - len('_slow_test')]
elif job.endswith('_test') or job.endswith('-test'):
job = job[:len(job) - len('_test')]
elif job.endswith('_build') or job.endswith('-build'):
job = job[:len(job) - len('_build')]
return job
def calculate_job_times(reports: List["Report"]) -> Dict[str, float]:
# an entry will be like ("test_file_name" -> (current_avg, # values))
jobs_to_times: Dict[str, Tuple[float, int]] = dict()
for report in reports:
assert report.get('format_version') == 2, "S3 format currently handled is version 2 only"
files: Dict[str, Any] = report['files']
for name, test_file in files.items():
if name not in jobs_to_times:
jobs_to_times[name] = (test_file['total_seconds'], 1)
else:
curr_avg, curr_count = jobs_to_times[name]
new_count = curr_count + 1
new_avg = (curr_avg * curr_count + test_file['total_seconds']) / new_count
jobs_to_times[name] = (new_avg, new_count)
# This is no longer needed after https://github.com/pytorch/pytorch/pull/60604,
# TODO remove this once viable/strict move pass the merged commit.
# if there's 'test_cpp_extensions_aot' entry in jobs_to_times, add 'test_cpp_extensions_aot_ninja'
# and 'test_cpp_extensions_aot_no_ninja' duplicate entries to ease future computation since
# test_cpp_extensions_aot_no_ninja and test_cpp_extensions_aot_ninja are Python test jobs that
# both use the test_cpp_extensions_aot.py file.
if 'test_cpp_extensions_aot' in jobs_to_times:
jobs_to_times['test_cpp_extensions_aot_ninja'] = jobs_to_times['test_cpp_extensions_aot']
jobs_to_times['test_cpp_extensions_aot_no_ninja'] = jobs_to_times['test_cpp_extensions_aot']
return {job: time for job, (time, _) in jobs_to_times.items()}
def pull_job_times_from_S3() -> Dict[str, float]:
if HAVE_BOTO3:
ci_job_prefix = get_stripped_CI_job()
s3_reports: List["Report"] = get_previous_reports_for_branch('origin/viable/strict', ci_job_prefix)
else:
print('Uh oh, boto3 is not found. Either it is not installed or we failed to import s3_stat_parser.')
print('If not installed, please install boto3 for automatic sharding and test categorization.')
s3_reports = []
if len(s3_reports) == 0:
        print('Gathered no reports from S3. Proceeding without them.')
return dict()
return calculate_job_times(s3_reports)
def get_past_job_times() -> Dict[str, float]:
if os.path.exists(TEST_TIMES_FILE):
with open(TEST_TIMES_FILE) as file:
test_times_json: JobTimeJSON = json.load(file)
curr_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip()
file_commit = test_times_json.get('commit', '')
curr_ci_job = get_stripped_CI_job()
file_ci_job = test_times_json.get('JOB_BASE_NAME', 'N/A')
if curr_commit != file_commit:
print(f'Current test times file is from different commit {file_commit}.')
elif curr_ci_job != file_ci_job:
print(f'Current test times file is for different CI job {file_ci_job}.')
else:
print(f'Found stats for current commit: {curr_commit} and job: {curr_ci_job}. Proceeding with those values.')
return test_times_json.get('job_times', {})
# Found file, but commit or CI job in JSON doesn't match
print(f'Overwriting current file with stats based on current commit: {curr_commit} and CI job: {curr_ci_job}')
job_times = pull_job_times_from_S3()
print(f'Exporting S3 test stats to {TEST_TIMES_FILE}.')
export_S3_test_times(TEST_TIMES_FILE, job_times)
return job_times
class JobTimeJSON(TypedDict):
commit: str
job_times: Dict[str, float]
def get_job_times_json(job_times: Dict[str, float]) -> JobTimeJSON:
return {
'commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip(),
'JOB_BASE_NAME': get_stripped_CI_job(),
'job_times': job_times,
}
def get_shard(which_shard: int, num_shards: int, tests: List[str]) -> List[str]:
jobs_to_times = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. Proceeding with default sharding plan.')
return tests[which_shard - 1 :: num_shards]
shards = calculate_shards(num_shards, tests, jobs_to_times)
_, tests_from_shard = shards[which_shard - 1]
return tests_from_shard
def get_slow_tests_based_on_S3() -> List[str]:
jobs_to_times: Dict[str, float] = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. No new slow tests calculated.')
return []
slow_tests: List[str] = []
for test in TESTS:
if test in jobs_to_times and test not in TARGET_DET_LIST:
if jobs_to_times[test] > SLOW_TEST_THRESHOLD:
slow_tests.append(test)
return slow_tests
def get_test_case_args(test_module, using_pytest) -> List[str]:
args = []
# if test_module not specified or specified with '__all__' then run all tests
if test_module not in SPECIFIED_TEST_CASES_DICT or '__all__' in SPECIFIED_TEST_CASES_DICT[test_module]:
return args
if using_pytest:
args.append('-k')
args.append(' or '.join(SPECIFIED_TEST_CASES_DICT[test_module]))
else:
for test in SPECIFIED_TEST_CASES_DICT[test_module]:
args.append('-k')
args.append(test)
return args
def get_executable_command(options, allow_pytest, disable_coverage=False):
if options.coverage and not disable_coverage:
executable = ['coverage', 'run', '--parallel-mode', '--source=torch']
else:
executable = [sys.executable]
if options.pytest:
if allow_pytest:
executable += ['-m', 'pytest']
else:
print_to_stderr('Pytest cannot be used for this test. Falling back to unittest.')
return executable
def run_test(test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None):
unittest_args = options.additional_unittest_args.copy()
if options.verbose:
unittest_args.append(f'-{"v"*options.verbose}') # in case of pytest
if test_module in RUN_PARALLEL_BLOCKLIST:
unittest_args = [arg for arg in unittest_args if not arg.startswith('--run-parallel')]
if extra_unittest_args:
assert isinstance(extra_unittest_args, list)
unittest_args.extend(extra_unittest_args)
# If using pytest, replace -f with equivalent -x
if options.pytest:
unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]
# Multiprocessing related tests cannot run with coverage.
# Tracking issue: https://github.com/pytorch/pytorch/issues/50661
disable_coverage = sys.platform == 'win32' and test_module in WINDOWS_COVERAGE_BLOCKLIST
# Extra arguments are not supported with pytest
executable = get_executable_command(options, allow_pytest=not extra_unittest_args,
disable_coverage=disable_coverage)
# TODO: move this logic into common_utils.py instead of passing in "-k" individually
# The following logic for running specified tests will only run for non-distributed tests, as those are dispatched
# to test_distributed and not run_test (this function)
if options.run_specified_test_cases:
unittest_args.extend(get_test_case_args(test_module, 'pytest' in executable))
# Can't call `python -m unittest test_*` here because it doesn't run code
# in `if __name__ == '__main__': `. So call `python test_*.py` instead.
argv = [test_module + '.py'] + unittest_args
command = (launcher_cmd or []) + executable + argv
print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
return shell(command, test_directory)
def test_cuda_primary_ctx(test_module, test_directory, options):
return run_test(test_module, test_directory, options, extra_unittest_args=['--subprocess'])
def _test_cpp_extensions_aot(test_directory, options, use_ninja):
if use_ninja:
try:
cpp_extension.verify_ninja_availability()
except RuntimeError:
print(CPP_EXTENSIONS_ERROR)
return 1
# Wipe the build folder, if it exists already
cpp_extensions_test_dir = os.path.join(test_directory, 'cpp_extensions')
cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, 'build')
if os.path.exists(cpp_extensions_test_build_dir):
shutil.rmtree(cpp_extensions_test_build_dir)
# Build the test cpp extensions modules
shell_env = os.environ.copy()
shell_env['USE_NINJA'] = str(1 if use_ninja else 0)
cmd = [sys.executable, 'setup.py', 'install', '--root', './install']
return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
if return_code != 0:
return return_code
if sys.platform != 'win32':
return_code = shell(cmd,
cwd=os.path.join(cpp_extensions_test_dir, 'no_python_abi_suffix_test'),
env=shell_env)
if return_code != 0:
return return_code
# "install" the test modules and run tests
python_path = os.environ.get('PYTHONPATH', '')
from shutil import copyfile
test_module = 'test_cpp_extensions_aot' + ('_ninja' if use_ninja else '_no_ninja')
copyfile(test_directory + '/test_cpp_extensions_aot.py', test_directory + '/' + test_module + '.py')
try:
cpp_extensions = os.path.join(test_directory, 'cpp_extensions')
install_directory = ''
# install directory is the one that is named site-packages
for root, directories, _ in os.walk(os.path.join(cpp_extensions, 'install')):
for directory in directories:
if '-packages' in directory:
install_directory = os.path.join(root, directory)
assert install_directory, 'install_directory must not be empty'
os.environ['PYTHONPATH'] = os.pathsep.join([install_directory, python_path])
return run_test(test_module, test_directory, options)
finally:
os.environ['PYTHONPATH'] = python_path
if os.path.exists(test_directory + '/' + test_module + '.py'):
os.remove(test_directory + '/' + test_module + '.py')
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot(test_directory, options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot(test_directory, options, use_ninja=False)
def test_distributed(test_module, test_directory, options):
# MPI tests are broken with Python-3.9
mpi_available = subprocess.call('command -v mpiexec', shell=True) == 0 and sys.version_info < (3, 9)
if options.verbose and not mpi_available:
print_to_stderr(
'MPI not available -- MPI backend tests will be skipped')
config = DISTRIBUTED_TESTS_CONFIG
for backend, env_vars in config.items():
if sys.platform == 'win32' and backend != 'gloo':
continue
if backend == 'mpi' and not mpi_available:
continue
for with_init_file in {True, False}:
if sys.platform == 'win32' and not with_init_file:
continue
tmp_dir = tempfile.mkdtemp()
if options.verbose:
init_str = "with {} init_method"
with_init = init_str.format("file" if with_init_file else "env")
print_to_stderr(
'Running distributed tests for the {} backend {}'.format(
backend, with_init))
os.environ['TEMP_DIR'] = tmp_dir
os.environ['BACKEND'] = backend
os.environ['INIT_METHOD'] = 'env://'
os.environ.update(env_vars)
if with_init_file:
if test_module in ["test_distributed_fork", "test_distributed_spawn"]:
init_method = f'{FILE_SCHEMA}{tmp_dir}/'
else:
init_method = f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'
os.environ['INIT_METHOD'] = init_method
try:
os.mkdir(os.path.join(tmp_dir, 'barrier'))
os.mkdir(os.path.join(tmp_dir, 'test_dir'))
if backend == 'mpi':
# test mpiexec for --noprefix option
with open(os.devnull, 'w') as devnull:
allowrunasroot_opt = '--allow-run-as-root' if subprocess.call(
'mpiexec --allow-run-as-root -n 1 bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
noprefix_opt = '--noprefix' if subprocess.call(
f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]
return_code = run_test(test_module, test_directory, options,
launcher_cmd=mpiexec)
else:
return_code = run_test(test_module, test_directory, options)
if return_code != 0:
return return_code
finally:
shutil.rmtree(tmp_dir)
return 0
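# Test modules that need special setup are mapped to custom runners here; everything
# else falls back to the plain run_test() handler (see run_test_module below).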
CUSTOM_HANDLERS = {
'test_cuda_primary_ctx': test_cuda_primary_ctx,
'test_cpp_extensions_aot_no_ninja': test_cpp_extensions_aot_no_ninja,
'test_cpp_extensions_aot_ninja': test_cpp_extensions_aot_ninja,
'distributed/test_distributed_fork': test_distributed,
'distributed/test_distributed_spawn': test_distributed,
}
def parse_test_module(test):
return test.split('.')[0]
class TestChoices(list):
def __init__(self, *args, **kwargs):
super(TestChoices, self).__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
def parse_args():
parser = argparse.ArgumentParser(
description='Run the PyTorch unit test suite',
epilog='where TESTS is any of: {}'.format(', '.join(TESTS)),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-v',
'--verbose',
action='count',
default=0,
help='print verbose information and test-by-test results')
parser.add_argument(
        '--jit',
action='store_true',
help='run all jit tests')
parser.add_argument(
'-pt', '--pytest', action='store_true',
help='If true, use `pytest` to execute the tests. E.g., this runs '
'TestTorch with pytest in verbose and coverage mode: '
'python run_test.py -vci torch -pt')
parser.add_argument(
'-c', '--coverage', action='store_true', help='enable coverage',
default=PYTORCH_COLLECT_COVERAGE)
parser.add_argument(
'-i',
'--include',
nargs='+',
choices=TestChoices(TESTS),
default=TESTS,
metavar='TESTS',
help='select a set of tests to include (defaults to ALL tests).'
' tests must be a part of the TESTS list defined in run_test.py')
parser.add_argument(
'-x',
'--exclude',
nargs='+',
choices=TESTS,
metavar='TESTS',
default=[],
help='select a set of tests to exclude')
parser.add_argument(
'-f',
'--first',
choices=TESTS,
metavar='TESTS',
help='select the test to start from (excludes previous tests)')
parser.add_argument(
'-l',
'--last',
choices=TESTS,
metavar='TESTS',
help='select the last test to run (excludes following tests)')
parser.add_argument(
'--bring-to-front',
nargs='+',
choices=TestChoices(TESTS),
default=[],
metavar='TESTS',
help='select a set of tests to run first. This can be used in situations'
' where you want to run all tests, but care more about some set, '
'e.g. after making a change to a specific component')
parser.add_argument(
'--ignore-win-blocklist',
action='store_true',
help='always run blocklisted windows tests')
parser.add_argument(
'--determine-from',
help='File of affected source filenames to determine which tests to run.')
parser.add_argument(
'--continue-through-error',
action='store_true',
help='Runs the full test suite despite one of the tests failing')
parser.add_argument(
'additional_unittest_args',
nargs='*',
help='additional arguments passed through to unittest, e.g., '
'python run_test.py -i sparse -- TestSparse.test_factory_size_check')
parser.add_argument(
'--export-past-test-times',
nargs='?',
type=str,
const=TEST_TIMES_FILE,
help='dumps test times from previous S3 stats into a file, format JSON',
)
parser.add_argument(
'--shard',
nargs=2,
type=int,
help='runs a shard of the tests (taking into account other selections), e.g., '
'--shard 2 3 will break up the selected tests into 3 shards and run the tests '
'in the 2nd shard (the first number should not exceed the second)',
)
parser.add_argument(
'--exclude-jit-executor',
action='store_true',
help='exclude tests that are run for a specific jit config'
)
parser.add_argument(
'--run-specified-test-cases',
nargs='?',
type=str,
const=SPECIFIED_TEST_CASES_FILE,
help='load specified test cases file dumped from previous OSS CI stats, format CSV. '
' If all test cases should run for a <test_module> please add a single row: \n'
' test_filename,test_case_name\n'
' ...\n'
' <test_module>,__all__\n'
' ...\n'
'how we use the stats will be based on option "--use-specified-test-cases-by".'
)
parser.add_argument(
'--use-specified-test-cases-by',
type=str,
choices=['include', 'bring-to-front'],
default='include',
help='used together with option "--run-specified-test-cases". When specified test case '
'file is set, this option allows the user to control whether to only run the specified test '
'modules or to simply bring the specified modules to front and also run the remaining '
'modules. Note: regardless of this option, we will only run the specified test cases '
' within a specified test module. For unspecified test modules with the bring-to-front '
'option, all test cases will be run, as one may expect.',
)
return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
"""Find the index of the first or last occurrence of a given test/test module in the list of selected tests.
This function is used to determine the indices when slicing the list of selected tests when
``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.
:attr:`selected_tests` can be a list that contains multiple consequent occurrences of tests
as part of the same test module, e.g.:
```
selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',
'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']
```
If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.
If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.
Args:
test (str): Name of test to lookup
selected_tests (list): List of tests
find_last_index (bool, optional): should we lookup the index of first or last
occurrence (first is default)
Returns:
index of the first or last occurrence of the given test
"""
idx = 0
found_idx = -1
for t in selected_tests:
if t.startswith(test):
found_idx = idx
if not find_last_index:
break
idx += 1
return found_idx
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
for exclude_test in exclude_list:
tests_copy = selected_tests[:]
for test in tests_copy:
if test.startswith(exclude_test):
if exclude_message is not None:
print_to_stderr('Excluding {} {}'.format(test, exclude_message))
selected_tests.remove(test)
return selected_tests
def get_selected_tests(options):
if options.run_specified_test_cases:
if options.use_specified_test_cases_by == 'include':
options.include = list(SPECIFIED_TEST_CASES_DICT.keys())
elif options.use_specified_test_cases_by == 'bring-to-front':
options.bring_to_front = list(SPECIFIED_TEST_CASES_DICT.keys())
selected_tests = options.include
if options.bring_to_front:
to_front = set(options.bring_to_front)
selected_tests = options.bring_to_front + list(filter(lambda name: name not in to_front,
selected_tests))
if options.first:
first_index = find_test_index(options.first, selected_tests)
selected_tests = selected_tests[first_index:]
if options.last:
last_index = find_test_index(options.last, selected_tests, find_last_index=True)
selected_tests = selected_tests[:last_index + 1]
if options.exclude_jit_executor:
options.exclude.extend(JIT_EXECUTOR_TESTS)
selected_tests = exclude_tests(options.exclude, selected_tests)
if sys.platform == 'win32' and not options.ignore_win_blocklist:
target_arch = os.environ.get('VSCMD_ARG_TGT_ARCH')
if target_arch != 'x64':
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_no_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_jit')
WINDOWS_BLOCKLIST.append('jit')
WINDOWS_BLOCKLIST.append('jit_fuser')
selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, 'on Windows')
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, 'on ROCm')
if options.shard:
assert len(options.shard) == 2, "Unexpected shard format"
assert min(options.shard) > 0, "Shards must be positive numbers"
which_shard, num_shards = options.shard
assert which_shard <= num_shards, "Selected shard must be less than or equal to total number of shards"
        assert num_shards <= len(selected_tests), f"Number of shards must be less than or equal to {len(selected_tests)}"
selected_tests = get_shard(which_shard, num_shards, selected_tests)
return selected_tests
def test_impact_of_file(filename):
"""Determine what class of impact this file has on test runs.
Possible values:
TORCH - torch python code
CAFFE2 - caffe2 python code
TEST - torch test code
UNKNOWN - may affect all tests
NONE - known to have no effect on test outcome
CI - CI configuration files
"""
parts = filename.split(os.sep)
if parts[0] in ['.jenkins', '.circleci']:
return 'CI'
if parts[0] in ['docs', 'scripts', 'CODEOWNERS', 'README.md']:
return 'NONE'
elif parts[0] == 'torch':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TORCH'
elif parts[0] == 'caffe2':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'CAFFE2'
elif parts[0] == 'test':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TEST'
return 'UNKNOWN'
def log_test_reason(file_type, filename, test, options):
if options.verbose:
print_to_stderr(
'Determination found {} file {} -- running {}'.format(
file_type,
filename,
test,
)
)
def get_dep_modules(test):
# Cache results in case of repetition
if test in _DEP_MODULES_CACHE:
return _DEP_MODULES_CACHE[test]
repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_location = os.path.join(repo_root, 'test', test + '.py')
finder = modulefinder.ModuleFinder(
# Ideally exclude all third party modules, to speed up calculation.
excludes=[
'scipy',
'numpy',
'numba',
'multiprocessing',
'sklearn',
'setuptools',
'hypothesis',
'llvmlite',
'joblib',
'email',
'importlib',
'unittest',
'urllib',
'json',
'collections',
# Modules below are excluded because they are hitting https://bugs.python.org/issue40350
# Trigger AttributeError: 'NoneType' object has no attribute 'is_package'
'mpl_toolkits',
'google',
'onnx',
# Triggers RecursionError
'mypy'
],
)
# HACK: some platforms default to ascii, so we can't just run_script :(
with open(test_location, 'r', encoding='utf-8') as fp:
finder.load_module('__main__', fp, test_location, ('', 'r', 1))
dep_modules = set(finder.modules.keys())
_DEP_MODULES_CACHE[test] = dep_modules
return dep_modules
def determine_target(target_det_list, test, touched_files, options):
test = parse_test_module(test)
# Some tests are faster to execute than to determine.
if test not in target_det_list:
if options.verbose:
print_to_stderr(f'Running {test} without determination')
return True
# HACK: "no_ninja" is not a real module
if test.endswith('_no_ninja'):
test = test[:(-1 * len('_no_ninja'))]
if test.endswith('_ninja'):
test = test[:(-1 * len('_ninja'))]
dep_modules = get_dep_modules(test)
for touched_file in touched_files:
file_type = test_impact_of_file(touched_file)
if file_type == 'NONE':
continue
elif file_type == 'CI':
# Force all tests to run if any change is made to the CI
# configurations.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type == 'UNKNOWN':
# Assume uncategorized source files can affect every test.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type in ['TORCH', 'CAFFE2', 'TEST']:
parts = os.path.splitext(touched_file)[0].split(os.sep)
touched_module = ".".join(parts)
# test/ path does not have a "test." namespace
if touched_module.startswith('test.'):
touched_module = touched_module.split('test.')[1]
if (
touched_module in dep_modules
or touched_module == test.replace('/', '.')
):
log_test_reason(file_type, touched_file, test, options)
return True
# If nothing has determined the test has run, don't run the test.
if options.verbose:
print_to_stderr(f'Determination is skipping {test}')
return False
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
test_module = parse_test_module(test)
# Printing the date here can help diagnose which tests are slow
print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
handler = CUSTOM_HANDLERS.get(test_module, run_test)
return_code = handler(test_module, test_directory, options)
assert isinstance(return_code, int) and not isinstance(
return_code, bool), 'Return code should be an integer'
if return_code == 0:
return None
message = f'{test} failed!'
if return_code < 0:
# subprocess.Popen returns the child process' exit signal as
# return code -N, where N is the signal number.
signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
message += f' Received signal: {signal_name}'
return message
def export_S3_test_times(test_times_filename: str, test_times: Dict[str, float]) -> None:
if os.path.exists(test_times_filename):
print(f'Overwriting existent file: {test_times_filename}')
with open(test_times_filename, 'w+') as file:
job_times_json = get_job_times_json(test_times)
json.dump(job_times_json, file, indent=' ', separators=(',', ': '))
file.write('\n')
def load_specified_test_cases(filename: str) -> None:
if not os.path.exists(filename):
print(f'Could not find specified tests file: {filename}. Proceeding with default behavior.')
return
# The below encoding is utf-8-sig because utf-8 doesn't properly handle the byte-order-mark character
with open(filename, mode='r', encoding="utf-8-sig") as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
global SPECIFIED_TEST_CASES_DICT
for row in csv_reader:
line_count += 1
if line_count == 1:
if 'test_filename' not in row or 'test_case_name' not in row:
print('Data is missing necessary columns for test specification. Proceeding with default behavior.')
return
test_filename = row['test_filename']
test_case_name = row['test_case_name']
if test_filename not in TESTS:
print(f'Specified test_filename {test_filename} not found in TESTS. Skipping.')
continue
if test_filename not in SPECIFIED_TEST_CASES_DICT:
SPECIFIED_TEST_CASES_DICT[test_filename] = []
SPECIFIED_TEST_CASES_DICT[test_filename].append(test_case_name)
print(f'Processed {line_count} test cases.')
def query_changed_test_files() -> List[str]:
cmd = ["git", "diff", "--name-only", "origin/master", "HEAD"]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
lines = proc.stdout.decode().strip().split("\n")
lines = [line.strip() for line in lines]
return lines
def query_failure_test_module(reports: List[Tuple["Report", str]]) -> List[str]:
test_modules = []
if len(reports) == 0 or len(reports[0]) == 0:
return test_modules
report = reports[0][0]
assert report.get('format_version') == 2, "S3 format currently handled is version 2 only"
files: Dict[str, Any] = report['files']
for fname, file in files.items():
contains_failure = any(
any(case['status'] == 'errored' or case['status'] == 'failed'
for _, case in suite['cases'].items())
for _, suite in file['suites'].items())
if contains_failure:
test_modules.append(fname)
return test_modules
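# Reorders the selected tests so that likely-to-fail ones (modules that failed in previous
# reports for this PR, or whose test files were touched) run first.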
def reorder_tests(tests: List[str]) -> List[str]:
prioritized_tests = []
# Try using historic stats from PR.
if ENABLE_PR_HISTORY_REORDERING and HAVE_BOTO3:
pr_number = os.environ.get("CIRCLE_PR_NUMBER", "")
if len(pr_number):
ci_job_prefix = get_stripped_CI_job()
s3_reports: List[Tuple["Report", str]] = get_previous_reports_for_pr(
pr_number, ci_job_prefix)
prioritized_tests = query_failure_test_module(s3_reports)
print("Prioritized test from previous CI info.")
# Using file changes priority if no stats found from previous PR.
if len(prioritized_tests) == 0:
try:
changed_files = query_changed_test_files()
except Exception:
# If unable to get changed files from git, quit without doing any sorting
return tests
prefix = f"test{os.path.sep}"
prioritized_tests = [f for f in changed_files if f.startswith(prefix) and f.endswith(".py")]
prioritized_tests = [f[len(prefix):] for f in prioritized_tests]
prioritized_tests = [f[:-len(".py")] for f in prioritized_tests]
print("Prioritized test from test file changes.")
bring_to_front = []
the_rest = []
for test in tests:
if test in prioritized_tests:
bring_to_front.append(test)
else:
the_rest.append(test)
if len(tests) == len(bring_to_front) + len(the_rest):
print(f"reordering tests for PR:\n"
f"prioritized: {bring_to_front}\nthe rest: {the_rest}\n")
return bring_to_front + the_rest
else:
print(f"Something went wrong in CI reordering, expecting total of {len(tests)}:\n"
f"but found prioritized: {len(bring_to_front)}\nthe rest: {len(the_rest)}\n")
return tests
def main():
options = parse_args()
test_times_filename = options.export_past_test_times
if test_times_filename:
print(f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.')
export_S3_test_times(test_times_filename, pull_job_times_from_S3())
return
specified_test_cases_filename = options.run_specified_test_cases
if specified_test_cases_filename:
print(f'Loading specified test cases to run from {specified_test_cases_filename}.')
load_specified_test_cases(specified_test_cases_filename)
test_directory = os.path.dirname(os.path.abspath(__file__))
selected_tests = get_selected_tests(options)
if options.verbose:
print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))
if options.coverage and not PYTORCH_COLLECT_COVERAGE:
shell(['coverage', 'erase'])
if options.jit:
selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)
if options.determine_from is not None and os.path.exists(options.determine_from):
slow_tests = get_slow_tests_based_on_S3()
print('Added the following tests to target_det tests as calculated based on S3:')
print(slow_tests)
with open(options.determine_from, 'r') as fh:
touched_files = [
os.path.normpath(name.strip()) for name in fh.read().split('\n')
if len(name.strip()) > 0
]
# HACK: Ensure the 'test' paths can be traversed by Modulefinder
sys.path.append('test')
selected_tests = [
test for test in selected_tests
if determine_target(TARGET_DET_LIST + slow_tests, test, touched_files, options)
]
sys.path.remove('test')
if IS_IN_CI:
selected_tests = reorder_tests(selected_tests)
has_failed = False
failure_messages = []
try:
for test in selected_tests:
options_clone = copy.deepcopy(options)
if test in USE_PYTEST_LIST:
options_clone.pytest = True
err_message = run_test_module(test, test_directory, options_clone)
if err_message is None:
continue
has_failed = True
failure_messages.append(err_message)
if not options_clone.continue_through_error:
raise RuntimeError(err_message)
print_to_stderr(err_message)
finally:
if options.coverage:
from coverage import Coverage
test_dir = os.path.dirname(os.path.abspath(__file__))
with set_cwd(test_dir):
cov = Coverage()
if PYTORCH_COLLECT_COVERAGE:
cov.load()
cov.combine(strict=False)
cov.save()
if not PYTORCH_COLLECT_COVERAGE:
cov.html_report()
if options.continue_through_error and has_failed:
for err in failure_messages:
print_to_stderr(err)
sys.exit(1)
if __name__ == '__main__':
main()
| 38.848207 | 121 | 0.666267 |
4a16f4a88375544c777b75f8671ff5804fc593e9 | 1,085 | py | Python |
structural-patterns/composite.py | someshchaturvedi/pythonic-design-patterns | 74ef0b1ad233bdf9f75f86afa1b8874228d88429 | ["MIT"] | 1 | 2018-08-02T12:16:20.000Z | 2018-08-02T12:16:20.000Z |
structural-patterns/composite.py | someshchaturvedi/pythonic-design-patterns | 74ef0b1ad233bdf9f75f86afa1b8874228d88429 | ["MIT"] | null | null | null |
structural-patterns/composite.py | someshchaturvedi/pythonic-design-patterns | 74ef0b1ad233bdf9f75f86afa1b8874228d88429 | ["MIT"] | null | null | null |
from abc import ABC, abstractmethod
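# Composite pattern: Equipment is the abstract component, Composite is the container
# that sums the prices of its child equipments, and Leaf is a concrete part with a fixed price.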
class Equipment(ABC):
@abstractmethod
def get_price(self):
pass
class Composite(Equipment):
def __init__(self):
self.equipments = []
def get_price(self):
price = 0
for e in self.equipments:
price = price + e.get_price()
return price
def add(self, equipment):
self.equipments.append(equipment)
def remove(self, equipment):
self.equipments.remove(equipment)
class Leaf(Equipment):
def __init__(self, price):
self.price = price
def get_price(self):
return self.price
if __name__ == '__main__':
sim = Leaf(20)
battery = Leaf(200)
back_case = Composite()
back_case.add(sim)
back_case.add(battery)
screen = Leaf(500)
microphone = Leaf(350)
front_case = Composite()
front_case.add(screen)
front_case.add(microphone)
mobile_cover = Leaf(100)
mobile = Composite()
mobile.add(front_case)
mobile.add(back_case)
mobile.add(mobile_cover)
print(mobile.get_price())
| 18.706897 | 41 | 0.630415 |
4a16f4c27117e60d007940cab6e1c60122265dee | 27,430 | py | Python |
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_12_01_preview/operations/_private_endpoint_connections_operations.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | ["MIT"] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z |
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_12_01_preview/operations/_private_endpoint_connections_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | ["MIT"] | null | null | null |
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_12_01_preview/operations/_private_endpoint_connections_operations.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | ["MIT"] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
registry_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections/{privateEndpointConnectionName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
registry_name: str,
private_endpoint_connection_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections/{privateEndpointConnectionName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
registry_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections/{privateEndpointConnectionName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_query_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class PrivateEndpointConnectionsOperations(object):
"""PrivateEndpointConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_12_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
registry_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Get the specified private endpoint connection associated with the container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2019_12_01_preview.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections/{privateEndpointConnectionName}"} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
registry_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(private_endpoint_connection, 'PrivateEndpointConnection')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections/{privateEndpointConnectionName}"} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
registry_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> LROPoller["_models.PrivateEndpointConnection"]:
"""Update the state of specified private endpoint connection associated with the container
registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The parameters for creating a private endpoint connection.
:type private_endpoint_connection:
~azure.mgmt.containerregistry.v2019_12_01_preview.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2019_12_01_preview.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
private_endpoint_connection_name=private_endpoint_connection_name,
private_endpoint_connection=private_endpoint_connection,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections/{privateEndpointConnectionName}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
registry_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections/{privateEndpointConnectionName}"} # type: ignore
@distributed_trace
def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
registry_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes the specified private endpoint connection associated with the container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections/{privateEndpointConnectionName}"} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> Iterable["_models.PrivateEndpointConnectionListResult"]:
"""List all private endpoint connections in a container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionListResult or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2019_12_01_preview.models.PrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-12-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/privateEndpointConnections"} # type: ignore
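# --- Editor's note: hedged usage sketch (added, not part of the generated module) ---
# The operations defined above are normally reached through the management client's
# operations-group attribute rather than instantiated directly. The attribute name
# `private_endpoint_connections` and the placeholders SUBSCRIPTION_ID, RG and
# REGISTRY below are assumptions for illustration only.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.containerregistry import ContainerRegistryManagementClient
#
#   client = ContainerRegistryManagementClient(DefaultAzureCredential(), SUBSCRIPTION_ID)
#   for conn in client.private_endpoint_connections.list(RG, REGISTRY):
#       print(conn.name)
#   poller = client.private_endpoint_connections.begin_delete(RG, REGISTRY, "my-connection")
#   poller.result()  # block until the long-running delete operation completes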
| 45.792988
| 263
| 0.685053
|
4a16f4f42d9f6adfdab0caaf25eec8ee1ba92c38
| 3,178
|
py
|
Python
|
SciDataTool/Methods/DataND/_rebuild_symmetries.py
|
enjoyneer87/SciDataTool
|
37ddc4071f1edb1270ee03e43595c3f943fb9bd8
|
[
"Apache-2.0"
] | null | null | null |
SciDataTool/Methods/DataND/_rebuild_symmetries.py
|
enjoyneer87/SciDataTool
|
37ddc4071f1edb1270ee03e43595c3f943fb9bd8
|
[
"Apache-2.0"
] | null | null | null |
SciDataTool/Methods/DataND/_rebuild_symmetries.py
|
enjoyneer87/SciDataTool
|
37ddc4071f1edb1270ee03e43595c3f943fb9bd8
|
[
"Apache-2.0"
] | null | null | null |
from SciDataTool.Functions.symmetries import (
rebuild_symmetries as rebuild_symmetries_fct,
)
from numpy import take
def _rebuild_symmetries(
self,
values,
axes_list,
):
"""Reconstructs the field of a Data object taking symmetries into account
Parameters
----------
self: Data
a Data object
values: ndarray
ndarray of a field
axes_list: list
a list of RequestedAxis objects
Returns
-------
ndarray of the reconstructed field
"""
for axis in axes_list:
if (
axis.transform != "fft"
and axis.is_pattern
and (
axis.extension
not in [
"max",
"min",
"sum",
"rss",
"mean",
"rms",
"integrate",
"integrate_local",
"derivate",
"smallestperiod",
]
and axis.indices is None
)
):
values = take(values, axis.rebuild_indices, axis.index)
elif axis.transform != "fft" and axis.extension in [
"whole",
"interval",
"oneperiod",
"antiperiod",
"smallestperiod",
]:
if axis.extension == "smallestperiod":
is_smallestperiod = True
is_oneperiod = False
is_antiperiod = False
elif axis.extension == "antiperiod":
is_smallestperiod = False
is_oneperiod = False
is_antiperiod = True
elif axis.extension == "oneperiod":
is_smallestperiod = False
is_oneperiod = True
is_antiperiod = False
# Ignore symmetries if fft axis
elif axis.name in ["freqs", "wavenumber"]:
is_smallestperiod = True
is_oneperiod = False
is_antiperiod = False
# Ignore symmetries if non uniform ifft was used
elif (
axis.transform == "ifft"
# and len(axis.values) != len(axis.corr_values)
and len(axis.values) == values.shape[axis.index]
):
is_smallestperiod = True
is_oneperiod = False
is_antiperiod = False
else:
is_smallestperiod = False
is_oneperiod = False
is_antiperiod = False
# Rebuild symmetries
axis_symmetries = self.axes[axis.index].symmetries
if is_oneperiod:
if "antiperiod" in axis_symmetries:
nper = axis_symmetries["antiperiod"]
axis_symmetries["antiperiod"] = 2
values = rebuild_symmetries_fct(values, axis.index, axis_symmetries)
axis_symmetries["antiperiod"] = nper
elif not is_smallestperiod and not is_antiperiod:
values = rebuild_symmetries_fct(values, axis.index, axis_symmetries)
return values
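# --- Editor's note: hedged illustration (added, not part of the original module) ---
# The pattern-rebuilding branch above reduces to numpy.take along one axis:
# repeating indices duplicates the stored period so the full field is recovered.
# The values below are made up purely for illustration.
#
#   >>> import numpy as np
#   >>> period = np.array([10.0, 20.0, 30.0])    # one stored period of the field
#   >>> rebuild_indices = [0, 1, 2, 0, 1, 2]     # two repetitions of that period
#   >>> np.take(period, rebuild_indices, 0)
#   array([10., 20., 30., 10., 20., 30.])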
| 32.428571
| 88
| 0.499371
|
4a16f599e827b44739b55ec04bdd8b8ee16266c8
| 508
|
py
|
Python
|
src/disentangle/array.py
|
yukunchen113/disentangle
|
91ddc19c52d9b910c40ff3bf5b4ed39002de95e6
|
[
"MIT"
] | null | null | null |
src/disentangle/array.py
|
yukunchen113/disentangle
|
91ddc19c52d9b910c40ff3bf5b4ed39002de95e6
|
[
"MIT"
] | 1
|
2021-01-18T04:41:23.000Z
|
2021-01-18T04:41:23.000Z
|
src/disentangle/array.py
|
yukunchen113/disentangle
|
91ddc19c52d9b910c40ff3bf5b4ed39002de95e6
|
[
"MIT"
] | null | null | null |
"""
This module contains general tools to process and manipulate arrays.
"""
import numpy as np
def shuffle(*args, **kwargs):
"""
Takes in arrays of the same length in the 0th axis and shuffles them the same way
Args:
*args: numpy arrays.
**kwargs: numpy arrays.
Returns:
arrays in the same order as been put in.
"""
args = list(args) + list(kwargs.values())
idx = np.arange(args[0].shape[0])
np.random.shuffle(idx)
new_data = []
for i in args:
new_data.append(i[idx])
return new_data
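if __name__ == "__main__":
    # Editor's note: hedged usage sketch (added, not part of the original module).
    # Shuffle two aligned arrays with the same permutation and check that the
    # pairing between them is preserved.
    x = np.arange(5)
    y = np.arange(5) * 10
    x_s, y_s = shuffle(x, y)
    assert (y_s == x_s * 10).all()
    print(x_s, y_s)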
| 20.32
| 82
| 0.688976
|
4a16f6216caa109ca1969fbae50875b7bbea8183
| 397
|
py
|
Python
|
Essentials/spotify/models.py
|
KittLao/Essentials
|
47eb04a52e5b3e41a1d0c91951aed84f5824b44f
|
[
"MIT"
] | null | null | null |
Essentials/spotify/models.py
|
KittLao/Essentials
|
47eb04a52e5b3e41a1d0c91951aed84f5824b44f
|
[
"MIT"
] | null | null | null |
Essentials/spotify/models.py
|
KittLao/Essentials
|
47eb04a52e5b3e41a1d0c91951aed84f5824b44f
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class SpotifyToken(models.Model):
user = models.CharField(max_length=50, unique=True)
created_at = models.DateTimeField(auto_now_add=True)
refresh_token = models.CharField(max_length=150)
access_token = models.CharField(max_length=150)
expires_in = models.DateTimeField()
token_type = models.CharField(max_length=50)
| 39.7
| 56
| 0.768262
|
4a16f90958c804ed3192b018fe5b12b438ed904c
| 2,774
|
py
|
Python
|
app/core/settings.py
|
GonnaFlyMethod/django-core-template
|
92596d00bb0a4725f2a3574dad14bfdd88d2e509
|
[
"MIT"
] | null | null | null |
app/core/settings.py
|
GonnaFlyMethod/django-core-template
|
92596d00bb0a4725f2a3574dad14bfdd88d2e509
|
[
"MIT"
] | null | null | null |
app/core/settings.py
|
GonnaFlyMethod/django-core-template
|
92596d00bb0a4725f2a3574dad14bfdd88d2e509
|
[
"MIT"
] | null | null | null |
import os
import environ
import sys
from pathlib import Path
root = environ.Path(__file__) - 2
env = environ.Env()
environ.Env.read_env(env_file=os.path.join(root, '.env'))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
SITE_ROOT = root()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=False)
ALLOWED_HOSTS = []
# Custom user model
#AUTH_USER_MODEL = 'accounts.Account'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main.apps.MainConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
MAX_UPLOAD_SIZE = "2542880"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {'default': env.db('DATABASE_URL')}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
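# --- Editor's note: hedged example (added, not part of the original settings) ---
# The values above are read from a .env file resolved at the top of this module.
# The entries below are placeholders; DATABASE_URL uses the URL form that
# django-environ's env.db() parses.
#
#   SECRET_KEY=replace-me
#   DEBUG=True
#   DATABASE_URL=postgres://app_user:app_password@localhost:5432/app_db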
| 24.990991
| 91
| 0.694304
|
4a16f93b5d015a6c032822c3a2884149fb7a295b
| 477
|
py
|
Python
|
tests/test_util.py
|
grambank/pygrambank
|
147f62d0f7b0b0c209f7beeb3cec1050089e492d
|
[
"Apache-2.0"
] | 2
|
2018-11-13T07:33:06.000Z
|
2018-11-13T08:16:51.000Z
|
tests/test_util.py
|
grambank/pygrambank
|
147f62d0f7b0b0c209f7beeb3cec1050089e492d
|
[
"Apache-2.0"
] | 51
|
2020-07-31T10:23:40.000Z
|
2022-03-30T18:56:26.000Z
|
tests/test_util.py
|
glottobank/pygrambank
|
158dadf9bfde946229e2317b40b93eb219b3a5ea
|
[
"Apache-2.0"
] | 2
|
2020-08-28T12:54:41.000Z
|
2021-02-15T18:35:38.000Z
|
import pytest
from pygrambank import util
@pytest.mark.parametrize(
'fname,nrows',
[
('Other Coder_Lang [NOCODE_lang].xlsx', 2),
('Some One_Some Lang [NOCODE_xyz].csv', 0),
('The Other Coder_Language [iso].tsv', 1),
('Yet Another Coder_New Lang [abcd1234].xls', 0),
]
)
def test_write_tsv(api, tmp_path, fname, nrows):
assert nrows == util.write_tsv(
api.path('obsolete_sheets', fname), tmp_path / 't.tsv', 'abcd1234')
| 28.058824
| 75
| 0.631027
|
4a16f9442a2f6d76deac0e29c3eca790dfaa81e9
| 562
|
py
|
Python
|
OnlineJudge/oj/production_settings.py
|
FrozenWhalePP/OnlineJudge
|
aec81292046b4a0896ff164565b490dc37bd91cb
|
[
"MIT"
] | 2
|
2020-03-07T02:26:00.000Z
|
2020-06-01T15:03:17.000Z
|
OnlineJudge/oj/production_settings.py
|
FrozenWhalePP/OnlineJudge
|
aec81292046b4a0896ff164565b490dc37bd91cb
|
[
"MIT"
] | null | null | null |
OnlineJudge/oj/production_settings.py
|
FrozenWhalePP/OnlineJudge
|
aec81292046b4a0896ff164565b490dc37bd91cb
|
[
"MIT"
] | null | null | null |
from utils.shortcuts import get_env
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': get_env("POSTGRES_HOST", "oj-postgres"),
'PORT': get_env("POSTGRES_PORT", "5432"),
'NAME': get_env("POSTGRES_DB"),
'USER': get_env("POSTGRES_USER"),
'PASSWORD': get_env("POSTGRES_PASSWORD")
}
}
REDIS_CONF = {
"host": get_env("REDIS_HOST", "oj-redis"),
"port": get_env("REDIS_PORT", "6379")
}
DEBUG = False
ALLOWED_HOSTS = ['*']
DATA_DIR = "/data"
| 23.416667
| 60
| 0.580071
|
4a16f9b4df2ebaf890ca6f2f408b82c343cbc832
| 904
|
py
|
Python
|
demo/dask/sklearn_cpu_training.py
|
geraldagapov/xgboost
|
9b8765a988936acb351ef5655c9daeb8f4923b03
|
[
"Apache-2.0"
] | null | null | null |
demo/dask/sklearn_cpu_training.py
|
geraldagapov/xgboost
|
9b8765a988936acb351ef5655c9daeb8f4923b03
|
[
"Apache-2.0"
] | null | null | null |
demo/dask/sklearn_cpu_training.py
|
geraldagapov/xgboost
|
9b8765a988936acb351ef5655c9daeb8f4923b03
|
[
"Apache-2.0"
] | 1
|
2020-03-11T17:00:05.000Z
|
2020-03-11T17:00:05.000Z
|
'''Dask interface demo:
Use scikit-learn regressor interface with CPU histogram tree method.'''
from dask.distributed import Client
from dask.distributed import LocalCluster
from dask import array as da
import xgboost
if __name__ == '__main__':
cluster = LocalCluster(n_workers=2, silence_logs=False) # or use any other clusters
client = Client(cluster)
n = 100
m = 10000
partition_size = 100
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = xgboost.dask.DaskXGBRegressor(verbosity=2, n_estimators=2)
regressor.set_params(tree_method='hist')
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print('Evaluation history:', history)
assert isinstance(prediction, da.Array)
| 29.16129
| 88
| 0.71792
|
4a16fb0b002dea08b5dc00abd90582e7939207ea
| 925
|
py
|
Python
|
setup.py
|
zubbyik/Property
|
47a972d32899d36cafe82f9b2b414b3199c4a6fc
|
[
"MIT"
] | null | null | null |
setup.py
|
zubbyik/Property
|
47a972d32899d36cafe82f9b2b414b3199c4a6fc
|
[
"MIT"
] | null | null | null |
setup.py
|
zubbyik/Property
|
47a972d32899d36cafe82f9b2b414b3199c4a6fc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in property/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('property/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = parse_requirements("requirements.txt", session="")
setup(
name='property',
version=version,
description='Property Management',
author='Opensource Solutions Philippines',
author_email='info@ossph.com',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=[str(ir.req) for ir in requirements],
dependency_links=[str(ir._link) for ir in requirements if ir._link]
)
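# --- Editor's note: hedged compatibility remark (added, not part of the original file) ---
# parse_requirements() is a pip-internal API whose return type has changed over
# time: recent pip versions expose the parsed line as a `.requirement` string
# instead of a `.req` object. A more defensive variant (an assumption, not the
# project's code) would be:
#
#   parsed = list(parse_requirements("requirements.txt", session=""))
#   install_requires = [str(getattr(ir, "req", None) or ir.requirement) for ir in parsed]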
| 30.833333
| 68
| 0.736216
|
4a16fbb67fe9f8fdec347d1a652c2c95708f0897
| 523
|
py
|
Python
|
cookbook/migrations/0017_auto_20200216_2257.py
|
mhoellmann/recipes
|
525aa4e4a4f218a47e1770498fff9fa8b0d7a097
|
[
"MIT"
] | null | null | null |
cookbook/migrations/0017_auto_20200216_2257.py
|
mhoellmann/recipes
|
525aa4e4a4f218a47e1770498fff9fa8b0d7a097
|
[
"MIT"
] | 1
|
2020-04-11T09:47:20.000Z
|
2020-04-11T09:47:20.000Z
|
cookbook/migrations/0017_auto_20200216_2257.py
|
mcejp/recipes
|
913d858473a1d44b2ced02e09fddfc4d320848b7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-02-16 21:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cookbook', '0016_auto_20200213_2335'),
]
operations = [
migrations.AlterField(
model_name='userpreference',
name='theme',
field=models.CharField(choices=[('BOOTSTRAP', 'Bootstrap'), ('DARKLY', 'Darkly'), ('FLATLY', 'Flatly'), ('SUPERHERO', 'Superhero')], default='FLATLY', max_length=128),
),
]
| 27.526316
| 179
| 0.611855
|
4a16fc678e7fe028e86b298367875348ea7cd66f
| 14,507
|
py
|
Python
|
mltoolkit/mlmo/utils/tools/beam_search.py
|
stungkit/Copycat-abstractive-opinion-summarizer
|
04fe5393a7bb6883516766b762f6a0c530e95375
|
[
"MIT"
] | 51
|
2020-09-25T07:05:01.000Z
|
2022-03-17T12:07:40.000Z
|
mltoolkit/mlmo/utils/tools/beam_search.py
|
stungkit/Copycat-abstractive-opinion-summarizer
|
04fe5393a7bb6883516766b762f6a0c530e95375
|
[
"MIT"
] | 4
|
2020-10-19T10:00:22.000Z
|
2022-03-14T17:02:47.000Z
|
mltoolkit/mlmo/utils/tools/beam_search.py
|
stungkit/Copycat-abstractive-opinion-summarizer
|
04fe5393a7bb6883516766b762f6a0c530e95375
|
[
"MIT"
] | 22
|
2020-09-22T01:06:47.000Z
|
2022-01-26T14:20:09.000Z
|
import torch as T
import numpy as np
from mltoolkit.mlmo.utils.helpers.search import traverse_table, find_mirror_next
EXCL_EPS = -1e20
class BeamSearch(object):
"""Wrapper over ONMT beam search that works over batches."""
def __init__(self, batch_size, beam_size, start_id, end_id, device='cpu',
min_lens=None, **kwargs):
"""For detailed explanation of parameters see `_BeamSearch`.
:param min_lens: minimum lengths of sequences. One per batch unit.
"""
if min_lens is not None and \
not isinstance(min_lens, (list, np.ndarray)):
raise ValueError("Please provide a list/array of minimum lengths!")
self.batch_size = batch_size
self.beam_size = beam_size
self.device = device
if min_lens is None:
min_lens = [0] * batch_size
self._beams = [_BeamSearch(beam_size=beam_size, start_id=start_id,
end_id=end_id, device=device,
min_length=min_lens[i], **kwargs)
for i in range(batch_size)]
def advance(self, word_log_probs):
"""
:param word_log_probs: [batch_size, beam_size, vocab_size]
log probabilities over next words.
"""
assert word_log_probs.size(0) == self.batch_size
for i in range(self.batch_size):
beam = self._beams[i]
if not beam.done():
beam.advance(word_log_probs[i])
def get_current_state(self):
"""
:return: [batch_size, beam_size]
current selected candidate (word) ids.
"""
word_ids = T.zeros((self.batch_size, self.beam_size), dtype=T.int64,
device=self.device)
for i in range(self.batch_size):
word_ids[i] = self._beams[i].get_current_state()
return word_ids
def get_current_origin(self):
"""
Returns current back-pointers to the previous time-step. Can be used
to shuffle hidden states.
:return: [batch_size, beam_size]
"""
coll = T.zeros((self.batch_size, self.beam_size), dtype=T.int64,
device=self.device)
for i in range(self.batch_size):
coll[i] = self._beams[i].get_current_origin()
return coll
def get_finished_best(self, minimum=None, **kwargs):
"""
        Returns an array of the best completed hypotheses (word id lists).
Each starts with 'start_id', and ends with 'end_id'.
Optionally, traverses additional parameters (**kwargs) that are passed
using back-pointers. E.g., some artifacts produced by the decoder.
:param kwargs: dict of arrays/tensors [steps, batch_size * beam_size, x]
"""
best_seqs = np.empty(self.batch_size, dtype='object')
new_kwargs = {k: np.empty(self.batch_size, dtype='object')
for k in kwargs}
for i in range(self.batch_size):
beam = self._beams[i]
_, ts_and_ks = beam.sort_finished(minimum=minimum)
# traversing both the internal word id table and additional params
if len(ts_and_ks):
t, k = ts_and_ks[0]
best_seqs[i] = traverse_table(time_step=t, beam_indx=k,
back_pointers=beam.back_pointers,
elems_table=beam.selected_ids)
for pname, pval in kwargs.items():
start_indx = self.beam_size * i
beam_params = pval[:, start_indx:
(start_indx + self.beam_size)]
# it's t-1 because when the <END> token is generated
# the decoder stops.
best_params = traverse_table(time_step=t - 1, beam_indx=k,
back_pointers=beam.back_pointers,
elems_table=beam_params)
new_kwargs[pname][i] = best_params
else:
best_seqs[i] = []
for pname in kwargs:
new_kwargs[pname][i] = []
if kwargs:
return best_seqs, new_kwargs
return best_seqs
def done(self):
all_beams_done = all([beam.done() for beam in self._beams])
return all_beams_done
class _BeamSearch(object):
def __init__(self, beam_size, start_id, end_id, n_best=1, device='cpu',
min_length=0, len_norm=False, excl_ids=None,
block_ngram_repeat=None, ngram_mirror_window=None,
mirror_conj_ids=None, block_consecutive=False):
"""
Args:
beam_size (int): self-explanatory.
start_id (int): self-explanatory.
end_id (int): self-explanatory.
n_best (int): how many results to return when search is finished.
device (str): self-explanatory.
            min_length (int): minimum sequence length that should be considered to
be completed.
len_norm (bool): whether to normalize beam scores by sequence length.
excl_ids (list): ids of words to be excluded from word candidates.
block_ngram_repeat (int): n-grams to prevent from repeating. E.g.,
3-gram blocking will assure that are 3-grams are unique.
ngram_mirror_window (int): the maximum window span to look for when
                detecting mirror patterns. E.g., `ngram_mirror_window`=2 will
prevent patterns like 'x y AND x y' and 'x OR x'.
mirror_conj_ids (list): ids of conjunctions that are centres of
mirror patterns to be blocked.
block_consecutive(bool): whether to block patterns like 'x x'.
"""
self.beam_size = beam_size
self.hyp_scores = T.zeros(beam_size, dtype=T.float32, device=device)
# The back-pointers at each time-step
self.back_pointers = []
# The outputs at each time-step
self.selected_ids = [T.full(size=(beam_size,), fill_value=start_id,
device=device, dtype=T.int64)]
self.alive_prefixes = T.full(size=(beam_size, 1), fill_value=start_id,
dtype=T.int64, device=device)
# Has EOS topped the beam yet
self._end_id = end_id
self.end_id_top = False
# Time and k pair for finished
self.finished = []
self.n_best = n_best
# Minimum prediction length
self.min_length = min_length
self.len_norm = len_norm
self.excl_ids = excl_ids
self.block_ngram_repeat = block_ngram_repeat
self.forbidden_tokens = [dict() for _ in range(beam_size)]
# mirror pattern blocking
self.ngram_mirror_window = ngram_mirror_window
self.mirror_conj_ids = mirror_conj_ids
self.block_consecutive = block_consecutive
def get_current_state(self):
"Get the outputs for the current timestep."
return self.selected_ids[-1]
def get_current_origin(self):
"Get the backpointers for the current timestep."
return self.back_pointers[-1]
def advance(self, word_log_probs):
"""
        Advances the beam by one step given log-probabilities over the next
        word for every hypothesis currently on the beam.
        :param word_log_probs: [K, vocab_size] log-probabilities per beam entry.
"""
num_words = word_log_probs.size(1)
# force the output to be longer than self.min_length
cur_len = len(self.selected_ids)
if cur_len < self.min_length:
for k in range(len(word_log_probs)):
word_log_probs[k][self._end_id] = EXCL_EPS
# Sum the previous scores
if len(self.back_pointers):
# excluding words from being considered by setting their scores to
# a very small value such that they would not be selected
if self.excl_ids:
word_log_probs[:, self.excl_ids] = EXCL_EPS
# here we're summing log-probs of histories and next words
beam_scores = word_log_probs + self.hyp_scores.unsqueeze(1)
# Don't let EOS have children.
for i in range(self.selected_ids[-1].size(0)):
if self.selected_ids[-1][i] == self._end_id:
beam_scores[i] = EXCL_EPS
# Avoid any direction that would repeat unwanted ngrams
self.block_ngrams(beam_scores)
else:
beam_scores = word_log_probs[0]
flat_beam_scores = beam_scores.view(-1)
best_scores, best_scores_id = flat_beam_scores.topk(self.beam_size,
0, True, True)
self.hyp_scores = best_scores
# best_scores_id is flattened beam x word array, so calculate which
# word and beam score came from
prev_k = best_scores_id // num_words
sel_ids = best_scores_id - prev_k * num_words # resolving true word ids
self.back_pointers.append(prev_k)
self.selected_ids.append(sel_ids)
# updating current prefixes
self.alive_prefixes = T.cat(
[(self.alive_prefixes.index_select(0, prev_k)),
sel_ids.unsqueeze(-1)], -1)
# updating n-gram blocking table
self.update_blocker_table()
for i in range(self.selected_ids[-1].size(0)):
if self.selected_ids[-1][i] == self._end_id:
s = self.hyp_scores[i]
self.finished.append((s, len(self.selected_ids) - 1, i))
# End condition is when top-of-beam is EOS and no global score
# only applicable when lengths are not normalized
if not self.len_norm and self.selected_ids[-1][0] == self._end_id:
self.end_id_top = True
def done(self):
if self.len_norm:
return len(self.finished) >= self.n_best
else:
return self.end_id_top and len(self.finished) >= self.n_best
def sort_finished(self, minimum=None):
"""
:param minimum: the minimum number of hypotheses to add to the beam,
even if they are incomplete.
"""
if minimum is not None:
i = 0
# Add from beam until we have minimum outputs.
while len(self.finished) < minimum:
s = self.hyp_scores[i]
self.finished.append((s, len(self.selected_ids) - 1, i))
i += 1
if self.len_norm:
self.finished = [(sc / float(ln), ln, i) for sc, ln, i
in self.finished]
self.finished.sort(key=lambda a: -a[0])
scores = [sc for sc, _, _ in self.finished]
time_step_and_k = [(t, k) for _, t, k in self.finished]
return scores, time_step_and_k
def __len__(self):
return len(self.selected_ids)
def block_ngrams(self, log_probs):
"""Blocks n-grams repetition and mirror patterns from occurring.
Prevents the beam from going in any direction that would repeat any
        ngram of size <block_ngram_repeat> more than once.
The way we do it: we maintain a list of all ngrams of size
<block_ngram_repeat> that is updated each time the beam advances, and
        manually exclude any token that would lead to a repeated ngram.
Args:
log_probs: [beam_size, curr_time_steps]
"""
        # we don't block anything if the user doesn't want it
if self.block_ngram_repeat is None and \
(self.ngram_mirror_window is None or not len(
self.mirror_conj_ids)):
return
for path_idx in range(self.alive_prefixes.shape[0]):
prefix = self.alive_prefixes[path_idx]
# blocking n-gram repetitions
if self.block_ngram_repeat is not None \
and len(self) >= self.block_ngram_repeat:
n = self.block_ngram_repeat - 1
# we check paths one by one
current_ngram = tuple(prefix[-n:].tolist())
forbidden_tokens = self.forbidden_tokens[path_idx].get(
current_ngram, None)
if forbidden_tokens is not None:
log_probs[path_idx, list(forbidden_tokens)] = EXCL_EPS
# blocking mirror patterns
if self.ngram_mirror_window and len(self.mirror_conj_ids):
forb_mirr_tok_id = find_mirror_next(prefix.tolist(),
max_window_size=self.ngram_mirror_window,
mirror_centre=self.mirror_conj_ids)
if forb_mirr_tok_id:
log_probs[path_idx, forb_mirr_tok_id] = EXCL_EPS
if self.block_consecutive:
log_probs[path_idx, prefix[-1].tolist()] = EXCL_EPS
def update_blocker_table(self):
"""Updates the blocker table based on the current alive prefixes.
Completes and reorders the list of ``forbidden_tokens`` based on n-gram
repetition and mirror patterns.
"""
        # we don't forbid anything if the user doesn't want it
if self.block_ngram_repeat is None:
return
        # we can't forbid anything if the beam is too short
if len(self) < self.block_ngram_repeat:
return
forbidden_tokens = list()
for path_bp, seq in zip(self.get_current_origin(), self.alive_prefixes):
last_word_id = seq[-1]
# Reordering forbidden_tokens following beam selection
# We rebuild a dict to ensure we get the value and not the pointer
forbidden_tokens.append(
dict(self.forbidden_tokens[path_bp]))
            # Grabbing the newly selected tokens and associated ngram
current_ngram = tuple(seq[-self.block_ngram_repeat:].tolist())
# skip the blocking if any token in current_ngram is excluded
if self.excl_ids and (set(current_ngram) & set(self.excl_ids)):
continue
forbidden_tokens[-1].setdefault(current_ngram[:-1], set())
forbidden_tokens[-1][current_ngram[:-1]].add(last_word_id)
self.forbidden_tokens = forbidden_tokens
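# --- Editor's note: hedged usage sketch (added, not part of the original module) ---
# Typical decode loop driving the batched wrapper above. `decoder_step` is a
# hypothetical callable returning [batch_size, beam_size, vocab_size]
# log-probabilities; reshuffling of the decoder hidden state with the
# back-pointers is only indicated, not implemented.
#
#   beam = BeamSearch(batch_size=B, beam_size=K, start_id=BOS, end_id=EOS)
#   while not beam.done():
#       word_ids = beam.get_current_state()      # [B, K] last selected word ids
#       log_probs = decoder_step(word_ids)       # [B, K, vocab] next-word scores
#       beam.advance(log_probs)
#       back_ptrs = beam.get_current_origin()    # [B, K] reorder hidden state here
#   best_seqs = beam.get_finished_best(minimum=1)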
| 40.297222
| 93
| 0.585855
|
4a16fce57e525d7542a7b15a19451f6a8ffc1df0
| 304
|
py
|
Python
|
tests/data/project1/file1.py
|
Polyconseil/check-oldies
|
0d0d9632281d14f652e71ac2c0db3b0cbf9b089c
|
[
"BSD-3-Clause"
] | 4
|
2020-10-27T16:18:57.000Z
|
2020-12-01T10:58:19.000Z
|
tests/data/project1/file1.py
|
Polyconseil/check-oldies
|
0d0d9632281d14f652e71ac2c0db3b0cbf9b089c
|
[
"BSD-3-Clause"
] | 1
|
2020-11-18T14:04:10.000Z
|
2020-11-18T15:29:44.000Z
|
tests/data/project1/file1.py
|
Polyconseil/check-oldies
|
0d0d9632281d14f652e71ac2c0db3b0cbf9b089c
|
[
"BSD-3-Clause"
] | null | null | null |
# TIMEBOMB: report me
a = 1 # TIMEBOMB (jsmith): report me
# TIMEBOMB: do not report me (pragma). # no-check-fixmes
# TIMEBOMB(jsmith - 2020-04-25): report me
a = "TIMEBOMB" # do not report me (within a string)
a = "TIMEBOMB" # do not report me (within a string)
# TIMEBOMB - FEWTURE-BOOM: report me
| 33.777778
| 56
| 0.680921
|
4a16fd380706fefa2713b9616514d2845128f420
| 926
|
py
|
Python
|
oscar/lib/python2.7/site-packages/django/contrib/auth/migrations/0008_alter_user_username_max_length.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/django/contrib/auth/migrations/0008_alter_user_username_max_length.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/django/contrib/auth/migrations/0008_alter_user_username_max_length.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import validators
from django.db import migrations, models
from django.utils import six
class Migration(migrations.Migration):
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(
error_messages={'unique': 'A user with that username already exists.'},
help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',
max_length=150,
unique=True,
validators=[validators.UnicodeUsernameValidator() if six.PY3 else validators.ASCIIUsernameValidator()],
verbose_name='username',
),
),
]
| 31.931034
| 120
| 0.592873
|
4a16fd3f6e7828aebed5c4024e3d89866e5c476f
| 3,116
|
py
|
Python
|
bboard/main/migrations/0001_initial.py
|
wiky-avis/bboard
|
aedcba95acece3d3e679ac269b5983decee80873
|
[
"BSD-3-Clause"
] | null | null | null |
bboard/main/migrations/0001_initial.py
|
wiky-avis/bboard
|
aedcba95acece3d3e679ac269b5983decee80873
|
[
"BSD-3-Clause"
] | null | null | null |
bboard/main/migrations/0001_initial.py
|
wiky-avis/bboard
|
aedcba95acece3d3e679ac269b5983decee80873
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.1.4 on 2020-12-07 11:42
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='AdvUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('is_activated', models.BooleanField(db_index=True, default=True, verbose_name='Has been activated?')),
                ('send_messages', models.BooleanField(default=True, verbose_name='Send notifications about new comments?')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| 66.297872
| 329
| 0.665597
|
4a16fd7960435fcbcd8446ea852f95a669d253e2
| 905
|
py
|
Python
|
scripts/tabledef.py
|
Hela98/flaskex
|
7f23109e721a223b4f54cd07a7f6c66a31b9be18
|
[
"MIT"
] | null | null | null |
scripts/tabledef.py
|
Hela98/flaskex
|
7f23109e721a223b4f54cd07a7f6c66a31b9be18
|
[
"MIT"
] | null | null | null |
scripts/tabledef.py
|
Hela98/flaskex
|
7f23109e721a223b4f54cd07a7f6c66a31b9be18
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
# Local
#SQLALCHEMY_DATABASE_URI = 'sqlite:///accounts.db'
# Heroku
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
Base = declarative_base()
def db_connect():
"""
    Creates a database connection using the SQLALCHEMY_DATABASE_URI defined above.
    Returns a sqlalchemy engine instance.
"""
return create_engine(SQLALCHEMY_DATABASE_URI)
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
username = Column(String(30), unique=True)
password = Column(String(512))
email = Column(String(50))
def __repr__(self):
return '<User %r>' % self.username
engine = db_connect() # Connect to database
Base.metadata.create_all(engine) # Create models
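# --- Editor's note: hedged usage sketch (added, not part of the original module) ---
# A minimal way to persist a User with the engine created above; the values are
# placeholders, and the application is expected to store a password hash rather
# than plain text.
#
#   from sqlalchemy.orm import sessionmaker
#
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   session.add(User(username="demo", password="<hashed>", email="demo@example.com"))
#   session.commit()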
| 22.625
| 74
| 0.723757
|
4a16fdb082f3b00601948cb1b1a1a3b8e45feb92
| 12,841
|
py
|
Python
|
Projects/2_Classical Planning/my_planning_graph.py
|
eminnett/artificial-intelligence
|
a7044b8a058a8ff24eb811ec2069aee96191cf8c
|
[
"MIT"
] | null | null | null |
Projects/2_Classical Planning/my_planning_graph.py
|
eminnett/artificial-intelligence
|
a7044b8a058a8ff24eb811ec2069aee96191cf8c
|
[
"MIT"
] | null | null | null |
Projects/2_Classical Planning/my_planning_graph.py
|
eminnett/artificial-intelligence
|
a7044b8a058a8ff24eb811ec2069aee96191cf8c
|
[
"MIT"
] | null | null | null |
from itertools import chain, combinations
from aimacode.planning import Action
from aimacode.utils import expr
from layers import BaseActionLayer, BaseLiteralLayer, makeNoOp, make_node
class ActionLayer(BaseActionLayer):
def _inconsistent_effects(self, actionA, actionB):
""" Return True if an effect of one action negates an effect of the other
Hints:
(1) `~Literal` can be used to logically negate a literal
(2) `self.children` contains a map from actions to effects
See Also
--------
layers.ActionNode
"""
effectsA = self.children[actionA]
effectsB = self.children[actionB]
for effectA in effectsA:
for effectB in effectsB:
# if (~effectA == effectB) or (effectA == ~effectB):
if effectA == ~effectB:
return True
return False
def _interference(self, actionA, actionB):
""" Return True if the effects of either action negate the preconditions of the other
Hints:
(1) `~Literal` can be used to logically negate a literal
(2) `self.parents` contains a map from actions to preconditions
See Also
--------
layers.ActionNode
"""
effectsA = self.children[actionA]
precondsA = self.parents[actionA]
effectsB = self.children[actionB]
precondsB = self.parents[actionB]
for effectA in effectsA:
for precondB in precondsB:
# if (~effectA == precondB) or (effectA == ~precondB):
if effectA == ~precondB:
return True
for effectB in effectsB:
for precondA in precondsA:
# if (~effectB == precondA) or (effectB == ~precondA):
if effectB == ~precondA:
return True
return False
def _competing_needs(self, actionA, actionB):
""" Return True if any preconditions of the two actions are pairwise mutex in the parent layer
Hints:
(1) `self.parent_layer` contains a reference to the previous literal layer
(2) `self.parents` contains a map from actions to preconditions
See Also
--------
layers.ActionNode
layers.BaseLayer.parent_layer
"""
precondsA = self.parents[actionA]
precondsB = self.parents[actionB]
for precondA in precondsA:
for precondB in precondsB:
if self.parent_layer.is_mutex(precondA, precondB):
return True
return False
class LiteralLayer(BaseLiteralLayer):
def _inconsistent_support(self, literalA, literalB):
""" Return True if all ways to achieve both literals are pairwise mutex in the parent layer
Hints:
(1) `self.parent_layer` contains a reference to the previous action layer
(2) `self.parents` contains a map from literals to actions in the parent layer
See Also
--------
layers.BaseLayer.parent_layer
"""
actionsA = self.parents[literalA]
actionsB = self.parents[literalB]
for actionA in actionsA:
for actionB in actionsB:
# if actionA is not actionB and not self.parent_layer.is_mutex(actionA, actionB):
if not self.parent_layer.is_mutex(actionA, actionB):
return False
return True
def _negation(self, literalA, literalB):
""" Return True if two literals are negations of each other """
# return (~literalA == literalB) or (literalA == ~literalB)
return literalA == ~literalB
class PlanningGraph:
def __init__(self, problem, state, serialize=True, ignore_mutexes=False):
"""
Parameters
----------
problem : PlanningProblem
An instance of the PlanningProblem class
state : tuple(bool)
An ordered sequence of True/False values indicating the literal value
of the corresponding fluent in problem.state_map
serialize : bool
Flag indicating whether to serialize non-persistence actions. Actions
should NOT be serialized for regression search (e.g., GraphPlan), and
_should_ be serialized if the planning graph is being used to estimate
a heuristic
"""
self._serialize = serialize
self._is_leveled = False
self._ignore_mutexes = ignore_mutexes
self.goal = set(problem.goal)
# make no-op actions that persist every literal to the next layer
no_ops = [make_node(n, no_op=True) for n in chain(*(makeNoOp(s) for s in problem.state_map))]
self._actionNodes = no_ops + [make_node(a) for a in problem.actions_list]
# initialize the planning graph by finding the literals that are in the
        # first layer and finding the actions that they should be connected to
literals = [s if f else ~s for f, s in zip(state, problem.state_map)]
layer = LiteralLayer(literals, ActionLayer(), self._ignore_mutexes)
layer.update_mutexes()
self.literal_layers = [layer]
self.action_layers = []
def h_levelsum(self):
""" Calculate the level sum heuristic for the planning graph
The level sum is the sum of the level costs of all the goal literals
combined. The "level cost" to achieve any single goal literal is the
level at which the literal first appears in the planning graph. Note
that the level cost is **NOT** the minimum number of actions to
achieve a single goal literal.
For example, if Goal_1 first appears in level 0 of the graph (i.e.,
it is satisfied at the root of the planning graph) and Goal_2 first
appears in level 3, then the levelsum is 0 + 3 = 3.
Hints
-----
(1) See the pseudocode folder for help on a simple implementation
(2) You can implement this function more efficiently than the
sample pseudocode if you expand the graph one level at a time
and accumulate the level cost of each goal rather than filling
the whole graph at the start.
See Also
--------
Russell-Norvig 10.3.1 (3rd Edition)
"""
costs = []
goals_found = set()
level = 0
while not self._is_leveled:
for goal_literal in self.goal:
if goal_literal not in goals_found and goal_literal in self.literal_layers[-1]:
goals_found.add(goal_literal)
costs.append(level)
if goals_found == self.goal:
break
self._extend()
level += 1
return sum(costs)
def h_maxlevel(self):
""" Calculate the max level heuristic for the planning graph
The max level is the largest level cost of any single goal fluent.
The "level cost" to achieve any single goal literal is the level at
which the literal first appears in the planning graph. Note that
the level cost is **NOT** the minimum number of actions to achieve
a single goal literal.
For example, if Goal1 first appears in level 1 of the graph and
        Goal2 first appears in level 3, then the max level is max(1, 3) = 3.
Hints
-----
(1) See the pseudocode folder for help on a simple implementation
(2) You can implement this function more efficiently if you expand
the graph one level at a time until the last goal is met rather
than filling the whole graph at the start.
See Also
--------
Russell-Norvig 10.3.1 (3rd Edition)
Notes
-----
WARNING: you should expect long runtimes using this heuristic with A*
"""
goals_found = set()
level = 0
max_level = 0
while not self._is_leveled:
for goal_literal in self.goal:
if goal_literal not in goals_found and goal_literal in self.literal_layers[-1]:
goals_found.add(goal_literal)
max_level = level
if goals_found == self.goal:
break
self._extend()
level += 1
return max_level
def h_setlevel(self):
""" Calculate the set level heuristic for the planning graph
The set level of a planning graph is the first level where all goals
appear such that no pair of goal literals are mutex in the last
layer of the planning graph.
Hints
-----
(1) See the pseudocode folder for help on a simple implementation
(2) You can implement this function more efficiently if you expand
the graph one level at a time until you find the set level rather
than filling the whole graph at the start.
See Also
--------
Russell-Norvig 10.3.1 (3rd Edition)
Notes
-----
WARNING: you should expect long runtimes using this heuristic on complex problems
"""
level = 0
while not self._is_leveled:
if self.all_goals_present():
if not self.goals_are_mutex():
return level
level += 1
self._extend()
return level
def all_goals_present(self):
for goal_literal in self.goal:
if goal_literal not in self.literal_layers[-1]:
return False
return True
def goals_are_mutex(self):
for goal_a in self.goal:
for goal_b in self.goal:
if goal_a is goal_b:
continue
mutually_exclusive = self.literal_layers[-1].is_mutex(goal_a, goal_b)
if mutually_exclusive:
return True
return False
##############################################################################
# DO NOT MODIFY CODE BELOW THIS LINE #
##############################################################################
def fill(self, maxlevels=-1):
""" Extend the planning graph until it is leveled, or until a specified number of
levels have been added
Parameters
----------
maxlevels : int
The maximum number of levels to extend before breaking the loop. (Starting with
a negative value will never interrupt the loop.)
Notes
-----
        YOU SHOULD NOT NEED THIS FUNCTION TO COMPLETE THE PROJECT, BUT IT MAY BE USEFUL FOR TESTING
"""
while not self._is_leveled:
if maxlevels == 0: break
self._extend()
maxlevels -= 1
return self
def _extend(self):
""" Extend the planning graph by adding both a new action layer and a new literal layer
The new action layer contains all actions that could be taken given the positive AND
negative literals in the leaf nodes of the parent literal level.
The new literal layer contains all literals that could result from taking each possible
action in the NEW action layer.
"""
if self._is_leveled: return
parent_literals = self.literal_layers[-1]
parent_actions = parent_literals.parent_layer
action_layer = ActionLayer(parent_actions, parent_literals, self._serialize, self._ignore_mutexes)
literal_layer = LiteralLayer(parent_literals, action_layer, self._ignore_mutexes)
for action in self._actionNodes:
            # actions in the parent layer are skipped because they are added monotonically to planning graphs,
            # which is performed automatically in the ActionLayer and LiteralLayer constructors
if action not in parent_actions and action.preconditions <= parent_literals:
action_layer.add(action)
literal_layer |= action.effects
# add two-way edges in the graph connecting the parent layer with the new action
parent_literals.add_outbound_edges(action, action.preconditions)
action_layer.add_inbound_edges(action, action.preconditions)
                # add two-way edges in the graph connecting the new literal layer with the new action
action_layer.add_outbound_edges(action, action.effects)
literal_layer.add_inbound_edges(action, action.effects)
action_layer.update_mutexes()
literal_layer.update_mutexes()
self.action_layers.append(action_layer)
self.literal_layers.append(literal_layer)
self._is_leveled = literal_layer == action_layer.parent_layer
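# --- Hedged illustration (not part of the original module) -------------------
# A minimal, standalone sketch of the level-based heuristics documented above,
# assuming we already know the level at which each goal literal first appears.
# The example dictionary is made-up data; it is not produced by the planning
# graph code in this file, and the set-level heuristic additionally requires
# the pairwise mutex checks that only the real graph can provide.
def _sketch_level_heuristics(first_appearance_levels):
    """Given {goal: first level it appears at}, return (max-level, level-sum)."""
    levels = list(first_appearance_levels.values())
    return max(levels), sum(levels)
# Example: Goal1 first appears at level 1 and Goal2 at level 3
# -> max-level = max(1, 3) = 3, level-sum = 1 + 3 = 4
# print(_sketch_level_heuristics({"Goal1": 1, "Goal2": 3}))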
| 37.879056
| 106
| 0.605249
|
4a16fdf6e6e261f6f6e931b13a5c3c990023809f
| 4,285
|
py
|
Python
|
setup.py
|
jayvdb/pip-plus
|
b4d5724f6898d6576b4a9aa3b0711432d2aa32f4
|
[
"MIT"
] | 228
|
2018-07-08T07:12:13.000Z
|
2020-09-05T11:53:23.000Z
|
setup.py
|
jayvdb/pip-plus
|
b4d5724f6898d6576b4a9aa3b0711432d2aa32f4
|
[
"MIT"
] | 1
|
2019-12-16T02:44:08.000Z
|
2019-12-16T02:44:08.000Z
|
setup.py
|
jayvdb/pip-plus
|
b4d5724f6898d6576b4a9aa3b0711432d2aa32f4
|
[
"MIT"
] | 5
|
2018-07-21T16:46:39.000Z
|
2020-06-30T23:08:48.000Z
|
import setuptools
from setuptools import find_packages, setup
# python setup.py bdist_wheel --universal
# twine upload dist/*
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pip-plus",
version="0.0.1.dev4",
author="huohongming",
author_email="gin_huo@hotmail.com",
description="A small pip web management tool package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/hmhuo/pip-plus",
license='MIT',
keywords='sample setuptools development',
package_dir={"": "src"},
packages=find_packages(
where="src",
),
python_requires=">=3.4",
platforms=["windows"],
entry_points={'console_scripts': [
'pp = pip_plus.pip_plus:main',
]},
package_data={
# "pip_plus.html.css": ["*.css"],
# "pip_plus.html.images": ["*.ico","*.svg"],
# "pip_plus.html.scripts": ["*.js"],
# "pip_plus.html": ["index.html"],
"pip_plus":[
"controllor/ProcessGET.py",
"html/index.html",
"html/css/baseStyle.css",
"html/css/bootstrap-min.css",
"html/css/bootstrap.min.css",
"html/css/bootstrap.min.css.map",
"html/css/dashboard.css",
"html/css/fileinput.min.css",
"html/css/jquery.loading.css",
"html/css/jquery.loading.min.css",
"html/images/favicon.ico",
"html/images/white-cube.svg",
"html/scripts/bootstrap.js",
"html/scripts/dataHandleScript.js",
"html/scripts/fileinput.min.js",
"html/scripts/ie10-viewport-bug-workaround.js",
"html/scripts/jquery-3.2.1.min.js",
"html/scripts/jquery-3.3.1.min.js",
"html/scripts/jquery.loading.js",
"html/scripts/jquery.loading.min.js",
"html/scripts/layoutScript.js",
"html/scripts/mainScript.js",
"html/scripts/pageHandleScript.js",
"html/scripts/popper.min.js",
],
},
# data_files=[
# ("", [
# "src/pip_plus/html/index.html",
# "src/pip_plus/html/css/baseStyle.css",
# "src/pip_plus/html/css/bootstrap-min.css",
# "src/pip_plus/html/css/bootstrap.min.css",
# "src/pip_plus/html/css/bootstrap.min.css.map",
# "src/pip_plus/html/css/dashboard.css",
# "src/pip_plus/html/css/fileinput.min.css",
# "src/pip_plus/html/css/jquery.loading.css",
# "src/pip_plus/html/css/jquery.loading.min.css",
# "src/pip_plus/html/images/favicon.ico",
# "src/pip_plus/html/images/white-cube.svg",
# "src/pip_plus/html/scripts/bootstrap.js",
# "src/pip_plus/html/scripts/dataHandleScript.js",
# "src/pip_plus/html/scripts/fileinput.min.js",
# "src/pip_plus/html/scripts/ie10-viewport-bug-workaround.js",
# "src/pip_plus/html/scripts/jquery-3.2.1.min.js",
# "src/pip_plus/html/scripts/jquery-3.3.1.min.js",
# "src/pip_plus/html/scripts/jquery.loading.js",
# "src/pip_plus/html/scripts/jquery.loading.min.js",
# "src/pip_plus/html/scripts/layoutScript.js",
# "src/pip_plus/html/scripts/mainScript.js",
# "src/pip_plus/html/scripts/pageHandleScript.js",
# "src/pip_plus/html/scripts/popper.min.js",
# ]),
# ],
# package_data={
# # If any package contains *.txt or *.rst files, include them:
# '': ['*.js', '*.css','*.html','*.svg'],
# # # And include any *.msg files found in the 'hello' package, too:
# # 'hello': ['*.msg'],
# },
classifiers=(
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Environment :: Web Environment',
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
),
project_urls={
'Documentation': 'https://github.com/hmhuo/pip-plus/wiki',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
'Source': 'https://github.com/hmhuo/pip-plus',
'Tracker': 'https://github.com/hmhuo/pip-plus/issues',
},
)
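# Hedged note (not part of the original setup.py): after installation, the
# console_scripts entry point declared above exposes the tool as a `pp` command
# that dispatches to pip_plus.pip_plus:main, e.g.
#
#     pip install .        # or `pip install -e .` during development
#     pp                   # launches the pip-plus web management tool
#
# The exact CLI behaviour depends on pip_plus itself and is assumed here.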
| 35.708333
| 76
| 0.596499
|
4a16fe5628a5f6b91a1eabe2fa0696b7f52037e2
| 3,298
|
py
|
Python
|
flask_resteasy/factories.py
|
mschenk42/flask-resteasy
|
e787c248a93eee1a4634b38fbcfdf747d8960849
|
[
"BSD-3-Clause"
] | null | null | null |
flask_resteasy/factories.py
|
mschenk42/flask-resteasy
|
e787c248a93eee1a4634b38fbcfdf747d8960849
|
[
"BSD-3-Clause"
] | null | null | null |
flask_resteasy/factories.py
|
mschenk42/flask-resteasy
|
e787c248a93eee1a4634b38fbcfdf747d8960849
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
"""
flask_resteasy.factories
~~~~~~~~~~~~~~~~~~~~~~~~
"""
from flask import request
from flask_resteasy.parsers import GetRequestParser
from flask_resteasy.parsers import PutRequestParser
from flask_resteasy.parsers import PostRequestParser
from flask_resteasy.parsers import DeleteRequestParser
from flask_resteasy.processors import GetRequestProcessor
from flask_resteasy.processors import PutRequestProcessor
from flask_resteasy.processors import PostRequestProcessor
from flask_resteasy.processors import DeleteRequestProcessor
from flask_resteasy.builders import ResponseBuilder
class ParserFactory(object):
"""Factory for creating request parser objects.
"""
@staticmethod
def create(cfg, **kwargs):
"""Factory method for creating RequestParser.
:param cfg: :class:`flask_resteasy.configs.APIConfig` instance
:param kwargs: dictionary of keyword arguments which contains
route parameters and query parameters for the
current HTTP request
"""
if request.method == 'GET':
return GetRequestParser(cfg, **kwargs)
elif request.method == 'POST':
return PostRequestParser(cfg, **kwargs)
elif request.method == 'DELETE':
return DeleteRequestParser(cfg, **kwargs)
elif request.method == 'PUT':
return PutRequestParser(cfg, **kwargs)
class ProcessorFactory(object):
"""Factory for creating request processor objects.
"""
@staticmethod
def create(cfg, req_par):
"""Factory method for creating RequestProcessor.
:param cfg: :class:`flask_resteasy.configs.APIConfig` instance
:param req_par: :class:`flask_resteasy.parsers.RequestParser`
for the current HTTP request
"""
if request.method == 'GET':
return GetRequestProcessor(cfg, req_par)
elif request.method == 'POST':
post_process = cfg.api_manager.get_post_process(cfg.resource_name)
return ProcessorFactory._create_process(
PostRequestProcessor, cfg, req_par, post_process)
elif request.method == 'DELETE':
return DeleteRequestProcessor(cfg, req_par)
elif request.method == 'PUT':
put_process = cfg.api_manager.get_put_process(cfg.resource_name)
return ProcessorFactory._create_process(
PutRequestProcessor, cfg, req_par, put_process)
@staticmethod
def _create_process(process, cfg, req_par, custom_process):
if custom_process and custom_process[0] in request.json and \
request.json[custom_process[0]] == custom_process[1].__name__:
return custom_process[1](cfg, req_par)
else:
return process(cfg, req_par)
class BuilderFactory(object):
"""Factory for creating response builder objects.
"""
@staticmethod
    def create(cfg, req_proc):
        """Factory method for creating ResponseBuilder.
:param cfg: :class:`flask_resteasy.configs.APIConfig` instance
:param req_proc: :class:`flask_resteasy.processors.RequestProcessor`
for the current HTTP request
"""
return ResponseBuilder(cfg, req_proc)
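# Hedged usage sketch (not part of flask-resteasy): the three factories above
# are typically chained once per request. It must run inside a Flask request
# context because each factory dispatches on `request.method`; `cfg` stands for
# an APIConfig instance and the keyword arguments for route/query parameters,
# both of which are assumptions for illustration only.
def _sketch_handle_request(cfg, **route_params):
    parser = ParserFactory.create(cfg, **route_params)   # parser picked by HTTP verb
    processor = ProcessorFactory.create(cfg, parser)     # processor picked by HTTP verb
    builder = BuilderFactory.create(cfg, processor)      # wraps the processed result
    return builder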
| 37.05618
| 78
| 0.673135
|
4a16fed22ccb319943196c419e33f3e1e6c87011
| 3,164
|
py
|
Python
|
event_pubsub/alembic/versions/d8822b756c18_baseline.py
|
DhivakharVenkatachalam/snet-marketplace-service
|
6aee606bc9b00d418caeae26c64deae03792e0ce
|
[
"MIT"
] | 14
|
2019-02-12T09:14:52.000Z
|
2021-03-11T18:42:22.000Z
|
event_pubsub/alembic/versions/d8822b756c18_baseline.py
|
prashantramangupta/snet-marketplace-service
|
7c293054e4b0207deefecc46defd743c064472a4
|
[
"MIT"
] | 1,079
|
2019-01-10T04:31:24.000Z
|
2022-03-29T06:16:42.000Z
|
event_pubsub/alembic/versions/d8822b756c18_baseline.py
|
prashantramangupta/snet-marketplace-service
|
7c293054e4b0207deefecc46defd743c064472a4
|
[
"MIT"
] | 20
|
2018-12-18T13:06:41.000Z
|
2021-09-17T11:13:01.000Z
|
"""baseline
Revision ID: d8822b756c18
Revises:
Create Date: 2019-09-18 14:33:54.629555
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'd8822b756c18'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute("""
CREATE TABLE `registry_events_raw` (
`row_id` int(11) NOT NULL AUTO_INCREMENT,
`block_no` int(11) NOT NULL,
`event` varchar(256) NOT NULL,
`json_str` text,
`processed` bit(1) DEFAULT NULL,
`transactionHash` varchar(256) DEFAULT NULL,
`logIndex` varchar(256) DEFAULT NULL,
`error_code` int(11) DEFAULT NULL,
`error_msg` varchar(256) DEFAULT NULL,
`row_updated` timestamp NULL DEFAULT NULL,
`row_created` timestamp NULL DEFAULT NULL,
PRIMARY KEY (`row_id`),
KEY `blk_no_idx` (`block_no`),
UNIQUE KEY `uq_rg_ev` (`block_no`,`transactionHash`)
) ;
""")
conn.execute("""
CREATE TABLE `mpe_events_raw` (
`row_id` int(11) NOT NULL AUTO_INCREMENT,
`block_no` int(11) NOT NULL,
`event` varchar(256) NOT NULL,
`json_str` text,
`processed` bit(1) DEFAULT NULL,
`transactionHash` varchar(256) DEFAULT NULL,
`logIndex` varchar(256) DEFAULT NULL,
`error_code` int(11) DEFAULT NULL,
`error_msg` varchar(256) DEFAULT NULL,
`row_updated` timestamp NULL DEFAULT NULL,
`row_created` timestamp NULL DEFAULT NULL,
PRIMARY KEY (`row_id`),
KEY `blk_no_idx` (`block_no`),
UNIQUE KEY `uq_mpe_ev` (`block_no`,`transactionHash`)
) ;
""")
conn.execute("""
CREATE TABLE `rfai_events_raw` (
`row_id` int(11) NOT NULL AUTO_INCREMENT,
`block_no` int(11) NOT NULL,
`event` varchar(256) NOT NULL,
`json_str` text,
`processed` bit(1) DEFAULT NULL,
`transactionHash` varchar(256) DEFAULT NULL,
`logIndex` varchar(256) DEFAULT NULL,
`error_code` int(11) DEFAULT NULL,
`error_msg` varchar(256) DEFAULT NULL,
`row_updated` timestamp NULL DEFAULT NULL,
`row_created` timestamp NULL DEFAULT NULL,
PRIMARY KEY (`row_id`),
KEY `blk_no_idx` (`block_no`),
UNIQUE KEY `uq_rf_ev` (`block_no`,`transactionHash`)
)
""")
conn.execute("""
CREATE TABLE `event_blocknumber_marker` (
`row_id` int(11) NOT NULL AUTO_INCREMENT,
`event_type` varchar(128) NOT NULL,
`last_block_number` int(11) not null,
`row_created` timestamp NULL DEFAULT NULL,
`row_updated` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`row_id`)
)
""")
def downgrade():
conn = op.get_bind()
conn.execute("""
drop table registry_events_raw
"""
)
conn.execute("""
drop table mpe_events_raw
"""
)
conn.execute("""
            drop table rfai_events_raw
            """
                 )
    conn.execute("""
            drop table event_blocknumber_marker
            """
)
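# Hedged note (not part of the original migration): a revision like this one is
# normally applied or rolled back from the Alembic CLI, for example:
#
#     alembic upgrade head      # runs upgrade() for all pending revisions
#     alembic downgrade -1      # runs downgrade() for the most recent revision
#
# This assumes alembic.ini and env.py are configured as elsewhere in event_pubsub/alembic.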
| 29.849057
| 65
| 0.575853
|
4a16ffab2508ba13ee69093fcd7f1fec05042118
| 4,845
|
py
|
Python
|
neptune/generated/swagger_client/models/payment.py
|
jiji-online/neptune-cli
|
50cf680a80d141497f9331ab7cdaee49fcb90b0c
|
[
"Apache-2.0"
] | null | null | null |
neptune/generated/swagger_client/models/payment.py
|
jiji-online/neptune-cli
|
50cf680a80d141497f9331ab7cdaee49fcb90b0c
|
[
"Apache-2.0"
] | null | null | null |
neptune/generated/swagger_client/models/payment.py
|
jiji-online/neptune-cli
|
50cf680a80d141497f9331ab7cdaee49fcb90b0c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class Payment(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, created=None, amount=None, source=None, error=None):
"""
Payment - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'created': 'datetime',
'amount': 'float',
'source': 'str',
'error': 'str'
}
self.attribute_map = {
'created': 'created',
'amount': 'amount',
'source': 'source',
'error': 'error'
}
self._created = created
self._amount = amount
self._source = source
self._error = error
@property
def created(self):
"""
Gets the created of this Payment.
:return: The created of this Payment.
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""
Sets the created of this Payment.
:param created: The created of this Payment.
:type: datetime
"""
self._created = created
@property
def amount(self):
"""
Gets the amount of this Payment.
:return: The amount of this Payment.
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""
Sets the amount of this Payment.
:param amount: The amount of this Payment.
:type: float
"""
self._amount = amount
@property
def source(self):
"""
Gets the source of this Payment.
:return: The source of this Payment.
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""
Sets the source of this Payment.
:param source: The source of this Payment.
:type: str
"""
self._source = source
@property
def error(self):
"""
Gets the error of this Payment.
:return: The error of this Payment.
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""
Sets the error of this Payment.
:param error: The error of this Payment.
:type: str
"""
self._error = error
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
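# Hedged usage sketch (not part of the generated client): constructing a Payment
# and serialising it with the helpers defined above. The field values are made up.
if __name__ == "__main__":
    import datetime
    payment = Payment(created=datetime.datetime(2016, 1, 1),
                      amount=9.99, source="credit_card", error=None)
    print(payment.to_dict())     # {'created': datetime(...), 'amount': 9.99, ...}
    print(payment.to_str())      # pretty-printed form of the same dict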
| 23.75
| 77
| 0.534365
|
4a17003fded9f401cf27fa9abdbd6f6d341606e9
| 4,479
|
py
|
Python
|
KTH_DataModule.py
|
CeeBeeTree/KTH-Action-Recognition
|
ec3a1290f13e7dc83b0db3dd5806ebd0c9c23a85
|
[
"MIT"
] | null | null | null |
KTH_DataModule.py
|
CeeBeeTree/KTH-Action-Recognition
|
ec3a1290f13e7dc83b0db3dd5806ebd0c9c23a85
|
[
"MIT"
] | null | null | null |
KTH_DataModule.py
|
CeeBeeTree/KTH-Action-Recognition
|
ec3a1290f13e7dc83b0db3dd5806ebd0c9c23a85
|
[
"MIT"
] | null | null | null |
import pytorch_lightning as pl
from torch.utils.data import Dataset, Subset, DataLoader
import cv2, torch, os
import numpy as np
class KTH_Dataset(Dataset):
categories = ["boxing", "handclapping", "handwaving","jogging", "running", "walking"]
def __init__(self, directory, frames_per_item = 15, use_preloaded=False, transform=None):
self.__transform = transform
self.__directory = directory
self.__use_preloaded = use_preloaded
self.__frames_per_item = frames_per_item
def prepare_data(self):
self.__data = self.__parse_sequence_file(self.__directory, self.__frames_per_item)
if not self.__use_preloaded:
self.__process_video_frames(self.__data, self.__directory + '/data', self.__transform)
def __len__(self):
return len(self.__data)
def __getitem__(self, idx):
item_data = torch.load(self.__directory + '/data/' + str(idx) + '.pt')
return (item_data, self.__data[idx]['label'])
def __parse_sequence_file(self, base_dir, num_frames):
# parse file for example : "person01_boxing_d1 frames 1-95, 96-185, 186-245, 246-360"
data = []
with open(base_dir + '/00sequences.txt', 'r') as sequence_file:
for sequence in sequence_file:
split_1 = sequence.split('frames')
if len(split_1) > 1:
label_desc = split_1[0].split('_')[1]
label = self.categories.index(label_desc)
person_num = split_1[0][6:8]
filepath = base_dir + '/' + label_desc + '/' + split_1[0].strip() + '_uncomp.avi'
for overall_start, overall_end in [tuple(split_2.split('-')) for split_2 in split_1[1].strip().split(',')]:
for i in range(int(overall_start) , int(overall_end) - num_frames, num_frames):
end = i + num_frames
data.append({'video_filepath': filepath, 'start':i, 'end':end, 'label':label,
'label_desc' : label_desc, 'person':person_num})
return data
def __process_video_frames(self, data, base_dir, transform):
if not os.path.isdir(base_dir) : os.mkdir(base_dir)
for i, data_item in enumerate(data):
vid = cv2.VideoCapture(data_item['video_filepath'])
frames = []
for frame_num in range(data_item['start'], data_item['end']):
try:
vid.set(1, frame_num)
ret, frame = vid.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frames.append([frame])
except:
print(data_item)
print(frame_num)
vid.release()
frames = torch.from_numpy(np.moveaxis( np.array(frames), 1,0)) / 255
torch.save(frames, base_dir + '/' + str(i) + '.pt')
def get_indices_for_persons(self, person_ids):
return [ i for i, x in enumerate(self.__data) if x['person'] in person_ids]
class KTH_DataModule(pl.LightningDataModule):
def __init__(self, directory, frames_per_item = 15, use_preloaded=False, transform=None):
super().__init__()
self.__partitions = {'Train': ['11', '12', '13', '14', '15', '16', '17', '18'],
'Valid': ['19', '20', '21', '23', '24', '25', '01', '04'],
'Test' : ['22', '02', '03', '05', '06', '07', '08', '09', '10']}
self.__complt_dataset = KTH_Dataset(directory,frames_per_item, use_preloaded,transform)
self.__subset_datasets = {}
def prepare_data(self):
self.__complt_dataset.prepare_data()
for partition_name, partition_ids in self.__partitions.items():
partition_indexes = self.__complt_dataset.get_indices_for_persons(partition_ids)
self.__subset_datasets[partition_name] = Subset(self.__complt_dataset, partition_indexes)
def train_dataloader(self):
return DataLoader(self.__subset_datasets['Train'] , batch_size=16, shuffle=True)
def val_dataloader(self):
return DataLoader(self.__subset_datasets['Valid'] , batch_size=16, shuffle=True)
def test_dataloader(self):
return DataLoader(self.__subset_datasets['Test'] , batch_size=16,shuffle=False)
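# Hedged usage sketch (not part of the original file): wiring the data module
# into a Lightning training run. `SomeKTHClassifier` stands for a LightningModule
# defined elsewhere and the dataset path is a placeholder; both are assumptions.
#
#     dm = KTH_DataModule("path/to/KTH", frames_per_item=15, use_preloaded=True)
#     dm.prepare_data()
#     trainer = pl.Trainer(max_epochs=10)
#     trainer.fit(SomeKTHClassifier(), datamodule=dm)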
| 46.65625
| 127
| 0.597678
|
4a17021a34fedbfd9dae7e00e435374c0e69c7b2
| 1,471
|
py
|
Python
|
opencanary/modules/ntp.py
|
patron-it/opencanary
|
c6b59fbabd2655c235484dbb095a6c53cb3b61ab
|
[
"BSD-3-Clause"
] | 3
|
2019-01-11T14:45:31.000Z
|
2021-02-07T21:49:54.000Z
|
opencanary/modules/ntp.py
|
patron-it/opencanary
|
c6b59fbabd2655c235484dbb095a6c53cb3b61ab
|
[
"BSD-3-Clause"
] | 12
|
2018-08-31T06:54:13.000Z
|
2019-01-15T22:18:31.000Z
|
opencanary/modules/ntp.py
|
patron-it/opencanary
|
c6b59fbabd2655c235484dbb095a6c53cb3b61ab
|
[
"BSD-3-Clause"
] | 1
|
2018-11-13T19:26:31.000Z
|
2018-11-13T19:26:31.000Z
|
from opencanary.modules import CanaryService
from zope.interface import implements
from twisted.application import internet
from twisted.internet.protocol import DatagramProtocol
from twisted.application.internet import UDPServer
from twisted.internet.address import IPv4Address
from twisted.internet import protocol
"""
A log-only NTP server. It won't respond, but it will log attempts
to trigger the MON_GETLIST_1 NTP command, which is used for DDoS
and network recon.
"""
class MiniNtp(DatagramProtocol):
def datagramReceived(self, data, host_and_port):
if len(data) < 4:
#bogus packet, discard
return
logdata={'NTP CMD': 'monlist'}
self.transport.getPeer = lambda: IPv4Address('UDP',
host_and_port[0],
host_and_port[1])
self.factory.log(logdata=logdata, transport=self.transport)
class CanaryNtp(CanaryService):
NAME = 'ntp'
def __init__(self, config=None, logger=None):
CanaryService.__init__(self, config=config, logger=logger)
self.port = int(config.getVal('ntp.port', default=123))
self.logtype = logger.LOG_NTP_MONLIST
self.listen_addr = config.getVal('device.listen_addr', default='')
def getService(self):
f = MiniNtp()
f.factory = self
return internet.UDPServer(self.port, f, interface=self.listen_addr)
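# Hedged configuration sketch (not part of the original module): per __init__
# above, the service reads `ntp.port` (default 123) and `device.listen_addr`
# (default "") from the OpenCanary config. A minimal JSON fragment could look
# like the following; any surrounding keys are assumptions:
#
#     {
#         "ntp.port": 123,
#         "device.listen_addr": ""
#     }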
| 35.02381
| 75
| 0.660775
|
4a170340a0a7c3b57e360e5382d0f22f595c991d
| 59,027
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/google/cloud/plugins/modules/gcp_bigquery_table.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 7
|
2021-11-16T04:05:42.000Z
|
2022-02-19T21:14:29.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/google/cloud/plugins/modules/gcp_bigquery_table.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/google/cloud/plugins/modules/gcp_bigquery_table.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2022-03-01T05:43:07.000Z
|
2022-03-01T05:43:07.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_bigquery_table
description:
- A Table that belongs to a Dataset .
short_description: Creates a GCP Table
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
table_reference:
description:
- Reference describing the ID of this table.
required: false
type: dict
suboptions:
dataset_id:
description:
- The ID of the dataset containing this table.
required: false
type: str
project_id:
description:
- The ID of the project containing this table.
required: false
type: str
table_id:
description:
        - The ID of the table.
required: false
type: str
clustering:
description:
- One or more fields on which data should be clustered. Only top-level, non-repeated,
simple-type fields are supported. When you cluster a table using multiple columns,
the order of columns you specify is important. The order of the specified columns
determines the sort order of the data.
elements: str
required: false
type: list
description:
description:
- A user-friendly description of the dataset.
required: false
type: str
friendly_name:
description:
- A descriptive name for this table.
required: false
type: str
labels:
description:
- The labels associated with this dataset. You can use these to organize and group
your datasets .
required: false
type: dict
name:
description:
- Name of the table.
required: false
type: str
num_rows:
description:
- The number of rows of data in this table, excluding any data in the streaming
buffer.
required: false
type: int
view:
description:
- The view definition.
required: false
type: dict
suboptions:
use_legacy_sql:
description:
- Specifies whether to use BigQuery's legacy SQL for this view .
required: false
type: bool
user_defined_function_resources:
description:
- Describes user-defined function resources used in the query.
elements: dict
required: false
type: list
suboptions:
inline_code:
description:
- An inline resource that contains code for a user-defined function (UDF).
              Providing an inline code resource is equivalent to providing a URI for
a file containing the same code.
required: false
type: str
resource_uri:
description:
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
required: false
type: str
time_partitioning:
description:
- If specified, configures time-based partitioning for this table.
required: false
type: dict
suboptions:
expiration_ms:
description:
- Number of milliseconds for which to keep the storage for a partition.
required: false
type: int
field:
description:
- If not set, the table is partitioned by pseudo column, referenced via either
'_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If
field is specified, the table is instead partitioned by this field. The
field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE
or REQUIRED.
required: false
type: str
type:
description:
- The only type supported is DAY, which will generate one partition per day.
- 'Some valid choices include: "DAY"'
required: false
type: str
schema:
description:
- Describes the schema of this table.
required: false
type: dict
suboptions:
fields:
description:
- Describes the fields in a table.
elements: dict
required: false
type: list
suboptions:
description:
description:
- The field description. The maximum length is 1,024 characters.
required: false
type: str
fields:
description:
- Describes the nested schema fields if the type property is set to RECORD.
elements: str
required: false
type: list
mode:
description:
- The field mode.
- 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"'
required: false
type: str
name:
description:
- The field name.
required: false
type: str
type:
description:
- The field data type.
- 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT",
"TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"'
required: false
type: str
encryption_configuration:
description:
- Custom encryption configuration.
required: false
type: dict
suboptions:
kms_key_name:
description:
- Describes the Cloud KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your project
requires access to this encryption key.
required: false
type: str
expiration_time:
description:
- The time when this table expires, in milliseconds since the epoch. If not present,
the table will persist indefinitely.
required: false
type: int
external_data_configuration:
description:
- Describes the data format, location, and other properties of a table stored
outside of BigQuery. By defining these properties, the data source can then
be queried as if it were a standard BigQuery table.
required: false
type: dict
suboptions:
autodetect:
description:
- Try to detect schema and format options automatically. Any option specified
explicitly will be honored.
required: false
type: bool
compression:
description:
- The compression type of the data source.
- 'Some valid choices include: "GZIP", "NONE"'
required: false
type: str
ignore_unknown_values:
description:
- Indicates if BigQuery should allow extra values that are not represented
in the table schema .
required: false
type: bool
max_bad_records:
description:
- The maximum number of bad records that BigQuery can ignore when reading
data .
required: false
type: int
source_format:
description:
- The data format.
- 'Some valid choices include: "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON",
"AVRO", "DATASTORE_BACKUP", "BIGTABLE", "ORC"'
required: false
type: str
source_uris:
description:
- The fully-qualified URIs that point to your data in Google Cloud.
- 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard
character and it must come after the ''bucket'' name. Size limits related
to load jobs apply to external data sources. For Google Cloud Bigtable URIs:
          Exactly one URI can be specified and it has to be a fully specified and valid
HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore
backups, exactly one URI can be specified. Also, the ''*'' wildcard character
is not allowed.'
elements: str
required: false
type: list
schema:
description:
- The schema for the data. Schema is required for CSV and JSON formats.
required: false
type: dict
suboptions:
fields:
description:
- Describes the fields in a table.
elements: dict
required: false
type: list
suboptions:
description:
description:
- The field description.
required: false
type: str
fields:
description:
- Describes the nested schema fields if the type property is set to
RECORD .
elements: str
required: false
type: list
mode:
description:
- Field mode.
- 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"'
required: false
type: str
name:
description:
- Field name.
required: false
type: str
type:
description:
- Field data type.
- 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT",
"TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"'
required: false
type: str
google_sheets_options:
description:
- Additional options if sourceFormat is set to GOOGLE_SHEETS.
required: false
type: dict
suboptions:
skip_leading_rows:
description:
- The number of rows at the top of a Google Sheet that BigQuery will skip
when reading the data.
required: false
type: int
csv_options:
description:
- Additional properties to set if sourceFormat is set to CSV.
required: false
type: dict
suboptions:
allow_jagged_rows:
description:
- Indicates if BigQuery should accept rows that are missing trailing optional
columns .
required: false
type: bool
allow_quoted_newlines:
description:
- Indicates if BigQuery should allow quoted data sections that contain
newline characters in a CSV file .
required: false
type: bool
encoding:
description:
- The character encoding of the data.
- 'Some valid choices include: "UTF-8", "ISO-8859-1"'
required: false
type: str
field_delimiter:
description:
- The separator for fields in a CSV file.
required: false
type: str
quote:
description:
- The value that is used to quote data sections in a CSV file.
required: false
type: str
skip_leading_rows:
description:
- The number of rows at the top of a CSV file that BigQuery will skip
when reading the data.
required: false
type: int
bigtable_options:
description:
- Additional options if sourceFormat is set to BIGTABLE.
required: false
type: dict
suboptions:
ignore_unspecified_column_families:
description:
- If field is true, then the column families that are not specified in
columnFamilies list are not exposed in the table schema .
required: false
type: bool
read_rowkey_as_string:
description:
- If field is true, then the rowkey column families will be read and converted
to string.
required: false
type: bool
column_families:
description:
- List of column families to expose in the table schema along with their
types.
elements: dict
required: false
type: list
suboptions:
columns:
description:
- Lists of columns that should be exposed as individual fields as
opposed to a list of (column name, value) pairs.
elements: dict
required: false
type: list
suboptions:
encoding:
description:
- The encoding of the values when the type is not STRING.
- 'Some valid choices include: "TEXT", "BINARY"'
required: false
type: str
field_name:
description:
- If the qualifier is not a valid BigQuery field identifier, a
valid identifier must be provided as the column field name and
is used as field name in queries.
required: false
type: str
only_read_latest:
description:
- If this is set, only the latest version of value in this column
are exposed .
required: false
type: bool
qualifier_string:
description:
- Qualifier of the column.
required: true
type: str
type:
description:
- The type to convert the value in cells of this column.
- 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT",
"BOOLEAN"'
required: false
type: str
encoding:
description:
- The encoding of the values when the type is not STRING.
- 'Some valid choices include: "TEXT", "BINARY"'
required: false
type: str
family_id:
description:
- Identifier of the column family.
required: false
type: str
only_read_latest:
description:
- If this is set only the latest version of value are exposed for
all columns in this column family .
required: false
type: bool
type:
description:
- The type to convert the value in cells of this column family.
- 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT",
"BOOLEAN"'
required: false
type: str
dataset:
description:
- Name of the dataset.
required: false
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
elements: str
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
'''
EXAMPLES = '''
- name: create a dataset
google.cloud.gcp_bigquery_dataset:
name: example_dataset
dataset_reference:
dataset_id: example_dataset
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: dataset
- name: create a table
google.cloud.gcp_bigquery_table:
name: example_table
dataset: example_dataset
table_reference:
dataset_id: example_dataset
project_id: test_project
table_id: example_table
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
tableReference:
description:
- Reference describing the ID of this table.
returned: success
type: complex
contains:
datasetId:
description:
- The ID of the dataset containing this table.
returned: success
type: str
projectId:
description:
- The ID of the project containing this table.
returned: success
type: str
tableId:
description:
      - The ID of the table.
returned: success
type: str
clustering:
description:
- One or more fields on which data should be clustered. Only top-level, non-repeated,
simple-type fields are supported. When you cluster a table using multiple columns,
the order of columns you specify is important. The order of the specified columns
determines the sort order of the data.
returned: success
type: list
creationTime:
description:
- The time when this dataset was created, in milliseconds since the epoch.
returned: success
type: int
description:
description:
- A user-friendly description of the dataset.
returned: success
type: str
friendlyName:
description:
- A descriptive name for this table.
returned: success
type: str
id:
description:
- An opaque ID uniquely identifying the table.
returned: success
type: str
labels:
description:
- The labels associated with this dataset. You can use these to organize and group
your datasets .
returned: success
type: dict
lastModifiedTime:
description:
- The time when this table was last modified, in milliseconds since the epoch.
returned: success
type: int
location:
description:
- The geographic location where the table resides. This value is inherited from
the dataset.
returned: success
type: str
name:
description:
- Name of the table.
returned: success
type: str
numBytes:
description:
- The size of this table in bytes, excluding any data in the streaming buffer.
returned: success
type: int
numLongTermBytes:
description:
- The number of bytes in the table that are considered "long-term storage".
returned: success
type: int
numRows:
description:
- The number of rows of data in this table, excluding any data in the streaming
buffer.
returned: success
type: int
requirePartitionFilter:
description:
- If set to true, queries over this table require a partition filter that can be
used for partition elimination to be specified.
returned: success
type: bool
type:
description:
- Describes the table type.
returned: success
type: str
view:
description:
- The view definition.
returned: success
type: complex
contains:
useLegacySql:
description:
- Specifies whether to use BigQuery's legacy SQL for this view .
returned: success
type: bool
userDefinedFunctionResources:
description:
- Describes user-defined function resources used in the query.
returned: success
type: complex
contains:
inlineCode:
description:
- An inline resource that contains code for a user-defined function (UDF).
            Providing an inline code resource is equivalent to providing a URI for
a file containing the same code.
returned: success
type: str
resourceUri:
description:
- A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
returned: success
type: str
timePartitioning:
description:
- If specified, configures time-based partitioning for this table.
returned: success
type: complex
contains:
expirationMs:
description:
- Number of milliseconds for which to keep the storage for a partition.
returned: success
type: int
field:
description:
- If not set, the table is partitioned by pseudo column, referenced via either
'_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If field
is specified, the table is instead partitioned by this field. The field must
be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED.
returned: success
type: str
type:
description:
- The only type supported is DAY, which will generate one partition per day.
returned: success
type: str
streamingBuffer:
description:
- Contains information regarding this table's streaming buffer, if one is present.
This field will be absent if the table is not being streamed to or if there is
no data in the streaming buffer.
returned: success
type: complex
contains:
estimatedBytes:
description:
- A lower-bound estimate of the number of bytes currently in the streaming buffer.
returned: success
type: int
estimatedRows:
description:
- A lower-bound estimate of the number of rows currently in the streaming buffer.
returned: success
type: int
oldestEntryTime:
description:
- Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds
since the epoch, if the streaming buffer is available.
returned: success
type: int
schema:
description:
- Describes the schema of this table.
returned: success
type: complex
contains:
fields:
description:
- Describes the fields in a table.
returned: success
type: complex
contains:
description:
description:
- The field description. The maximum length is 1,024 characters.
returned: success
type: str
fields:
description:
- Describes the nested schema fields if the type property is set to RECORD.
returned: success
type: list
mode:
description:
- The field mode.
returned: success
type: str
name:
description:
- The field name.
returned: success
type: str
type:
description:
- The field data type.
returned: success
type: str
encryptionConfiguration:
description:
- Custom encryption configuration.
returned: success
type: complex
contains:
kmsKeyName:
description:
- Describes the Cloud KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your project
requires access to this encryption key.
returned: success
type: str
expirationTime:
description:
- The time when this table expires, in milliseconds since the epoch. If not present,
the table will persist indefinitely.
returned: success
type: int
externalDataConfiguration:
description:
- Describes the data format, location, and other properties of a table stored outside
of BigQuery. By defining these properties, the data source can then be queried
as if it were a standard BigQuery table.
returned: success
type: complex
contains:
autodetect:
description:
- Try to detect schema and format options automatically. Any option specified
explicitly will be honored.
returned: success
type: bool
compression:
description:
- The compression type of the data source.
returned: success
type: str
ignoreUnknownValues:
description:
- Indicates if BigQuery should allow extra values that are not represented in
the table schema .
returned: success
type: bool
maxBadRecords:
description:
- The maximum number of bad records that BigQuery can ignore when reading data
.
returned: success
type: int
sourceFormat:
description:
- The data format.
returned: success
type: str
sourceUris:
description:
- The fully-qualified URIs that point to your data in Google Cloud.
- 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard character
and it must come after the ''bucket'' name. Size limits related to load jobs
apply to external data sources. For Google Cloud Bigtable URIs: Exactly one
        URI can be specified and it has to be a fully specified and valid HTTPS URL for
a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly
one URI can be specified. Also, the ''*'' wildcard character is not allowed.'
returned: success
type: list
schema:
description:
- The schema for the data. Schema is required for CSV and JSON formats.
returned: success
type: complex
contains:
fields:
description:
- Describes the fields in a table.
returned: success
type: complex
contains:
description:
description:
- The field description.
returned: success
type: str
fields:
description:
- Describes the nested schema fields if the type property is set to
RECORD .
returned: success
type: list
mode:
description:
- Field mode.
returned: success
type: str
name:
description:
- Field name.
returned: success
type: str
type:
description:
- Field data type.
returned: success
type: str
googleSheetsOptions:
description:
- Additional options if sourceFormat is set to GOOGLE_SHEETS.
returned: success
type: complex
contains:
skipLeadingRows:
description:
- The number of rows at the top of a Google Sheet that BigQuery will skip
when reading the data.
returned: success
type: int
csvOptions:
description:
- Additional properties to set if sourceFormat is set to CSV.
returned: success
type: complex
contains:
allowJaggedRows:
description:
- Indicates if BigQuery should accept rows that are missing trailing optional
columns .
returned: success
type: bool
allowQuotedNewlines:
description:
- Indicates if BigQuery should allow quoted data sections that contain newline
characters in a CSV file .
returned: success
type: bool
encoding:
description:
- The character encoding of the data.
returned: success
type: str
fieldDelimiter:
description:
- The separator for fields in a CSV file.
returned: success
type: str
quote:
description:
- The value that is used to quote data sections in a CSV file.
returned: success
type: str
skipLeadingRows:
description:
- The number of rows at the top of a CSV file that BigQuery will skip when
reading the data.
returned: success
type: int
bigtableOptions:
description:
- Additional options if sourceFormat is set to BIGTABLE.
returned: success
type: complex
contains:
ignoreUnspecifiedColumnFamilies:
description:
- If field is true, then the column families that are not specified in columnFamilies
list are not exposed in the table schema .
returned: success
type: bool
readRowkeyAsString:
description:
- If field is true, then the rowkey column families will be read and converted
to string.
returned: success
type: bool
columnFamilies:
description:
- List of column families to expose in the table schema along with their
types.
returned: success
type: complex
contains:
columns:
description:
- Lists of columns that should be exposed as individual fields as opposed
to a list of (column name, value) pairs.
returned: success
type: complex
contains:
encoding:
description:
- The encoding of the values when the type is not STRING.
returned: success
type: str
fieldName:
description:
- If the qualifier is not a valid BigQuery field identifier, a valid
identifier must be provided as the column field name and is used
as field name in queries.
returned: success
type: str
onlyReadLatest:
description:
- If this is set, only the latest version of value in this column
are exposed .
returned: success
type: bool
qualifierString:
description:
- Qualifier of the column.
returned: success
type: str
type:
description:
- The type to convert the value in cells of this column.
returned: success
type: str
encoding:
description:
- The encoding of the values when the type is not STRING.
returned: success
type: str
familyId:
description:
- Identifier of the column family.
returned: success
type: str
onlyReadLatest:
description:
- If this is set only the latest version of value are exposed for all
columns in this column family .
returned: success
type: bool
type:
description:
- The type to convert the value in cells of this column family.
returned: success
type: str
dataset:
description:
- Name of the dataset.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import (
navigate_hash,
GcpSession,
GcpModule,
GcpRequest,
remove_nones_from_dict,
replace_resource_dict,
)
import json
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
table_reference=dict(type='dict', options=dict(dataset_id=dict(type='str'), project_id=dict(type='str'), table_id=dict(type='str'))),
clustering=dict(type='list', elements='str'),
description=dict(type='str'),
friendly_name=dict(type='str'),
labels=dict(type='dict'),
name=dict(type='str'),
num_rows=dict(type='int'),
view=dict(
type='dict',
options=dict(
use_legacy_sql=dict(type='bool'),
user_defined_function_resources=dict(
type='list', elements='dict', options=dict(inline_code=dict(type='str'), resource_uri=dict(type='str'))
),
),
),
time_partitioning=dict(type='dict', options=dict(expiration_ms=dict(type='int'), field=dict(type='str'), type=dict(type='str'))),
schema=dict(
type='dict',
options=dict(
fields=dict(
type='list',
elements='dict',
options=dict(
description=dict(type='str'),
fields=dict(type='list', elements='str'),
mode=dict(type='str'),
name=dict(type='str'),
type=dict(type='str'),
),
)
),
),
encryption_configuration=dict(type='dict', options=dict(kms_key_name=dict(type='str'))),
expiration_time=dict(type='int'),
external_data_configuration=dict(
type='dict',
options=dict(
autodetect=dict(type='bool'),
compression=dict(type='str'),
ignore_unknown_values=dict(type='bool'),
max_bad_records=dict(default=0, type='int'),
source_format=dict(type='str'),
source_uris=dict(type='list', elements='str'),
schema=dict(
type='dict',
options=dict(
fields=dict(
type='list',
elements='dict',
options=dict(
description=dict(type='str'),
fields=dict(type='list', elements='str'),
mode=dict(type='str'),
name=dict(type='str'),
type=dict(type='str'),
),
)
),
),
google_sheets_options=dict(type='dict', options=dict(skip_leading_rows=dict(default=0, type='int'))),
csv_options=dict(
type='dict',
options=dict(
allow_jagged_rows=dict(type='bool'),
allow_quoted_newlines=dict(type='bool'),
encoding=dict(type='str'),
field_delimiter=dict(type='str'),
quote=dict(type='str'),
skip_leading_rows=dict(default=0, type='int'),
),
),
bigtable_options=dict(
type='dict',
options=dict(
ignore_unspecified_column_families=dict(type='bool'),
read_rowkey_as_string=dict(type='bool'),
column_families=dict(
type='list',
elements='dict',
options=dict(
columns=dict(
type='list',
elements='dict',
options=dict(
encoding=dict(type='str'),
field_name=dict(type='str'),
only_read_latest=dict(type='bool'),
qualifier_string=dict(required=True, type='str'),
type=dict(type='str'),
),
),
encoding=dict(type='str'),
family_id=dict(type='str'),
only_read_latest=dict(type='bool'),
type=dict(type='str'),
),
),
),
),
),
),
dataset=dict(type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery']
state = module.params['state']
kind = 'bigquery#table'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'bigquery')
return return_if_object(module, auth.post(link, resource_to_request(module)), kind)
def update(module, link, kind):
auth = GcpSession(module, 'bigquery')
return return_if_object(module, auth.put(link, resource_to_request(module)), kind)
def delete(module, link, kind):
auth = GcpSession(module, 'bigquery')
return return_if_object(module, auth.delete(link), kind)
def resource_to_request(module):
request = {
u'kind': 'bigquery#table',
u'tableReference': TableTablereference(module.params.get('table_reference', {}), module).to_request(),
u'clustering': module.params.get('clustering'),
u'description': module.params.get('description'),
u'friendlyName': module.params.get('friendly_name'),
u'labels': module.params.get('labels'),
u'name': module.params.get('name'),
u'numRows': module.params.get('num_rows'),
u'view': TableView(module.params.get('view', {}), module).to_request(),
u'timePartitioning': TableTimepartitioning(module.params.get('time_partitioning', {}), module).to_request(),
u'schema': TableSchema(module.params.get('schema', {}), module).to_request(),
u'encryptionConfiguration': TableEncryptionconfiguration(module.params.get('encryption_configuration', {}), module).to_request(),
u'expirationTime': module.params.get('expiration_time'),
u'externalDataConfiguration': TableExternaldataconfiguration(module.params.get('external_data_configuration', {}), module).to_request(),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'bigquery')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables/{name}".format(**module.params)
def collection(module):
return "https://bigquery.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'tableReference': TableTablereference(response.get(u'tableReference', {}), module).from_response(),
u'clustering': response.get(u'clustering'),
u'creationTime': response.get(u'creationTime'),
u'description': response.get(u'description'),
u'friendlyName': response.get(u'friendlyName'),
u'id': response.get(u'id'),
u'labels': response.get(u'labels'),
u'lastModifiedTime': response.get(u'lastModifiedTime'),
u'location': response.get(u'location'),
u'name': response.get(u'name'),
u'numBytes': response.get(u'numBytes'),
u'numLongTermBytes': response.get(u'numLongTermBytes'),
u'numRows': response.get(u'numRows'),
u'requirePartitionFilter': response.get(u'requirePartitionFilter'),
u'type': response.get(u'type'),
u'view': TableView(response.get(u'view', {}), module).from_response(),
u'timePartitioning': TableTimepartitioning(response.get(u'timePartitioning', {}), module).from_response(),
u'streamingBuffer': TableStreamingbuffer(response.get(u'streamingBuffer', {}), module).from_response(),
u'schema': TableSchema(response.get(u'schema', {}), module).from_response(),
u'encryptionConfiguration': TableEncryptionconfiguration(response.get(u'encryptionConfiguration', {}), module).from_response(),
u'expirationTime': response.get(u'expirationTime'),
u'externalDataConfiguration': TableExternaldataconfiguration(response.get(u'externalDataConfiguration', {}), module).from_response(),
}
class TableTablereference(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{u'datasetId': self.request.get('dataset_id'), u'projectId': self.request.get('project_id'), u'tableId': self.request.get('table_id')}
)
def from_response(self):
return remove_nones_from_dict(
{u'datasetId': self.request.get(u'datasetId'), u'projectId': self.request.get(u'projectId'), u'tableId': self.request.get(u'tableId')}
)
class TableView(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'useLegacySql': self.request.get('use_legacy_sql'),
u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray(
self.request.get('user_defined_function_resources', []), self.module
).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'useLegacySql': self.request.get(u'useLegacySql'),
u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray(
self.request.get(u'userDefinedFunctionResources', []), self.module
).from_response(),
}
)
class TableUserdefinedfunctionresourcesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'inlineCode': item.get('inline_code'), u'resourceUri': item.get('resource_uri')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'inlineCode': item.get(u'inlineCode'), u'resourceUri': item.get(u'resourceUri')})
class TableTimepartitioning(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{u'expirationMs': self.request.get('expiration_ms'), u'field': self.request.get('field'), u'type': self.request.get('type')}
)
def from_response(self):
return remove_nones_from_dict(
{u'expirationMs': self.request.get(u'expirationMs'), u'field': self.request.get(u'field'), u'type': self.request.get(u'type')}
)
class TableStreamingbuffer(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({})
def from_response(self):
return remove_nones_from_dict({})
class TableSchema(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get('fields', []), self.module).to_request()})
def from_response(self):
return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get(u'fields', []), self.module).from_response()})
class TableFieldsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'description': item.get('description'),
u'fields': item.get('fields'),
u'mode': item.get('mode'),
u'name': item.get('name'),
u'type': item.get('type'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'description': item.get(u'description'),
u'fields': item.get(u'fields'),
u'mode': item.get(u'mode'),
u'name': item.get(u'name'),
u'type': item.get(u'type'),
}
)
class TableEncryptionconfiguration(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'kmsKeyName': self.request.get('kms_key_name')})
def from_response(self):
return remove_nones_from_dict({u'kmsKeyName': self.request.get(u'kmsKeyName')})
class TableExternaldataconfiguration(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'autodetect': self.request.get('autodetect'),
u'compression': self.request.get('compression'),
u'ignoreUnknownValues': self.request.get('ignore_unknown_values'),
u'maxBadRecords': self.request.get('max_bad_records'),
u'sourceFormat': self.request.get('source_format'),
u'sourceUris': self.request.get('source_uris'),
u'schema': TableSchema(self.request.get('schema', {}), self.module).to_request(),
u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get('google_sheets_options', {}), self.module).to_request(),
u'csvOptions': TableCsvoptions(self.request.get('csv_options', {}), self.module).to_request(),
u'bigtableOptions': TableBigtableoptions(self.request.get('bigtable_options', {}), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'autodetect': self.request.get(u'autodetect'),
u'compression': self.request.get(u'compression'),
u'ignoreUnknownValues': self.request.get(u'ignoreUnknownValues'),
u'maxBadRecords': self.request.get(u'maxBadRecords'),
u'sourceFormat': self.request.get(u'sourceFormat'),
u'sourceUris': self.request.get(u'sourceUris'),
u'schema': TableSchema(self.request.get(u'schema', {}), self.module).from_response(),
u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get(u'googleSheetsOptions', {}), self.module).from_response(),
u'csvOptions': TableCsvoptions(self.request.get(u'csvOptions', {}), self.module).from_response(),
u'bigtableOptions': TableBigtableoptions(self.request.get(u'bigtableOptions', {}), self.module).from_response(),
}
)
class TableSchema(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get('fields', []), self.module).to_request()})
def from_response(self):
return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get(u'fields', []), self.module).from_response()})
class TableFieldsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'description': item.get('description'),
u'fields': item.get('fields'),
u'mode': item.get('mode'),
u'name': item.get('name'),
u'type': item.get('type'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'description': item.get(u'description'),
u'fields': item.get(u'fields'),
u'mode': item.get(u'mode'),
u'name': item.get(u'name'),
u'type': item.get(u'type'),
}
)
class TableGooglesheetsoptions(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'skipLeadingRows': self.request.get('skip_leading_rows')})
def from_response(self):
return remove_nones_from_dict({u'skipLeadingRows': self.request.get(u'skipLeadingRows')})
class TableCsvoptions(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'allowJaggedRows': self.request.get('allow_jagged_rows'),
u'allowQuotedNewlines': self.request.get('allow_quoted_newlines'),
u'encoding': self.request.get('encoding'),
u'fieldDelimiter': self.request.get('field_delimiter'),
u'quote': self.request.get('quote'),
u'skipLeadingRows': self.request.get('skip_leading_rows'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'allowJaggedRows': self.request.get(u'allowJaggedRows'),
u'allowQuotedNewlines': self.request.get(u'allowQuotedNewlines'),
u'encoding': self.request.get(u'encoding'),
u'fieldDelimiter': self.request.get(u'fieldDelimiter'),
u'quote': self.request.get(u'quote'),
u'skipLeadingRows': self.request.get(u'skipLeadingRows'),
}
)
class TableBigtableoptions(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'ignoreUnspecifiedColumnFamilies': self.request.get('ignore_unspecified_column_families'),
u'readRowkeyAsString': self.request.get('read_rowkey_as_string'),
u'columnFamilies': TableColumnfamiliesArray(self.request.get('column_families', []), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'ignoreUnspecifiedColumnFamilies': self.request.get(u'ignoreUnspecifiedColumnFamilies'),
u'readRowkeyAsString': self.request.get(u'readRowkeyAsString'),
u'columnFamilies': TableColumnfamiliesArray(self.request.get(u'columnFamilies', []), self.module).from_response(),
}
)
class TableColumnfamiliesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'columns': TableColumnsArray(item.get('columns', []), self.module).to_request(),
u'encoding': item.get('encoding'),
u'familyId': item.get('family_id'),
u'onlyReadLatest': item.get('only_read_latest'),
u'type': item.get('type'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'columns': TableColumnsArray(item.get(u'columns', []), self.module).from_response(),
u'encoding': item.get(u'encoding'),
u'familyId': item.get(u'familyId'),
u'onlyReadLatest': item.get(u'onlyReadLatest'),
u'type': item.get(u'type'),
}
)
class TableColumnsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'encoding': item.get('encoding'),
u'fieldName': item.get('field_name'),
u'onlyReadLatest': item.get('only_read_latest'),
u'qualifierString': item.get('qualifier_string'),
u'type': item.get('type'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'encoding': item.get(u'encoding'),
u'fieldName': item.get(u'fieldName'),
u'onlyReadLatest': item.get(u'onlyReadLatest'),
u'qualifierString': item.get(u'qualifierString'),
u'type': item.get(u'type'),
}
)
if __name__ == '__main__':
main()
| 34.438156
| 146
| 0.570315
|
4a1703881f79bdf0a617f3bdf28b0bb5f7f6b253
| 6,426
|
py
|
Python
|
linear_algebra_by_j_borwnlee/ch_10/scribble_of_different_types_of_matrices.py
|
pavelexpertov/scribbles
|
50ebcd6a686fd32be20d401563db7cc87781a428
|
[
"MIT"
] | null | null | null |
linear_algebra_by_j_borwnlee/ch_10/scribble_of_different_types_of_matrices.py
|
pavelexpertov/scribbles
|
50ebcd6a686fd32be20d401563db7cc87781a428
|
[
"MIT"
] | null | null | null |
linear_algebra_by_j_borwnlee/ch_10/scribble_of_different_types_of_matrices.py
|
pavelexpertov/scribbles
|
50ebcd6a686fd32be20d401563db7cc87781a428
|
[
"MIT"
] | null | null | null |
'''Purpose of the module is to describe different matrix types in code'''
import numpy as np
# Setting default random generator
rng = np.random.default_rng(12345)
def get_square_matrix(n, random=False, integer=1):
'''Return a square matrix of integer scalars, either random or filled with the given integer'''
if random:
return rng.integers(50, size=(n, n))
else:
return np.ones((n, n), dtype=int) * integer
def get_symmetrical_matrix(n):
'''Return a matrix that's symmetrical'''
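# For example, n = 3 builds [[1, 2, 3], [2, 1, 2], [3, 2, 1]]: a diagonal of 1's
# with values growing by one on each step away from the diagonal.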
# Creating permutations of rows
current_integer_list = [i for i in range(1, n+1)]
permutations = []
while(current_integer_list[-1] != 1):
permutations.append(current_integer_list[:])
current_integer_list.pop()
current_integer_list.insert(0, current_integer_list[0] + 1)
# To add the last row
permutations.append(current_integer_list[:])
return np.array(permutations)
def get_triangular_matrix(matrix, triangular_type):
'''Return a triangular matrix'''
if triangular_type == "triup":
return np.triu(matrix)
elif triangular_type == "trile":
return np.tril(matrix)
else:
raise ValueError("'tringular_type' is either wrong or empty.")
def get_diagonal_matrix(diagonal_vector, extra_row=0, fill_value=0):
'''Return a diagonal matrix'''
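# For example, get_diagonal_matrix([1, 2, 3], extra_row=1) builds
# [[1, 0, 0], [0, 2, 0], [0, 0, 3], [0, 0, 0]].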
rows = []
for index, value in enumerate(diagonal_vector):
row = [0]*len(diagonal_vector)
row[index] = value
rows.append(row)
counter = 0
while(counter < extra_row):
rows.append([fill_value]*len(diagonal_vector))
counter += 1
return np.array(rows)
def format_message_str(string):
'''Return a re-formatted string for output messages.
It expects a triple-quoted message indented with 4 spaces: the text is split on
those 4-space runs and each piece is stripped of surrounding whitespace.
'''
string = string.strip()
return "\n".join([line.strip() for line in string.split(" "*4)])
if __name__ == "__main__":
# Square matrix
m = """
Square matrices are a type of matrix where the numbers of rows and columns are equal
(since it's a square, duh). These matrices can be used in matrix arithmetic
with ease because their sizes match."""
print(format_message_str(m))
print('4 x 4 square matrix')
print(get_square_matrix(4, random=True))
print("Let's do some square matrix arithmetic")
A = get_square_matrix(5, integer=2)
B = get_square_matrix(5, integer=5)
print('A:\n', A)
print('B:\n', B)
print("Let's do some multiplication and division in the form of A * B and A / B")
print("Multiplication:\n", A * B)
print("Division:\n", A / B)
print("Keep in mind that it multiplies across elements, not dot products")
# Symmetrical matrix
m = """
Symmetrical matrix is a square matrix whose values are mirrored along the diagonal.
In this example the diagonal consists of '1' scalars and each scalar gets incremented
moving left and right away from the diagonal.
For example, a 16 x 16 matrix would look like this:
"""
print(format_message_str(m))
print("\n", get_symmetrical_matrix(16))
print("It has to be a square such that the values can be mirrored")
# Triangular matrix
m = """
Triangular matrix is a square matrix where the values on one side of the diagonal
are kept and the other side is all zeros.
A triangular matrix is characterised as either 'triangular up' (scalar values on and
above the diagonal) or 'triangular down' (scalar values on and below the diagonal).
For example, a 5x5 triangular matrix would look like this:
"""
print(format_message_str(m))
m = get_square_matrix(5, random=True)
print(get_triangular_matrix(m, "trile"))
print("or this:")
print(get_triangular_matrix(m, "triup"))
m = """
Functions that can perform the formatting of these matrices are `numpy.tril`
and `numpy.triu`.
"""
print(format_message_str(m))
# Diagonal matrix
m = """
Diagonal matrix is a type of matrix that has values along a diagonal line.
This diagonal line is usually called the 'diagonal vector'. The scalar values
are lined up diagonally and the rest of the entries tend to be 0's. For example:
"""
print(format_message_str(m))
print(get_diagonal_matrix([3, 5, 68, 79, 666]))
m = """
Keep in mind that the matrix doesn't have to be square: the diagonal vector only needs
to reach the last column, so extra all-zero rows can be appended below it. For example:
"""
print(format_message_str(m))
print(get_diagonal_matrix([3, 5, 68, 79, 666], 1))
m = """
There is a helper function called `numpy.diag`:
- if it takes a matrix, it will return a diagonal vector.
- if it takes a vector, it will return a matrix containing the provided diagonal vector.
For example:
"""
print(format_message_str(m))
m = get_diagonal_matrix([v**2 for v in range(1, 4)])
print(f"Diagonal matrix as an example.\n{m}")
diagonal_vector = np.diag(m)
print(f"Using numpy.diag function will produce a vector of {diagonal_vector}")
m = np.diag(diagonal_vector)
print(f"And providing the vector to the same function produces a matrix like this:\n{m}")
# Identity matrix
m = """
Identity matrix is a type of square matrix where the diagonal consists of 1's and the rest of the entries are 0's.
Its special property is that "a vector doesn't change when multiplied by it".
To produce an identity matrix, the following functions can be used:
1. numpy.matlib.identity
2. numpy.identity
For example, the following matrix can be produced using numpy.identity:
"""
print(format_message_str(m))
m = np.identity(3)
print(m)
print("Then produce a different normal square matrix, like this:")
sq_m = get_square_matrix(3, True)
print(f"{sq_m}")
print(f"If you multiply the identity matrix with the square one, you get:\n{m * sq_m}")
print("As for seeing that a vector is not affected by multiplication with the identitiy vector")
vector = np.array([2, 3, 4, 5, 6])
print(f"You have vector of {vector}")
i = np.identity(5)
print(f"And then an identity matrix like below:\n{i}")
print(f"So if we do 'vector * i', then it's:\n {vector * i}")
print(f"So if we do 'vector @ i', then it's: {vector @ i}")
# Orthogonal matrix
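# (Sketch, not part of the original scribble.) The lines below illustrate what an
# orthogonal-matrix demo could look like, assuming the same message/print style as
# the sections above: Q is orthogonal when its rows/columns are orthonormal, so
# Q.T @ Q equals the identity matrix and Q's inverse is simply its transpose.
m = """
Orthogonal matrix is a square matrix whose rows (and columns) are orthonormal vectors.
Multiplying it by its transpose gives the identity matrix, so its inverse is simply
its transpose. A 2-D rotation matrix is a classic example:
"""
print(format_message_str(m))
theta = np.pi / 4
q = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta), np.cos(theta)]])
print(f"Rotation matrix Q:\n{q}")
print(f"Q.T @ Q (identity, up to floating point rounding):\n{q.T @ q}")
print(f"inverse(Q) equals Q.T: {np.allclose(np.linalg.inv(q), q.T)}")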
| 39.423313
| 119
| 0.676159
|
4a1703ab5508038456369928f932eb068a94c584
| 11,745
|
py
|
Python
|
config/settings/base.py
|
devGW/cafeteria_back
|
fda99fbc838ee809b1ff28445d66cbd48e2b5016
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
devGW/cafeteria_back
|
fda99fbc838ee809b1ff28445d66cbd48e2b5016
|
[
"MIT"
] | 1
|
2019-11-13T09:06:20.000Z
|
2019-11-13T09:06:20.000Z
|
config/settings/base.py
|
devGW/PostApp
|
fda99fbc838ee809b1ff28445d66cbd48e2b5016
|
[
"MIT"
] | null | null | null |
"""
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = (
environ.Path(__file__) - 3
) # (cafeteria/config/settings/base.py - 3 = cafeteria/)
APPS_DIR = ROOT_DIR.path("cafeteria")
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path(".env")))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "Asia/Seoul"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [ROOT_DIR.path("locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
"default": env.db("DATABASE_URL", default="postgres:///instar")
}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admin",
]
THIRD_PARTY_APPS = [
"allauth",
"allauth.account",
"allauth.socialaccount",
"rest_framework",
"rest_framework.authtoken",
"allauth.socialaccount.providers.kakao",
"rest_auth",
'rest_auth.registration',
'corsheaders',  # to connect with the React front end
'storages',
]
LOCAL_APPS = [
"cafeteria.users.apps.UsersConfig",
# Your stuff: custom apps go here
"cafeteria.images.apps.ImagesConfig",
"cafeteria.notifications.apps.NotificationsConfig",
"cafeteria.crawler.apps.CrawlerConfig"
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "cafeteria.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
# },
# {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
# {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
# {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
# ]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
'corsheaders.middleware.CorsMiddleware',
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR("staticfiles"))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path("static")),
str("/Users/user/Documents/git_repo/cafeteria_front/build/static/"),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR("media"))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR.path("templates"))],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
"debug": DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
},
}
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR.path("fixtures")),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Daniel Roy Greenfeld""", "doscm164@naver.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "none"
ACCOUNT_ADAPTER = "cafeteria.users.adapters.CustomUserAccountAdapter"
SOCIALACCOUNT_ADAPTER = "cafeteria.users.adapters.SocialAccountAdapter"
# Your stuff...
# ------------------------------------------------------------------------------
REST_AUTH_REGISTER_SERIALIZERS = {
'REGISTER_SERIALIZER': 'cafeteria.users.serializers.RegisterSerializer'
}
REST_AUTH_SERIALIZERS = {
'USER_DETAILS_SERIALIZER': 'cafeteria.users.serializers.UserProfileSerializer'
}
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
}
JWT_AUTH = {
'JWT_VERIFY_EXPIRATION': False,
}
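# Maximum request body size read into memory: 5 MiB (5 * 1024 * 1024 bytes).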
DATA_UPLOAD_MAX_MEMORY_SIZE = 5242880
REST_USE_JWT = True  # use JWT
ACCOUNT_LOGOUT_ON_GET = True  # allow logout via rest-auth GET
CORS_ORIGIN_ALLOW_ALL = True  # allow all hosts
AWS_ACCESS_KEY_ID = 'AKIA2RVDEPHIUSOMBT4J'
AWS_SECRET_ACCESS_KEY = '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'
AWS_REGION = 'ap-northeast-2'
AWS_STORAGE_BUCKET_NAME = 'cafeteria-photo'
AWS_S3_CUSTOM_DOMAIN = '%s.s3.%s.amazonaws.com' % (AWS_STORAGE_BUCKET_NAME, AWS_REGION)
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
DEFAULT_FILE_STORAGE = 'config.asset_storage.MediaStorage'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
SECURE_SSL_REDIRECT = False
| 37.765273
| 93
| 0.635419
|
4a1703adc34a324c0cfa35d535bb5c2a4612500c
| 116,599
|
py
|
Python
|
python/ccxt/phemex.py
|
ysdede/ccxt
|
c64cfdb0c364f4b965ef588bf67d1bdedad410a5
|
[
"MIT"
] | 1
|
2021-01-12T07:03:55.000Z
|
2021-01-12T07:03:55.000Z
|
python/ccxt/phemex.py
|
ysdede/ccxt
|
c64cfdb0c364f4b965ef588bf67d1bdedad410a5
|
[
"MIT"
] | 3
|
2022-01-27T15:38:05.000Z
|
2022-03-31T23:04:15.000Z
|
python/ccxt/phemex.py
|
RonSherfey/ccxt
|
c64cfdb0c364f4b965ef588bf67d1bdedad410a5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import DuplicateOrderId
from ccxt.base.errors import DDoSProtection
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class phemex(Exchange):
def describe(self):
return self.deep_extend(super(phemex, self).describe(), {
'id': 'phemex',
'name': 'Phemex',
'countries': ['CN'], # China
'rateLimit': 100,
'version': 'v1',
'certified': False,
'pro': True,
'hostname': 'api.phemex.com',
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRates': False,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchIndexOHLCV': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTrades': True,
'fetchWithdrawals': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/85225056-221eb600-b3d7-11ea-930d-564d2690e3f6.jpg',
'test': {
'v1': 'https://testnet-api.phemex.com/v1',
'public': 'https://testnet-api.phemex.com/exchange/public',
'private': 'https://testnet-api.phemex.com',
},
'api': {
'v1': 'https://{hostname}/v1',
'public': 'https://{hostname}/exchange/public',
'private': 'https://{hostname}',
},
'www': 'https://phemex.com',
'doc': 'https://github.com/phemex/phemex-api-docs',
'fees': 'https://phemex.com/fees-conditions',
'referral': {
'url': 'https://phemex.com/register?referralCode=EDNVJ',
'discount': 0.1,
},
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'3h': '10800',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
'1M': '2592000',
},
'api': {
'public': {
'get': [
'cfg/v2/products', # spot + contracts
'products', # contracts only
'nomics/trades', # ?market=<symbol>&since=<since>
'md/kline', # ?from=1589811875&resolution=1800&symbol=sBTCUSDT&to=1592457935
],
},
'v1': {
'get': [
'md/orderbook', # ?symbol=<symbol>&id=<id>
'md/trade', # ?symbol=<symbol>&id=<id>
'md/ticker/24hr', # ?symbol=<symbol>&id=<id>
'md/ticker/24hr/all', # ?id=<id>
'md/spot/ticker/24hr', # ?symbol=<symbol>&id=<id>
'md/spot/ticker/24hr/all', # ?symbol=<symbol>&id=<id>
'exchange/public/products', # contracts only
],
},
'private': {
'get': [
# spot
'spot/orders/active', # ?symbol=<symbol>&orderID=<orderID>
# 'spot/orders/active', # ?symbol=<symbol>&clOrDID=<clOrdID>
'spot/orders', # ?symbol=<symbol>
'spot/wallets', # ?currency=<currency>
'exchange/spot/order', # ?symbol=<symbol>&ordStatus=<ordStatus1,orderStatus2>ordType=<ordType1,orderType2>&start=<start>&end=<end>&limit=<limit>&offset=<offset>
'exchange/spot/order/trades', # ?symbol=<symbol>&start=<start>&end=<end>&limit=<limit>&offset=<offset>
# swap
'accounts/accountPositions', # ?currency=<currency>
'accounts/positions', # ?currency=<currency>
'orders/activeList', # ?symbol=<symbol>
'exchange/order/list', # ?symbol=<symbol>&start=<start>&end=<end>&offset=<offset>&limit=<limit>&ordStatus=<ordStatus>&withCount=<withCount>
'exchange/order', # ?symbol=<symbol>&orderID=<orderID1,orderID2>
# 'exchange/order', # ?symbol=<symbol>&clOrdID=<clOrdID1,clOrdID2>
'exchange/order/trade', # ?symbol=<symbol>&start=<start>&end=<end>&limit=<limit>&offset=<offset>&withCount=<withCount>
'phemex-user/users/children', # ?offset=<offset>&limit=<limit>&withCount=<withCount>
'phemex-user/wallets/v2/depositAddress',  # ?_t=1592722635531&currency=USDT
'exchange/margins/transfer', # ?start=<start>&end=<end>&offset=<offset>&limit=<limit>&withCount=<withCount>
'exchange/wallets/confirm/withdraw', # ?code=<withdrawConfirmCode>
'exchange/wallets/withdrawList', # ?currency=<currency>&limit=<limit>&offset=<offset>&withCount=<withCount>
'exchange/wallets/depositList', # ?currency=<currency>&offset=<offset>&limit=<limit>
'exchange/wallets/v2/depositAddress', # ?currency=<currency>
],
'post': [
# spot
'spot/orders',
# swap
'orders',
'positions/assign', # ?symbol=<symbol>&posBalance=<posBalance>&posBalanceEv=<posBalanceEv>
'exchange/wallets/transferOut',
'exchange/wallets/transferIn',
'exchange/margins',
'exchange/wallets/createWithdraw', # ?otpCode=<otpCode>
'exchange/wallets/cancelWithdraw',
'exchange/wallets/createWithdrawAddress', # ?otpCode={optCode}
],
'put': [
# spot
'spot/orders', # ?symbol=<symbol>&orderID=<orderID>&origClOrdID=<origClOrdID>&clOrdID=<clOrdID>&priceEp=<priceEp>&baseQtyEV=<baseQtyEV>"eQtyEv=<quoteQtyEv>&stopPxEp=<stopPxEp>
# swap
'orders/replace', # ?symbol=<symbol>&orderID=<orderID>&origClOrdID=<origClOrdID>&clOrdID=<clOrdID>&price=<price>&priceEp=<priceEp>&orderQty=<orderQty>&stopPx=<stopPx>&stopPxEp=<stopPxEp>&takeProfit=<takeProfit>&takeProfitEp=<takeProfitEp>&stopLoss=<stopLoss>&stopLossEp=<stopLossEp>&pegOffsetValueEp=<pegOffsetValueEp>&pegPriceType=<pegPriceType>
'positions/leverage', # ?symbol=<symbol>&leverage=<leverage>&leverageEr=<leverageEr>
'positions/riskLimit', # ?symbol=<symbol>&riskLimit=<riskLimit>&riskLimitEv=<riskLimitEv>
],
'delete': [
# spot
'spot/orders', # ?symbol=<symbol>&orderID=<orderID>
'spot/orders/all', # ?symbol=<symbol>&untriggered=<untriggered>
# 'spot/orders', # ?symbol=<symbol>&clOrdID=<clOrdID>
# swap
'orders/cancel', # ?symbol=<symbol>&orderID=<orderID>
'orders', # ?symbol=<symbol>&orderID=<orderID1>,<orderID2>,<orderID3>
'orders/all', # ?symbol=<symbol>&untriggered=<untriggered>&text=<text>
],
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': self.parse_number('0.001'),
'maker': self.parse_number('0.001'),
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
# not documented
'412': BadRequest, # {"code":412,"msg":"Missing parameter - resolution","data":null}
'6001': BadRequest, # {"error":{"code":6001,"message":"invalid argument"},"id":null,"result":null}
# documented
'19999': BadRequest, # REQUEST_IS_DUPLICATED Duplicated request ID
'10001': DuplicateOrderId, # OM_DUPLICATE_ORDERID Duplicated order ID
'10002': OrderNotFound, # OM_ORDER_NOT_FOUND Cannot find order ID
'10003': CancelPending, # OM_ORDER_PENDING_CANCEL Cannot cancel while order is already in pending cancel status
'10004': CancelPending, # OM_ORDER_PENDING_REPLACE Cannot cancel while order is already in pending cancel status
'10005': CancelPending, # OM_ORDER_PENDING Cannot cancel while order is already in pending cancel status
'11001': InsufficientFunds, # TE_NO_ENOUGH_AVAILABLE_BALANCE Insufficient available balance
'11002': InvalidOrder, # TE_INVALID_RISK_LIMIT Invalid risk limit value
'11003': InsufficientFunds, # TE_NO_ENOUGH_BALANCE_FOR_NEW_RISK_LIMIT Insufficient available balance
'11004': InvalidOrder, # TE_INVALID_LEVERAGE invalid input or new leverage is over maximum allowed leverage
'11005': InsufficientFunds, # TE_NO_ENOUGH_BALANCE_FOR_NEW_LEVERAGE Insufficient available balance
'11006': ExchangeError, # TE_CANNOT_CHANGE_POSITION_MARGIN_WITHOUT_POSITION Position size is zero. Cannot change margin
'11007': ExchangeError, # TE_CANNOT_CHANGE_POSITION_MARGIN_FOR_CROSS_MARGIN Cannot change margin under CrossMargin
'11008': ExchangeError, # TE_CANNOT_REMOVE_POSITION_MARGIN_MORE_THAN_ADDED exceeds the maximum removable Margin
'11009': ExchangeError, # TE_CANNOT_REMOVE_POSITION_MARGIN_DUE_TO_UNREALIZED_PNL exceeds the maximum removable Margin
'11010': InsufficientFunds, # TE_CANNOT_ADD_POSITION_MARGIN_DUE_TO_NO_ENOUGH_AVAILABLE_BALANCE Insufficient available balance
'11011': InvalidOrder, # TE_REDUCE_ONLY_ABORT Cannot accept reduce only order
'11012': InvalidOrder, # TE_REPLACE_TO_INVALID_QTY Order quantity Error
'11013': InvalidOrder, # TE_CONDITIONAL_NO_POSITION Position size is zero. Cannot determine conditional order's quantity
'11014': InvalidOrder, # TE_CONDITIONAL_CLOSE_POSITION_WRONG_SIDE Close position conditional order has the same side
'11015': InvalidOrder, # TE_CONDITIONAL_TRIGGERED_OR_CANCELED
'11016': BadRequest, # TE_ADL_NOT_TRADING_REQUESTED_ACCOUNT Request is routed to the wrong trading engine
'11017': ExchangeError, # TE_ADL_CANNOT_FIND_POSITION Cannot find requested position on current account
'11018': ExchangeError, # TE_NO_NEED_TO_SETTLE_FUNDING The current account does not need to pay a funding fee
'11019': ExchangeError, # TE_FUNDING_ALREADY_SETTLED The current account already pays the funding fee
'11020': ExchangeError, # TE_CANNOT_TRANSFER_OUT_DUE_TO_BONUS Withdraw to wallet needs to remove all remaining bonus. However if bonus is used by position or order cost, withdraw fails.
'11021': ExchangeError, # TE_INVALID_BONOUS_AMOUNT # Grpc command cannot be negative number Invalid bonus amount
'11022': AccountSuspended, # TE_REJECT_DUE_TO_BANNED Account is banned
'11023': ExchangeError, # TE_REJECT_DUE_TO_IN_PROCESS_OF_LIQ Account is in the process of liquidation
'11024': ExchangeError, # TE_REJECT_DUE_TO_IN_PROCESS_OF_ADL Account is in the process of auto-deleverage
'11025': BadRequest, # TE_ROUTE_ERROR Request is routed to the wrong trading engine
'11026': ExchangeError, # TE_UID_ACCOUNT_MISMATCH
'11027': BadSymbol, # TE_SYMBOL_INVALID Invalid number ID or name
'11028': BadSymbol, # TE_CURRENCY_INVALID Invalid currency ID or name
'11029': ExchangeError, # TE_ACTION_INVALID Unrecognized request type
'11030': ExchangeError, # TE_ACTION_BY_INVALID
'11031': DDoSProtection, # TE_SO_NUM_EXCEEDS Number of total conditional orders exceeds the max limit
'11032': DDoSProtection, # TE_AO_NUM_EXCEEDS Number of total active orders exceeds the max limit
'11033': DuplicateOrderId, # TE_ORDER_ID_DUPLICATE Duplicated order ID
'11034': InvalidOrder, # TE_SIDE_INVALID Invalid side
'11035': InvalidOrder, # TE_ORD_TYPE_INVALID Invalid OrderType
'11036': InvalidOrder, # TE_TIME_IN_FORCE_INVALID Invalid TimeInForce
'11037': InvalidOrder, # TE_EXEC_INST_INVALID Invalid ExecType
'11038': InvalidOrder, # TE_TRIGGER_INVALID Invalid trigger type
'11039': InvalidOrder, # TE_STOP_DIRECTION_INVALID Invalid stop direction type
'11040': InvalidOrder, # TE_NO_MARK_PRICE Cannot get valid mark price to create conditional order
'11041': InvalidOrder, # TE_NO_INDEX_PRICE Cannot get valid index price to create conditional order
'11042': InvalidOrder, # TE_NO_LAST_PRICE Cannot get valid last market price to create conditional order
'11043': InvalidOrder, # TE_RISING_TRIGGER_DIRECTLY Conditional order would be triggered immediately
'11044': InvalidOrder, # TE_FALLING_TRIGGER_DIRECTLY Conditional order would be triggered immediately
'11045': InvalidOrder, # TE_TRIGGER_PRICE_TOO_LARGE Conditional order trigger price is too high
'11046': InvalidOrder, # TE_TRIGGER_PRICE_TOO_SMALL Conditional order trigger price is too low
'11047': InvalidOrder,  # TE_BUY_TP_SHOULD_GT_BASE TakeProfit BUY conditional order trigger price needs to be greater than reference price
'11048': InvalidOrder, # TE_BUY_SL_SHOULD_LT_BASE StopLoss BUY condition order price needs to be less than the reference price
'11049': InvalidOrder, # TE_BUY_SL_SHOULD_GT_LIQ StopLoss BUY condition order price needs to be greater than liquidation price or it will not trigger
'11050': InvalidOrder,  # TE_SELL_TP_SHOULD_LT_BASE TakeProfit SELL conditional order trigger price needs to be less than reference price
'11051': InvalidOrder, # TE_SELL_SL_SHOULD_LT_LIQ StopLoss SELL condition order price needs to be less than liquidation price or it will not trigger
'11052': InvalidOrder, # TE_SELL_SL_SHOULD_GT_BASE StopLoss SELL condition order price needs to be greater than the reference price
'11053': InvalidOrder, # TE_PRICE_TOO_LARGE
'11054': InvalidOrder, # TE_PRICE_WORSE_THAN_BANKRUPT Order price cannot be more aggressive than bankrupt price if self order has instruction to close a position
'11055': InvalidOrder, # TE_PRICE_TOO_SMALL Order price is too low
'11056': InvalidOrder, # TE_QTY_TOO_LARGE Order quantity is too large
'11057': InvalidOrder, # TE_QTY_NOT_MATCH_REDUCE_ONLY Does not allow ReduceOnly order without position
'11058': InvalidOrder, # TE_QTY_TOO_SMALL Order quantity is too small
'11059': InvalidOrder, # TE_TP_SL_QTY_NOT_MATCH_POS Position size is zero. Cannot accept any TakeProfit or StopLoss order
'11060': InvalidOrder, # TE_SIDE_NOT_CLOSE_POS TakeProfit or StopLoss order has wrong side. Cannot close position
'11061': CancelPending, # TE_ORD_ALREADY_PENDING_CANCEL Repeated cancel request
'11062': InvalidOrder, # TE_ORD_ALREADY_CANCELED Order is already canceled
'11063': InvalidOrder, # TE_ORD_STATUS_CANNOT_CANCEL Order is not able to be canceled under current status
'11064': InvalidOrder, # TE_ORD_ALREADY_PENDING_REPLACE Replace request is rejected because order is already in pending replace status
'11065': InvalidOrder, # TE_ORD_REPLACE_NOT_MODIFIED Replace request does not modify any parameters of the order
'11066': InvalidOrder, # TE_ORD_STATUS_CANNOT_REPLACE Order is not able to be replaced under current status
'11067': InvalidOrder, # TE_CANNOT_REPLACE_PRICE Market conditional order cannot change price
'11068': InvalidOrder,  # TE_CANNOT_REPLACE_QTY Conditional order for closing position cannot change order quantity, since the order quantity is determined by position size already
'11069': ExchangeError, # TE_ACCOUNT_NOT_IN_RANGE The account ID in the request is not valid or is not in the range of the current process
'11070': BadSymbol, # TE_SYMBOL_NOT_IN_RANGE The symbol is invalid
'11071': InvalidOrder, # TE_ORD_STATUS_CANNOT_TRIGGER
'11072': InvalidOrder, # TE_TKFR_NOT_IN_RANGE The fee value is not valid
'11073': InvalidOrder, # TE_MKFR_NOT_IN_RANGE The fee value is not valid
'11074': InvalidOrder, # TE_CANNOT_ATTACH_TP_SL Order request cannot contain TP/SL parameters when the account already has positions
'11075': InvalidOrder, # TE_TP_TOO_LARGE TakeProfit price is too large
'11076': InvalidOrder, # TE_TP_TOO_SMALL TakeProfit price is too small
'11077': InvalidOrder, # TE_TP_TRIGGER_INVALID Invalid trigger type
'11078': InvalidOrder, # TE_SL_TOO_LARGE StopLoss price is too large
'11079': InvalidOrder, # TE_SL_TOO_SMALL StopLoss price is too small
'11080': InvalidOrder, # TE_SL_TRIGGER_INVALID Invalid trigger type
'11081': InvalidOrder, # TE_RISK_LIMIT_EXCEEDS Total potential position breaches current risk limit
'11082': InsufficientFunds, # TE_CANNOT_COVER_ESTIMATE_ORDER_LOSS The remaining balance cannot cover the potential unrealized PnL for self new order
'11083': InvalidOrder, # TE_TAKE_PROFIT_ORDER_DUPLICATED TakeProfit order already exists
'11084': InvalidOrder, # TE_STOP_LOSS_ORDER_DUPLICATED StopLoss order already exists
'11085': DuplicateOrderId, # TE_CL_ORD_ID_DUPLICATE ClOrdId is duplicated
'11086': InvalidOrder, # TE_PEG_PRICE_TYPE_INVALID PegPriceType is invalid
'11087': InvalidOrder, # TE_BUY_TS_SHOULD_LT_BASE The trailing order's StopPrice should be less than the current last price
'11088': InvalidOrder,  # TE_BUY_TS_SHOULD_GT_LIQ The trailing order's StopPrice should be greater than the current liquidation price
'11089': InvalidOrder,  # TE_SELL_TS_SHOULD_LT_LIQ The trailing order's StopPrice should be greater than the current last price
'11090': InvalidOrder,  # TE_SELL_TS_SHOULD_GT_BASE The trailing order's StopPrice should be less than the current liquidation price
'11091': InvalidOrder, # TE_BUY_REVERT_VALUE_SHOULD_LT_ZERO The PegOffset should be less than zero
'11092': InvalidOrder, # TE_SELL_REVERT_VALUE_SHOULD_GT_ZERO The PegOffset should be greater than zero
'11093': InvalidOrder, # TE_BUY_TTP_SHOULD_ACTIVATE_ABOVE_BASE The activation price should be greater than the current last price
'11094': InvalidOrder, # TE_SELL_TTP_SHOULD_ACTIVATE_BELOW_BASE The activation price should be less than the current last price
'11095': InvalidOrder, # TE_TRAILING_ORDER_DUPLICATED A trailing order exists already
'11096': InvalidOrder, # TE_CLOSE_ORDER_CANNOT_ATTACH_TP_SL An order to close position cannot have trailing instruction
'11097': BadRequest, # TE_CANNOT_FIND_WALLET_OF_THIS_CURRENCY This crypto is not supported
'11098': BadRequest, # TE_WALLET_INVALID_ACTION Invalid action on wallet
'11099': ExchangeError, # TE_WALLET_VID_UNMATCHED Wallet operation request has a wrong wallet vid
'11100': InsufficientFunds, # TE_WALLET_INSUFFICIENT_BALANCE Wallet has insufficient balance
'11101': InsufficientFunds, # TE_WALLET_INSUFFICIENT_LOCKED_BALANCE Locked balance in wallet is not enough for unlock/withdraw request
'11102': BadRequest, # TE_WALLET_INVALID_DEPOSIT_AMOUNT Deposit amount must be greater than zero
'11103': BadRequest, # TE_WALLET_INVALID_WITHDRAW_AMOUNT Withdraw amount must be less than zero
'11104': BadRequest, # TE_WALLET_REACHED_MAX_AMOUNT Deposit makes wallet exceed max amount allowed
'11105': InsufficientFunds, # TE_PLACE_ORDER_INSUFFICIENT_BASE_BALANCE Insufficient funds in base wallet
'11106': InsufficientFunds, # TE_PLACE_ORDER_INSUFFICIENT_QUOTE_BALANCE Insufficient funds in quote wallet
'11107': ExchangeError, # TE_CANNOT_CONNECT_TO_REQUEST_SEQ TradingEngine failed to connect with CrossEngine
'11108': InvalidOrder, # TE_CANNOT_REPLACE_OR_CANCEL_MARKET_ORDER Cannot replace/amend market order
'11109': InvalidOrder, # TE_CANNOT_REPLACE_OR_CANCEL_IOC_ORDER Cannot replace/amend ImmediateOrCancel order
'11110': InvalidOrder, # TE_CANNOT_REPLACE_OR_CANCEL_FOK_ORDER Cannot replace/amend FillOrKill order
'11111': InvalidOrder, # TE_MISSING_ORDER_ID OrderId is missing
'11112': InvalidOrder, # TE_QTY_TYPE_INVALID QtyType is invalid
'11113': BadRequest, # TE_USER_ID_INVALID UserId is invalid
'11114': InvalidOrder, # TE_ORDER_VALUE_TOO_LARGE Order value is too large
'11115': InvalidOrder, # TE_ORDER_VALUE_TOO_SMALL Order value is too small
# not documented
'30018': BadRequest, # {"code":30018,"msg":"phemex.data.size.uplimt","data":null}
'39996': PermissionDenied, # {"code": "39996","msg": "Access denied."}
},
'broad': {
'Failed to find api-key': AuthenticationError, # {"msg":"Failed to find api-key 1c5ec63fd-660d-43ea-847a-0d3ba69e106e","code":10500}
'Missing required parameter': BadRequest, # {"msg":"Missing required parameter","code":10500}
'API Signature verification failed': AuthenticationError, # {"msg":"API Signature verification failed.","code":10500}
},
},
'options': {
'x-phemex-request-expiry': 60, # in seconds
'createOrderByQuoteRequiresPrice': True,
'networks': {
'TRC20': 'TRX',
'ERC20': 'ETH',
},
'defaultNetworks': {
'USDT': 'ETH',
},
},
})
def parse_safe_number(self, value=None):
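# Strips thousands separators and a trailing unit before parsing,
# e.g. '5,000,000 USDT' -> 5000000.0 and '1 USD' -> 1.0.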
if value is None:
return value
parts = value.split(',')
value = ''.join(parts)
parts = value.split(' ')
return self.safe_number(parts, 0)
def parse_swap_market(self, market):
#
# {
# "symbol":"BTCUSD",
# "displaySymbol":"BTC / USD",
# "indexSymbol":".BTC",
# "markSymbol":".MBTC",
# "fundingRateSymbol":".BTCFR",
# "fundingRate8hSymbol":".BTCFR8H",
# "contractUnderlyingAssets":"USD",
# "settleCurrency":"BTC",
# "quoteCurrency":"USD",
# "contractSize":"1 USD",
# "lotSize":1,
# "tickSize":0.5,
# "priceScale":4,
# "ratioScale":8,
# "pricePrecision":1,
# "minPriceEp":5000,
# "maxPriceEp":10000000000,
# "maxOrderQty":1000000,
# "type":"Perpetual",
# "status":"Listed",
# "tipOrderQty":1000000,
# "steps":"50",
# "riskLimits":[
# {"limit":100,"initialMargin":"1.0%","initialMarginEr":1000000,"maintenanceMargin":"0.5%","maintenanceMarginEr":500000},
# {"limit":150,"initialMargin":"1.5%","initialMarginEr":1500000,"maintenanceMargin":"1.0%","maintenanceMarginEr":1000000},
# {"limit":200,"initialMargin":"2.0%","initialMarginEr":2000000,"maintenanceMargin":"1.5%","maintenanceMarginEr":1500000},
# ],
# "underlyingSymbol":".BTC",
# "baseCurrency":"BTC",
# "settlementCurrency":"BTC",
# "valueScale":8,
# "defaultLeverage":0,
# "maxLeverage":100,
# "initMarginEr":"1000000",
# "maintMarginEr":"500000",
# "defaultRiskLimitEv":10000000000,
# "deleverage":true,
# "makerFeeRateEr":-250000,
# "takerFeeRateEr":750000,
# "fundingInterval":8,
# "marketUrl":"https://phemex.com/trade/BTCUSD",
# "description":"BTCUSD is a BTC/USD perpetual contract priced on the .BTC Index. Each contract is worth 1 USD of Bitcoin. Funding is paid and received every 8 hours. At UTC time: 00:00, 08:00, 16:00.",
# }
#
id = self.safe_string(market, 'symbol')
baseId = self.safe_string_2(market, 'baseCurrency', 'contractUnderlyingAssets')
quoteId = self.safe_string(market, 'quoteCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
type = self.safe_string_lower(market, 'type')
inverse = False
spot = False
swap = True
settlementCurrencyId = self.safe_string(market, 'settleCurrency')
if settlementCurrencyId != quoteId:
inverse = True
linear = not inverse
symbol = None
if linear:
symbol = base + '/' + quote + ':' + quote
else:
symbol = base + '/' + quote + ':' + base
precision = {
'amount': self.safe_number(market, 'lotSize'),
'price': self.safe_number(market, 'tickSize'),
}
priceScale = self.safe_integer(market, 'priceScale')
ratioScale = self.safe_integer(market, 'ratioScale')
valueScale = self.safe_integer(market, 'valueScale')
minPriceEp = self.safe_string(market, 'minPriceEp')
maxPriceEp = self.safe_string(market, 'maxPriceEp')
makerFeeRateEr = self.safe_string(market, 'makerFeeRateEr')
takerFeeRateEr = self.safe_string(market, 'takerFeeRateEr')
maker = self.parse_number(self.from_en(makerFeeRateEr, ratioScale))
taker = self.parse_number(self.from_en(takerFeeRateEr, ratioScale))
limits = {
'amount': {
'min': precision['amount'],
'max': None,
},
'price': {
'min': self.parse_number(self.from_en(minPriceEp, priceScale)),
'max': self.parse_number(self.from_en(maxPriceEp, priceScale)),
},
'cost': {
'min': None,
'max': self.parse_number(self.safe_string(market, 'maxOrderQty')),
},
}
status = self.safe_string(market, 'status')
active = status == 'Listed'
contractSize = self.safe_string(market, 'contractSize')
return {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': type,
'spot': spot,
'swap': swap,
'linear': linear,
'inverse': inverse,
'active': active,
'taker': taker,
'maker': maker,
'priceScale': priceScale,
'valueScale': valueScale,
'ratioScale': ratioScale,
'precision': precision,
'contractSize': contractSize,
'limits': limits,
}
def parse_spot_market(self, market):
#
# {
# "symbol":"sBTCUSDT",
# "displaySymbol":"BTC / USDT",
# "quoteCurrency":"USDT",
# "pricePrecision":2,
# "type":"Spot",
# "baseCurrency":"BTC",
# "baseTickSize":"0.000001 BTC",
# "baseTickSizeEv":100,
# "quoteTickSize":"0.01 USDT",
# "quoteTickSizeEv":1000000,
# "minOrderValue":"10 USDT",
# "minOrderValueEv":1000000000,
# "maxBaseOrderSize":"1000 BTC",
# "maxBaseOrderSizeEv":100000000000,
# "maxOrderValue":"5,000,000 USDT",
# "maxOrderValueEv":500000000000000,
# "defaultTakerFee":"0.001",
# "defaultTakerFeeEr":100000,
# "defaultMakerFee":"0.001",
# "defaultMakerFeeEr":100000,
# "baseQtyPrecision":6,
# "quoteQtyPrecision":2,
# "status":"Listed",
# "tipOrderQty":20
# }
#
type = self.safe_string_lower(market, 'type')
id = self.safe_string(market, 'symbol')
quoteId = self.safe_string(market, 'quoteCurrency')
baseId = self.safe_string(market, 'baseCurrency')
linear = None
inverse = None
spot = True
swap = False
taker = self.safe_number(market, 'defaultTakerFee')
maker = self.safe_number(market, 'defaultMakerFee')
precision = {
'amount': self.parse_safe_number(self.safe_string(market, 'baseTickSize')),
'price': self.parse_safe_number(self.safe_string(market, 'quoteTickSize')),
}
limits = {
'amount': {
'min': precision['amount'],
'max': self.parse_safe_number(self.safe_string(market, 'maxBaseOrderSize')),
},
'price': {
'min': precision['price'],
'max': None,
},
'cost': {
'min': self.parse_safe_number(self.safe_string(market, 'minOrderValue')),
'max': self.parse_safe_number(self.safe_string(market, 'maxOrderValue')),
},
}
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
status = self.safe_string(market, 'status')
active = status == 'Listed'
return {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': type,
'spot': spot,
'swap': swap,
'linear': linear,
'inverse': inverse,
'active': active,
'taker': taker,
'maker': maker,
'precision': precision,
'priceScale': 8,
'valueScale': 8,
'ratioScale': 8,
'contractSize': None,
'limits': limits,
}
def fetch_markets(self, params={}):
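# Markets are assembled from two endpoints: the v2 product catalog (spot + contracts)
# and the v1 contract list; per-symbol risk limits and v1 details are merged into each
# swap market before parsing.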
v2Products = self.publicGetCfgV2Products(params)
#
# {
# "code":0,
# "msg":"OK",
# "data":{
# "ratioScale":8,
# "currencies":[
# {"currency":"BTC","valueScale":8,"minValueEv":1,"maxValueEv":5000000000000000000,"name":"Bitcoin"},
# {"currency":"USD","valueScale":4,"minValueEv":1,"maxValueEv":500000000000000,"name":"USD"},
# {"currency":"USDT","valueScale":8,"minValueEv":1,"maxValueEv":5000000000000000000,"name":"TetherUS"},
# ],
# "products":[
# {
# "symbol":"BTCUSD",
# "displaySymbol":"BTC / USD",
# "indexSymbol":".BTC",
# "markSymbol":".MBTC",
# "fundingRateSymbol":".BTCFR",
# "fundingRate8hSymbol":".BTCFR8H",
# "contractUnderlyingAssets":"USD",
# "settleCurrency":"BTC",
# "quoteCurrency":"USD",
# "contractSize":1.0,
# "lotSize":1,
# "tickSize":0.5,
# "priceScale":4,
# "ratioScale":8,
# "pricePrecision":1,
# "minPriceEp":5000,
# "maxPriceEp":10000000000,
# "maxOrderQty":1000000,
# "type":"Perpetual"
# },
# {
# "symbol":"sBTCUSDT",
# "displaySymbol":"BTC / USDT",
# "quoteCurrency":"USDT",
# "pricePrecision":2,
# "type":"Spot",
# "baseCurrency":"BTC",
# "baseTickSize":"0.000001 BTC",
# "baseTickSizeEv":100,
# "quoteTickSize":"0.01 USDT",
# "quoteTickSizeEv":1000000,
# "minOrderValue":"10 USDT",
# "minOrderValueEv":1000000000,
# "maxBaseOrderSize":"1000 BTC",
# "maxBaseOrderSizeEv":100000000000,
# "maxOrderValue":"5,000,000 USDT",
# "maxOrderValueEv":500000000000000,
# "defaultTakerFee":"0.001",
# "defaultTakerFeeEr":100000,
# "defaultMakerFee":"0.001",
# "defaultMakerFeeEr":100000,
# "baseQtyPrecision":6,
# "quoteQtyPrecision":2
# },
# ],
# "riskLimits":[
# {
# "symbol":"BTCUSD",
# "steps":"50",
# "riskLimits":[
# {"limit":100,"initialMargin":"1.0%","initialMarginEr":1000000,"maintenanceMargin":"0.5%","maintenanceMarginEr":500000},
# {"limit":150,"initialMargin":"1.5%","initialMarginEr":1500000,"maintenanceMargin":"1.0%","maintenanceMarginEr":1000000},
# {"limit":200,"initialMargin":"2.0%","initialMarginEr":2000000,"maintenanceMargin":"1.5%","maintenanceMarginEr":1500000},
# ]
# },
# ],
# "leverages":[
# {"initialMargin":"1.0%","initialMarginEr":1000000,"options":[1,2,3,5,10,25,50,100]},
# {"initialMargin":"1.5%","initialMarginEr":1500000,"options":[1,2,3,5,10,25,50,66]},
# {"initialMargin":"2.0%","initialMarginEr":2000000,"options":[1,2,3,5,10,25,33,50]},
# ]
# }
# }
#
v1Products = self.v1GetExchangePublicProducts(params)
v1ProductsData = self.safe_value(v1Products, 'data', [])
#
# {
# "code":0,
# "msg":"OK",
# "data":[
# {
# "symbol":"BTCUSD",
# "underlyingSymbol":".BTC",
# "quoteCurrency":"USD",
# "baseCurrency":"BTC",
# "settlementCurrency":"BTC",
# "maxOrderQty":1000000,
# "maxPriceEp":100000000000000,
# "lotSize":1,
# "tickSize":"0.5",
# "contractSize":"1 USD",
# "priceScale":4,
# "ratioScale":8,
# "valueScale":8,
# "defaultLeverage":0,
# "maxLeverage":100,
# "initMarginEr":"1000000",
# "maintMarginEr":"500000",
# "defaultRiskLimitEv":10000000000,
# "deleverage":true,
# "makerFeeRateEr":-250000,
# "takerFeeRateEr":750000,
# "fundingInterval":8,
# "marketUrl":"https://phemex.com/trade/BTCUSD",
# "description":"BTCUSD is a BTC/USD perpetual contract priced on the .BTC Index. Each contract is worth 1 USD of Bitcoin. Funding is paid and received every 8 hours. At UTC time: 00:00, 08:00, 16:00.",
# "type":"Perpetual"
# },
# ]
# }
#
v2ProductsData = self.safe_value(v2Products, 'data', {})
products = self.safe_value(v2ProductsData, 'products', [])
riskLimits = self.safe_value(v2ProductsData, 'riskLimits', [])
riskLimitsById = self.index_by(riskLimits, 'symbol')
v1ProductsById = self.index_by(v1ProductsData, 'symbol')
result = []
for i in range(0, len(products)):
market = products[i]
type = self.safe_string_lower(market, 'type')
if type == 'perpetual':
id = self.safe_string(market, 'symbol')
riskLimitValues = self.safe_value(riskLimitsById, id, {})
market = self.extend(market, riskLimitValues)
v1ProductsValues = self.safe_value(v1ProductsById, id, {})
market = self.extend(market, v1ProductsValues)
market = self.parse_swap_market(market)
else:
market = self.parse_spot_market(market)
result.append(market)
return result
def fetch_currencies(self, params={}):
response = self.publicGetCfgV2Products(params)
#
# {
# "code":0,
# "msg":"OK",
# "data":{
# ...,
# "currencies":[
# {"currency":"BTC","valueScale":8,"minValueEv":1,"maxValueEv":5000000000000000000,"name":"Bitcoin"},
# {"currency":"USD","valueScale":4,"minValueEv":1,"maxValueEv":500000000000000,"name":"USD"},
# {"currency":"USDT","valueScale":8,"minValueEv":1,"maxValueEv":5000000000000000000,"name":"TetherUS"},
# ],
# ...
# }
# }
data = self.safe_value(response, 'data', {})
currencies = self.safe_value(data, 'currencies', [])
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'currency')
name = self.safe_string(currency, 'name')
code = self.safe_currency_code(id)
valueScaleString = self.safe_string(currency, 'valueScale')
valueScale = int(valueScaleString)
minValueEv = self.safe_string(currency, 'minValueEv')
maxValueEv = self.safe_string(currency, 'maxValueEv')
minAmount = None
maxAmount = None
precision = None
if valueScale is not None:
precisionString = self.parse_precision(valueScaleString)
precision = self.parse_number(precisionString)
minAmount = self.parse_number(Precise.string_mul(minValueEv, precisionString))
maxAmount = self.parse_number(Precise.string_mul(maxValueEv, precisionString))
result[code] = {
'id': id,
'info': currency,
'code': code,
'name': name,
'active': None,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': maxAmount,
},
'withdraw': {
'min': None,
'max': None,
},
},
'valueScale': valueScale,
}
return result
def parse_bid_ask(self, bidask, priceKey=0, amountKey=1, market=None):
if market is None:
raise ArgumentsRequired(self.id + ' parseBidAsk() requires a market argument')
amount = self.safe_string(bidask, amountKey)
if market['spot']:
amount = self.from_ev(amount, market)
return [
self.parse_number(self.from_ep(self.safe_string(bidask, priceKey), market)),
self.parse_number(amount),
]
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='bids', asksKey='asks', priceKey=0, amountKey=1, market=None):
result = {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
sides = [bidsKey, asksKey]
for i in range(0, len(sides)):
side = sides[i]
orders = []
bidasks = self.safe_value(orderbook, side)
for k in range(0, len(bidasks)):
orders.append(self.parse_bid_ask(bidasks[k], priceKey, amountKey, market))
result[side] = orders
result[bidsKey] = self.sort_by(result[bidsKey], 0, True)
result[asksKey] = self.sort_by(result[asksKey], 0)
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'id': 123456789, # optional request id
}
response = self.v1GetMdOrderbook(self.extend(request, params))
#
# {
# "error": null,
# "id": 0,
# "result": {
# "book": {
# "asks": [
# [23415000000, 105262000],
# [23416000000, 147914000],
# [23419000000, 160914000],
# ],
# "bids": [
# [23360000000, 32995000],
# [23359000000, 221887000],
# [23356000000, 284599000],
# ],
# },
# "depth": 30,
# "sequence": 1592059928,
# "symbol": "sETHUSDT",
# "timestamp": 1592387340020000955,
# "type": "snapshot"
# }
# }
#
result = self.safe_value(response, 'result', {})
book = self.safe_value(result, 'book', {})
timestamp = self.safe_integer_product(result, 'timestamp', 0.000001)
orderbook = self.parse_order_book(book, symbol, timestamp, 'bids', 'asks', 0, 1, market)
orderbook['nonce'] = self.safe_integer(result, 'sequence')
return orderbook
def to_en(self, n, scale):
stringN = str(n)
precise = Precise(stringN)
precise.decimals = precise.decimals - scale
precise.reduce()
stringValue = str(precise)
return int(float(stringValue))
def to_ev(self, amount, market=None):
if (amount is None) or (market is None):
return amount
return self.to_en(amount, market['valueScale'])
def to_ep(self, price, market=None):
if (price is None) or (market is None):
return price
return self.to_en(price, market['priceScale'])
def from_en(self, en, scale):
if en is None:
return None
precise = Precise(en)
precise.decimals = self.sum(precise.decimals, scale)
precise.reduce()
return str(precise)
def from_ep(self, ep, market=None):
if (ep is None) or (market is None):
return ep
return self.from_en(ep, self.safe_integer(market, 'priceScale'))
def from_ev(self, ev, market=None):
if (ev is None) or (market is None):
return ev
return self.from_en(ev, self.safe_integer(market, 'valueScale'))
def from_er(self, er, market=None):
if (er is None) or (market is None):
return er
return self.from_en(er, self.safe_integer(market, 'ratioScale'))
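    # The Ep/Ev/Er helpers above implement Phemex's scaled-integer convention:
    # prices, values and ratios arrive as integers to be divided by 10^scale,
    # where the scale comes from the market's priceScale/valueScale/ratioScale.
    # Illustrative sketch only (not part of the exchange API):
    #
    #     self.from_en('23415000000', 8)  # -> '234.15'
    #     self.to_en('234.15', 8)         # -> 23415000000
    #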
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1592467200, # timestamp
# 300, # interval
# 23376000000, # last
# 23322000000, # open
# 23381000000, # high
# 23315000000, # low
# 23367000000, # close
# 208671000, # base volume
# 48759063370, # quote volume
# ]
#
baseVolume = None
if (market is not None) and market['spot']:
baseVolume = self.parse_number(self.from_ev(self.safe_string(ohlcv, 7), market))
else:
baseVolume = self.safe_number(ohlcv, 7)
return [
self.safe_timestamp(ohlcv, 0),
self.parse_number(self.from_ep(self.safe_string(ohlcv, 3), market)),
self.parse_number(self.from_ep(self.safe_string(ohlcv, 4), market)),
self.parse_number(self.from_ep(self.safe_string(ohlcv, 5), market)),
self.parse_number(self.from_ep(self.safe_string(ohlcv, 6), market)),
baseVolume,
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
# 'symbol': market['id'],
'resolution': self.timeframes[timeframe],
# 'from': 1588830682, # seconds
# 'to': self.seconds(),
}
duration = self.parse_timeframe(timeframe)
now = self.seconds()
if since is not None:
if limit is None:
limit = 2000 # max 2000
since = int(since / 1000)
request['from'] = since
# time ranges ending in the future are not accepted
# https://github.com/ccxt/ccxt/issues/8050
request['to'] = min(now, self.sum(since, duration * limit))
elif limit is not None:
limit = min(limit, 2000)
request['from'] = now - duration * self.sum(limit, 1)
request['to'] = now
else:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a since argument, or a limit argument, or both')
self.load_markets()
market = self.market(symbol)
request['symbol'] = market['id']
response = self.publicGetMdKline(self.extend(request, params))
#
# {
# "code":0,
# "msg":"OK",
# "data":{
# "total":-1,
# "rows":[
# [1592467200,300,23376000000,23322000000,23381000000,23315000000,23367000000,208671000,48759063370],
# [1592467500,300,23367000000,23314000000,23390000000,23311000000,23331000000,234820000,54848948710],
# [1592467800,300,23331000000,23385000000,23391000000,23326000000,23387000000,152931000,35747882250],
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
rows = self.safe_value(data, 'rows', [])
return self.parse_ohlcvs(rows, market, timeframe, since, limit)
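    # Usage sketch for fetchOHLCV (illustrative only; assumes a configured
    # instance named `exchange`). When `since` is omitted the request window is
    # derived from `limit`: from = now - duration * (limit + 1), to = now.
    #
    #     candles = exchange.fetch_ohlcv('BTC/USD', '5m', limit=100)
    #     # each row: [timestamp(ms), open, high, low, close, baseVolume]
    #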
def parse_ticker(self, ticker, market=None):
#
# spot
#
# {
# "askEp": 943836000000,
# "bidEp": 943601000000,
# "highEp": 955946000000,
# "lastEp": 943803000000,
# "lowEp": 924973000000,
# "openEp": 948693000000,
# "symbol": "sBTCUSDT",
# "timestamp": 1592471203505728630,
# "turnoverEv": 111822826123103,
# "volumeEv": 11880532281
# }
#
# swap
#
# {
# "askEp": 2332500,
# "bidEp": 2331000,
# "fundingRateEr": 10000,
# "highEp": 2380000,
# "indexEp": 2329057,
# "lastEp": 2331500,
# "lowEp": 2274000,
# "markEp": 2329232,
# "openEp": 2337500,
# "openInterest": 1298050,
# "predFundingRateEr": 19921,
# "symbol": "ETHUSD",
# "timestamp": 1592474241582701416,
# "turnoverEv": 47228362330,
# "volume": 4053863
# }
#
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
timestamp = self.safe_integer_product(ticker, 'timestamp', 0.000001)
lastString = self.from_ep(self.safe_string(ticker, 'lastEp'), market)
last = self.parse_number(lastString)
quoteVolume = self.parse_number(self.from_ev(self.safe_string(ticker, 'turnoverEv'), market))
baseVolume = self.safe_number(ticker, 'volume')
if baseVolume is None:
baseVolume = self.parse_number(self.from_ev(self.safe_string(ticker, 'volumeEv'), market))
vwap = None
if (market is not None) and (market['spot']):
vwap = self.vwap(baseVolume, quoteVolume)
change = None
percentage = None
average = None
openString = self.from_ep(self.safe_string(ticker, 'openEp'), market)
open = self.parse_number(openString)
if (openString is not None) and (lastString is not None):
change = self.parse_number(Precise.string_sub(lastString, openString))
average = self.parse_number(Precise.string_div(Precise.string_add(lastString, openString), '2'))
percentage = self.parse_number(Precise.string_mul(Precise.string_sub(Precise.string_div(lastString, openString), '1'), '100'))
result = {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.parse_number(self.from_ep(self.safe_string(ticker, 'highEp'), market)),
'low': self.parse_number(self.from_ep(self.safe_string(ticker, 'lowEp'), market)),
'bid': self.parse_number(self.from_ep(self.safe_string(ticker, 'bidEp'), market)),
'bidVolume': None,
'ask': self.parse_number(self.from_ep(self.safe_string(ticker, 'askEp'), market)),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None, # previous day close
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'id': 123456789, # optional request id
}
method = 'v1GetMdSpotTicker24hr' if market['spot'] else 'v1GetMdTicker24hr'
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "error": null,
# "id": 0,
# "result": {
# "askEp": 943836000000,
# "bidEp": 943601000000,
# "highEp": 955946000000,
# "lastEp": 943803000000,
# "lowEp": 924973000000,
# "openEp": 948693000000,
# "symbol": "sBTCUSDT",
# "timestamp": 1592471203505728630,
# "turnoverEv": 111822826123103,
# "volumeEv": 11880532281
# }
# }
#
# swap
#
# {
# "error": null,
# "id": 0,
# "result": {
# "askEp": 2332500,
# "bidEp": 2331000,
# "fundingRateEr": 10000,
# "highEp": 2380000,
# "indexEp": 2329057,
# "lastEp": 2331500,
# "lowEp": 2274000,
# "markEp": 2329232,
# "openEp": 2337500,
# "openInterest": 1298050,
# "predFundingRateEr": 19921,
# "symbol": "ETHUSD",
# "timestamp": 1592474241582701416,
# "turnoverEv": 47228362330,
# "volume": 4053863
# }
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ticker(result, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'id': 123456789, # optional request id
}
response = self.v1GetMdTrade(self.extend(request, params))
#
# {
# "error": null,
# "id": 0,
# "result": {
# "sequence": 1315644947,
# "symbol": "BTCUSD",
# "trades": [
# [1592541746712239749, 13156448570000, "Buy", 93070000, 40173],
# [1592541740434625085, 13156447110000, "Sell", 93065000, 5000],
# [1592541732958241616, 13156441390000, "Buy", 93070000, 3460],
# ],
# "type": "snapshot"
# }
# }
#
result = self.safe_value(response, 'result', {})
trades = self.safe_value(result, 'trades', [])
return self.parse_trades(trades, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# [
# 1592541746712239749,
# 13156448570000,
# "Buy",
# 93070000,
# 40173
# ]
#
# fetchMyTrades(private)
#
# spot
#
# {
# "qtyType": "ByQuote",
# "transactTimeNs": 1589450974800550100,
# "clOrdID": "8ba59d40-df25-d4b0-14cf-0703f44e9690",
# "orderID": "b2b7018d-f02f-4c59-b4cf-051b9c2d2e83",
# "symbol": "sBTCUSDT",
# "side": "Buy",
# "priceEP": 970056000000,
# "baseQtyEv": 0,
# "quoteQtyEv": 1000000000,
# "action": "New",
# "execStatus": "MakerFill",
# "ordStatus": "Filled",
# "ordType": "Limit",
# "execInst": "None",
# "timeInForce": "GoodTillCancel",
# "stopDirection": "UNSPECIFIED",
# "tradeType": "Trade",
# "stopPxEp": 0,
# "execId": "c6bd8979-07ba-5946-b07e-f8b65135dbb1",
# "execPriceEp": 970056000000,
# "execBaseQtyEv": 103000,
# "execQuoteQtyEv": 999157680,
# "leavesBaseQtyEv": 0,
# "leavesQuoteQtyEv": 0,
# "execFeeEv": 0,
# "feeRateEr": 0
# }
#
# swap
#
# {
# "transactTimeNs": 1578026629824704800,
# "symbol": "BTCUSD",
# "currency": "BTC",
# "action": "Replace",
# "side": "Sell",
# "tradeType": "Trade",
# "execQty": 700,
# "execPriceEp": 71500000,
# "orderQty": 700,
# "priceEp": 71500000,
# "execValueEv": 9790209,
# "feeRateEr": -25000,
# "execFeeEv": -2447,
# "ordType": "Limit",
# "execID": "b01671a1-5ddc-5def-b80a-5311522fd4bf",
# "orderID": "b63bc982-be3a-45e0-8974-43d6375fb626",
# "clOrdID": "uuid-1577463487504",
# "execStatus": "MakerFill"
# }
#
priceString = None
amountString = None
timestamp = None
id = None
side = None
costString = None
type = None
fee = None
marketId = self.safe_string(trade, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
orderId = None
takerOrMaker = None
if isinstance(trade, list):
tradeLength = len(trade)
timestamp = self.safe_integer_product(trade, 0, 0.000001)
if tradeLength > 4:
id = self.safe_string(trade, tradeLength - 4)
side = self.safe_string_lower(trade, tradeLength - 3)
priceString = self.from_ep(self.safe_string(trade, tradeLength - 2), market)
amountString = self.from_ev(self.safe_string(trade, tradeLength - 1), market)
else:
timestamp = self.safe_integer_product(trade, 'transactTimeNs', 0.000001)
id = self.safe_string_2(trade, 'execId', 'execID')
orderId = self.safe_string(trade, 'orderID')
side = self.safe_string_lower(trade, 'side')
type = self.parse_order_type(self.safe_string(trade, 'ordType'))
execStatus = self.safe_string(trade, 'execStatus')
if execStatus == 'MakerFill':
takerOrMaker = 'maker'
priceString = self.from_ep(self.safe_string(trade, 'execPriceEp'), market)
amountString = self.from_ev(self.safe_string(trade, 'execBaseQtyEv'), market)
amountString = self.safe_string(trade, 'execQty', amountString)
costString = self.from_ev(self.safe_string_2(trade, 'execQuoteQtyEv', 'execValueEv'), market)
feeCostString = self.from_ev(self.safe_string(trade, 'execFeeEv'), market)
if feeCostString is not None:
feeRateString = self.from_er(self.safe_string(trade, 'feeRateEr'), market)
feeCurrencyCode = None
if market['spot']:
feeCurrencyCode = market['base'] if (side == 'buy') else market['quote']
else:
info = self.safe_value(market, 'info')
if info is not None:
settlementCurrencyId = self.safe_string(info, 'settlementCurrency')
feeCurrencyCode = self.safe_currency_code(settlementCurrencyId)
fee = {
'cost': self.parse_number(feeCostString),
'rate': self.parse_number(feeRateString),
'currency': feeCurrencyCode,
}
return self.safe_trade({
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
}, market)
def parse_spot_balance(self, response):
#
# {
# "code":0,
# "msg":"",
# "data":[
# {
# "currency":"USDT",
# "balanceEv":0,
# "lockedTradingBalanceEv":0,
# "lockedWithdrawEv":0,
# "lastUpdateTimeNs":1592065834511322514,
# "walletVid":0
# },
# {
# "currency":"ETH",
# "balanceEv":0,
# "lockedTradingBalanceEv":0,
# "lockedWithdrawEv":0,
# "lastUpdateTimeNs":1592065834511322514,
# "walletVid":0
# }
# ]
# }
#
timestamp = None
result = {'info': response}
data = self.safe_value(response, 'data', [])
for i in range(0, len(data)):
balance = data[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
currency = self.safe_value(self.currencies, code, {})
scale = self.safe_integer(currency, 'valueScale', 8)
account = self.account()
balanceEv = self.safe_string(balance, 'balanceEv')
lockedTradingBalanceEv = self.safe_string(balance, 'lockedTradingBalanceEv')
lockedWithdrawEv = self.safe_string(balance, 'lockedWithdrawEv')
total = self.from_en(balanceEv, scale)
lockedTradingBalance = self.from_en(lockedTradingBalanceEv, scale)
lockedWithdraw = self.from_en(lockedWithdrawEv, scale)
used = Precise.string_add(lockedTradingBalance, lockedWithdraw)
lastUpdateTimeNs = self.safe_integer_product(balance, 'lastUpdateTimeNs', 0.000001)
timestamp = lastUpdateTimeNs if (timestamp is None) else max(timestamp, lastUpdateTimeNs)
account['total'] = total
account['used'] = used
result[code] = account
result['timestamp'] = timestamp
result['datetime'] = self.iso8601(timestamp)
return self.safe_balance(result)
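    # Worked example for the spot balance math above (illustrative): with a
    # currency valueScale of 8, balanceEv=1254744 scales to total='0.01254744',
    # and used is lockedTradingBalanceEv + lockedWithdrawEv after the same scaling.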
def parse_swap_balance(self, response):
#
# {
# "code":0,
# "msg":"",
# "data":{
# "account":{
# "accountId":6192120001,
# "currency":"BTC",
# "accountBalanceEv":1254744,
# "totalUsedBalanceEv":0,
# "bonusBalanceEv":1254744
# },
# "positions":[
# {
# "accountID":6192120001,
# "symbol":"BTCUSD",
# "currency":"BTC",
# "side":"None",
# "positionStatus":"Normal",
# "crossMargin":false,
# "leverageEr":0,
# "leverage":0E-8,
# "initMarginReqEr":1000000,
# "initMarginReq":0.01000000,
# "maintMarginReqEr":500000,
# "maintMarginReq":0.00500000,
# "riskLimitEv":10000000000,
# "riskLimit":100.00000000,
# "size":0,
# "value":0E-8,
# "valueEv":0,
# "avgEntryPriceEp":0,
# "avgEntryPrice":0E-8,
# "posCostEv":0,
# "posCost":0E-8,
# "assignedPosBalanceEv":0,
# "assignedPosBalance":0E-8,
# "bankruptCommEv":0,
# "bankruptComm":0E-8,
# "bankruptPriceEp":0,
# "bankruptPrice":0E-8,
# "positionMarginEv":0,
# "positionMargin":0E-8,
# "liquidationPriceEp":0,
# "liquidationPrice":0E-8,
# "deleveragePercentileEr":0,
# "deleveragePercentile":0E-8,
# "buyValueToCostEr":1150750,
# "buyValueToCost":0.01150750,
# "sellValueToCostEr":1149250,
# "sellValueToCost":0.01149250,
# "markPriceEp":96359083,
# "markPrice":9635.90830000,
# "markValueEv":0,
# "markValue":null,
# "unRealisedPosLossEv":0,
# "unRealisedPosLoss":null,
# "estimatedOrdLossEv":0,
# "estimatedOrdLoss":0E-8,
# "usedBalanceEv":0,
# "usedBalance":0E-8,
# "takeProfitEp":0,
# "takeProfit":null,
# "stopLossEp":0,
# "stopLoss":null,
# "realisedPnlEv":0,
# "realisedPnl":null,
# "cumRealisedPnlEv":0,
# "cumRealisedPnl":null
# }
# ]
# }
# }
#
result = {'info': response}
data = self.safe_value(response, 'data', {})
balance = self.safe_value(data, 'account', {})
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
currency = self.currency(code)
account = self.account()
accountBalanceEv = self.safe_string(balance, 'accountBalanceEv')
totalUsedBalanceEv = self.safe_string(balance, 'totalUsedBalanceEv')
valueScale = self.safe_integer(currency, 'valueScale', 8)
account['total'] = self.from_en(accountBalanceEv, valueScale)
account['used'] = self.from_en(totalUsedBalanceEv, valueScale)
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'defaultType', 'fetchBalance', 'spot')
type = self.safe_string(params, 'type', defaultType)
method = 'privateGetSpotWallets'
request = {}
if type == 'swap':
code = self.safe_string(params, 'code')
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
params = self.omit(params, 'code')
else:
currency = self.safe_string(params, 'currency')
if currency is None:
raise ArgumentsRequired(self.id + ' fetchBalance() requires a code parameter or a currency parameter for ' + type + ' type')
method = 'privateGetAccountsAccountPositions'
params = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "code":0,
# "msg":"",
# "data":[
# {
# "currency":"USDT",
# "balanceEv":0,
# "lockedTradingBalanceEv":0,
# "lockedWithdrawEv":0,
# "lastUpdateTimeNs":1592065834511322514,
# "walletVid":0
# },
# {
# "currency":"ETH",
# "balanceEv":0,
# "lockedTradingBalanceEv":0,
# "lockedWithdrawEv":0,
# "lastUpdateTimeNs":1592065834511322514,
# "walletVid":0
# }
# ]
# }
#
# swap
#
# {
# "code":0,
# "msg":"",
# "data":{
# "account":{
# "accountId":6192120001,
# "currency":"BTC",
# "accountBalanceEv":1254744,
# "totalUsedBalanceEv":0,
# "bonusBalanceEv":1254744
# },
# "positions":[
# {
# "accountID":6192120001,
# "symbol":"BTCUSD",
# "currency":"BTC",
# "side":"None",
# "positionStatus":"Normal",
# "crossMargin":false,
# "leverageEr":0,
# "leverage":0E-8,
# "initMarginReqEr":1000000,
# "initMarginReq":0.01000000,
# "maintMarginReqEr":500000,
# "maintMarginReq":0.00500000,
# "riskLimitEv":10000000000,
# "riskLimit":100.00000000,
# "size":0,
# "value":0E-8,
# "valueEv":0,
# "avgEntryPriceEp":0,
# "avgEntryPrice":0E-8,
# "posCostEv":0,
# "posCost":0E-8,
# "assignedPosBalanceEv":0,
# "assignedPosBalance":0E-8,
# "bankruptCommEv":0,
# "bankruptComm":0E-8,
# "bankruptPriceEp":0,
# "bankruptPrice":0E-8,
# "positionMarginEv":0,
# "positionMargin":0E-8,
# "liquidationPriceEp":0,
# "liquidationPrice":0E-8,
# "deleveragePercentileEr":0,
# "deleveragePercentile":0E-8,
# "buyValueToCostEr":1150750,
# "buyValueToCost":0.01150750,
# "sellValueToCostEr":1149250,
# "sellValueToCost":0.01149250,
# "markPriceEp":96359083,
# "markPrice":9635.90830000,
# "markValueEv":0,
# "markValue":null,
# "unRealisedPosLossEv":0,
# "unRealisedPosLoss":null,
# "estimatedOrdLossEv":0,
# "estimatedOrdLoss":0E-8,
# "usedBalanceEv":0,
# "usedBalance":0E-8,
# "takeProfitEp":0,
# "takeProfit":null,
# "stopLossEp":0,
# "stopLoss":null,
# "realisedPnlEv":0,
# "realisedPnl":null,
# "cumRealisedPnlEv":0,
# "cumRealisedPnl":null
# }
# ]
# }
# }
#
result = self.parse_swap_balance(response) if (type == 'swap') else self.parse_spot_balance(response)
return result
def parse_order_status(self, status):
statuses = {
'Created': 'open',
'Untriggered': 'open',
'Deactivated': 'closed',
'Triggered': 'open',
'Rejected': 'rejected',
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'Canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_type(self, type):
types = {
'Limit': 'limit',
'Market': 'market',
}
return self.safe_string(types, type, type)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'GoodTillCancel': 'GTC',
'PostOnly': 'PO',
'ImmediateOrCancel': 'IOC',
'FillOrKill': 'FOK',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
def parse_spot_order(self, order, market=None):
#
# spot
#
# {
# "orderID": "d1d09454-cabc-4a23-89a7-59d43363f16d",
# "clOrdID": "309bcd5c-9f6e-4a68-b775-4494542eb5cb",
# "priceEp": 0,
# "action": "New",
# "trigger": "UNSPECIFIED",
# "pegPriceType": "UNSPECIFIED",
# "stopDirection": "UNSPECIFIED",
# "bizError": 0,
# "symbol": "sBTCUSDT",
# "side": "Buy",
# "baseQtyEv": 0,
# "ordType": "Limit",
# "timeInForce": "GoodTillCancel",
# "ordStatus": "Created",
# "cumFeeEv": 0,
# "cumBaseQtyEv": 0,
# "cumQuoteQtyEv": 0,
# "leavesBaseQtyEv": 0,
# "leavesQuoteQtyEv": 0,
# "avgPriceEp": 0,
# "cumBaseAmountEv": 0,
# "cumQuoteAmountEv": 0,
# "quoteQtyEv": 0,
# "qtyType": "ByBase",
# "stopPxEp": 0,
# "pegOffsetValueEp": 0
# }
#
# {
# "orderID":"99232c3e-3d6a-455f-98cc-2061cdfe91bc",
# "stopPxEp":0,
# "avgPriceEp":0,
# "qtyType":"ByBase",
# "leavesBaseQtyEv":0,
# "leavesQuoteQtyEv":0,
# "baseQtyEv":"1000000000",
# "feeCurrency":"4",
# "stopDirection":"UNSPECIFIED",
# "symbol":"sETHUSDT",
# "side":"Buy",
# "quoteQtyEv":250000000000,
# "priceEp":25000000000,
# "ordType":"Limit",
# "timeInForce":"GoodTillCancel",
# "ordStatus":"Rejected",
# "execStatus":"NewRejected",
# "createTimeNs":1592675305266037130,
# "cumFeeEv":0,
# "cumBaseValueEv":0,
# "cumQuoteValueEv":0
# }
#
id = self.safe_string(order, 'orderID')
clientOrderId = self.safe_string(order, 'clOrdID')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
price = self.from_ep(self.safe_string(order, 'priceEp'), market)
amount = self.from_ev(self.safe_string(order, 'baseQtyEv'), market)
remaining = self.omit_zero(self.from_ev(self.safe_string(order, 'leavesBaseQtyEv'), market))
filled = self.from_ev(self.safe_string_2(order, 'cumBaseQtyEv', 'cumBaseValueEv'), market)
cost = self.from_ev(self.safe_string_2(order, 'cumQuoteValueEv', 'quoteQtyEv'), market)
average = self.from_ep(self.safe_string(order, 'avgPriceEp'), market)
status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
side = self.safe_string_lower(order, 'side')
type = self.parse_order_type(self.safe_string(order, 'ordType'))
timestamp = self.safe_integer_product_2(order, 'actionTimeNs', 'createTimeNs', 0.000001)
fee = None
feeCost = self.from_ev(self.safe_string(order, 'cumFeeEv'), market)
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': None,
}
timeInForce = self.parse_time_in_force(self.safe_string(order, 'timeInForce'))
        stopPrice = self.parse_number(self.omit_zero(self.from_ep(self.safe_string(order, 'stopPxEp'), market)))
postOnly = (timeInForce == 'PO')
return self.safe_order2({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}, market)
def parse_swap_order(self, order, market=None):
#
# {
# "bizError":0,
# "orderID":"7a1ad384-44a3-4e54-a102-de4195a29e32",
# "clOrdID":"",
# "symbol":"ETHUSD",
# "side":"Buy",
# "actionTimeNs":1592668973945065381,
# "transactTimeNs":0,
# "orderType":"Market",
# "priceEp":2267500,
# "price":226.75000000,
# "orderQty":1,
# "displayQty":0,
# "timeInForce":"ImmediateOrCancel",
# "reduceOnly":false,
# "closedPnlEv":0,
# "closedPnl":0E-8,
# "closedSize":0,
# "cumQty":0,
# "cumValueEv":0,
# "cumValue":0E-8,
# "leavesQty":1,
# "leavesValueEv":11337,
# "leavesValue":1.13370000,
# "stopDirection":"UNSPECIFIED",
# "stopPxEp":0,
# "stopPx":0E-8,
# "trigger":"UNSPECIFIED",
# "pegOffsetValueEp":0,
# "execStatus":"PendingNew",
# "pegPriceType":"UNSPECIFIED",
# "ordStatus":"Created"
# }
#
id = self.safe_string(order, 'orderID')
clientOrderId = self.safe_string(order, 'clOrdID')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
side = self.safe_string_lower(order, 'side')
type = self.parse_order_type(self.safe_string(order, 'orderType'))
price = self.parse_number(self.from_ep(self.safe_string(order, 'priceEp'), market))
amount = self.safe_number(order, 'orderQty')
filled = self.safe_number(order, 'cumQty')
remaining = self.safe_number(order, 'leavesQty')
timestamp = self.safe_integer_product(order, 'actionTimeNs', 0.000001)
cost = self.safe_number(order, 'cumValue')
lastTradeTimestamp = self.safe_integer_product(order, 'transactTimeNs', 0.000001)
if lastTradeTimestamp == 0:
lastTradeTimestamp = None
timeInForce = self.parse_time_in_force(self.safe_string(order, 'timeInForce'))
stopPrice = self.safe_number(order, 'stopPx')
postOnly = (timeInForce == 'PO')
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'filled': filled,
'remaining': remaining,
'cost': cost,
'average': None,
'status': status,
'fee': None,
'trades': None,
}
def parse_order(self, order, market=None):
if 'closedPnl' in order:
return self.parse_swap_order(order, market)
return self.parse_spot_order(order, market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
side = self.capitalize(side)
type = self.capitalize(type)
request = {
# common
'symbol': market['id'],
'side': side, # Sell, Buy
'ordType': type, # Market, Limit, Stop, StopLimit, MarketIfTouched, LimitIfTouched or Pegged for swap orders
# 'stopPxEp': self.to_ep(stopPx, market), # for conditional orders
# 'priceEp': self.to_ep(price, market), # required for limit orders
# 'timeInForce': 'GoodTillCancel', # GoodTillCancel, PostOnly, ImmediateOrCancel, FillOrKill
# ----------------------------------------------------------------
# spot
# 'qtyType': 'ByBase', # ByBase, ByQuote
# 'quoteQtyEv': self.to_ep(cost, market),
# 'baseQtyEv': self.to_ev(amount, market),
# 'trigger': 'ByLastPrice', # required for conditional orders
# ----------------------------------------------------------------
# swap
# 'clOrdID': self.uuid(), # max length 40
# 'orderQty': self.amount_to_precision(amount, symbol),
# 'reduceOnly': False,
# 'closeOnTrigger': False, # implicit reduceOnly and cancel other orders in the same direction
# 'takeProfitEp': self.to_ep(takeProfit, market),
# 'stopLossEp': self.to_ep(stopLossEp, market),
# 'triggerType': 'ByMarkPrice', # ByMarkPrice, ByLastPrice
# 'pegOffsetValueEp': integer, # Trailing offset from current price. Negative value when position is long, positive when position is short
# 'pegPriceType': 'TrailingStopPeg', # TrailingTakeProfitPeg
# 'text': 'comment',
}
if market['spot']:
qtyType = self.safe_value(params, 'qtyType', 'ByBase')
if (type == 'Market') or (type == 'Stop') or (type == 'MarketIfTouched'):
if price is not None:
qtyType = 'ByQuote'
request['qtyType'] = qtyType
if qtyType == 'ByQuote':
cost = self.safe_number(params, 'cost')
params = self.omit(params, 'cost')
if self.options['createOrderByQuoteRequiresPrice']:
if price is not None:
cost = amount * price
elif cost is None:
raise ArgumentsRequired(self.id + ' createOrder() ' + qtyType + ' requires a price argument or a cost parameter')
cost = amount if (cost is None) else cost
costString = str(cost)
request['quoteQtyEv'] = self.to_ev(costString, market)
else:
amountString = str(amount)
request['baseQtyEv'] = self.to_ev(amountString, market)
elif market['swap']:
request['orderQty'] = int(amount)
if type == 'Limit':
priceString = str(price)
request['priceEp'] = self.to_ep(priceString, market)
stopPrice = self.safe_string_2(params, 'stopPx', 'stopPrice')
if stopPrice is not None:
request['stopPxEp'] = self.to_ep(stopPrice, market)
params = self.omit(params, ['stopPx', 'stopPrice'])
method = 'privatePostSpotOrders' if market['spot'] else 'privatePostOrders'
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "code": 0,
# "msg": "",
# "data": {
# "orderID": "d1d09454-cabc-4a23-89a7-59d43363f16d",
# "clOrdID": "309bcd5c-9f6e-4a68-b775-4494542eb5cb",
# "priceEp": 0,
# "action": "New",
# "trigger": "UNSPECIFIED",
# "pegPriceType": "UNSPECIFIED",
# "stopDirection": "UNSPECIFIED",
# "bizError": 0,
# "symbol": "sBTCUSDT",
# "side": "Buy",
# "baseQtyEv": 0,
# "ordType": "Limit",
# "timeInForce": "GoodTillCancel",
# "ordStatus": "Created",
# "cumFeeEv": 0,
# "cumBaseQtyEv": 0,
# "cumQuoteQtyEv": 0,
# "leavesBaseQtyEv": 0,
# "leavesQuoteQtyEv": 0,
# "avgPriceEp": 0,
# "cumBaseAmountEv": 0,
# "cumQuoteAmountEv": 0,
# "quoteQtyEv": 0,
# "qtyType": "ByBase",
# "stopPxEp": 0,
# "pegOffsetValueEp": 0
# }
# }
#
# swap
#
# {
# "code":0,
# "msg":"",
# "data":{
# "bizError":0,
# "orderID":"7a1ad384-44a3-4e54-a102-de4195a29e32",
# "clOrdID":"",
# "symbol":"ETHUSD",
# "side":"Buy",
# "actionTimeNs":1592668973945065381,
# "transactTimeNs":0,
# "orderType":"Market",
# "priceEp":2267500,
# "price":226.75000000,
# "orderQty":1,
# "displayQty":0,
# "timeInForce":"ImmediateOrCancel",
# "reduceOnly":false,
# "closedPnlEv":0,
# "closedPnl":0E-8,
# "closedSize":0,
# "cumQty":0,
# "cumValueEv":0,
# "cumValue":0E-8,
# "leavesQty":1,
# "leavesValueEv":11337,
# "leavesValue":1.13370000,
# "stopDirection":"UNSPECIFIED",
# "stopPxEp":0,
# "stopPx":0E-8,
# "trigger":"UNSPECIFIED",
# "pegOffsetValueEp":0,
# "execStatus":"PendingNew",
# "pegPriceType":"UNSPECIFIED",
# "ordStatus":"Created"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_order(data, market)
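    # Usage sketch for createOrder (illustrative only; assumes a configured
    # instance named `exchange` and that these unified symbols map to sBTCUSDT
    # and BTCUSD respectively):
    #
    #     # spot limit buy, amount in base units (qtyType defaults to ByBase)
    #     exchange.create_order('BTC/USDT', 'limit', 'buy', 0.001, 40000)
    #     # swap market buy, amount is an integer number of contracts
    #     exchange.create_order('BTC/USD', 'market', 'buy', 1)
    #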
def edit_order(self, id, symbol, type=None, side=None, amount=None, price=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' editOrder() requires a symbol argument')
if type is not None:
raise ArgumentsRequired(self.id + ' editOrder() type changing is not implemented. Try to cancel & recreate order for that purpose')
if side is not None:
raise ArgumentsRequired(self.id + ' editOrder() side changing is not implemented. Try to cancel & recreate order for that purpose')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'clOrdID')
params = self.omit(params, ['clientOrderId', 'clOrdID'])
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
else:
request['orderID'] = id
if price is not None:
request['priceEp'] = self.to_ep(price, market)
        # Note the uppercase 'V' in the 'baseQtyEV' request field: that is the exchange's requirement at the moment. However, to avoid mistakes on the user side, the lowercased 'baseQtyEv' parameter is supported too
finalQty = self.safe_string(params, 'baseQtyEv')
params = self.omit(params, ['baseQtyEv'])
if finalQty is not None:
request['baseQtyEV'] = finalQty
elif amount is not None:
request['baseQtyEV'] = self.to_ev(amount, market)
stopPrice = self.safe_string_2(params, 'stopPx', 'stopPrice')
if stopPrice is not None:
request['stopPxEp'] = self.to_ep(stopPrice, market)
params = self.omit(params, ['stopPx', 'stopPrice'])
method = 'privatePutSpotOrders' if market['spot'] else 'privatePutOrdersReplace'
response = getattr(self, method)(self.extend(request, params))
data = self.safe_value(response, 'data', {})
return self.parse_order(data, market)
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'clOrdID')
params = self.omit(params, ['clientOrderId', 'clOrdID'])
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
else:
request['orderID'] = id
method = 'privateDeleteSpotOrders' if market['spot'] else 'privateDeleteOrdersCancel'
response = getattr(self, method)(self.extend(request, params))
data = self.safe_value(response, 'data', {})
return self.parse_order(data, market)
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
            raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
self.load_markets()
request = {
# 'symbol': market['id'],
# 'untriggerred': False, # False to cancel non-conditional orders, True to cancel conditional orders
# 'text': 'up to 40 characters max',
}
market = self.market(symbol)
method = 'privateDeleteSpotOrdersAll'
if market['swap']:
method = 'privateDeleteOrdersAll'
request['symbol'] = market['id']
return getattr(self, method)(self.extend(request, params))
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privateGetSpotOrdersActive' if market['spot'] else 'privateGetExchangeOrder'
request = {
'symbol': market['id'],
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'clOrdID')
params = self.omit(params, ['clientOrderId', 'clOrdID'])
if clientOrderId is not None:
request['clOrdID'] = clientOrderId
else:
request['orderID'] = id
response = getattr(self, method)(self.extend(request, params))
data = self.safe_value(response, 'data', {})
order = data
if isinstance(data, list):
numOrders = len(data)
if numOrders < 1:
if clientOrderId is not None:
raise OrderNotFound(self.id + ' fetchOrder ' + symbol + ' order with clientOrderId ' + clientOrderId + ' not found')
else:
raise OrderNotFound(self.id + ' fetchOrder ' + symbol + ' order with id ' + id + ' not found')
order = self.safe_value(data, 0, {})
return self.parse_order(order, market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privateGetSpotOrders' if market['spot'] else 'privateGetExchangeOrderList'
request = {
'symbol': market['id'],
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit
response = getattr(self, method)(self.extend(request, params))
data = self.safe_value(response, 'data', {})
rows = self.safe_value(data, 'rows', [])
return self.parse_orders(rows, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privateGetSpotOrders' if market['spot'] else 'privateGetOrdersActiveList'
request = {
'symbol': market['id'],
}
response = None
try:
response = getattr(self, method)(self.extend(request, params))
        except Exception as e:
            if isinstance(e, OrderNotFound):
                return []
            raise e
data = self.safe_value(response, 'data', {})
if isinstance(data, list):
return self.parse_orders(data, market, since, limit)
else:
rows = self.safe_value(data, 'rows', [])
return self.parse_orders(rows, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchClosedOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privateGetExchangeSpotOrder' if market['spot'] else 'privateGetExchangeOrderList'
request = {
'symbol': market['id'],
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "code":0,
# "msg":"OK",
# "data":{
# "total":8,
# "rows":[
# {
# "orderID":"99232c3e-3d6a-455f-98cc-2061cdfe91bc",
# "stopPxEp":0,
# "avgPriceEp":0,
# "qtyType":"ByBase",
# "leavesBaseQtyEv":0,
# "leavesQuoteQtyEv":0,
# "baseQtyEv":"1000000000",
# "feeCurrency":"4",
# "stopDirection":"UNSPECIFIED",
# "symbol":"sETHUSDT",
# "side":"Buy",
# "quoteQtyEv":250000000000,
# "priceEp":25000000000,
# "ordType":"Limit",
# "timeInForce":"GoodTillCancel",
# "ordStatus":"Rejected",
# "execStatus":"NewRejected",
# "createTimeNs":1592675305266037130,
# "cumFeeEv":0,
# "cumBaseValueEv":0,
# "cumQuoteValueEv":0
# },
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
if isinstance(data, list):
return self.parse_orders(data, market, since, limit)
else:
rows = self.safe_value(data, 'rows', [])
return self.parse_orders(rows, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
method = 'privateGetExchangeSpotOrderTrades' if market['spot'] else 'privateGetExchangeOrderTrade'
request = {
'symbol': market['id'],
}
if since is not None:
request['start'] = since
if market['swap'] and (limit is not None):
request['limit'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "code": 0,
# "msg": "OK",
# "data": {
# "total": 1,
# "rows": [
# {
# "qtyType": "ByQuote",
# "transactTimeNs": 1589450974800550100,
# "clOrdID": "8ba59d40-df25-d4b0-14cf-0703f44e9690",
# "orderID": "b2b7018d-f02f-4c59-b4cf-051b9c2d2e83",
# "symbol": "sBTCUSDT",
# "side": "Buy",
# "priceEP": 970056000000,
# "baseQtyEv": 0,
# "quoteQtyEv": 1000000000,
# "action": "New",
# "execStatus": "MakerFill",
# "ordStatus": "Filled",
# "ordType": "Limit",
# "execInst": "None",
# "timeInForce": "GoodTillCancel",
# "stopDirection": "UNSPECIFIED",
# "tradeType": "Trade",
# "stopPxEp": 0,
# "execId": "c6bd8979-07ba-5946-b07e-f8b65135dbb1",
# "execPriceEp": 970056000000,
# "execBaseQtyEv": 103000,
# "execQuoteQtyEv": 999157680,
# "leavesBaseQtyEv": 0,
# "leavesQuoteQtyEv": 0,
# "execFeeEv": 0,
# "feeRateEr": 0
# }
# ]
# }
# }
#
#
# swap
#
# {
# "code": 0,
# "msg": "OK",
# "data": {
# "total": 79,
# "rows": [
# {
# "transactTimeNs": 1606054879331565300,
# "symbol": "BTCUSD",
# "currency": "BTC",
# "action": "New",
# "side": "Buy",
# "tradeType": "Trade",
# "execQty": 5,
# "execPriceEp": 182990000,
# "orderQty": 5,
# "priceEp": 183870000,
# "execValueEv": 27323,
# "feeRateEr": 75000,
# "execFeeEv": 21,
# "ordType": "Market",
# "execID": "5eee56a4-04a9-5677-8eb0-c2fe22ae3645",
# "orderID": "ee0acb82-f712-4543-a11d-d23efca73197",
# "clOrdID": "",
# "execStatus": "TakerFill"
# },
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
rows = self.safe_value(data, 'rows', [])
return self.parse_trades(rows, market, since, limit)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
defaultNetworks = self.safe_value(self.options, 'defaultNetworks')
defaultNetwork = self.safe_string_upper(defaultNetworks, code)
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(params, 'network', defaultNetwork)
network = self.safe_string(networks, network, network)
if network is None:
request['chainName'] = currency['id']
else:
request['chainName'] = network
params = self.omit(params, 'network')
response = self.privateGetPhemexUserWalletsV2DepositAddress(self.extend(request, params))
# {
# "code":0,
# "msg":"OK",
# "data":{
# "address":"0x5bfbf60e0fa7f63598e6cfd8a7fd3ffac4ccc6ad",
# "tag":null
# }
# }
#
data = self.safe_value(response, 'data', {})
address = self.safe_string(data, 'address')
tag = self.safe_string(data, 'tag')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
response = self.privateGetExchangeWalletsDepositList(params)
#
# {
# "code":0,
# "msg":"OK",
# "data":[
# {
# "id":29200,
# "currency":"USDT",
# "currencyCode":3,
# "txHash":"0x0bdbdc47807769a03b158d5753f54dfc58b92993d2f5e818db21863e01238e5d",
# "address":"0x5bfbf60e0fa7f63598e6cfd8a7fd3ffac4ccc6ad",
# "amountEv":3000000000,
# "confirmations":13,
# "type":"Deposit",
# "status":"Success",
# "createdAt":1592722565000
# }
# ]
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_transactions(data, currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
response = self.privateGetExchangeWalletsWithdrawList(params)
#
# {
# "code":0,
# "msg":"OK",
# "data":[
# {
# "address": "1Lxxxxxxxxxxx"
# "amountEv": 200000
# "currency": "BTC"
# "currencyCode": 1
# "expiredTime": 0
# "feeEv": 50000
# "rejectReason": null
# "status": "Succeed"
# "txHash": "44exxxxxxxxxxxxxxxxxxxxxx"
# "withdrawStatus: ""
# }
# ]
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_transactions(data, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'Success': 'ok',
'Succeed': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# ...
#
# fetchDeposits
#
# {
# "id":29200,
# "currency":"USDT",
# "currencyCode":3,
# "txHash":"0x0bdbdc47807769a03b158d5753f54dfc58b92993d2f5e818db21863e01238e5d",
# "address":"0x5bfbf60e0fa7f63598e6cfd8a7fd3ffac4ccc6ad",
# "amountEv":3000000000,
# "confirmations":13,
# "type":"Deposit",
# "status":"Success",
# "createdAt":1592722565000
# }
#
# fetchWithdrawals
#
# {
# "address": "1Lxxxxxxxxxxx"
# "amountEv": 200000
# "currency": "BTC"
# "currencyCode": 1
# "expiredTime": 0
# "feeEv": 50000
# "rejectReason": null
# "status": "Succeed"
# "txHash": "44exxxxxxxxxxxxxxxxxxxxxx"
# "withdrawStatus: ""
# }
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
tag = None
txid = self.safe_string(transaction, 'txHash')
currencyId = self.safe_string(transaction, 'currency')
currency = self.safe_currency(currencyId, currency)
code = currency['code']
timestamp = self.safe_integer_2(transaction, 'createdAt', 'submitedAt')
type = self.safe_string_lower(transaction, 'type')
feeCost = self.parse_number(self.from_en(self.safe_string(transaction, 'feeEv'), currency['valueScale']))
fee = None
if feeCost is not None:
type = 'withdrawal'
fee = {
'cost': feeCost,
'currency': code,
}
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.parse_number(self.from_en(self.safe_string(transaction, 'amountEv'), currency['valueScale']))
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': address,
'addressFrom': None,
'tag': tag,
'tagTo': tag,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
code = self.safe_string(params, 'code')
request = {}
if code is None:
currencyId = self.safe_string(params, 'currency')
if currencyId is None:
raise ArgumentsRequired(self.id + ' fetchPositions() requires a currency parameter or a code parameter')
else:
currency = self.currency(code)
params = self.omit(params, 'code')
request['currency'] = currency['id']
response = self.privateGetAccountsAccountPositions(self.extend(request, params))
#
# {
# "code":0,"msg":"",
# "data":{
# "account":{
# "accountId":6192120001,
# "currency":"BTC",
# "accountBalanceEv":1254744,
# "totalUsedBalanceEv":0,
# "bonusBalanceEv":1254744
# },
# "positions":[
# {
# "accountID":6192120001,
# "symbol":"BTCUSD",
# "currency":"BTC",
# "side":"None",
# "positionStatus":"Normal",
# "crossMargin":false,
# "leverageEr":100000000,
# "leverage":1.00000000,
# "initMarginReqEr":100000000,
# "initMarginReq":1.00000000,
# "maintMarginReqEr":500000,
# "maintMarginReq":0.00500000,
# "riskLimitEv":10000000000,
# "riskLimit":100.00000000,
# "size":0,
# "value":0E-8,
# "valueEv":0,
# "avgEntryPriceEp":0,
# "avgEntryPrice":0E-8,
# "posCostEv":0,
# "posCost":0E-8,
# "assignedPosBalanceEv":0,
# "assignedPosBalance":0E-8,
# "bankruptCommEv":0,
# "bankruptComm":0E-8,
# "bankruptPriceEp":0,
# "bankruptPrice":0E-8,
# "positionMarginEv":0,
# "positionMargin":0E-8,
# "liquidationPriceEp":0,
# "liquidationPrice":0E-8,
# "deleveragePercentileEr":0,
# "deleveragePercentile":0E-8,
# "buyValueToCostEr":100225000,
# "buyValueToCost":1.00225000,
# "sellValueToCostEr":100075000,
# "sellValueToCost":1.00075000,
# "markPriceEp":135736070,
# "markPrice":13573.60700000,
# "markValueEv":0,
# "markValue":null,
# "unRealisedPosLossEv":0,
# "unRealisedPosLoss":null,
# "estimatedOrdLossEv":0,
# "estimatedOrdLoss":0E-8,
# "usedBalanceEv":0,
# "usedBalance":0E-8,
# "takeProfitEp":0,
# "takeProfit":null,
# "stopLossEp":0,
# "stopLoss":null,
# "cumClosedPnlEv":0,
# "cumFundingFeeEv":0,
# "cumTransactFeeEv":0,
# "realisedPnlEv":0,
# "realisedPnl":null,
# "cumRealisedPnlEv":0,
# "cumRealisedPnl":null
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
positions = self.safe_value(data, 'positions', [])
# todo unify parsePosition/parsePositions
return positions
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
requestPath = '/' + self.implode_params(path, params)
url = requestPath
queryString = ''
if (method == 'GET') or (method == 'DELETE') or (method == 'PUT'):
if query:
queryString = self.urlencode_with_array_repeat(query)
url += '?' + queryString
if api == 'private':
self.check_required_credentials()
timestamp = self.seconds()
xPhemexRequestExpiry = self.safe_integer(self.options, 'x-phemex-request-expiry', 60)
expiry = self.sum(timestamp, xPhemexRequestExpiry)
expiryString = str(expiry)
headers = {
'x-phemex-access-token': self.apiKey,
'x-phemex-request-expiry': expiryString,
}
payload = ''
if method == 'POST':
payload = self.json(params)
body = payload
headers['Content-Type'] = 'application/json'
auth = requestPath + queryString + expiryString + payload
headers['x-phemex-request-signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
url = self.implode_hostname(self.urls['api'][api]) + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
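    # Signing sketch (illustrative): the signed string is
    #     requestPath + queryString + expiryString + payload
    # where payload is the JSON body for POST and '' otherwise, e.g.
    #     '/spot/wallets' + 'currency=BTC' + '1592567890' + ''
    # HMAC-SHA256 of that string with the API secret is sent in the
    # 'x-phemex-request-signature' header.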
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"code":30018,"msg":"phemex.data.size.uplimt","data":null}
# {"code":412,"msg":"Missing parameter - resolution","data":null}
# {"code":412,"msg":"Missing parameter - to","data":null}
# {"error":{"code":6001,"message":"invalid argument"},"id":null,"result":null}
#
error = self.safe_value(response, 'error', response)
errorCode = self.safe_string(error, 'code')
message = self.safe_string(error, 'msg')
if (errorCode is not None) and (errorCode != '0'):
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
| 47.244327
| 371
| 0.492174
|
4a170401b8c93be66f6021e7df0cbca714bbe31c
| 251
|
py
|
Python
|
RMutils/__init__.py
|
AlecThomson/RM-Tools
|
c9107f12f46583f07ee6950f92a9ef0111dbd938
|
[
"MIT"
] | 14
|
2020-07-20T07:09:50.000Z
|
2022-03-11T03:26:13.000Z
|
RMutils/__init__.py
|
AlecThomson/RM-Tools
|
c9107f12f46583f07ee6950f92a9ef0111dbd938
|
[
"MIT"
] | 38
|
2020-05-21T19:03:45.000Z
|
2022-01-17T03:44:25.000Z
|
RMutils/__init__.py
|
AlecThomson/RM-Tools
|
c9107f12f46583f07ee6950f92a9ef0111dbd938
|
[
"MIT"
] | 8
|
2020-06-01T14:23:07.000Z
|
2022-01-28T19:05:52.000Z
|
#! /usr/bin/env python
"""Dependencies for RM utilities """
__all__ = ['mpfit',
'normalize',
'util_FITS',
'util_misc',
'util_plotFITS',
'util_plotTk',
'util_rec',
'util_RM']
| 22.818182
| 36
| 0.474104
|
4a1704bd9f3da491492173b6519959dd8c64daaf
| 6,700
|
py
|
Python
|
tfHelpers.py
|
edualvarado/Guided_fine_tuning_SVBRDF
|
fc8c1fd79abd9b6285f9d2ee8f810bd31ee5240c
|
[
"MIT"
] | null | null | null |
tfHelpers.py
|
edualvarado/Guided_fine_tuning_SVBRDF
|
fc8c1fd79abd9b6285f9d2ee8f810bd31ee5240c
|
[
"MIT"
] | null | null | null |
tfHelpers.py
|
edualvarado/Guided_fine_tuning_SVBRDF
|
fc8c1fd79abd9b6285f9d2ee8f810bd31ee5240c
|
[
"MIT"
] | null | null | null |
#import tensorflow as tf
import numpy as np
#import tensorflow.contrib.slim as slim
# TensorFlow 2.x compatibility: the plain "import tensorflow as tf" above is commented out and the compat.v1 shim below is active; if running TF 1.x, swap the two imports back (comment out the two lines below).
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def conv(batch_input, out_channels, stride, filterSize=4, initScale = 0.02, useXavier=False, paddingSize = 1, useBias=False):
with tf.variable_scope("conv"):
in_height, in_width, in_channels = [batch_input.get_shape()[1], batch_input.get_shape()[2], int(batch_input.get_shape()[-1])]
filter = tf.get_variable("filter", [filterSize, filterSize, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, np.sqrt(2.0/(int(in_channels) + int(out_channels))) * initScale) if useXavier else tf.random_normal_initializer(0, initScale))
padded_input = tf.pad(batch_input, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="REFLECT")#CONSTANT
conv = tf.nn.conv2d(padded_input, filter, [1, stride, stride, 1], padding="VALID")
if useBias:
offset = tf.get_variable("offset", [1, 1, 1, out_channels], dtype=tf.float32, initializer=tf.zeros_initializer())
conv = conv + offset
return conv
def AdaIN(tensor, materialMean, materialVariance):
mean, variance = tf.nn.moments(tensor, axes=[1, 2], keep_dims=True)
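    # Note: the line below divides by the raw variance; the standard AdaIN
    # formulation normalizes by the standard deviation (sqrt(variance) + eps).
    # Behaviour is left unchanged here.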
tensorNormalized = (tensor - mean) / variance
return (materialVariance * tensorNormalized) + materialMean
def lrelu(x, a):
with tf.name_scope("lrelu"):
# adding these together creates the leak part and linear part
# then cancels them out by subtracting/adding an absolute value term
# leak: a*x/2 - a*abs(x)/2
# linear: x/2 + abs(x)/2
# this block looks like it has 2 inputs on the graph unless we do this
x = tf.identity(x)
return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
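# Quick check of the lrelu algebra above: for x >= 0 the two terms sum to x,
# for x < 0 they sum to a*x, i.e. the usual leaky ReLU max(x, a*x) for 0 < a < 1.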
def deconv(batch_input, out_channels):
with tf.variable_scope("deconv"):
in_height, in_width, in_channels = [int(batch_input.get_shape()[1]), int(batch_input.get_shape()[2]), int(batch_input.get_shape()[3])]
filter = tf.get_variable("filter", [3, 3, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
filter1 = tf.get_variable("filter1", [3, 3, out_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
# [batch, in_height, in_width, in_channels], [filter_width, filter_height, out_channels, in_channels]
# => [batch, out_height, out_width, out_channels]
resized_images = tf.image.resize_images(batch_input, [in_height * 2, in_width * 2], method = tf.image.ResizeMethod.BILINEAR, align_corners=True)#NEAREST_NEIGHBOR BILINEAR
paddingSize = 1
padded = tf.pad(resized_images, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="REFLECT")#CONSTANT
conv = tf.nn.conv2d(padded, filter, [1, 1, 1, 1], padding="VALID")
padded = tf.pad(conv, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC")#CONSTANT
conv = tf.nn.conv2d(padded, filter1, [1, 1, 1, 1], padding="VALID")
#conv = tf.nn.conv2d_transpose(batch_input, filter, [batch, in_height * 2, in_width * 2, out_channels], [1, 2, 2, 1], padding="SAME")
return conv
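# Shape sketch for deconv (illustrative): an input of [batch, 64, 64, in_channels]
# is bilinearly resized to [batch, 128, 128, in_channels]; the two padded 3x3
# VALID convolutions keep that spatial size, giving [batch, 128, 128, out_channels].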
#input is of shape [batch, X]. Returns the outputs of the layer.
def fullyConnected(input, outputDim, useBias, layerName = "fully_connected", initMultiplyer = 1.0):
with tf.variable_scope(layerName):
        batchSize = tf.shape(input)[0]
inputChannels = int(input.get_shape()[-1])
weights = tf.get_variable("weight", [inputChannels, outputDim], dtype=tf.float32, initializer=tf.random_normal_initializer(0, initMultiplyer * tf.sqrt(1.0/float(inputChannels))))
weightsTiled = tf.tile(tf.expand_dims(weights, axis = 0), [batchSize, 1,1])
squeezedInput = input
if (len(input.get_shape()) > 3) :
squeezedInput = tf.squeeze(squeezedInput, [1])
squeezedInput = tf.squeeze(squeezedInput, [1])
outputs = tf.matmul(tf.expand_dims(squeezedInput, axis = 1), weightsTiled)
outputs = tf.squeeze(outputs, [1])
if(useBias):
bias = tf.get_variable("bias", [outputDim], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.002))
outputs = outputs + tf.expand_dims(bias, axis = 0)
return outputs
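# Shape sketch for fullyConnected (illustrative): an input of [batch, X] meets
# weights [X, outputDim] tiled to [batch, X, outputDim]; the input is expanded to
# [batch, 1, X], matmul gives [batch, 1, outputDim], squeezed back to [batch, outputDim].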
#Takes a globalGenerator output as input and transforms it so it can be added to the main network
def GlobalToGenerator(inputs, channels):
with tf.variable_scope("GlobalToGenerator1"):
        fc1 = fullyConnected(inputs, channels, False, "fullyConnected_global_to_unet", 0.01)  # Why so low?
return tf.expand_dims(tf.expand_dims(fc1, axis = 1), axis=1)
def instancenorm(input):
with tf.variable_scope("instancenorm"):
# this block looks like it has 3 inputs on the graph unless we do this
input = tf.identity(input)
channels = input.get_shape()[3]
offset = tf.get_variable("offset", [1, 1, 1, channels], dtype=tf.float32, initializer=tf.zeros_initializer())
scale = tf.get_variable("scale", [1, 1, 1, channels], dtype=tf.float32, initializer=tf.random_normal_initializer(1.0, 0.02))
mean, variance = tf.nn.moments(input, axes=[1, 2], keep_dims=True)
#[batchsize ,1,1, channelNb]
variance_epsilon = 1e-5
        #The batch normalization function does the mean subtraction then divides by the standard deviation (to normalize it). It finally multiplies by scale and adds offset.
#normalized = tf.nn.batch_normalization(input, mean, variance, offset, scale, variance_epsilon=variance_epsilon)
#For instanceNorm we do it ourselves :
normalized = (((input - mean) / tf.sqrt(variance + variance_epsilon)) * scale) + offset
return normalized, mean, variance
def batchnorm(input):
with tf.variable_scope("batchnorm"):
# this block looks like it has 3 inputs on the graph unless we do this
input = tf.identity(input)
channels = input.get_shape()[3]
offset = tf.get_variable("offset", [channels], dtype=tf.float32, initializer=tf.zeros_initializer())
scale = tf.get_variable("scale", [channels], dtype=tf.float32, initializer=tf.random_normal_initializer(1.0, 0.02))
mean, variance = tf.nn.moments(input, axes=[0, 1, 2], keep_dims=False)
variance_epsilon = 1e-5
normalized = tf.nn.batch_normalization(input, mean, variance, offset, scale, variance_epsilon=variance_epsilon)
return normalized
| 59.821429
| 284
| 0.682239
|
4a1704f9074b4a9f2cb6cb9a0abd919e6d157da9
| 18,708
|
py
|
Python
|
golf/.~c9_invoke_XseC3l.py
|
kenrumer/scorekeeper
|
c7f22676e84dfdf6ca3361c6ff56719f68fce31f
|
[
"MIT"
] | null | null | null |
golf/.~c9_invoke_XseC3l.py
|
kenrumer/scorekeeper
|
c7f22676e84dfdf6ca3361c6ff56719f68fce31f
|
[
"MIT"
] | null | null | null |
golf/.~c9_invoke_XseC3l.py
|
kenrumer/scorekeeper
|
c7f22676e84dfdf6ca3361c6ff56719f68fce31f
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
from django.urls import reverse
class Tournament(models.Model):
"""
Model representing a Tournament, a set of tournament rounds
"""
name = models.CharField(max_length=200, verbose_name='Name', help_text='Enter the name of the tournament (e.g. John Doe Memorial)')
format_plugin = models.ForeignKey('FormatPlugin', verbose_name='Format', on_delete=models.SET_NULL, null=True, blank=True, help_text='Select the tournament format')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class TournamentRound(models.Model):
"""
    Basically every tournament page in the view.
    name, format and date_scheduled together should define uniqueness.
    Editing these fields will be a pain!!!
"""
name = models.CharField(max_length=200, verbose_name='Name', help_text='Enter the name of this round of the tournament')
date_scheduled = models.DateField(verbose_name='Date Scheduled', null=True, blank=True, help_text='Date this round of the tournament was supposed to be played')
date_started = models.DateField(verbose_name='Date Started', null=True, blank=True, help_text='Select the date started (rarely different than scheduled)for this tournament')
date_finished = models.DateField(verbose_name='Date Finished', null=True, blank=True, help_text='Select the date finished (rarely different than scheduled)for this tournament')
round_format_plugin = models.ForeignKey('RoundFormatPlugin', verbose_name='Format', on_delete=models.SET_NULL, null=True, blank=True, help_text='Select the tournament format')
tournament = models.ForeignKey('Tournament', verbose_name='Tournament Played', on_delete=models.SET_NULL, null=True, blank=True, help_text='Select the tournament')
courses = models.ManyToManyField('Course', verbose_name='Courses', blank=True, help_text='Select the courses players are playing and set the default for the card')
course_tees = models.ManyToManyField('CourseTee', verbose_name='Course and tee', blank=True, help_text='Select the courses and tees players are playing')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class FormatPlugin(models.Model):
"""
Model representing a Format Plugin
"""
name = models.CharField(max_length=200, help_text='Enter the name of the tournament format')
priority = models.IntegerField(verbose_name='Priority', default=-1, help_text='Highest priority will be listed first in selecting a tournament format')
class_package = models.CharField(max_length=200, null=True, blank=True, help_text='Name of the module (filename without the .py) containing the class of your plugin')
class_name = models.CharField(max_length=200, null=True, blank=True, help_text='Enter the name of the class with the module')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Scorecard(models.Model):
"""
    Model representing a Scorecard (each Round holds a ForeignKey to it)
"""
tee_time = models.TimeField(verbose_name='Tee Time', null=True, blank=True, help_text='Enter the tee time for the scorecard')
finish_time = models.TimeField(verbose_name='Finish Time', null=True, blank=True, help_text='Enter the finish time for the scorecard')
external_scorer = models.CharField(max_length=200, verbose_name='External Scorer Name', null=True, blank=True, help_text='Enter the name of the scorer if it is not a player')
external_attest = models.CharField(max_length=200, verbose_name='External Attestation Name', null=True, blank=True, help_text='Enter the name of the attestation if it is not a player')
scorer = models.ForeignKey('Player', related_name='player_scorer', verbose_name='Scorer Player Id', null=True, blank=True, help_text='Enter the player that kept score')
attest = models.ForeignKey('Player', related_name='player_attest', verbose_name='Attest Player Id', null=True, blank=True, help_text='Enter the player that attests with the score')
tournament_round = models.ForeignKey('TournamentRound', verbose_name='Tournament Round', on_delete=models.SET_NULL, null=True, blank=True, help_text='map to tournament round')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
try:
            dateStr = 'Date: '+self.tournament_round.date_scheduled.strftime('%m/%d/%Y') + ', '
except:
dateStr = ''
try:
teeTime = 'Tee Time: '+self.tee_time.strftime('%H:%M') + ', '
except:
teeTime = ''
try:
finishTime = 'Finish time: '+self.finish_time.strftime('%H:%M')
except:
finishTime = ''
return dateStr+teeTime+finishTime
class PayoutPlugin(models.Model):
"""
Model representing the plugins that will calculate payout for the overall tournament
This plugin needs to return players that are paid (overall tournament and per round) for net scores, gross scores, skins, values for pins, number of drawings, others (magic holes, hole in one)
"""
name = models.CharField(max_length=200, help_text="Enter the name of the plugin")
class_package = models.CharField(max_length=200, help_text="Name of the module (filename with the .py) containing the class of your plugin")
class_name = models.CharField(max_length=200, help_text="Enter the name of the class with the module")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.class_package
class Round(models.Model):
"""
    Model representing a Round. Together with its scores, this makes up a single page of the tournament view.
    Each player has a round in a tournament; a little ambiguous, but it'll make sense some day.
Each round has many scores
"""
handicap_index = models.DecimalField(max_digits=3, verbose_name='Handicap Index', decimal_places=1, help_text='Enter the players handicap index at time of tournament')
course_handicap = models.IntegerField(verbose_name='Course Handicap', help_text='Enter the course handicap at time of tournament')
total_out = models.IntegerField(verbose_name='OUT', null=True, blank=True, help_text='Enter the score of the front 9 holes')
total_out_style = models.CharField(max_length=200, verbose_name='Style for total out gross style', null=True, blank=True, help_text='Enter the background-color for the cell in gross view')
total_out_net = models.IntegerField(verbose_name='Front 9 Net', null=True, blank=True, help_text='Enter the net score for the front nine')
total_out_net_style = models.CharField(max_length=200, verbose_name='Style for total out net style', null=True, blank=True, help_text='Enter the background-color for the cell in net view')
total_in = models.IntegerField(verbose_name='IN', null=True, blank=True, help_text='Enter the score of the back 9 holes')
total_in_style = models.CharField(max_length=200, verbose_name='Style for total in gross style', null=True, blank=True, help_text='Enter the background-color for the cell in gross view')
total_in_net = models.IntegerField(verbose_name='Back 9 Net', null=True, blank=True, help_text='Enter the net score for the back 9')
total_in_net_style = models.CharField(max_length=200, verbose_name='Style for total in net style', null=True, blank=True, help_text='Enter the background-color for the cell in net view')
total = models.IntegerField(verbose_name='Total', null=True, blank=True, help_text='Enter the total score for the round')
total_style = models.CharField(max_length=200, verbose_name='Style for total gross style', null=True, blank=True, help_text='Enter the background-color for the cell in gross view')
    net = models.IntegerField(verbose_name='Net', null=True, blank=True, help_text='Enter the net score for the round')
net_style = models.CharField(max_length=200, verbose_name='Style for total net style', null=True, blank=True, help_text='Enter the background-color for the cell in net view')
player = models.ForeignKey('Player', verbose_name='Player Id')
date_started = models.DateField(verbose_name='Date Started', null=True, blank=True, help_text='Select the date this round was started')
date_finished = models.DateField(verbose_name='Date Finished', null=True, blank=True, help_text='Select the date this round was finished')
tournament_round = models.ForeignKey('TournamentRound', verbose_name='Tournament Round', on_delete=models.SET_NULL, null=True, blank=True, help_text='map to tournament round')
scorecard = models.ForeignKey('Scorecard', verbose_name='Scorecard')
course_tee = models.ForeignKey('CourseTee', verbose_name='Course and Tee', null=True, blank=True, help_text='Course and Tee This Round was Played on')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
        return self.player.name + ' - ' + self.tournament_round.date_scheduled.strftime('%m/%d/%Y')
class Score(models.Model):
"""
Model representing a single raw score. These values will be altered in a tournament
when calibrated with a format, course, handicap, tee handicap
"""
score = models.IntegerField(verbose_name='Score', help_text='Enter the score for the hole')
score_style = models.CharField(max_length=200, verbose_name='Style Applied to the Cell', null=True, blank=True, help_text='Enter the background-color for the cell in gross view')
score_net = models.IntegerField(verbose_name='Score Net', null=True, blank=True, help_text='Enter the net score for the hole')
    score_net_style = models.CharField(max_length=200, verbose_name='Style Applied to the Cell (Net)', null=True, blank=True, help_text='Enter the background-color for the cell in net view')
hole_played = models.IntegerField(verbose_name='Hole Played', null=True, blank=True, help_text='Enter the hole number played (e.g. in shotgun start if this is hole 16, but the second hole played enter 2)')
tee = models.ForeignKey('Tee', verbose_name='Hole and Tee Id')
round = models.ForeignKey('Round', null=True, blank=True, verbose_name='Round for this score')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
#return str(self.score)
#try:
# td = TournamentRound.objects.get(tournament=self.round.tournament.id)
#except TournamentRound.DoesNotExist:
# return str(self.score)
#return td.date.strftime('%m/%d/%Y') + ': ' + self.tee.hole.course.name + ' #' + str(self.tee.hole.number) + ' "' + str(self.score) + '" - ' + self.round.player.name
return str(self.score)
class RoundFormatPlugin(models.Model):
"""
Model representing a Tournament Format
"""
name = models.CharField(max_length=200, help_text='Enter the name of the format')
priority = models.IntegerField(verbose_name='Priority', default=-1, help_text='Highest priority will be listed first in selecting format')
class_package = models.CharField(max_length=200, null=True, blank=True, help_text='Name of the module (filename without the .py) containing the class of your plugin')
class_name = models.CharField(max_length=200, null=True, blank=True, help_text='Enter the name of the class with the module')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Course(models.Model):
"""
    Model representing a course; this is the sum of all course tees
"""
name = models.CharField(max_length=200, null=True, blank=True, help_text='Enter the name of the Course (e.g. Callaway Gardens)')
priority = models.IntegerField(verbose_name='Priority', default=-1, help_text='Lowest number greater than 0 will be listed first in selecting format')
default = models.BooleanField(verbose_name='Default', default=False, help_text='Set a default for faster starts to putting scores in')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class CourseTee(models.Model):
"""
Model representing a single Tee for a course
"""
name = models.CharField(max_length=200, null=True, blank=True, help_text='Enter the name of the Course and Tee box (e.g. Callaway Gardens - White)')
priority = models.IntegerField(verbose_name='Priority', default=-1, help_text='Highest priority will be listed first in selecting format')
default = models.BooleanField(verbose_name='Default', default=False, help_text='Set a default for faster starts to putting scores in')
slope = models.IntegerField(verbose_name='Slope', help_text='Enter the slope for this course and tee')
color = models.CharField(max_length=200, verbose_name='Tee Color', help_text='Enter the number associated with the tee color')
course = models.ForeignKey('Course', default=113, verbose_name='Course Id')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.course.name+' - '+self.name
class Tee(models.Model):
"""
Model representing a single Tee for a single hole for a course
"""
yardage = models.IntegerField(verbose_name='Yardage', help_text='Enter the yardage for the tee')
par = models.PositiveSmallIntegerField(verbose_name='Par', help_text='Enter the par for this tee')
handicap = models.PositiveSmallIntegerField(verbose_name='Handicap', help_text='Enter the handicap for this tee')
hole = models.ForeignKey('Hole', verbose_name='Hole Id', null=True, on_delete=models.SET_NULL)
course_tee = models.ForeignKey('CourseTee', verbose_name='Course Tee Id')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.course_tee.name
class Hole(models.Model):
"""
Model representing a single hole for a course
"""
name = models.CharField(max_length=200, null=True, blank=True, help_text='Enter the name of the hole')
number = models.IntegerField(help_text='Enter the number of the hole')
course = models.ForeignKey('Course', on_delete=models.CASCADE, verbose_name='Course this hole belongs to')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Activity(models.Model):
"""
Model representing recent activity from the app
"""
title = models.CharField(max_length=40, null=True, blank=True, help_text='Enter the title for this activity')
notes = models.CharField(max_length=200, null=True, blank=True, help_text='Enter the notes for this activity')
date = models.DateField(verbose_name='Date', null=True, blank=True, help_text='Enter the date for the scorecard')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.title
class Club(models.Model):
"""
    Model representing the club; probably a configuration item because there can only be one
"""
name = models.CharField(max_length=200, help_text='Enter the name of the club')
logo = models.ImageField(max_length=200, null=True, blank=True, help_text='logo')
default_tournament_name = models.CharField(max_length=200, null=True, blank=True, help_text='Enter default prefix for a tournament name')
web_site = models.CharField(max_length=200, null=True, blank=True, help_text='Enter the web site for the club')
data = models.CharField(max_length=516, null=True, blank=True, help_text='Data such as username and password used to login to your clubs player data store (used by your plugin)')
players_last_updated = models.DateTimeField(null=True, blank=True, help_text='Enter the date the players handicaps were last updated')
player_plugin = models.ForeignKey('PlayerPlugin', null=True, blank=True, verbose_name='Player Plugin Id')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class Player(models.Model):
"""
Model representing a Player
"""
club_member_number = models.IntegerField(verbose_name='Club Member Number', default=-1, help_text='Enter the players club member number (GHIN number)')
name = models.CharField(max_length=200, help_text='Enter the name of the player')
handicap_index = models.DecimalField(max_digits=3, verbose_name='Current Handicap Index', decimal_places=1, help_text='Enter the handicap index')
high_handicap_index = models.DecimalField(max_digits=3, verbose_name='High Handicap Index', decimal_places=1, help_text='Enter the high handicap index')
low_handicap_index = models.DecimalField(max_digits=3, verbose_name='Low Handicap Index', decimal_places=1, help_text='Enter the low handicap index')
last_updated = models.DateTimeField(verbose_name='Last Updated', null=True, blank=True, help_text='Last time the player plugin was used to get this player')
data = models.CharField(max_length=516, null=True, blank=True, help_text='Data such as address, phone number, age')
priority = models.IntegerField(verbose_name='Priority', default=-1, help_text='Highest priority will be listed first in selecting format')
club = models.ForeignKey('Club', null=True, blank=True, verbose_name='Club')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
class PlayerPlugin(models.Model):
"""
Model representing the plugins that can communicate with external player stores
"""
name = models.CharField(max_length=200, null=True, blank=True, help_text='Enter the name of the plugin')
class_package = models.CharField(max_length=200, null=True, blank=True, help_text='Name of the module (filename without the .py) containing the class of your plugin')
class_name = models.CharField(max_length=200, null=True, blank=True, help_text='Enter the name of the class with the module')
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.class_package
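# A minimal usage sketch for the models above (illustration only; this helper is
# never called by the application and the example values are made up).
def _example_usage():
    import datetime
    tournament = Tournament.objects.create(name='John Doe Memorial')
    course = Course.objects.create(name='Callaway Gardens', priority=1, default=True)
    tournament_round = TournamentRound.objects.create(
        name='Round 1',
        date_scheduled=datetime.date(2018, 6, 2),
        tournament=tournament,
    )
    tournament_round.courses.add(course)  # many-to-many links are added after both rows exist
    return tournament_round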
| 65.184669
| 209
| 0.719371
|
4a17050a2db22187fa9625ac00d6b5c1ee2da50e
| 3,124
|
py
|
Python
|
virt_backup/config.py
|
sanminaben/virt-backup
|
514decc6fa71e307f4e649104abbbffbbc6694a2
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
virt_backup/config.py
|
sanminaben/virt-backup
|
514decc6fa71e307f4e649104abbbffbbc6694a2
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
virt_backup/config.py
|
sanminaben/virt-backup
|
514decc6fa71e307f4e649104abbbffbbc6694a2
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import appdirs
import errno
import logging
import os
import yaml
from virt_backup import APP_NAME
logger = logging.getLogger("virt_backup")
os.environ["XDG_CONFIG_DIRS"] = "/etc"
CONFIG_DIRS = (
appdirs.user_config_dir(APP_NAME),
appdirs.site_config_dir(APP_NAME),
)
CONFIG_FILENAME = "config.yml"
def get_config(custom_path=None):
"""
Get config file and load it with yaml
:returns: loaded config in yaml, as a dict object
"""
if custom_path:
config_path = custom_path
else:
for d in CONFIG_DIRS:
config_path = os.path.join(d, CONFIG_FILENAME)
if os.path.isfile(config_path):
break
try:
with open(config_path, "r") as config_file:
return yaml.safe_load(config_file)
except FileNotFoundError as e:
logger.debug(e)
if custom_path:
logger.error(
"Configuration file {} not found.".format(custom_path)
)
else:
logger.error(
"No configuration file can be found. Please create a "
"config.yml in one of these directories:\n"
"{}".format(", ".join(CONFIG_DIRS))
)
raise FileNotFoundError
class Config(dict):
"""
Works like a dict but can be filled directly from a yaml configuration
file. Inspired from the Flask Config class (a part of their code has been
copied here).
:param defaults: an optional dictionary of default values
"""
def __init__(self, defaults=None):
dict.__init__(self, defaults or {})
self.refresh_global_logger_lvl()
def refresh_global_logger_lvl(self):
if self.get("debug", None):
logging.getLogger("virt_backup").setLevel(logging.DEBUG)
else:
logging.getLogger("virt_backup").setLevel(logging.INFO)
def from_dict(self, conf_dict):
"""
Copy values from dict
"""
self.update(conf_dict)
def from_str(self, conf_str):
"""
Read configuration from string
"""
        self.from_dict(yaml.safe_load(conf_str))
def from_yaml(self, filename, silent=False):
"""
Updates the values in the config from a yaml file.
:param filename: filename of the config.
:param silent: set to ``True`` if you want silent failure for missing
files.
"""
filename = os.path.join(filename)
try:
with open(filename) as conf_yaml:
self.from_dict(yaml.safe_load(conf_yaml))
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
return True
def get_groups(self):
"""
Get backup groups with default values
"""
groups = {}
for g, prop in self.get("groups", {}).items():
d = self.get("default", {}).copy()
d.update(prop)
groups[g] = d
return groups
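# A minimal usage sketch of Config.get_groups() (illustration only; the helper
# below is not called anywhere in virt_backup and the values are made up).
def _example_groups():
    conf = Config(defaults={"debug": False})
    conf.from_str(
        "default:\n"
        "  target_dir: /backups\n"
        "groups:\n"
        "  vms:\n"
        "    hosts: [vm1, vm2]\n"
    )
    # Every group inherits the "default" section and overrides it with its own
    # keys: {'vms': {'target_dir': '/backups', 'hosts': ['vm1', 'vm2']}}
    return conf.get_groups()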
| 28.4
| 78
| 0.588668
|
4a17052ff158c84436c97db9d6334c0f563dc737
| 4,892
|
py
|
Python
|
virtual/lib/python3.8/site-packages/asgiref/local.py
|
ShirQUillE-SandE/the-neighborhood-101
|
fda09cb0481d1cd902f5e13b7ed61ed96772121d
|
[
"MIT"
] | 169
|
2022-01-04T06:46:48.000Z
|
2022-03-31T07:17:59.000Z
|
virtual/lib/python3.8/site-packages/asgiref/local.py
|
ShirQUillE-SandE/the-neighborhood-101
|
fda09cb0481d1cd902f5e13b7ed61ed96772121d
|
[
"MIT"
] | 212
|
2022-01-04T09:46:05.000Z
|
2022-03-31T23:18:08.000Z
|
virtual/lib/python3.8/site-packages/asgiref/local.py
|
ShirQUillE-SandE/the-neighborhood-101
|
fda09cb0481d1cd902f5e13b7ed61ed96772121d
|
[
"MIT"
] | 86
|
2022-01-04T06:32:30.000Z
|
2022-03-30T13:05:51.000Z
|
import random
import string
import sys
import threading
import weakref
class Local:
"""
A drop-in replacement for threading.locals that also works with asyncio
Tasks (via the current_task asyncio method), and passes locals through
sync_to_async and async_to_sync.
Specifically:
- Locals work per-coroutine on any thread not spawned using asgiref
- Locals work per-thread on any thread not spawned using asgiref
- Locals are shared with the parent coroutine when using sync_to_async
- Locals are shared with the parent thread when using async_to_sync
(and if that thread was launched using sync_to_async, with its parent
coroutine as well, with this working for indefinite levels of nesting)
Set thread_critical to True to not allow locals to pass from an async Task
to a thread it spawns. This is needed for code that truly needs
thread-safety, as opposed to things used for helpful context (e.g. sqlite
does not like being called from a different thread to the one it is from).
Thread-critical code will still be differentiated per-Task within a thread
as it is expected it does not like concurrent access.
This doesn't use contextvars as it needs to support 3.6. Once it can support
3.7 only, we can then reimplement the storage more nicely.
"""
CLEANUP_INTERVAL = 60 # seconds
def __init__(self, thread_critical: bool = False) -> None:
self._thread_critical = thread_critical
self._thread_lock = threading.RLock()
self._context_refs: "weakref.WeakSet[object]" = weakref.WeakSet()
# Random suffixes stop accidental reuse between different Locals,
# though we try to force deletion as well.
self._attr_name = "_asgiref_local_impl_{}_{}".format(
id(self),
"".join(random.choice(string.ascii_letters) for i in range(8)),
)
def _get_context_id(self):
"""
Get the ID we should use for looking up variables
"""
# Prevent a circular reference
from .sync import AsyncToSync, SyncToAsync
# First, pull the current task if we can
context_id = SyncToAsync.get_current_task()
context_is_async = True
# OK, let's try for a thread ID
if context_id is None:
context_id = threading.current_thread()
context_is_async = False
# If we're thread-critical, we stop here, as we can't share contexts.
if self._thread_critical:
return context_id
# Now, take those and see if we can resolve them through the launch maps
for i in range(sys.getrecursionlimit()):
try:
if context_is_async:
# Tasks have a source thread in AsyncToSync
context_id = AsyncToSync.launch_map[context_id]
context_is_async = False
else:
# Threads have a source task in SyncToAsync
context_id = SyncToAsync.launch_map[context_id]
context_is_async = True
except KeyError:
break
else:
# Catch infinite loops (they happen if you are screwing around
# with AsyncToSync implementations)
raise RuntimeError("Infinite launch_map loops")
return context_id
def _get_storage(self):
context_obj = self._get_context_id()
if not hasattr(context_obj, self._attr_name):
setattr(context_obj, self._attr_name, {})
self._context_refs.add(context_obj)
return getattr(context_obj, self._attr_name)
def __del__(self):
try:
for context_obj in self._context_refs:
try:
delattr(context_obj, self._attr_name)
except AttributeError:
pass
except TypeError:
# WeakSet.__iter__ can crash when interpreter is shutting down due
# to _IterationGuard being None.
pass
def __getattr__(self, key):
with self._thread_lock:
storage = self._get_storage()
if key in storage:
return storage[key]
else:
raise AttributeError(f"{self!r} object has no attribute {key!r}")
def __setattr__(self, key, value):
if key in ("_context_refs", "_thread_critical", "_thread_lock", "_attr_name"):
return super().__setattr__(key, value)
with self._thread_lock:
storage = self._get_storage()
storage[key] = value
def __delattr__(self, key):
with self._thread_lock:
storage = self._get_storage()
if key in storage:
del storage[key]
else:
raise AttributeError(f"{self!r} object has no attribute {key!r}")
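# A minimal sketch of the per-thread behaviour described in the docstring above
# (illustration only; plain threads each get their own storage).
def _example_thread_isolation():
    store = Local()
    store.value = "main"
    def worker():
        store.value = "worker"  # only visible inside this thread
        assert store.value == "worker"
    thread = threading.Thread(target=worker)
    thread.start()
    thread.join()
    assert store.value == "main"  # the main thread's value is unchanged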
| 39.772358
| 86
| 0.630826
|
4a17056f584507f7874e3500f2b9d941e239a519
| 180
|
py
|
Python
|
pygluu/__init__.py
|
WaqasAhmedLatif/cloud-native-edition
|
1e6002f27ea971c153df59373e30d4506e9932dc
|
[
"Apache-2.0"
] | 23
|
2020-04-18T14:51:41.000Z
|
2022-03-31T19:59:40.000Z
|
pygluu/__init__.py
|
danilosoarescardoso/cloud-native-edition
|
b8aa66119dc4440b1ca3741a4065c9ae7feb42fb
|
[
"Apache-2.0"
] | 236
|
2020-04-22T08:59:27.000Z
|
2022-03-31T07:21:12.000Z
|
pygluu/__init__.py
|
danilosoarescardoso/cloud-native-edition
|
b8aa66119dc4440b1ca3741a4065c9ae7feb42fb
|
[
"Apache-2.0"
] | 23
|
2020-04-19T15:25:59.000Z
|
2022-03-16T17:17:36.000Z
|
"""
License terms and conditions for Gluu Cloud Native Edition:
https://www.apache.org/licenses/LICENSE-2.0
"""
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
| 25.714286
| 64
| 0.755556
|
4a1705afe118d492bea073496cbfd51c2537e635
| 9,255
|
py
|
Python
|
monk/system_functionality_tests/pytorch/test_update_copy_from.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 542
|
2019-11-10T12:09:31.000Z
|
2022-03-28T11:39:07.000Z
|
monk/system_functionality_tests/pytorch/test_update_copy_from.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 117
|
2019-11-12T09:39:24.000Z
|
2022-03-12T00:20:41.000Z
|
monk/system_functionality_tests/pytorch/test_update_copy_from.py
|
take2rohit/monk_v1
|
9c567bf2c8b571021b120d879ba9edf7751b9f92
|
[
"Apache-2.0"
] | 246
|
2019-11-09T21:53:24.000Z
|
2022-03-29T00:57:07.000Z
|
import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
def test_update_copy_from(system_dict):
forward = True;
if(not os.path.isdir("datasets")):
        os.system("wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt")
        os.system("unzip -qq datasets.zip")
test = "update_copy_from_object_creation";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf = prototype(verbose=0);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_Prototype()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.Prototype("sample-project-1", "sample-experiment-2", copy_from=["sample-project-1", "sample-experiment-1"]);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_reset_transforms()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.reset_transforms();
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_apply_transforms()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.apply_random_resized_crop(256, train=True, val=True, test=True);
ptf.apply_random_perspective(train=True, val=True);
ptf.apply_random_vertical_flip(train=True, val=True);
ptf.apply_random_horizontal_flip(train=True, val=True);
ptf.apply_normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], train=True, val=True, test=True);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_update_dataset()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.update_dataset(dataset_path=["datasets/dataset_cats_dogs_train", "datasets/dataset_cats_dogs_eval"]);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_update_input_size()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.update_input_size(256);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_update_batch_size()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.update_batch_size(6);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_update_shuffle_data()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.update_shuffle_data(False);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_update_num_processors()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.update_num_processors(16);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_update_trainval_split()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.update_trainval_split(0.6);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_Reload()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.Reload();
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_EDA()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.EDA(check_missing=True, check_corrupt=True);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
test = "update_copy_from_Train()";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
ptf.Train();
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
| 34.662921
| 415
| 0.612642
|
4a170670786c0021df99f5862fba537af4fa506e
| 2,542
|
py
|
Python
|
swamp/wrappers/mtz2various.py
|
rigdenlab/SWAMP
|
3e93ab27f4acf0124f7cb2d78a151cc3352b9c6e
|
[
"BSD-3-Clause"
] | 2
|
2020-02-15T11:06:34.000Z
|
2020-04-10T08:48:49.000Z
|
swamp/wrappers/mtz2various.py
|
rigdenlab/SWAMP
|
3e93ab27f4acf0124f7cb2d78a151cc3352b9c6e
|
[
"BSD-3-Clause"
] | 15
|
2020-02-04T10:56:07.000Z
|
2021-02-12T09:11:03.000Z
|
swamp/wrappers/mtz2various.py
|
rigdenlab/SWAMP
|
3e93ab27f4acf0124f7cb2d78a151cc3352b9c6e
|
[
"BSD-3-Clause"
] | 4
|
2020-02-04T13:25:09.000Z
|
2022-03-23T13:44:17.000Z
|
import os
from pyjob import cexec
from swamp.parsers import MtzParser
from swamp.wrappers.wrapper import Wrapper
class Mtz2Various(Wrapper):
"""Wrapper around mtz2various
:param str workdir: working directory
:param str mtzin: mtz file name
:param str hklout: hkl output file name
:param `~swamp.logger.swamplogger.SwampLogger` logger: logging interface for the wrapper (default None)
:param bool silent_start: if True, the logger will not display the start banner (default False)
:ivar bool error: if True an error has occurred along the process
:example:
>>> from swamp.wrappers import Mtz2Various
>>> my_mtz2various = Mtz2Various('<workdir>' '<mtzin>', '<hklout>')
>>> my_mtz2various.run()
>>> my_mtz2various.make_logfile()
"""
def __init__(self, workdir, mtzin, hklout, logger=None, silent_start=False):
super(Mtz2Various, self).__init__(workdir=workdir, logger=logger, silent_start=silent_start)
self.mtzin = mtzin
self.hklout = hklout
@property
def keywords(self):
"""Keywords to pass to mtz2various through stdin"""
mtz_head = MtzParser(self.mtzin)
mtz_head.parse()
# We prefer I over F
if mtz_head.i is not None and mtz_head.sigi is not None:
return 'LABIN I=%s SIGI=%s FREE=%s' % (
mtz_head.i, mtz_head.sigi, mtz_head.free) + os.linesep + "OUTPUT SHELX" + os.linesep + "END"
else:
return 'LABIN FP=%s SIGFP=%s FREE=%s' % (mtz_head.f, mtz_head.sigf, mtz_head.free) \
+ os.linesep + "OUTPUT SHELX" + os.linesep + "FSQUARED" + os.linesep + "END"
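    # For illustration (hypothetical column labels I, SIGI and FreeR_flag), the
    # property above would produce the following stdin for mtz2various:
    #   LABIN I=I SIGI=SIGI FREE=FreeR_flag
    #   OUTPUT SHELX
    #   END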
@property
def summary_results(self):
"""No figures of merit are obtained with mtz2various"""
return None
@property
def wrapper_name(self):
"""The name of this `~swamp.wrapper.wrapper.Wrapper` child class (mtz2various)"""
return "mtz2various"
@property
def cmd(self):
"""Command to be executed on the shell"""
return [self.source, 'HKLIN', self.mtzin, 'HKLOUT', self.hklout]
def get_scores(self, logfile=None):
"""Abstract method to get scores (not implemented in this class)"""
pass
def _run(self):
"""Run the :py:attr:`~swamp.wrappers.mtz2various.Mtz2Various.cmd` and store the stdout"""
self.logger.info(self.wrapper_header)
self.logger.debug(" ".join(self.cmd))
self.make_workdir()
self.logcontents = cexec(self.cmd, stdin=self.keywords)
| 34.351351
| 108
| 0.649489
|
4a1709539ca6216b57b126efba8f2ae003dc510a
| 14,217
|
py
|
Python
|
rootfs/usr/lib/python3/dist-packages/numpy/testing/noseclasses.py
|
kappaIO-Dev/kappaIO-sdk-armhf-crosscompile
|
66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2
|
[
"MIT"
] | null | null | null |
rootfs/usr/lib/python3/dist-packages/numpy/testing/noseclasses.py
|
kappaIO-Dev/kappaIO-sdk-armhf-crosscompile
|
66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2
|
[
"MIT"
] | null | null | null |
rootfs/usr/lib/python3/dist-packages/numpy/testing/noseclasses.py
|
kappaIO-Dev/kappaIO-sdk-armhf-crosscompile
|
66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2
|
[
"MIT"
] | null | null | null |
# These classes implement a doctest runner plugin for nose, a "known failure"
# error class, and a customized TestProgram for NumPy.
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
import os
import doctest
import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src, getpackage
import numpy
from .nosetester import get_package_name
import inspect
_doctest_ignore = ['generate_numpy_api.py', 'scons_support.py',
'setupscons.py', 'setup.py']
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, http://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
#print '_fm C1' # dbg
return True
elif inspect.isfunction(object):
#print '_fm C2' # dbg
return module.__dict__ is object.__globals__
elif inspect.isbuiltin(object):
#print '_fm C2-1' # dbg
return module.__name__ == object.__module__
elif inspect.isclass(object):
#print '_fm C3' # dbg
return module.__name__ == object.__module__
elif inspect.ismethod(object):
# This one may be a bug in cython that fails to correctly set the
# __module__ attribute of methods, but since the same error is easy
# to make by extension code writers, having this safety in place
# isn't such a bad idea
#print '_fm C3-1' # dbg
return module.__name__ == object.__self__.__class__.__module__
elif inspect.getmodule(object) is not None:
#print '_fm C4' # dbg
#print 'C4 mod',module,'obj',object # dbg
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
#print '_fm C5' # dbg
return module.__name__ == object.__module__
elif isinstance(object, property):
#print '_fm C6' # dbg
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
doctest.DocTestFinder._find(self,tests, obj, name, module,
source_lines, globs, seen)
# Below we re-run pieces of the above method with manual modifications,
# because the original code is buggy and fails to correctly identify
# doctests in extension modules.
# Local shorthands
from inspect import isroutine, isclass, ismodule, isfunction, \
ismethod
# Look for tests in a module's contained objects.
if ismodule(obj) and self._recurse:
for valname, val in list(obj.__dict__.items()):
valname1 = '%s.%s' % (name, valname)
if ( (isroutine(val) or isclass(val))
and self._from_module(module, val) ):
self._find(tests, val, valname1, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if isclass(obj) and self._recurse:
#print 'RECURSE into class:',obj # dbg
for valname, val in list(obj.__dict__.items()):
#valname1 = '%s.%s' % (name, valname) # dbg
#print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((isfunction(val) or isclass(val) or
ismethod(val) or isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
class NumpyDocTestCase(npd.DocTestCase):
"""Proxy for DocTestCase: provides an address() method that
returns the correct address for the doctest case. Otherwise
acts as a proxy to the test case. To provide hints for address(),
an obj may also be passed -- this will be used as the test object
for purposes of determining the test address, if it is provided.
"""
# doctests loaded via find(obj) omit the module name
# so we need to override id, __repr__ and shortDescription
    # bonus: this will squash a 2.3 vs 2.4 incompatibility
def id(self):
name = self._dt_test.name
filename = self._dt_test.filename
if filename is not None:
pk = getpackage(filename)
if pk is not None and not name.startswith(pk):
name = "%s.%s" % (pk, name)
return name
# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
ret = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if not ret:
if "#random" in want:
return True
# it would be useful to normalize endianness so that
# bigendian machines don't fail all the tests (and there are
# actually some bigendian examples in the doctests). Let's try
# making them all little endian
got = got.replace("'>","'<")
want= want.replace("'>","'<")
# try to normalize out 32 and 64 bit default int sizes
for sz in [4,8]:
got = got.replace("'<i%d'"%sz,"int")
want= want.replace("'<i%d'"%sz,"int")
ret = doctest.OutputChecker.check_output(self, want,
got, optionflags)
return ret
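# For illustration, the "#random" directive handled above lets doctests with
# nondeterministic output pass: an expected-output line such as
#     array([ 0.61550778,  0.23418785]) #random
# is accepted no matter which values are actually printed.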
# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None, result_var='_'):
self._result_var = result_var
self._nose_obj = obj
doctest.DocTestCase.__init__(self, test,
optionflags=optionflags,
setUp=setUp, tearDown=tearDown,
checker=checker)
print_state = numpy.get_printoptions()
class NumpyDoctest(npd.Doctest):
name = 'numpydoctest' # call nosetests with --with-numpydoctest
enabled = True
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
def configure(self, options, config):
Plugin.configure(self, options, config)
self.doctest_tests = True
self.finder = NumpyDocTestFinder()
self.parser = doctest.DocTestParser()
# Turn on whitespace normalization, set a minimal execution context
# for doctests, implement a "#random" directive to allow executing a
# command while ignoring its output.
def loadTestsFromModule(self, module):
if not self.matches(module.__name__):
npd.log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
# nose allows module.__test__ = False; doctest does not and
# throws AttributeError
return
if not tests:
return
tests.sort()
module_file = src(module.__file__)
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
pkg_name = get_package_name(os.path.dirname(test.filename))
# Each doctest should execute in an environment equivalent to
# starting Python and executing "import numpy as np", and,
# for SciPy packages, an additional import of the local
# package (so that scipy.linalg.basic.py's doctests have an
# implicit "from scipy import linalg" as well.
#
# Note: __file__ allows the doctest in NoseTester to run
# without producing an error
test.globs = {'__builtins__':__builtins__,
'__file__':'__main__',
'__name__':'__main__',
'np':numpy}
# add appropriate scipy import for SciPy tests
if 'scipy' in pkg_name:
p = pkg_name.split('.')
p1 = '.'.join(p[:-1])
p2 = p[-1]
test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
# always use whitespace and ellipsis options
optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
yield NumpyDocTestCase(test,
optionflags=optionflags,
checker=NumpyOutputChecker())
# Add an afterContext method to nose.plugins.doctests.Doctest in order
# to restore print options to the original state after each doctest
def afterContext(self):
numpy.set_printoptions(**print_state)
# Ignore NumPy-specific build files that shouldn't be searched for tests
def wantFile(self, file):
bn = os.path.basename(file)
if bn in _doctest_ignore:
return False
return npd.Doctest.wantFile(self, file)
class KnownFailureTest(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
class KnownFailure(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
KnownFailureClass exception. When KnownFailureTest is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.'''
enabled = True
knownfail = ErrorClass(KnownFailureTest,
label='KNOWNFAIL',
isfailure=False)
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailureTest '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
class NpConfig(nose.core.Config):
''' Class to pull out nose doctest plugin after configuration
This allows the user to set doctest related settings in their
configuration. For example, without this fix, a setting of
'with-doctest=1' in the user's .noserc file would cause an error, if
we remove the doctest extension before this stage. Our configure
uses the plugin to parse any settings, but then removed the doctest
plugin because the numpy doctester should be used for doctests
instead.
'''
def __init__(self, config):
self.__dict__ = config.__dict__
def configure(self, *args, **kwargs):
super(NpConfig, self).configure(*args, **kwargs)
self.plugins.plugins = [p for p in self.plugins.plugins
if p.name != 'doctest']
# Our class has two uses. First, to allow us to use NpConfig above to
# remove the doctest plugin after it has parsed the configuration.
# Second we save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
def makeConfig(self, *args, **kwargs):
"""Load a Config, pre-filled with user config files if any are
found.
We override this method only to allow us to return a NpConfig
object instead of a Config object.
"""
config = super(NumpyTestProgram, self).makeConfig(*args, **kwargs)
return NpConfig(config)
def runTests(self):
"""Run Tests. Returns true on success, false on failure, and
sets self.success to the same value.
Because nose currently discards the test result object, but we need
to return it to the user, override TestProgram.runTests to retain
the result
"""
if self.testRunner is None:
self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
verbosity=self.config.verbosity,
config=self.config)
plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
if plug_runner is not None:
self.testRunner = plug_runner
self.result = self.testRunner.run(self.test)
self.success = self.result.wasSuccessful()
return self.success
| 40.389205
| 87
| 0.604136
|
4a170a4df586c240648149d5bc8691d5963da89e
| 312
|
gyp
|
Python
|
binding.gyp
|
UrielCh/rpio-pwm
|
49140f2c90ee99e3615cf64997b1833ad4d14fa2
|
[
"MIT"
] | 10
|
2018-05-27T16:46:04.000Z
|
2021-07-27T02:51:42.000Z
|
binding.gyp
|
UrielCh/rpio-pwm
|
49140f2c90ee99e3615cf64997b1833ad4d14fa2
|
[
"MIT"
] | 8
|
2018-09-26T13:01:09.000Z
|
2021-12-17T19:59:15.000Z
|
binding.gyp
|
UrielCh/rpio-pwm
|
49140f2c90ee99e3615cf64997b1833ad4d14fa2
|
[
"MIT"
] | 3
|
2018-09-15T01:01:42.000Z
|
2021-08-22T03:24:11.000Z
|
{
"targets": [
{
"target_name": "rpiopwm",
"sources": [ "rpiopwm.cc", "pwm.cpp", "mailbox.c", "dma.cpp" ],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"/opt/vc/include"
],
"libraries": [
"-lbcm_host",
"-L/opt/vc/lib"
]
}
]
}
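# For illustration: with node-gyp and the "nan" dependency installed, this
# addon is typically built with:
#   npm install
#   node-gyp rebuild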
| 19.5
| 69
| 0.407051
|
4a170a84db12950c4891f7c02f821e3fb5d7f507
| 1,775
|
py
|
Python
|
openerp/addons/account/wizard/account_report_general_journal.py
|
ntiufalara/openerp7
|
903800da0644ec0dd9c1dcd34205541f84d45fe4
|
[
"MIT"
] | 3
|
2016-01-29T14:39:49.000Z
|
2018-12-29T22:42:00.000Z
|
openerp/addons/account/wizard/account_report_general_journal.py
|
ntiufalara/openerp7
|
903800da0644ec0dd9c1dcd34205541f84d45fe4
|
[
"MIT"
] | 2
|
2016-03-23T14:29:41.000Z
|
2017-02-20T17:11:30.000Z
|
openerp/addons/account/wizard/account_report_general_journal.py
|
ntiufalara/openerp7
|
903800da0644ec0dd9c1dcd34205541f84d45fe4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_general_journal(osv.osv_memory):
_inherit = "account.common.journal.report"
_name = 'account.general.journal'
_description = 'Account General Journal'
_columns = {
'journal_ids': fields.many2many('account.journal', 'account_general_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
def _print_report(self, cr, uid, ids, data, context=None):
data = self.pre_print_report(cr, uid, ids, data, context=context)
return {'type': 'ir.actions.report.xml', 'report_name': 'account.general.journal', 'datas': data}
account_general_journal()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 42.261905
| 153
| 0.655775
|
4a170b256bd56dfee7451f7552aa492e1eadf027
| 936
|
py
|
Python
|
venv/Lib/site-packages/sqlalchemy/dialects/sybase/mxodbc.py
|
svercillo/flaskwebapi
|
48e3417c25fc25166203cb88f959345f548a38bc
|
[
"Apache-2.0"
] | 2
|
2020-05-27T19:53:05.000Z
|
2020-05-27T19:53:07.000Z
|
venv/Lib/site-packages/sqlalchemy/dialects/sybase/mxodbc.py
|
svercillo/flaskwebapi
|
48e3417c25fc25166203cb88f959345f548a38bc
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/sqlalchemy/dialects/sybase/mxodbc.py
|
svercillo/flaskwebapi
|
48e3417c25fc25166203cb88f959345f548a38bc
|
[
"Apache-2.0"
] | null | null | null |
# sybase/mxodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase+mxodbc
:name: mxODBC
:dbapi: mxodbc
:connectstring: sybase+mxodbc://<username>:<password>@<dsnname>
:url: http://www.egenix.com/
.. note::
This dialect is a stub only and is likely non functional at this time.
"""
from sqlalchemy.connectors.mxodbc import MxODBCConnector
from sqlalchemy.dialects.sybase.base import SybaseDialect
from sqlalchemy.dialects.sybase.base import SybaseExecutionContext
class SybaseExecutionContext_mxodbc(SybaseExecutionContext):
pass
class SybaseDialect_mxodbc(MxODBCConnector, SybaseDialect):
execution_ctx_cls = SybaseExecutionContext_mxodbc
dialect = SybaseDialect_mxodbc
| 26.742857
| 75
| 0.74359
|
4a170b5be1036ac836002e140526d147c50cb6d2
| 1,406
|
py
|
Python
|
generator/modules/pytorch.py
|
dayta-ai/deepo
|
fa720e39052e63adfe0f2b9dbd8444a0d69c2540
|
[
"MIT"
] | 1
|
2021-11-18T18:34:29.000Z
|
2021-11-18T18:34:29.000Z
|
generator/modules/pytorch.py
|
dayta-ai/deepo
|
fa720e39052e63adfe0f2b9dbd8444a0d69c2540
|
[
"MIT"
] | null | null | null |
generator/modules/pytorch.py
|
dayta-ai/deepo
|
fa720e39052e63adfe0f2b9dbd8444a0d69c2540
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .__module__ import Module, dependency, source, version
from .python import Python
@dependency(Python)
@version('1.4')
@source('pip')
class Pytorch(Module):
def build(self):
py_ver = int(float(self.composer.ver(Python))*10)
if py_ver not in [27, 35, 36, 37]:
raise NotImplementedError('unsupported python version for pytorch')
if self.version not in ['1.1', '1.2', '1.3', '1.3.1', '1.4']:
raise NotImplementedError('unsupported pytorch version')
cuver = 'cpu' if self.composer.cuda_ver is None else 'cu%d' % (
float(self.composer.cuda_ver) * 10)
if str(self.version)[:3] == '1.1':
torchvisionver = '0.3'
elif str(self.version) == '1.2':
torchvisionver = '0.4'
elif str(self.version) == '1.3':
torchvisionver = '0.4.1'
elif str(self.version) == '1.3.1':
torchvisionver = '0.4.2'
elif str(self.version) == '1.4':
torchvisionver = '0.5'
return r'''
RUN $PIP_INSTALL torch=={torchver} torchvision=={torchvisionver}
'''.format(torchver=self.version, torchvisionver=torchvisionver)
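        # NOTE: the return statement below is unreachable; the method has
        # already returned above.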
return r'''
RUN $PIP_INSTALL torch=={torchver} torchvision=={torchvisionver} Pillow==6.1
'''.format(torchver=self.version, torchvisionver=torchvisionver)
| 39.055556
| 84
| 0.585349
|
4a170d9cd161f8d3bfe190053a67cabd5f334b81
| 15,728
|
py
|
Python
|
pandas/tests/indexes/multi/test_setops.py
|
Pawel-Kranzberg/pandas
|
6f90cb3d7bd5891d15a427252fba00027ca6084d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-05-07T04:58:36.000Z
|
2021-05-07T04:58:59.000Z
|
pandas/tests/indexes/multi/test_setops.py
|
Pawel-Kranzberg/pandas
|
6f90cb3d7bd5891d15a427252fba00027ca6084d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-05-11T00:05:40.000Z
|
2021-05-11T00:05:40.000Z
|
pandas/tests/indexes/multi/test_setops.py
|
Pawel-Kranzberg/pandas
|
6f90cb3d7bd5891d15a427252fba00027ca6084d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-06-16T07:19:12.000Z
|
2021-12-16T10:24:44.000Z
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_set_ops_error_cases(idx, case, sort, method):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)(case, sort=sort)
@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list])
def test_intersection_base(idx, sort, klass):
first = idx[2::-1] # first 3 elements reversed
second = idx[:5]
if klass is not MultiIndex:
second = klass(second.values)
intersect = first.intersection(second, sort=sort)
if sort is None:
expected = first.sort_values()
else:
expected = first
tm.assert_index_equal(intersect, expected)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3], sort=sort)
@pytest.mark.arm_slow
@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list])
def test_union_base(idx, sort, klass):
first = idx[::-1]
second = idx[:5]
if klass is not MultiIndex:
second = klass(second.values)
union = first.union(second, sort=sort)
if sort is None:
expected = first.sort_values()
else:
expected = first
tm.assert_index_equal(union, expected)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3], sort=sort)
def test_difference_base(idx, sort):
second = idx[4:]
answer = idx[:4]
result = idx.difference(second, sort=sort)
if sort is None:
answer = answer.sort_values()
assert result.equals(answer)
tm.assert_index_equal(result, answer)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = idx.difference(case, sort=sort)
tm.assert_index_equal(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
idx.difference([1, 2, 3], sort=sort)
def test_symmetric_difference(idx, sort):
first = idx[1:]
second = idx[:-1]
answer = idx[[-1, 0]]
result = first.symmetric_difference(second, sort=sort)
if sort is None:
answer = answer.sort_values()
tm.assert_index_equal(result, answer)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.symmetric_difference(case, sort=sort)
tm.assert_index_equal(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3], sort=sort)
def test_multiindex_symmetric_difference():
# GH 13490
idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=["a", "b"])
with tm.assert_produces_warning(FutureWarning):
result = idx ^ idx
assert result.names == idx.names
idx2 = idx.copy().rename(["A", "B"])
with tm.assert_produces_warning(FutureWarning):
result = idx ^ idx2
assert result.names == [None, None]
def test_empty(idx):
# GH 15270
assert not idx.empty
assert idx[:0].empty
def test_difference(idx, sort):
first = idx
result = first.difference(idx[-3:], sort=sort)
vals = idx[:-3].values
if sort is None:
vals = sorted(vals)
expected = MultiIndex.from_tuples(vals, sortorder=0, names=idx.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == idx.names
tm.assert_index_equal(result, expected)
# empty difference: reflexive
result = idx.difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: superset
result = idx[-3:].difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: degenerate
result = idx[:0].difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# names not the same
chunklet = idx[-3:]
chunklet.names = ["foo", "baz"]
result = first.difference(chunklet, sort=sort)
assert result.names == (None, None)
# empty, but non-equal
result = idx.difference(idx.sortlevel(1)[0], sort=sort)
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values, sort=sort)
assert result.equals(first[:0])
# name from empty array
result = first.difference([], sort=sort)
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([("foo", "one")], sort=sort)
expected = MultiIndex.from_tuples(
[("bar", "one"), ("baz", "two"), ("foo", "two"), ("qux", "one"), ("qux", "two")]
)
expected.names = first.names
assert first.names == result.names
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3, 4, 5], sort=sort)
def test_difference_sort_special():
# GH-24959
idx = MultiIndex.from_product([[1, 0], ["a", "b"]])
# sort=None, the default
result = idx.difference([])
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_difference_sort_special_true():
# TODO decide on True behaviour
idx = MultiIndex.from_product([[1, 0], ["a", "b"]])
result = idx.difference([], sort=True)
expected = MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(result, expected)
def test_difference_sort_incomparable():
# GH-24959
idx = MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]])
other = MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]])
# sort=None, the default
# MultiIndex.difference deviates here from other difference
# implementations in not catching the TypeError
msg = "'<' not supported between instances of 'Timestamp' and 'int'"
with pytest.raises(TypeError, match=msg):
result = idx.difference(other)
# sort=False
result = idx.difference(other, sort=False)
tm.assert_index_equal(result, idx)
def test_difference_sort_incomparable_true():
idx = MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]])
other = MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]])
msg = "The 'sort' keyword only takes the values of None or False; True was passed."
with pytest.raises(ValueError, match=msg):
idx.difference(other, sort=True)
def test_union(idx, sort):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_union = piece1.union(piece2, sort=sort)
if sort is None:
tm.assert_index_equal(the_union, idx.sort_values())
assert tm.equalContents(the_union, idx)
# corner case, pass self or empty thing:
the_union = idx.union(idx, sort=sort)
tm.assert_index_equal(the_union, idx)
the_union = idx.union(idx[:0], sort=sort)
tm.assert_index_equal(the_union, idx)
# FIXME: dont leave commented-out
# won't work in python 3
# tuples = _index.values
# result = _index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(idx)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = _index.union(other)
# assert result.equals(result2)
def test_intersection(idx, sort):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_int = piece1.intersection(piece2, sort=sort)
if sort is None:
tm.assert_index_equal(the_int, idx[3:5])
assert tm.equalContents(the_int, idx[3:5])
# corner case, pass self
the_int = idx.intersection(idx, sort=sort)
tm.assert_index_equal(the_int, idx)
# empty intersection: disjoint
empty = idx[:2].intersection(idx[2:], sort=sort)
expected = idx[:0]
assert empty.equals(expected)
# FIXME: dont leave commented-out
# can't do in python 3
# tuples = _index.values
# result = _index & tuples
# assert result.equals(tuples)
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_setop_with_categorical(idx, sort, method):
other = idx.to_flat_index().astype("category")
res_names = [None] * idx.nlevels
result = getattr(idx, method)(other, sort=sort)
expected = getattr(idx, method)(idx, sort=sort).rename(res_names)
tm.assert_index_equal(result, expected)
result = getattr(idx, method)(other[:5], sort=sort)
expected = getattr(idx, method)(idx[:5], sort=sort).rename(res_names)
tm.assert_index_equal(result, expected)
def test_intersection_non_object(idx, sort):
other = Index(range(3), name="foo")
result = idx.intersection(other, sort=sort)
expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=None)
tm.assert_index_equal(result, expected, exact=True)
# if we pass a length-0 ndarray (i.e. no name, we retain our idx.name)
result = idx.intersection(np.asarray(other)[:0], sort=sort)
expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=idx.names)
tm.assert_index_equal(result, expected, exact=True)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
# With non-zero length non-index, we try and fail to convert to tuples
idx.intersection(np.asarray(other), sort=sort)
def test_intersect_equal_sort():
# GH-24959
idx = MultiIndex.from_product([[1, 0], ["a", "b"]])
tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
tm.assert_index_equal(idx.intersection(idx, sort=None), idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_intersect_equal_sort_true():
# TODO decide on True behaviour
idx = MultiIndex.from_product([[1, 0], ["a", "b"]])
sorted_ = MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
@pytest.mark.parametrize("slice_", [slice(None), slice(0)])
def test_union_sort_other_empty(slice_):
# https://github.com/pandas-dev/pandas/issues/24959
idx = MultiIndex.from_product([[1, 0], ["a", "b"]])
# default, sort=None
other = idx[slice_]
tm.assert_index_equal(idx.union(other), idx)
# MultiIndex does not special case empty.union(idx)
# tm.assert_index_equal(other.union(idx), idx)
# sort=False
tm.assert_index_equal(idx.union(other, sort=False), idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_union_sort_other_empty_sort(slice_):
# TODO decide on True behaviour
# # sort=True
idx = MultiIndex.from_product([[1, 0], ["a", "b"]])
other = idx[:0]
result = idx.union(other, sort=True)
expected = MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(result, expected)
def test_union_sort_other_incomparable():
# https://github.com/pandas-dev/pandas/issues/24959
idx = MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]])
# default, sort=None
with tm.assert_produces_warning(RuntimeWarning):
result = idx.union(idx[:1])
tm.assert_index_equal(result, idx)
# sort=False
result = idx.union(idx[:1], sort=False)
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_union_sort_other_incomparable_sort():
# TODO decide on True behaviour
# # sort=True
idx = MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]])
with pytest.raises(TypeError, match="Cannot compare"):
idx.union(idx[:1], sort=True)
def test_union_non_object_dtype_raises():
# GH#32646 raise NotImplementedError instead of less-informative error
mi = MultiIndex.from_product([["a", "b"], [1, 2]])
idx = mi.levels[1]
msg = "Can only union MultiIndex with MultiIndex or Index of tuples"
with pytest.raises(NotImplementedError, match=msg):
mi.union(idx)
def test_union_empty_self_different_names():
# GH#38423
mi = MultiIndex.from_arrays([[]])
mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"])
result = mi.union(mi2)
expected = MultiIndex.from_arrays([[1, 2], [3, 4]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"method", ["union", "intersection", "difference", "symmetric_difference"]
)
def test_setops_disallow_true(method):
idx1 = MultiIndex.from_product([["a", "b"], [1, 2]])
idx2 = MultiIndex.from_product([["b", "c"], [1, 2]])
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
@pytest.mark.parametrize(
("tuples", "exp_tuples"),
[
([("val1", "test1")], [("val1", "test1")]),
([("val1", "test1"), ("val1", "test1")], [("val1", "test1")]),
(
[("val2", "test2"), ("val1", "test1")],
[("val2", "test2"), ("val1", "test1")],
),
],
)
def test_intersect_with_duplicates(tuples, exp_tuples):
# GH#36915
left = MultiIndex.from_tuples(tuples, names=["first", "second"])
right = MultiIndex.from_tuples(
[("val1", "test1"), ("val1", "test1"), ("val2", "test2")],
names=["first", "second"],
)
result = left.intersection(right)
expected = MultiIndex.from_tuples(exp_tuples, names=["first", "second"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"data, names, expected",
[
((1,), None, [None, None]),
((1,), ["a"], [None, None]),
((1,), ["b"], [None, None]),
((1, 2), ["c", "d"], [None, None]),
((1, 2), ["b", "a"], [None, None]),
((1, 2, 3), ["a", "b", "c"], [None, None]),
((1, 2), ["a", "c"], ["a", None]),
((1, 2), ["c", "b"], [None, "b"]),
((1, 2), ["a", "b"], ["a", "b"]),
((1, 2), [None, "b"], [None, "b"]),
],
)
def test_maybe_match_names(data, names, expected):
# GH#38323
mi = MultiIndex.from_tuples([], names=["a", "b"])
mi2 = MultiIndex.from_tuples([data], names=names)
result = mi._maybe_match_names(mi2)
assert result == expected
def test_intersection_equal_different_names():
# GH#30302
mi1 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["c", "b"])
mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"])
result = mi1.intersection(mi2)
expected = MultiIndex.from_arrays([[1, 2], [3, 4]], names=[None, "b"])
tm.assert_index_equal(result, expected)
def test_intersection_different_names():
# GH#38323
mi = MultiIndex.from_arrays([[1], [3]], names=["c", "b"])
mi2 = MultiIndex.from_arrays([[1], [3]])
result = mi.intersection(mi2)
tm.assert_index_equal(result, mi2)
def test_intersection_with_missing_values_on_both_sides(nulls_fixture):
# GH#38623
mi1 = MultiIndex.from_arrays([[3, nulls_fixture, 4, nulls_fixture], [1, 2, 4, 2]])
mi2 = MultiIndex.from_arrays([[3, nulls_fixture, 3], [1, 2, 4]])
result = mi1.intersection(mi2)
expected = MultiIndex.from_arrays([[3.0, nulls_fixture], [1, 2]])
tm.assert_index_equal(result, expected)
| 31.519038
| 88
| 0.642294
|
4a170e21baba4ea0edc14a5bdea730101e2d7303
| 4,138
|
py
|
Python
|
testing/test_ntheory.py
|
BatiDyDx/maths-tools-python
|
e9a58aa669b5f36d7ee01402fe1f16a1db7b0e50
|
[
"MIT"
] | 1
|
2021-02-02T02:58:38.000Z
|
2021-02-02T02:58:38.000Z
|
testing/test_ntheory.py
|
BatiDyDx/maths-tools-python
|
e9a58aa669b5f36d7ee01402fe1f16a1db7b0e50
|
[
"MIT"
] | null | null | null |
testing/test_ntheory.py
|
BatiDyDx/maths-tools-python
|
e9a58aa669b5f36d7ee01402fe1f16a1db7b0e50
|
[
"MIT"
] | null | null | null |
import math
import mathipy as mpy
from mathipy.math.ntheory import *
import numpy as np
import pytest
#TODO
#Migrate testing to pytest
# Number of significant decimal places used
# when comparing values approximately
sig_places = 10
def test_Infinite():
assert Infinite() == -Infinite(sign=False)
assert Infinite() == Infinite() + 10e10
assert -Infinite() == 10e7 - Infinite()
def test_pascal_triangle():
pass
def test_gcd():
    assert gcd(100, 10) == 10
    assert gcd(250, 3) == 1
    assert gcd(-18, 9) == 9
def test_lcm():
    assert lcm(2, 6) == 6
    assert lcm(8, 6) == 24
def test_fibonacci():
    assert fibonacci(1) == 1
    assert fibonacci(3) == 5
    with pytest.raises(TypeError):
        fibonacci(1.5)
    with pytest.raises(TypeError):
        fibonacci(3.0)
def test_fibonacci_seq():
    seq1: list[int] = list(fibonacci_seq(5))
    seq2: list[int] = list(fibonacci_seq(10))
    assert seq1 == [0, 1, 1, 2, 3]
    assert seq2 == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
def test_index_of_fib():
    assert index_of_fib(5) == 6
    assert index_of_fib(55) == 11
    assert index_of_fib(7) is None
def test_abs():
    assert mpy.abs(-100) == 100
    assert mpy.abs(3 + 4j) == 5
    assert mpy.abs(1 + 1j) == pytest.approx(mpy.sqrt(2), abs=10**-sig_places)
def test_factorial():
    assert factorial(5) == 120
    assert factorial(0) == 1
    with pytest.raises((ValueError, TypeError)):
        factorial(3.5)
    with pytest.raises((ValueError, TypeError)):
        factorial(-1)
def test_subfactorial():
    assert subfactorial(16) == 7697064251745
    assert subfactorial(1) == 0
    assert subfactorial(0) == 1
    with pytest.raises((ValueError, TypeError)):
        subfactorial(-1)
    with pytest.raises((ValueError, TypeError)):
        subfactorial(-4.1)
def test_summation():
a = 4
r = 3
k = 55
arithmetic_prog_test = summation(lambda n: a + n * r, k, 0)
geometric_prog_test = summation(lambda n: a * (r ** n), k, 0)
assert arithmetic_prog_test == arithmetic_prog(k)
assert geometric_prog_test == geometric_prog(k)
assert summation(lambda n: n, 1, 5) == 0
def test_productory():
k_0 = 30
assert productory(lambda n: 1 - n, k_0, 2) == (1 / k_0)
k_1 = 25
x = np.random.randint(100, size=k_1)
y = np.random.randint(100, size=k_1)
prod1 = productory(lambda i: x[i-1] * y[i-1], k_1, 1)
prod2 = productory(lambda i: x[i-1], k_1, 1) * productory(lambda i: y[i-1], k_1, 1)
assert prod1 == prod2
k_2 = 18
z = np.random.randint(100, size=k_2)
c = 3
assert productory(lambda i: c * z[i-1], k_2, 1) == c ** k_2 * productory(lambda i: z[i-1], k_2, 1)
def test_differential():
pass
def test_sin():
    assert mpy.sin(1.345) == pytest.approx(np.sin(1.345), abs=10**-sig_places)
    assert mpy.sin(mpy.pi) == pytest.approx(0, abs=10**-sig_places)
    assert mpy.sin(-math_constants['tau']) == pytest.approx(0, abs=10**-sig_places)
def test_cos():
    assert mpy.cos(0) == 1
    assert mpy.cos(math_constants['pi/2']) == pytest.approx(0, abs=10**-sig_places)
    assert mpy.cos(-mpy.pi / 4) == pytest.approx(mpy.sqrt2 / 2, abs=10**-sig_places)
def test_tan():
    assert mpy.tan(mpy.pi / 4) == pytest.approx(1, abs=1e-3)
    assert mpy.tan(mpy.pi) == pytest.approx(0, abs=1e-3)
    assert mpy.tan((2/3) * mpy.pi) == pytest.approx(-math.sqrt(3), abs=10**-sig_places)
    assert mpy.tan(0) == 0
    assert mpy.tan(math_constants['pi/2']) == Infinite()
def test_arcsin():
    assert mpy.arcsin(0) == 0
    assert mpy.arcsin(1) == pytest.approx(math_constants['pi/2'], abs=10**-sig_places)
    assert mpy.arcsin(-mpy.sqrt2 / 2) == pytest.approx(-mpy.pi / 4, abs=10**-sig_places)
def test_arccos():
    pass
def test_arctan():
    pass
def test_cosh():
    pass
def test_sinh():
    pass
def test_tanh():
    pass
def test_cosec():
    pass
def test_sec():
    pass
def test_cotan():
    pass
def test_cosech():
    pass
def test_sech():
    pass
def test_cotanh():
    pass
def test_ln():
    pass
def test_log():
    pass
if __name__ == '__main__':
pass
| 24.630952
| 102
| 0.658289
|
4a170fa0182a28d2861dd87f6c5b4b20d826a968
| 4,612
|
py
|
Python
|
tests/python/gaia-ui-tests/gaiatest/apps/homescreen/regions/search_panel.py
|
charleyf/gaia
|
90c1b9c146b2a4abe545bf758f2e47d898820ad1
|
[
"Apache-2.0"
] | 1
|
2019-04-26T21:30:24.000Z
|
2019-04-26T21:30:24.000Z
|
tests/python/gaia-ui-tests/gaiatest/apps/homescreen/regions/search_panel.py
|
charleyf/gaia
|
90c1b9c146b2a4abe545bf758f2e47d898820ad1
|
[
"Apache-2.0"
] | null | null | null |
tests/python/gaia-ui-tests/gaiatest/apps/homescreen/regions/search_panel.py
|
charleyf/gaia
|
90c1b9c146b2a4abe545bf758f2e47d898820ad1
|
[
"Apache-2.0"
] | 1
|
2021-09-03T10:18:22.000Z
|
2021-09-03T10:18:22.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import urllib
from marionette import expected
from marionette import Wait
from marionette.by import By
from gaiatest.apps.base import Base
from gaiatest.apps.base import PageRegion
class SearchPanel(Base):
_search_results_app_frame_locator = (By.CSS_SELECTOR, '.searchWindow.active iframe')
_search_results_locator = (By.CSS_SELECTOR, 'gaia-grid .icon')
_search_suggestion_ok_button_locator = (By.ID, 'suggestions-notice-confirm')
_rocketbar_input_locator = (By.ID, 'rocketbar-input')
_search_results_offline_locator = (By.ID, 'offline-message')
_search_results_offline_settings_locator = (By.ID, 'settings-connectivity')
def _switch_to_search_results_frame(self):
self.marionette.switch_to_frame()
self.marionette.switch_to_frame(self.marionette.find_element(*self._search_results_app_frame_locator))
@property
def offline_search_message(self):
return self.marionette.find_element(*self._search_results_offline_locator).text
@property
def is_offline_message_visible(self):
return self.is_element_displayed(*self._search_results_offline_locator)
def tap_offline_settings_button(self):
self.marionette.find_element(*self._search_results_offline_settings_locator).tap()
from gaiatest.apps.settings.app import Settings
settings = Settings(self.marionette)
settings.switch_to_settings_app()
return settings
def type_into_search_box(self, search_term):
self.keyboard.send(search_term)
# The search results frame is not findable with AppWindowManager
self._switch_to_search_results_frame()
def go_to_url(self, url):
# If a URL exists already, clear the field
self.marionette.find_element(*self._rocketbar_input_locator).clear()
self.keyboard.send(url)
#TODO Remove hack once Bug 1062309 is fixed
self.marionette.switch_to_frame()
Wait(self.marionette).until(lambda m: not self.keyboard.is_keyboard_displayed)
self.marionette.find_element(*self._rocketbar_input_locator).tap()
Wait(self.marionette).until(lambda m: self.keyboard.is_keyboard_displayed)
self.keyboard.tap_enter()
Wait(self.marionette).until(lambda m: urllib.quote(url, safe=':/?=&~') in self.apps.displayed_app.name)
from gaiatest.apps.search.regions.browser import Browser
return Browser(self.marionette)
def wait_for_search_results_to_load(self, minimum_expected_results=1):
Wait(self.marionette).until(lambda m: len(m.find_elements(
*self._search_results_locator)) > minimum_expected_results)
def confirm_suggestion_notice(self):
confirm = Wait(self.marionette).until(expected.element_present(
*self._search_suggestion_ok_button_locator))
Wait(self.marionette).until(expected.element_displayed(confirm))
confirm.tap()
Wait(self.marionette).until(expected.element_not_displayed(confirm))
def _is_result_a_webapp(self, result_element):
# An app result is to an installable (via marketplace) webapp
return '.webapp' in result_element.get_attribute('data-identifier')
@property
def app_results(self):
# An app result is to an installable (via marketplace) webapp
return [self.Result(marionette=self.marionette, element=result)
for result in self.marionette.find_elements(*self._search_results_locator)
if self._is_result_a_webapp(result)]
@property
def link_results(self):
# A link result just opens a page in a frame
return [self.Result(marionette=self.marionette, element=result)
for result in self.marionette.find_elements(*self._search_results_locator)
if not self._is_result_a_webapp(result)]
class Result(PageRegion):
@property
def name(self):
return self.root_element.text
def tap(self):
app_name = self.name
self.root_element.tap()
            # Wait for the displayed app to be the one we have tapped
Wait(self.marionette).until(lambda m: self.apps.displayed_app.name == app_name)
self.apps.switch_to_displayed_app()
            # Wait for the title to load (we cannot be more specific because the app under test may change)
Wait(self.marionette).until(lambda m: bool(m.title))
| 41.178571
| 111
| 0.714441
|
4a1710697b925371de3b8906bdedb7da489b5bb9
| 223
|
py
|
Python
|
tests/test_importers.py
|
rbrecheisen/pyminer
|
c1b088cd9eddf0347b5c3e9354991374c7344a33
|
[
"Apache-2.0"
] | null | null | null |
tests/test_importers.py
|
rbrecheisen/pyminer
|
c1b088cd9eddf0347b5c3e9354991374c7344a33
|
[
"Apache-2.0"
] | 1
|
2015-05-15T12:06:25.000Z
|
2015-10-18T15:43:34.000Z
|
tests/test_importers.py
|
rbrecheisen/pyminer
|
c1b088cd9eddf0347b5c3e9354991374c7344a33
|
[
"Apache-2.0"
] | null | null | null |
import unittest
class TestImporters(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| 13.117647
| 39
| 0.618834
|
4a171097e75dd08019066f0c3e9d2dc94a51e391
| 6,151
|
py
|
Python
|
lensing/raytrace/__init__.py
|
tracijo32/lensing
|
84b412a6020e461de9162fdb26684cf3916c2bca
|
[
"MIT"
] | null | null | null |
lensing/raytrace/__init__.py
|
tracijo32/lensing
|
84b412a6020e461de9162fdb26684cf3916c2bca
|
[
"MIT"
] | null | null | null |
lensing/raytrace/__init__.py
|
tracijo32/lensing
|
84b412a6020e461de9162fdb26684cf3916c2bca
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy import interpolate
from ..helper_functions import param2array, array2param
def delens(ix,iy,deflectx,deflecty,ratio=1,dx=0,dy=0):
'''
delenses a set of x and y coordinates in the image plane
Parameters:
ix,iy: scalar or array-like, image plane x,y coordinates (units: pixels)
deflectx,deflecty: 2d numpy array, deflection matrices (units: pixels)
ratio: scalar scaling factor for the deflection matrices
ex. if defl. maps are @ z = 2 and your source is z = 4:
ratio = dls/ds(z=4) / dls/ds(z=2)
dx,dy: scalars, offset in x-y pixels between grid that ix,iy
are on and where your deflection matrices start
if both are on the same grid, then dx=dy=0
returns: srcx,srcy, numpy array of source plane x,y positions
'''
ix,xptype = param2array(ix)
iy,yptype = param2array(iy)
x = ix-dx
y = iy-dy
n = len(ix)
    # scale the deflection matrices by the dls/ds ratio
dplx = deflectx*ratio
dply = deflecty*ratio
srcx = np.zeros(n)
srcy = np.zeros(n)
# create interpolation functions for the deflection matrices
xpixvals = np.arange(dplx.shape[0])
ypixvals = np.arange(dply.shape[1])
dplx_interp = interpolate.interp2d(xpixvals,ypixvals,dplx)
dply_interp = interpolate.interp2d(xpixvals,ypixvals,dply)
for i in range(n):
deflectx = dplx_interp(x[i]-1,y[i]-1)[0]
deflecty = dply_interp(x[i]-1,y[i]-1)[0]
srcx[i] = x[i] - deflectx
srcy[i] = y[i] - deflecty
srcx = srcx+dx
srcy = srcy+dy
return srcx,srcy
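# Hedged usage sketch, not part of the original module; the arrays below are
# made-up placeholders and only the call signature comes from the docstring above.
#
#   dplx = np.zeros((100, 100))   # x deflection map, in pixels
#   dply = np.zeros((100, 100))   # y deflection map, in pixels
#   srcx, srcy = delens([50.0], [60.0], dplx, dply, ratio=1.0, dx=0, dy=0)
#   # with zero deflection the source-plane position equals the image-plane position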
def lens(xs,ys,deflectx,deflecty,ratio=1,dx=0,dy=0,maxdist=0.2,return_dist=False):
'''
lenses a set of x and y coordinates in the source plane to the image plane
Parameters:
xs,ys: array-like, source plane x,y coordinates (units: pixels)
deflectx,deflecty: 2d numpy array, deflection matrices (units: pixels)
ratio: scalar, scaling factor for the deflection matrices
ex. if defl. maps are @ z = 2 and your source is @ z = 4:
ratio = dls/ds(z=4) / dls/ds(z=2)
dx,dy: scalar, offset in x-y pixels between grid that ix,iy
are on and where your deflection matrices start
if both are on the same grid, then dx=dy=0
maxdist: scalar, the maximum distance in the source plane for which pixels are considered
associated with the same source plane position
-setting this value too high may cause nearby images to merge into "giant arcs"
-setting this value too low may cause demagnified images to not appear
return_dist (boolean): if True, the function returns a third variable representing the
distance from the desired source plane position for each image pixel
Returns:
ximp,yimp(,dist): numpy arrays of image plane position x,y (source plane distance)
'''
xs,xptype = param2array(xs)
ys,yptype = param2array(ys)
xs = xs-dx
ys = ys-dy
dims = deflectx.shape
source_x = np.zeros_like(deflectx)
source_y = np.zeros_like(deflecty)
if dims[0] == dims[1]:
for i in range(dims[0]):
source_x[:,i] = i + 1 - deflectx[:,i]*ratio
source_y[i,:] = i + 1 - deflecty[i,:]*ratio
else:
for j in range(dims[0]): source_x[:,j] = j + 1 - deflectx[:,j]*ratio
for k in range(dims[1]): source_y[k,:] = k + 1 - deflecty[k,:]*ratio
d = np.sqrt((source_x-xs)**2+(source_y-ys)**2)
indices = np.where(d<maxdist)
dist = d[indices]
ximp = []
yimp = []
for i,j in zip(indices[1],indices[0]): ximp.append(i+1),yimp.append(j+1)
ximp = np.array(ximp)+dx
yimp = np.array(yimp)+dy
if return_dist:
return ximp, yimp, dist
else:
return ximp, yimp
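# Hedged usage sketch, not part of the original module; srcx, srcy, dplx and dply
# are placeholders and only the signature and return values follow the docstring above.
#
#   ximp, yimp = lens(srcx, srcy, dplx, dply, ratio=1.0, maxdist=0.2)
#   # or, to also get each image pixel's distance from the requested source position:
#   ximp, yimp, dist = lens(srcx, srcy, dplx, dply, maxdist=0.2, return_dist=True)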
def lens_fine(x,y,deflectx,deflecty,xa,ya,ratio=1,dx=0,dy=0):
x,xptype = param2array(x)
y,yptype = param2array(y)
x -= dx
y -= dy
dplx = deflectx*ratio
dply = deflecty*ratio
# create interpolation functions for the deflection matrices
xpixvals = np.arange(dplx.shape[0])
ypixvals = np.arange(dply.shape[1])
dplx_interp = interpolate.interp2d(xpixvals,ypixvals,dplx)
dply_interp = interpolate.interp2d(xpixvals,ypixvals,dply)
## find the source plane position of every pixel in the image plane
dims = dplx.shape
source_x = dplx*0
source_y = dply*0
if dims[0] == dims[1]:
for i in range(dims[0]):
source_x[:,i] = i + 1 - dplx[:,i]
source_y[i,:] = i + 1 - dply[i,:]
else:
for j in range(dims[0]): source_x[:,j] = j + 1 - dplx[:,j]
for k in range(dims[1]): source_y[k,:] = k + 1 - dply[k,:]
X,Y = np.meshgrid(np.arange(dplx.shape[1]),np.arange(dplx.shape[0]))
conditions = np.array([X >= xa[0]-dx,
X < xa[1]-dx,
Y >= ya[0]-dy,
Y < ya[1]-dy])
pixels = np.all(conditions,axis=0)
ix = np.zeros(x.size)
iy = np.zeros(y.size)
for i in range(x.size):
dist = (source_x-x[i])**2+(source_y-y[i])**2
closest = np.where(dist[pixels].flat == np.amin(dist[pixels]))
# find the approximate position of the source in the image plane
ix_close = x[i] + (dplx[pixels]).flat[closest]
iy_close = y[i] + (dply[pixels]).flat[closest]
# trace around the approximate position until you find a spot really close to source plane
gridsize = 1001
ixval = np.linspace(ix_close-0.5,ix_close+0.5,gridsize)
iyval = np.linspace(iy_close-0.5,iy_close+0.5,gridsize)
ixgrid,iygrid = np.meshgrid(ixval,iyval)
deflectx = dplx_interp(ixval,iyval)
deflecty = dply_interp(ixval,iyval)
sxgrid = ixgrid - deflectx
sygrid = iygrid - deflecty
dist_fine = (sxgrid - x[i])**2+(sygrid - y[i])**2
new_closest = np.where(dist_fine.flat == np.amin(dist_fine))[0][0]
ix[i] = ixgrid.flat[new_closest]
iy[i] = iygrid.flat[new_closest]
ix = array2param(ix,xptype)+dx
iy = array2param(iy,yptype)+dy
return ix,iy
| 35.554913
| 98
| 0.620062
|
4a1711ce76c8e5e2e0ea6732169f0a5a852866b9
| 161
|
py
|
Python
|
accounting/enums.py
|
leonolan2020/phoenix
|
b5956a7003e548f01255cbd5d0d76cfd0ac77a81
|
[
"MIT"
] | 1
|
2020-09-19T21:56:40.000Z
|
2020-09-19T21:56:40.000Z
|
accounting/enums.py
|
leonolan2020/phoenix
|
b5956a7003e548f01255cbd5d0d76cfd0ac77a81
|
[
"MIT"
] | null | null | null |
accounting/enums.py
|
leonolan2020/phoenix
|
b5956a7003e548f01255cbd5d0d76cfd0ac77a81
|
[
"MIT"
] | 5
|
2020-09-18T18:53:03.000Z
|
2020-10-21T14:42:00.000Z
|
from django.db.models import TextChoices
from django.utils.translation import gettext as _
class FinancialDocument(TextChoices):
DARAMAD='درآمد',_('درآمد')
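    # 'درآمد' is Persian for 'income'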
| 26.833333
| 49
| 0.795031
|
4a17127b261fab52e961ac98579883e3a4023201
| 5,824
|
py
|
Python
|
predict.py
|
G-hx/faster-rcnn-pytorch
|
2a056ebcf1cf0d68a167fcefb687cb09481e58cf
|
[
"MIT"
] | 1
|
2022-03-25T08:38:45.000Z
|
2022-03-25T08:38:45.000Z
|
predict.py
|
G-hx/faster-rcnn-pytorch
|
2a056ebcf1cf0d68a167fcefb687cb09481e58cf
|
[
"MIT"
] | null | null | null |
predict.py
|
G-hx/faster-rcnn-pytorch
|
2a056ebcf1cf0d68a167fcefb687cb09481e58cf
|
[
"MIT"
] | null | null | null |
#----------------------------------------------------#
#   Single-image prediction, camera/video detection and an FPS benchmark are all
#   combined in this one .py file; switch between them by setting `mode`.
#----------------------------------------------------#
import time
import cv2
import numpy as np
from PIL import Image
from frcnn import FRCNN
if __name__ == "__main__":
frcnn = FRCNN()
#----------------------------------------------------------------------------------------------------------#
#   mode selects what to run:
#   'predict'      single-image prediction; to change the prediction step (saving the image,
#                  cropping the detected objects, ...) read the detailed notes below first
#   'video'        video detection from a camera or a video file; see the notes below
#   'fps'          FPS benchmark using img/street.jpg; see the notes below
#   'dir_predict'  detect every image in a folder and save the results; by default it reads the
#                  img folder and writes to img_out; see the notes below
#----------------------------------------------------------------------------------------------------------#
mode = "predict"
#-------------------------------------------------------------------------#
#   crop specifies whether the detected objects are cropped out after single-image prediction
#   crop is only used when mode='predict'
#-------------------------------------------------------------------------#
crop = False
#----------------------------------------------------------------------------------------------------------#
#   video_path specifies the path of the video; video_path=0 means use the camera
#       to detect a video file, set e.g. video_path = "xxx.mp4", which reads xxx.mp4 from the root directory
#   video_save_path is where the output video is saved; video_save_path="" means do not save
#       to save the video, set e.g. video_save_path = "yyy.mp4", which writes yyy.mp4 in the root directory
#   video_fps is the fps of the saved video
#   video_path, video_save_path and video_fps are only used when mode='video'
#   when saving a video, exit with ctrl+c or let it run to the last frame so the file is written completely
#----------------------------------------------------------------------------------------------------------#
video_path = 0
video_save_path = ""
video_fps = 25.0
#-------------------------------------------------------------------------#
#   test_interval is how many times the image is detected when measuring fps
#   in theory, the larger test_interval is, the more accurate the measured fps
#-------------------------------------------------------------------------#
test_interval = 100
#-------------------------------------------------------------------------#
#   dir_origin_path is the folder containing the images to detect
#   dir_save_path is the folder where the detected images are saved
#   dir_origin_path and dir_save_path are only used when mode='dir_predict'
#-------------------------------------------------------------------------#
dir_origin_path = "img/"
dir_save_path = "img_out/"
if mode == "predict":
'''
        1. This code cannot run batch prediction directly. For batch prediction, walk a folder
           with os.listdir() and open each image with Image.open() before predicting; get_dr_txt.py
           shows the full flow, including saving the detection results.
        2. To save the detected image, call r_image.save("img.jpg"); edit predict.py directly.
        3. To get the bounding-box coordinates, go into frcnn.detect_image and read top, left,
           bottom, right in the drawing section.
        4. To crop a detected object out of the image, go into frcnn.detect_image and slice the
           original image with the top, left, bottom, right values obtained in the drawing section.
        5. To write extra text on the output, e.g. the count of a particular class, go into
           frcnn.detect_image and test predicted_class in the drawing section (for example,
           if predicted_class == 'car': tells you whether the current object is a car, so you can
           count it), then write the text with draw.text.
'''
while True:
img = input('Input image filename:')
try:
image = Image.open(img)
except:
print('Open Error! Try again!')
continue
else:
r_image = frcnn.detect_image(image, crop = crop)
r_image.show()
elif mode == "video":
capture=cv2.VideoCapture(video_path)
if video_save_path!="":
fourcc = cv2.VideoWriter_fourcc(*'XVID')
size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
out = cv2.VideoWriter(video_save_path, fourcc, video_fps, size)
fps = 0.0
while(True):
t1 = time.time()
            # read one frame
ref,frame=capture.read()
            # convert the colour format, BGR to RGB
frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            # convert to a PIL Image
frame = Image.fromarray(np.uint8(frame))
            # run detection
frame = np.array(frcnn.detect_image(frame))
            # RGB back to BGR to match OpenCV's display format
frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)
fps = ( fps + (1./(time.time()-t1)) ) / 2
print("fps= %.2f"%(fps))
frame = cv2.putText(frame, "fps= %.2f"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
cv2.imshow("video",frame)
c= cv2.waitKey(1) & 0xff
if video_save_path!="":
out.write(frame)
if c==27:
capture.release()
break
capture.release()
out.release()
cv2.destroyAllWindows()
elif mode == "fps":
img = Image.open('img/street.jpg')
tact_time = frcnn.get_FPS(img, test_interval)
print(str(tact_time) + ' seconds, ' + str(1/tact_time) + 'FPS, @batch_size 1')
elif mode == "dir_predict":
import os
from tqdm import tqdm
img_names = os.listdir(dir_origin_path)
for img_name in tqdm(img_names):
if img_name.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')):
image_path = os.path.join(dir_origin_path, img_name)
image = Image.open(image_path)
r_image = frcnn.detect_image(image)
if not os.path.exists(dir_save_path):
os.makedirs(dir_save_path)
r_image.save(os.path.join(dir_save_path, img_name.replace(".jpg", ".png")), quality=95, subsampling=0)
else:
raise AssertionError("Please specify the correct mode: 'predict', 'video', 'fps' or 'dir_predict'.")
| 43.789474
| 126
| 0.482315
|
4a171294f2aa7a1e3ad1925153eb252d594dbcf3
| 2,234
|
py
|
Python
|
Python/rosalind_splc.py
|
angeeranaser/Rosalind
|
088610530754f6bb1cf2013f416b6f45ba334028
|
[
"MIT"
] | null | null | null |
Python/rosalind_splc.py
|
angeeranaser/Rosalind
|
088610530754f6bb1cf2013f416b6f45ba334028
|
[
"MIT"
] | null | null | null |
Python/rosalind_splc.py
|
angeeranaser/Rosalind
|
088610530754f6bb1cf2013f416b6f45ba334028
|
[
"MIT"
] | null | null | null |
# Rosalind: RNA Splicing
# Given: A DNA string s and a collection of substrings acting as introns.
# Result: A protein string transcribed from the exons.
def removeIntron(dna, intron):
dna = dna.replace(intron,'')
return dna
def refTable(first):
codonList = [('UUU','F'),('CUU','L'),('AUU','I'),('GUU','V'),
('UUC','F'),('CUC','L'),('AUC','I'),('GUC','V'),
('UUA','L'),('CUA','L'),('AUA','I'),('GUA','V'),
('UUG','L'),('CUG','L'),('AUG','M'),('GUG','V'),
('UCU','S'),('CCU','P'),('ACU','T'),('GCU','A'),
('UCC','S'),('CCC','P'),('ACC','T'),('GCC','A'),
('UCA','S'),('CCA','P'),('ACA','T'),('GCA','A'),
('UCG','S'),('CCG','P'),('ACG','T'),('GCG','A'),
('UAU','Y'),('CAU','H'),('AAU','N'),('GAU','D'),
('UAC','Y'),('CAC','H'),('AAC','N'),('GAC','D'),
('UAA','Stop'),('CAA','Q'),('AAA','K'),('GAA','E'),
('UAG','Stop'),('CAG','Q'),('AAG','K'),('GAG','E'),
('UGU','C'),('CGU','R'),('AGU','S'),('GGU','G'),
('UGC','C'),('CGC','R'),('AGC','S'),('GGC','G'),
('UGA','Stop'),('CGA','R'),('AGA','R'),('GGA','G'),
('UGG','W'),('CGG','R'),('AGG','R'),('GGG','G')]
codonList = dict(codonList)
return codonList[first]
def main():
# open file, extract data
dataFile = open('rosalind_splc.txt','r')
dnaList = dataFile.read().strip()
dnaList = dnaList.replace('\n','').split('>')
dataFile.close()
# remove introns from dna
dnaString = dnaList[1][13:]
for x in range(2, len(dnaList)):
dnaString = removeIntron(dnaString, dnaList[x][13:])
# transcribe dna into rna
rnaString = dnaString.replace('T','U')
# break string into codons
rnaString = [rnaString[i:i + 3] for i in range(0, len(rnaString), 3)]
# translate rna into protein
proteinString = ''
for x in range(len(rnaString)):
proteinLetter = refTable(rnaString[x])
if proteinLetter != 'Stop':
proteinString += proteinLetter
else:
proteinString += '\n'
# output results
outputFile = open('output.txt','w')
outputFile.writelines(proteinString)
outputFile.close()
if __name__ == "__main__":
main()
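# Worked example of the translation step above: the DNA exon 'ATGTAA' transcribes
# to the RNA 'AUGUAA', which splits into the codons 'AUG' and 'UAA'; refTable('AUG')
# returns 'M' and refTable('UAA') returns 'Stop', so only 'M' ends up in the
# protein string (the Stop codon just terminates it with a newline).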
| 34.369231
| 76
| 0.491047
|
4a1712b488cd805f5efcbc7c692bfd0ea281808a
| 2,466
|
py
|
Python
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_05_01_preview/models/_container_registry_management_client_enums.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_05_01_preview/models/_container_registry_management_client_enums.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_05_01_preview/models/_container_registry_management_client_enums.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that created the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class LastModifiedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that last modified the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Provisioning state of the resource.
"""
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
class TokenCertificateName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
CERTIFICATE1 = "certificate1"
CERTIFICATE2 = "certificate2"
class TokenPasswordName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The password name "password1" or "password2"
"""
PASSWORD1 = "password1"
PASSWORD2 = "password2"
class TokenStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the token example enabled or disabled.
"""
ENABLED = "enabled"
DISABLED = "disabled"
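# Usage note (a sketch, not part of the generated code): because of
# _CaseInsensitiveEnumMeta, lookup by member name ignores case, so the lower-case
# strings returned by the service still resolve to the right member, e.g.
#   TokenStatus['enabled'] is TokenStatus.ENABLED
#   ProvisioningState['succeeded'] is ProvisioningState.SUCCEEDED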
| 32.447368
| 94
| 0.658151
|
4a17130a890a1b21a18278d713fc4bc0e5c5857c
| 3,500
|
py
|
Python
|
myYOLO_v1.py
|
Varat7v2/Person-Detection
|
b8b33f1206839d94119f1aa7a6b7b62ec9c5048e
|
[
"MIT"
] | null | null | null |
myYOLO_v1.py
|
Varat7v2/Person-Detection
|
b8b33f1206839d94119f1aa7a6b7b62ec9c5048e
|
[
"MIT"
] | null | null | null |
myYOLO_v1.py
|
Varat7v2/Person-Detection
|
b8b33f1206839d94119f1aa7a6b7b62ec9c5048e
|
[
"MIT"
] | null | null | null |
import numpy as np
import argparse
import cv2
import subprocess
import time
import os
class YOLO_INFERENCE:
def __init__(self, confidence, threshold):
# self.config_path = FLAGS.config
# self.weights_path = FLAGS.weights
# self.labels = open(FLAGS.labels).read().strip().split('\n')
self.confidence = confidence
self.threshold = threshold
def draw_labels_and_boxes(self, img, boxes, confidences, classids, idxs, labels):
for obj, score, box in zip(classids, confidences, boxes):
if obj == 0 and score > 0.7:
# Get the bounding box coordinates
x, y = box[0], box[1]
w, h = box[2], box[3]
# Draw the bounding box rectangle and label on the image
cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
text = "{}: {:.3f}".format(labels[0], score)
cv2.putText(img, text, (x, y-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)
return img
def generate_boxes_confidences_classids(self, outs, height, width, tconf):
boxes = []
confidences = []
classids = []
for out in outs:
for detection in out:
# Get the scores, classid, and the confidence of the prediction
scores = detection[5:]
classid = np.argmax(scores)
confidence = scores[classid]
# Consider only the predictions that are above a certain confidence level
if confidence > tconf:
# TODO Check detection
box = detection[0:4] * np.array([width, height, width, height])
centerX, centerY, bwidth, bheight = box.astype('int')
# Using the center x, y coordinates to derive the top
# and the left corner of the bounding box
left = int(centerX - (bwidth / 2))
top = int(centerY - (bheight / 2))
# right = int(centerX + (bwidth / 2))
                    # bottom = int(centerY + (bheight / 2))
# Append to list
boxes.append([left, top, int(bwidth), int(bheight)])
confidences.append(float(confidence))
classids.append(classid)
return boxes, confidences, classids
def run_yolo(self, img, net, layer_names, width, height, labels):
# Contructing a blob from the input image
blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416), swapRB=True, crop=False)
# Perform a forward pass of the YOLO object detector
net.setInput(blob)
# Getting the outputs from the output layers
start = time.time()
outs = net.forward(layer_names)
end = time.time()
print("YOLO model inference time: ", (end-start), "seconds")
# Generate the boxes, confidences, and classIDs
boxes, confidences, classids = self.generate_boxes_confidences_classids(outs, height,
width, self.confidence)
# Apply Non-Maxima Suppression to suppress overlapping bounding boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confidence, self.threshold)
# Draw labels and boxes on the image
img = self.draw_labels_and_boxes(img, boxes, confidences, classids, idxs, labels)
return img, boxes, confidences, classids, idxs
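# Worked example of the centre-to-corner conversion in
# generate_boxes_confidences_classids: with centerX=200, centerY=120, bwidth=100
# and bheight=80, left = 200 - 100/2 = 150 and top = 120 - 80/2 = 80, so the
# stored box is [left, top, w, h] = [150, 80, 100, 80].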
| 38.888889
| 94
| 0.568571
|
4a17135db2cfefaf33ac0e53881a0d17c0de2567
| 48,492
|
py
|
Python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/virtual_machine_scale_set_vms_operations.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/virtual_machine_scale_set_vms_operations.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 2
|
2016-09-30T21:40:24.000Z
|
2017-11-10T18:16:18.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/virtual_machine_scale_set_vms_operations.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class VirtualMachineScaleSetVMsOperations(object):
"""VirtualMachineScaleSetVMsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-03-30".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-30"
self.config = config
def _reimage_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reimage(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Reimages (upgrade the operating system) a specific virtual machine in a
VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
OperationStatusResponse or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reimage_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
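    # Hedged usage sketch (an assumption, not taken from this file): with a
    # configured ComputeManagementClient exposing these generated operations,
    # a call would look roughly like
    #   poller = compute_client.virtual_machine_scale_set_vms.reimage(
    #       'my-resource-group', 'my-scale-set', '0')
    #   status = poller.result()   # OperationStatusResponse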
def _reimage_all_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimageall'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reimage_all(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Allows you to re-image all the disks ( including data disks ) in the a
VM scale set instance. This operation is only supported for managed
disks.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
OperationStatusResponse or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reimage_all_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def _deallocate_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def deallocate(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Deallocates a specific virtual machine in a VM scale set. Shuts down
the virtual machine and releases the compute resources it uses. You are
not billed for the compute resources of this virtual machine once it is
deallocated.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
OperationStatusResponse or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._deallocate_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def _delete_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Deletes a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
OperationStatusResponse or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Gets a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualMachineScaleSetVM or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetVM or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineScaleSetVM', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_instance_view(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Gets the status of a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualMachineScaleSetVMInstanceView or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetVMInstanceView
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineScaleSetVMInstanceView', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, resource_group_name, virtual_machine_scale_set_name, filter=None, select=None, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of all virtual machines in a VM scale sets.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the VM scale set.
:type virtual_machine_scale_set_name: str
:param filter: The filter to apply to the operation.
:type filter: str
:param select: The list parameters.
:type select: str
:param expand: The expand expression to apply to the operation.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualMachineScaleSetVM
:rtype:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetVMPaged[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetVM]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualMachineScaleSetVMPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualMachineScaleSetVMPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def _power_off_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def power_off(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Power off (stop) a virtual machine in a VM scale set. Note that
resources are still attached and you are getting charged for the
resources. Instead, use deallocate to release resources and avoid
charges.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
OperationStatusResponse or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._power_off_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def _restart_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def restart(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Restarts a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
OperationStatusResponse or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._restart_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def _start_initial(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def start(
self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, **operation_config):
"""Starts a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
OperationStatusResponse or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._start_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
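# Illustrative usage sketch (not part of the generated operations class). It
# assumes a ComputeManagementClient constructed elsewhere with valid
# credentials and subscription id; RESOURCE_GROUP, SCALE_SET and INSTANCE_ID
# are hypothetical names supplied by the caller:
#
#     compute_client = ComputeManagementClient(credentials, subscription_id)
#     vm_ops = compute_client.virtual_machine_scale_set_vms
#
#     # Long-running calls (start, power_off, restart, deallocate, delete)
#     # return an AzureOperationPoller; result() blocks until the
#     # OperationStatusResponse is available.
#     poller = vm_ops.power_off(RESOURCE_GROUP, SCALE_SET, INSTANCE_ID)
#     status = poller.result()
#
#     # Plain reads (get, get_instance_view, list) return deserialized models.
#     vm = vm_ops.get(RESOURCE_GROUP, SCALE_SET, INSTANCE_ID)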
| 45.704053
| 193
| 0.668708
|
4a171372638d637af57a12c0ac77171cefedb34e
| 5,470
|
py
|
Python
|
Hangman2.0.py
|
Rahul-coder69/Hangman
|
ea879355786146b2284cf34ff8850bdc809a0ae2
|
[
"MIT"
] | null | null | null |
Hangman2.0.py
|
Rahul-coder69/Hangman
|
ea879355786146b2284cf34ff8850bdc809a0ae2
|
[
"MIT"
] | null | null | null |
Hangman2.0.py
|
Rahul-coder69/Hangman
|
ea879355786146b2284cf34ff8850bdc809a0ae2
|
[
"MIT"
] | null | null | null |
from tkinter import *  # PhotoImage is included in the star import
import random
from tkinter import messagebox
import time
chances=5
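# click() is bound to every letter button below: a correct guess reveals the
# letter in `choice`, an incorrect guess uses up one of the 5 chances, and a
# repeated/already-visible letter is rejected. Winning (no "_" left) or losing
# (chances reach 0) shows an image plus a message box and closes the window.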
def click(event):
global status,words,choice,z,find,chances,num
text= str(event.widget.cget("text"))
if text in choice:
status.set("letter already present")
correct.update()
elif text in z:
index= z.index(text)
del choice[index]
choice.insert(index,text)
wording.config(text=choice)
wording.update()
status.set("Correct guess!!!")
correct.update()
if choice.count("_")==0:
status.set("Congrats !!!")
correct.update()
gmail=PhotoImage(file=r"C:\Users\RAHUL\Downloads\Inkedsaved man_LI.png")
lab=Label(image=gmail)
lab.photo=gmail
lab.place(x=200,y=205)
messagebox.showinfo("Result","YOU SAVED THE MAN!!")
time.sleep(2)
root.destroy()
else:
status.set(f"Try again! chances left:{chances-1}")
correct.update()
chances-=1
if chances==0:
status.set(f"Correct word: {z}")
correct.update()
gmail=PhotoImage(file=r"C:\Users\RAHUL\Downloads\hanged final.png")
lab=Label(image=gmail)
lab.photo=gmail
lab.place(x=165,y=164)
messagebox.showinfo("Result","YOU MURDERED THE MAN!!")
time.sleep(2)
root.destroy()
root= Tk()
root.geometry("600x570")
root.title("HANGMAN 2.0")
root.resizable(0,0)
root.config(bg="green")
header=Label(root,text="HANGMAN",font="algerian 20 bold",fg="red",relief=SUNKEN)
header.pack(fill=BOTH)
f1= Frame(root,borderwidth=3,bg="green")
f1.place(y=70)
A= Button(f1,text="A",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="B",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="C",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="D",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="E",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="F",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="G",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="H",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="I",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="J",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="K",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="L",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f1,text="M",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
#Row 2 starts
f2= Frame(root,borderwidth=3,bg="green")
f2.place(y=110)
A= Button(f2,text="N",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="O",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="P",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="Q",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="R",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="S",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="T",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="U",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="V",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="W",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="X",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="Y",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
A= Button(f2,text="Z",bg="yellow",padx=7,pady=4)
A.pack(side=LEFT,padx=7)
A.bind("<Button-1>",click)
#Buttons end
#List operations
words= ["DIFFICULT","OUTSTANDING","SUFFRAGE","ZOOLOGIST","BOTANIST","EXPERIENCE"]
find= [["_","I","F","F","I","_","_","_","_"],["_","_","T","_","T","_","N","D","_","N","_"],["S","_","F","F","_","_","_","E"],["_","O","O","_","O","_","_","S","_"],["_","O","T","_","_","I","_","T"],["E","_","_","E","_","_","E","N","_","E"]]
choice= random.choice(find)
num=find.index(choice)
z= words[num]
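#The masks in `find` are hand-paired with `words` by index. A hypothetical
#helper (not used by the game) could derive a mask instead of hard-coding it:
#
#    def make_mask(word, hidden_letters):
#        return [("_" if ch in hidden_letters else ch) for ch in word]
#
#    #e.g. make_mask("BOTANIST", {"B", "A", "N", "S"}) reproduces find[4]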
#status and labelling starts
l1= Label(root,text="Word :-",fg="blue",font="algerian 20 bold",bg="green")
l1.place(y=450,x=110)
wording= Label(root,fg="red",text=choice,font="algerian 20 bold",bg="green")
wording.place(y=450,x=230)
stat=Label(root,text="STATUS :",fg="yellow",font="algerian 18 bold",bg="green")
stat.place(y=499,x=60)
status= StringVar()
correct= Entry(root,textvariable=status,font="algerian 18")
correct.place(y=499,x=200,width=350)
root.mainloop()
| 33.765432
| 240
| 0.611883
|
4a17156e702222e2b040beb925242184f29479c2
| 4,000
|
py
|
Python
|
isi_sdk_8_0/isi_sdk_8_0/models/group_members.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_0/isi_sdk_8_0/models/group_members.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_0/isi_sdk_8_0/models/group_members.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_0.models.group_member import GroupMember # noqa: F401,E501
class GroupMembers(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'members': 'list[GroupMember]',
'resume': 'str'
}
attribute_map = {
'members': 'members',
'resume': 'resume'
}
def __init__(self, members=None, resume=None): # noqa: E501
"""GroupMembers - a model defined in Swagger""" # noqa: E501
self._members = None
self._resume = None
self.discriminator = None
if members is not None:
self.members = members
if resume is not None:
self.resume = resume
@property
def members(self):
"""Gets the members of this GroupMembers. # noqa: E501
:return: The members of this GroupMembers. # noqa: E501
:rtype: list[GroupMember]
"""
return self._members
@members.setter
def members(self, members):
"""Sets the members of this GroupMembers.
:param members: The members of this GroupMembers. # noqa: E501
:type: list[GroupMember]
"""
self._members = members
@property
def resume(self):
"""Gets the resume of this GroupMembers. # noqa: E501
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options). # noqa: E501
:return: The resume of this GroupMembers. # noqa: E501
:rtype: str
"""
return self._resume
@resume.setter
def resume(self, resume):
"""Sets the resume of this GroupMembers.
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options). # noqa: E501
:param resume: The resume of this GroupMembers. # noqa: E501
:type: str
"""
self._resume = resume
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GroupMembers):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
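# Illustrative usage sketch (not part of the generated code). The model is a
# plain container, so it can be built directly and round-tripped to a dict:
#
#     page = GroupMembers(members=[], resume=None)
#     assert page.to_dict() == {'members': [], 'resume': None}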
| 27.972028
| 170
| 0.577
|
4a1715bf99338632aa84970ec950cca1d227ec21
| 5,542
|
py
|
Python
|
test/functional/rpc_txoutproof.py
|
jardelfrank42/paymecoin
|
36e3ce6d64839a37c45b6e17aedfb2238c3a5257
|
[
"MIT"
] | null | null | null |
test/functional/rpc_txoutproof.py
|
jardelfrank42/paymecoin
|
36e3ce6d64839a37c45b6e17aedfb2238c3a5257
|
[
"MIT"
] | null | null | null |
test/functional/rpc_txoutproof.py
|
jardelfrank42/paymecoin
|
36e3ce6d64839a37c45b6e17aedfb2238c3a5257
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test gettxoutproof and verifytxoutproof RPCs."""
from test_framework.messages import CMerkleBlock, FromHex, ToHex
from test_framework.test_framework import IsocoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes
class MerkleBlockTest(IsocoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# Nodes 0/1 are "wallet" nodes, Nodes 2/3 are used for testing
self.extra_args = [[], [], [], ["-txindex"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.sync_all()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransactionwithwallet(tx1)["hex"])
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransactionwithwallet(tx2)["hex"])
# This will raise an exception because the transaction is not yet in a block
assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98})
txid3 = self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransactionwithwallet(tx3)["hex"])
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent])
# We can get the proof if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# We can't get the proof if we specify a non-existent block
assert_raises_rpc_error(-5, "Block not found", self.nodes[2].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
# We can get the proof if the transaction is unspent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
# We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist))
assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1]))), sorted(txlist))
# We can always get a proof if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
# We can't get a proof if we specify transactions from different blocks
assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3])
# Now we'll try tweaking a proof.
proof = self.nodes[3].gettxoutproof([txid1, txid2])
assert txid1 in self.nodes[0].verifytxoutproof(proof)
assert txid2 in self.nodes[1].verifytxoutproof(proof)
tweaked_proof = FromHex(CMerkleBlock(), proof)
# Make sure that our serialization/deserialization is working
assert txid1 in self.nodes[2].verifytxoutproof(ToHex(tweaked_proof))
# Check to see if we can go up the merkle tree and pass this off as a
# single-transaction block
tweaked_proof.txn.nTransactions = 1
tweaked_proof.txn.vHash = [tweaked_proof.header.hashMerkleRoot]
tweaked_proof.txn.vBits = [True] + [False]*7
for n in self.nodes:
assert not n.verifytxoutproof(ToHex(tweaked_proof))
# TODO: try more variants, eg transactions at different depths, and
# verify that the proofs are invalid
if __name__ == '__main__':
MerkleBlockTest().main()
| 50.844037
| 142
| 0.690184
|
4a17169b6d1d2a8deea1324f0d3da3d10ed64ef9
| 285
|
py
|
Python
|
1_beginner/chapter4/practice/money_check.py
|
code4tomorrow/Python
|
035b6f5d8fd635a16caaff78bcd3f582663dadc3
|
[
"MIT"
] | 4
|
2021-03-01T00:32:45.000Z
|
2021-05-21T22:01:52.000Z
|
1_beginner/chapter4/practice/money_check.py
|
code4tomorrow/Python
|
035b6f5d8fd635a16caaff78bcd3f582663dadc3
|
[
"MIT"
] | 29
|
2020-09-12T22:56:04.000Z
|
2021-09-25T17:08:42.000Z
|
1_beginner/chapter4/practice/money_check.py
|
code4tomorrow/Python
|
035b6f5d8fd635a16caaff78bcd3f582663dadc3
|
[
"MIT"
] | 7
|
2021-02-25T01:50:55.000Z
|
2022-02-28T00:00:42.000Z
|
# Money Check
# Write a program that asks for a person's
# amount of money (floating point).
# If the person's amount of money is 0,
# print "Bankrupt". If not, print "Not Bankrupt"
# If the person's amount of money is
# greater than 1000.0, then print "Rich".
# Write your code here
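# One possible solution sketch (the prompt string is illustrative, and the
# exercise leaves open whether "Rich" replaces or follows "Not Bankrupt"):
money = float(input("Enter the person's amount of money: "))
if money == 0:
    print("Bankrupt")
else:
    print("Not Bankrupt")
if money > 1000.0:
    print("Rich")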
| 28.5
| 48
| 0.712281
|
4a1716d467fdd16121dc4c9205d0a21fef20ad07
| 597
|
py
|
Python
|
ig_clone_api/permissions.py
|
whosgriffith/ig-clone-api
|
83b79ed62e21c654d0945decaaf6571e19c8c12a
|
[
"MIT"
] | null | null | null |
ig_clone_api/permissions.py
|
whosgriffith/ig-clone-api
|
83b79ed62e21c654d0945decaaf6571e19c8c12a
|
[
"MIT"
] | null | null | null |
ig_clone_api/permissions.py
|
whosgriffith/ig-clone-api
|
83b79ed62e21c654d0945decaaf6571e19c8c12a
|
[
"MIT"
] | null | null | null |
""" Custom permissions. """
# Django REST Framework
from rest_framework.permissions import BasePermission
class IsObjectOwner(BasePermission):
""" Give permission only to user who created the photo. """
def has_object_permission(self, request, view, obj):
""" Check user and obj are the same. """
return request.user == obj.user
class IsAccountOwner(BasePermission):
""" Give permission only if is the account owner. """
def has_object_permission(self, request, view, obj):
""" Check user and obj are the same. """
return request.user == obj
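# Illustrative usage sketch (hypothetical PhotoViewSet; assumes this module is
# importable as ig_clone_api.permissions). Permissions are attached per view
# through ``permission_classes``:
#
#     from rest_framework import viewsets
#     from rest_framework.permissions import IsAuthenticated
#     from ig_clone_api.permissions import IsObjectOwner
#
#     class PhotoViewSet(viewsets.ModelViewSet):
#         permission_classes = [IsAuthenticated, IsObjectOwner]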
| 28.428571
| 63
| 0.681742
|
4a17179d345022cdd1f3bd6d3ca4cc0f693d760f
| 31,372
|
py
|
Python
|
mne_bids/tests/test_write.py
|
SophieHerbst/mne-bids
|
0e9b5e261668b90efec28359772f321d999af7d7
|
[
"BSD-3-Clause"
] | null | null | null |
mne_bids/tests/test_write.py
|
SophieHerbst/mne-bids
|
0e9b5e261668b90efec28359772f321d999af7d7
|
[
"BSD-3-Clause"
] | null | null | null |
mne_bids/tests/test_write.py
|
SophieHerbst/mne-bids
|
0e9b5e261668b90efec28359772f321d999af7d7
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Test the MNE BIDS converter.
For each supported file format, implement a test.
"""
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Teon L Brooks <teon.brooks@gmail.com>
# Chris Holdgraf <choldgraf@berkeley.edu>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Matt Sanderson <matt.sanderson@mq.edu.au>
#
# License: BSD (3-clause)
import os
import os.path as op
import pytest
from glob import glob
from datetime import datetime
import platform
import shutil as sh
import json
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import mne
from mne.datasets import testing
from mne.utils import (_TempDir, run_subprocess, check_version,
requires_nibabel, requires_version)
from mne.io.constants import FIFF
from mne.io.kit.kit import get_kit_info
from mne_bids import (write_raw_bids, read_raw_bids, make_bids_basename,
make_bids_folders, write_anat)
from mne_bids.tsv_handler import _from_tsv, _to_tsv
from mne_bids.utils import _find_matching_sidecar
from mne_bids.pick import coil_type
base_path = op.join(op.dirname(mne.__file__), 'io')
subject_id = '01'
subject_id2 = '02'
session_id = '01'
run = '01'
acq = '01'
run2 = '02'
task = 'testing'
bids_basename = make_bids_basename(
subject=subject_id, session=session_id, run=run, acquisition=acq,
task=task)
bids_basename_minimal = make_bids_basename(subject=subject_id, task=task)
# WINDOWS issues:
# the bids-validator development version does not work properly on Windows as
# of 2019-06-25 --> https://github.com/bids-standard/bids-validator/issues/790
# As a workaround, we try to get the path to the executable from an environment
# variable VALIDATOR_EXECUTABLE ... if this is not possible we assume to be
# using the stable bids-validator and make a direct call of bids-validator
# also: for windows, shell = True is needed to call npm, bids-validator etc.
# see: https://stackoverflow.com/q/28891053/5201771
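# For example (the path below is purely illustrative; it depends on where npm
# installed the validator on the test machine):
#
#     set VALIDATOR_EXECUTABLE=C:\path\to\bids-validator\bin\bids-validator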
@pytest.fixture(scope="session")
def _bids_validate():
"""Fixture to run BIDS validator."""
shell = False
bids_validator_exe = ['bids-validator', '--config.error=41',
'--config.error=41']
if platform.system() == 'Windows':
shell = True
exe = os.getenv('VALIDATOR_EXECUTABLE', 'n/a')
        if exe != 'n/a':
bids_validator_exe = ['node', exe]
def _validate(output_path):
cmd = bids_validator_exe + [output_path]
run_subprocess(cmd, shell=shell)
return _validate
@requires_version('pybv', '0.2.0')
def test_fif(_bids_validate):
"""Test functionality of the write_raw_bids conversion for fif."""
output_path = _TempDir()
data_path = testing.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
events_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
raw = mne.io.read_raw_fif(raw_fname)
write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
event_id=event_id, overwrite=False)
# Read the file back in to check that the data has come through cleanly.
# Events and bad channel information was read through JSON sidecar files.
raw2 = read_raw_bids(bids_basename + '_meg.fif', output_path)
assert set(raw.info['bads']) == set(raw2.info['bads'])
events, _ = mne.events_from_annotations(raw2)
events2 = mne.read_events(events_fname)
events2 = events2[events2[:, 2] != 0]
assert_array_equal(events2[:, 0], events[:, 0])
# check if write_raw_bids works when there is no stim channel
raw.set_channel_types({raw.ch_names[i]: 'misc'
for i in
mne.pick_types(raw.info, stim=True, meg=False)})
output_path = _TempDir()
with pytest.warns(UserWarning, match='No events found or provided.'):
write_raw_bids(raw, bids_basename, output_path, overwrite=False)
_bids_validate(output_path)
# try with eeg data only (conversion to bv)
output_path = _TempDir()
raw = mne.io.read_raw_fif(raw_fname)
raw.load_data()
raw2 = raw.pick_types(meg=False, eeg=True, stim=True, eog=True, ecg=True)
raw2.save(op.join(output_path, 'test.fif'), overwrite=True)
raw2 = mne.io.Raw(op.join(output_path, 'test.fif'), preload=False)
with pytest.warns(UserWarning,
match='Converting data files to BrainVision format'):
write_raw_bids(raw2, bids_basename, output_path,
events_data=events_fname,
event_id=event_id, overwrite=False)
os.remove(op.join(output_path, 'test.fif'))
bids_dir = op.join(output_path, 'sub-%s' % subject_id,
'ses-%s' % session_id, 'eeg')
for sidecar in ['channels.tsv', 'eeg.eeg', 'eeg.json', 'eeg.vhdr',
'eeg.vmrk', 'events.tsv']:
assert op.isfile(op.join(bids_dir, bids_basename + '_' + sidecar))
raw2 = mne.io.read_raw_brainvision(op.join(bids_dir,
bids_basename + '_eeg.vhdr'))
assert_array_almost_equal(raw.get_data(), raw2.get_data())
_bids_validate(output_path)
# write the same data but pretend it is empty room data:
raw = mne.io.read_raw_fif(raw_fname)
er_date = datetime.fromtimestamp(
raw.info['meas_date'][0]).strftime('%Y%m%d')
er_bids_basename = 'sub-emptyroom_ses-{0}_task-noise'.format(str(er_date))
write_raw_bids(raw, er_bids_basename, output_path, overwrite=False)
assert op.exists(op.join(
output_path, 'sub-emptyroom', 'ses-{0}'.format(er_date), 'meg',
'sub-emptyroom_ses-{0}_task-noise_meg.json'.format(er_date)))
# test that an incorrect date raises an error.
er_bids_basename_bad = 'sub-emptyroom_ses-19000101_task-noise'
with pytest.raises(ValueError, match='Date provided'):
write_raw_bids(raw, er_bids_basename_bad, output_path, overwrite=False)
# give the raw object some fake participant data (potentially overwriting)
raw = mne.io.read_raw_fif(raw_fname)
raw.info['subject_info'] = {'his_id': subject_id2,
'birthday': (1993, 1, 26), 'sex': 1}
write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
event_id=event_id, overwrite=True)
# assert age of participant is correct
participants_tsv = op.join(output_path, 'participants.tsv')
data = _from_tsv(participants_tsv)
assert data['age'][data['participant_id'].index('sub-01')] == '9'
# try and write preloaded data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
with pytest.raises(ValueError, match='preloaded'):
write_raw_bids(raw, bids_basename, output_path,
events_data=events_fname, event_id=event_id,
overwrite=False)
raw = mne.io.read_raw_fif(raw_fname)
raw.anonymize()
data_path2 = _TempDir()
raw_fname2 = op.join(data_path2, 'sample_audvis_raw.fif')
raw.save(raw_fname2)
bids_basename2 = bids_basename.replace(subject_id, subject_id2)
raw = mne.io.read_raw_fif(raw_fname2)
bids_output_path = write_raw_bids(raw, bids_basename2, output_path,
events_data=events_fname,
event_id=event_id, overwrite=False)
# check that the overwrite parameters work correctly for the participant
# data
# change the gender but don't force overwrite.
raw.info['subject_info'] = {'his_id': subject_id2,
'birthday': (1994, 1, 26), 'sex': 2}
with pytest.raises(FileExistsError, match="already exists"): # noqa: F821
write_raw_bids(raw, bids_basename2, output_path,
events_data=events_fname, event_id=event_id,
overwrite=False)
# now force the overwrite
write_raw_bids(raw, bids_basename2, output_path, events_data=events_fname,
event_id=event_id, overwrite=True)
with pytest.raises(ValueError, match='raw_file must be'):
write_raw_bids('blah', bids_basename, output_path)
bids_basename2 = 'sub-01_ses-01_xyz-01_run-01'
with pytest.raises(KeyError, match='Unexpected entity'):
write_raw_bids(raw, bids_basename2, output_path)
bids_basename2 = 'sub-01_run-01_task-auditory'
with pytest.raises(ValueError, match='ordered correctly'):
write_raw_bids(raw, bids_basename2, output_path, overwrite=True)
del raw._filenames
with pytest.raises(ValueError, match='raw.filenames is missing'):
write_raw_bids(raw, bids_basename2, output_path)
_bids_validate(output_path)
assert op.exists(op.join(output_path, 'participants.tsv'))
# asserting that single fif files do not include the part key
files = glob(op.join(bids_output_path, 'sub-' + subject_id2,
'ses-' + subject_id2, 'meg', '*.fif'))
for ii, FILE in enumerate(files):
assert 'part' not in FILE
assert ii < 1
# check that split files have part key
raw = mne.io.read_raw_fif(raw_fname)
data_path3 = _TempDir()
raw_fname3 = op.join(data_path3, 'sample_audvis_raw.fif')
raw.save(raw_fname3, buffer_size_sec=1.0, split_size='10MB',
split_naming='neuromag', overwrite=True)
raw = mne.io.read_raw_fif(raw_fname3)
subject_id3 = '03'
bids_basename3 = bids_basename.replace(subject_id, subject_id3)
bids_output_path = write_raw_bids(raw, bids_basename3, output_path,
overwrite=False)
files = glob(op.join(bids_output_path, 'sub-' + subject_id3,
'ses-' + subject_id3, 'meg', '*.fif'))
for FILE in files:
assert 'part' in FILE
def test_kit(_bids_validate):
"""Test functionality of the write_raw_bids conversion for KIT data."""
output_path = _TempDir()
data_path = op.join(base_path, 'kit', 'tests', 'data')
raw_fname = op.join(data_path, 'test.sqd')
events_fname = op.join(data_path, 'test-eve.txt')
hpi_fname = op.join(data_path, 'test_mrk.sqd')
hpi_pre_fname = op.join(data_path, 'test_mrk_pre.sqd')
hpi_post_fname = op.join(data_path, 'test_mrk_post.sqd')
electrode_fname = op.join(data_path, 'test_elp.txt')
headshape_fname = op.join(data_path, 'test_hsp.txt')
event_id = dict(cond=1)
kit_bids_basename = bids_basename.replace('_acq-01', '')
raw = mne.io.read_raw_kit(
raw_fname, mrk=hpi_fname, elp=electrode_fname,
hsp=headshape_fname)
write_raw_bids(raw, kit_bids_basename, output_path,
events_data=events_fname,
event_id=event_id, overwrite=False)
_bids_validate(output_path)
assert op.exists(op.join(output_path, 'participants.tsv'))
read_raw_bids(kit_bids_basename + '_meg.sqd', output_path)
# ensure the channels file has no STI 014 channel:
channels_tsv = make_bids_basename(
subject=subject_id, session=session_id, task=task, run=run,
suffix='channels.tsv',
prefix=op.join(output_path, 'sub-01', 'ses-01', 'meg'))
data = _from_tsv(channels_tsv)
assert 'STI 014' not in data['name']
# ensure the marker file is produced in the right place
marker_fname = make_bids_basename(
subject=subject_id, session=session_id, task=task, run=run,
suffix='markers.sqd',
prefix=op.join(output_path, 'sub-01', 'ses-01', 'meg'))
assert op.exists(marker_fname)
# test attempts at writing invalid event data
event_data = np.loadtxt(events_fname)
# make the data the wrong number of dimensions
event_data_3d = np.atleast_3d(event_data)
other_output_path = _TempDir()
with pytest.raises(ValueError, match='two dimensions'):
write_raw_bids(raw, bids_basename, other_output_path,
events_data=event_data_3d, event_id=event_id,
overwrite=True)
# remove 3rd column
event_data = event_data[:, :2]
with pytest.raises(ValueError, match='second dimension'):
write_raw_bids(raw, bids_basename, other_output_path,
events_data=event_data, event_id=event_id,
overwrite=True)
# test correct naming of marker files
raw = mne.io.read_raw_kit(
raw_fname, mrk=[hpi_pre_fname, hpi_post_fname], elp=electrode_fname,
hsp=headshape_fname)
write_raw_bids(raw,
kit_bids_basename.replace('sub-01', 'sub-%s' % subject_id2),
output_path, events_data=events_fname, event_id=event_id,
overwrite=False)
_bids_validate(output_path)
# ensure the marker files are renamed correctly
marker_fname = make_bids_basename(
subject=subject_id2, session=session_id, task=task, run=run,
suffix='markers.sqd', acquisition='pre',
prefix=os.path.join(output_path, 'sub-02', 'ses-01', 'meg'))
info = get_kit_info(marker_fname, False)[0]
assert info['meas_date'] == get_kit_info(hpi_pre_fname,
False)[0]['meas_date']
marker_fname = marker_fname.replace('acq-pre', 'acq-post')
info = get_kit_info(marker_fname, False)[0]
assert info['meas_date'] == get_kit_info(hpi_post_fname,
False)[0]['meas_date']
# check that providing markers in the wrong order raises an error
raw = mne.io.read_raw_kit(
raw_fname, mrk=[hpi_post_fname, hpi_pre_fname], elp=electrode_fname,
hsp=headshape_fname)
with pytest.raises(ValueError, match='Markers'):
write_raw_bids(
raw,
kit_bids_basename.replace('sub-01', 'sub-%s' % subject_id2),
output_path, events_data=events_fname, event_id=event_id,
overwrite=True)
def test_ctf(_bids_validate):
"""Test functionality of the write_raw_bids conversion for CTF data."""
output_path = _TempDir()
data_path = op.join(testing.data_path(download=False), 'CTF')
raw_fname = op.join(data_path, 'testdata_ctf.ds')
raw = mne.io.read_raw_ctf(raw_fname)
with pytest.warns(UserWarning, match='No line frequency'):
write_raw_bids(raw, bids_basename, output_path=output_path)
_bids_validate(output_path)
with pytest.warns(UserWarning, match='Did not find any events'):
raw = read_raw_bids(bids_basename + '_meg.ds', output_path)
# test to check that running again with overwrite == False raises an error
with pytest.raises(FileExistsError, match="already exists"): # noqa: F821
write_raw_bids(raw, bids_basename, output_path=output_path)
assert op.exists(op.join(output_path, 'participants.tsv'))
def test_bti(_bids_validate):
"""Test functionality of the write_raw_bids conversion for BTi data."""
output_path = _TempDir()
data_path = op.join(base_path, 'bti', 'tests', 'data')
raw_fname = op.join(data_path, 'test_pdf_linux')
config_fname = op.join(data_path, 'test_config_linux')
headshape_fname = op.join(data_path, 'test_hs_linux')
raw = mne.io.read_raw_bti(raw_fname, config_fname=config_fname,
head_shape_fname=headshape_fname)
write_raw_bids(raw, bids_basename, output_path, verbose=True)
assert op.exists(op.join(output_path, 'participants.tsv'))
_bids_validate(output_path)
raw = read_raw_bids(bids_basename + '_meg', output_path)
# XXX: vhdr test currently passes only on MNE master. Skip until next release.
# see: https://github.com/mne-tools/mne-python/pull/6558
@pytest.mark.skipif(LooseVersion(mne.__version__) < LooseVersion('0.19'),
reason="requires mne 0.19.dev0 or higher")
def test_vhdr(_bids_validate):
"""Test write_raw_bids conversion for BrainVision data."""
output_path = _TempDir()
data_path = op.join(base_path, 'brainvision', 'tests', 'data')
raw_fname = op.join(data_path, 'test.vhdr')
raw = mne.io.read_raw_brainvision(raw_fname)
# inject a bad channel
assert not raw.info['bads']
injected_bad = ['FP1']
raw.info['bads'] = injected_bad
# write with injected bad channels
write_raw_bids(raw, bids_basename_minimal, output_path, overwrite=False)
_bids_validate(output_path)
# read and also get the bad channels
raw = read_raw_bids(bids_basename_minimal + '_eeg.vhdr', output_path)
# Check that injected bad channel shows up in raw after reading
np.testing.assert_array_equal(np.asarray(raw.info['bads']),
np.asarray(injected_bad))
# Test that correct channel units are written ... and that bad channel
# is in channels.tsv
channels_tsv_name = op.join(output_path, 'sub-{}'.format(subject_id),
'eeg', bids_basename_minimal + '_channels.tsv')
data = _from_tsv(channels_tsv_name)
assert data['units'][data['name'].index('FP1')] == 'µV'
assert data['units'][data['name'].index('CP5')] == 'n/a'
assert data['status'][data['name'].index(injected_bad[0])] == 'bad'
# check events.tsv is written
events_tsv_fname = channels_tsv_name.replace('channels', 'events')
assert op.exists(events_tsv_fname)
# create another bids folder with the overwrite command and check
# no files are in the folder
data_path = make_bids_folders(subject=subject_id, kind='eeg',
output_path=output_path, overwrite=True)
assert len([f for f in os.listdir(data_path) if op.isfile(f)]) == 0
# Also cover iEEG
# We use the same data and pretend that eeg channels are ecog
raw = mne.io.read_raw_brainvision(raw_fname)
raw.set_channel_types({raw.ch_names[i]: 'ecog'
for i in mne.pick_types(raw.info, eeg=True)})
output_path = _TempDir()
write_raw_bids(raw, bids_basename, output_path, overwrite=False)
_bids_validate(output_path)
def test_edf(_bids_validate):
"""Test write_raw_bids conversion for European Data Format data."""
output_path = _TempDir()
data_path = op.join(testing.data_path(), 'EDF')
raw_fname = op.join(data_path, 'test_reduced.edf')
raw = mne.io.read_raw_edf(raw_fname, preload=True)
# XXX: hack that should be fixed later. Annotation reading is
# broken for this file with preload=False and read_annotations_edf
raw.preload = False
raw.rename_channels({raw.info['ch_names'][0]: 'EOG'})
raw.info['chs'][0]['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
raw.rename_channels({raw.info['ch_names'][1]: 'EMG'})
raw.set_channel_types({'EMG': 'emg'})
write_raw_bids(raw, bids_basename, output_path)
# Reading the file back should raise an error, because we renamed channels
# in `raw` and used that information to write a channels.tsv. Yet, we
# saved the unchanged `raw` in the BIDS folder, so channels in the TSV and
# in raw clash
with pytest.raises(RuntimeError, match='Channels do not correspond'):
read_raw_bids(bids_basename + '_eeg.edf', output_path)
bids_fname = bids_basename.replace('run-01', 'run-%s' % run2)
write_raw_bids(raw, bids_fname, output_path, overwrite=True)
_bids_validate(output_path)
# ensure there is an EMG channel in the channels.tsv:
channels_tsv = make_bids_basename(
subject=subject_id, session=session_id, task=task, run=run,
suffix='channels.tsv', acquisition=acq,
prefix=op.join(output_path, 'sub-01', 'ses-01', 'eeg'))
data = _from_tsv(channels_tsv)
assert 'ElectroMyoGram' in data['description']
# check that the scans list contains two scans
scans_tsv = make_bids_basename(
subject=subject_id, session=session_id, suffix='scans.tsv',
prefix=op.join(output_path, 'sub-01', 'ses-01'))
data = _from_tsv(scans_tsv)
assert len(list(data.values())[0]) == 2
# Also cover iEEG
# We use the same data and pretend that eeg channels are ecog
raw.set_channel_types({raw.ch_names[i]: 'ecog'
for i in mne.pick_types(raw.info, eeg=True)})
output_path = _TempDir()
write_raw_bids(raw, bids_basename, output_path)
_bids_validate(output_path)
def test_bdf(_bids_validate):
"""Test write_raw_bids conversion for Biosemi data."""
output_path = _TempDir()
data_path = op.join(base_path, 'edf', 'tests', 'data')
raw_fname = op.join(data_path, 'test.bdf')
raw = mne.io.read_raw_bdf(raw_fname)
with pytest.warns(UserWarning, match='No line frequency found'):
write_raw_bids(raw, bids_basename, output_path, overwrite=False)
_bids_validate(output_path)
# Test also the reading of channel types from channels.tsv
# the first channel in the raw data is not MISC right now
test_ch_idx = 0
assert coil_type(raw.info, test_ch_idx) != 'misc'
# we will change the channel type to MISC and overwrite the channels file
bids_fname = bids_basename + '_eeg.bdf'
channels_fname = _find_matching_sidecar(bids_fname, output_path,
'channels.tsv')
channels_dict = _from_tsv(channels_fname)
channels_dict['type'][test_ch_idx] = 'MISC'
_to_tsv(channels_dict, channels_fname)
# Now read the raw data back from BIDS, with the tampered TSV, to show
# that the channels.tsv truly influences how read_raw_bids sets ch_types
# in the raw data object
raw = read_raw_bids(bids_fname, output_path)
assert coil_type(raw.info, test_ch_idx) == 'misc'
# Test cropped assertion error
raw = mne.io.read_raw_bdf(raw_fname)
raw.crop(0, raw.times[-2])
with pytest.raises(AssertionError, match='cropped'):
write_raw_bids(raw, bids_basename, output_path)
def test_set(_bids_validate):
"""Test write_raw_bids conversion for EEGLAB data."""
# standalone .set file
output_path = _TempDir()
data_path = op.join(testing.data_path(), 'EEGLAB')
# .set with associated .fdt
output_path = _TempDir()
data_path = op.join(testing.data_path(), 'EEGLAB')
raw_fname = op.join(data_path, 'test_raw.set')
raw = mne.io.read_raw_eeglab(raw_fname)
# embedded - test mne-version assertion
tmp_version = mne.__version__
mne.__version__ = '0.16'
with pytest.raises(ValueError, match='Your version of MNE is too old.'):
write_raw_bids(raw, bids_basename, output_path)
mne.__version__ = tmp_version
# proceed with the actual test for EEGLAB data
write_raw_bids(raw, bids_basename, output_path, overwrite=False)
read_raw_bids(bids_basename + '_eeg.set', output_path)
with pytest.raises(FileExistsError, match="already exists"): # noqa: F821
write_raw_bids(raw, bids_basename, output_path=output_path,
overwrite=False)
_bids_validate(output_path)
# check events.tsv is written
# XXX: only from 0.18 onwards because events_from_annotations
# is broken for earlier versions
events_tsv_fname = op.join(output_path, 'sub-' + subject_id,
'ses-' + session_id, 'eeg',
bids_basename + '_events.tsv')
if check_version('mne', '0.18'):
assert op.exists(events_tsv_fname)
# Also cover iEEG
# We use the same data and pretend that eeg channels are ecog
raw.set_channel_types({raw.ch_names[i]: 'ecog'
for i in mne.pick_types(raw.info, eeg=True)})
output_path = _TempDir()
write_raw_bids(raw, bids_basename, output_path)
_bids_validate(output_path)
@requires_nibabel()
def test_write_anat(_bids_validate):
"""Test writing anatomical data."""
# Get the MNE testing sample data
import nibabel as nib
output_path = _TempDir()
data_path = testing.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
events_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
raw = mne.io.read_raw_fif(raw_fname)
write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
event_id=event_id, overwrite=False)
# Write some MRI data and supply a `trans`
trans_fname = raw_fname.replace('_raw.fif', '-trans.fif')
trans = mne.read_trans(trans_fname)
# Get the T1 weighted MRI data file
# Needs to be converted to Nifti because we only have mgh in our test base
t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
raw=raw, trans=trans, deface=True, verbose=True,
overwrite=True)
_bids_validate(output_path)
# Validate that files are as expected
t1w_json_path = op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.json')
assert op.exists(t1w_json_path)
assert op.exists(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
with open(t1w_json_path, 'r') as f:
t1w_json = json.load(f)
print(t1w_json)
# We only should have AnatomicalLandmarkCoordinates as key
np.testing.assert_array_equal(list(t1w_json.keys()),
['AnatomicalLandmarkCoordinates'])
# And within AnatomicalLandmarkCoordinates only LPA, NAS, RPA in that order
anat_dict = t1w_json['AnatomicalLandmarkCoordinates']
point_list = ['LPA', 'NAS', 'RPA']
np.testing.assert_array_equal(list(anat_dict.keys()),
point_list)
# test the actual values of the voxels (no floating points)
for i, point in enumerate([(66, 51, 46), (41, 32, 74), (17, 53, 47)]):
coords = anat_dict[point_list[i]]
np.testing.assert_array_equal(np.asarray(coords, dtype=int),
point)
# BONUS: test also that we can find the matching sidecar
side_fname = _find_matching_sidecar('sub-01_ses-01_acq-01_T1w.nii.gz',
output_path, 'T1w.json')
assert op.split(side_fname)[-1] == 'sub-01_ses-01_acq-01_T1w.json'
# Now try some anat writing that will fail
# We already have some MRI data there
with pytest.raises(IOError, match='`overwrite` is set to False'):
write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
raw=raw, trans=trans, verbose=True, deface=False,
overwrite=False)
# pass some invalid type as T1 MRI
with pytest.raises(ValueError, match='must be a path to a T1 weighted'):
write_anat(output_path, subject_id, 9999999999999, session_id, raw=raw,
trans=trans, verbose=True, deface=False, overwrite=True)
# Return without writing sidecar
sh.rmtree(anat_dir)
write_anat(output_path, subject_id, t1w_mgh, session_id)
# Assert that we truly cannot find a sidecar
with pytest.raises(RuntimeError, match='Did not find any'):
_find_matching_sidecar('sub-01_ses-01_acq-01_T1w.nii.gz',
output_path, 'T1w.json')
# trans has a wrong type
wrong_type = 1
match = 'transform type {} not known, must be'.format(type(wrong_type))
with pytest.raises(ValueError, match=match):
write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
trans=wrong_type, verbose=True, deface=False,
overwrite=True)
# trans is a str, but file does not exist
wrong_fname = 'not_a_trans'
match = 'trans file "{}" not found'.format(wrong_fname)
with pytest.raises(IOError, match=match):
write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
trans=wrong_fname, verbose=True, overwrite=True)
# However, reading trans if it is a string pointing to trans is fine
write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
trans=trans_fname, verbose=True, deface=False,
overwrite=True)
# Writing without a session does NOT yield "ses-None" anywhere
anat_dir2 = write_anat(output_path, subject_id, t1w_mgh, None)
assert 'ses-None' not in anat_dir2
assert op.exists(op.join(anat_dir2, 'sub-01_T1w.nii.gz'))
# specify trans but not raw
with pytest.raises(ValueError, match='must be specified if `trans`'):
write_anat(output_path, subject_id, t1w_mgh, session_id, raw=None,
trans=trans, verbose=True, deface=False, overwrite=True)
# test deface
anat_dir = write_anat(output_path, subject_id, t1w_mgh,
session_id, raw=raw, trans=trans_fname,
verbose=True, deface=True, overwrite=True)
t1w = nib.load(op.join(anat_dir, 'sub-01_ses-01_T1w.nii.gz'))
vox_sum = t1w.get_data().sum()
anat_dir2 = write_anat(output_path, subject_id, t1w_mgh,
session_id, raw=raw, trans=trans_fname,
verbose=True, deface=dict(inset=25.),
overwrite=True)
t1w2 = nib.load(op.join(anat_dir2, 'sub-01_ses-01_T1w.nii.gz'))
vox_sum2 = t1w2.get_data().sum()
assert vox_sum > vox_sum2
anat_dir3 = write_anat(output_path, subject_id, t1w_mgh,
session_id, raw=raw, trans=trans_fname,
verbose=True, deface=dict(theta=25),
overwrite=True)
t1w3 = nib.load(op.join(anat_dir3, 'sub-01_ses-01_T1w.nii.gz'))
vox_sum3 = t1w3.get_data().sum()
assert vox_sum > vox_sum3
with pytest.raises(ValueError,
match='The raw object, trans and raw must be provided'):
write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
trans=None, verbose=True, deface=True,
overwrite=True)
with pytest.raises(ValueError, match='inset must be numeric'):
write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
trans=trans, verbose=True, deface=dict(inset='small'),
overwrite=True)
with pytest.raises(ValueError, match='inset should be positive'):
write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
trans=trans, verbose=True, deface=dict(inset=-2.),
overwrite=True)
with pytest.raises(ValueError, match='theta must be numeric'):
write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
trans=trans, verbose=True, deface=dict(theta='big'),
overwrite=True)
with pytest.raises(ValueError,
match='theta should be between 0 and 90 degrees'):
write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
trans=trans, verbose=True, deface=dict(theta=100),
overwrite=True)
| 43.152682
| 79
| 0.663235
|
4a1717a584c92aa10a5f44c04e43c4ef343e54e4
| 12,212
|
py
|
Python
|
anyex/async/gateio.py
|
ttwishing/anyex
|
cfd1f2f04ab992b790add4843aafff91e5773cbf
|
[
"MIT"
] | null | null | null |
anyex/async/gateio.py
|
ttwishing/anyex
|
cfd1f2f04ab992b790add4843aafff91e5773cbf
|
[
"MIT"
] | null | null | null |
anyex/async/gateio.py
|
ttwishing/anyex
|
cfd1f2f04ab992b790add4843aafff91e5773cbf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/anyex/anyex/blob/master/CONTRIBUTING.md#how-to-contribute-code
from anyex.async.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # Python 2
except NameError:
    basestring = str  # Python 3
import hashlib
from anyex.base.errors import ExchangeError
class gateio (Exchange):
def describe(self):
return self.deep_extend(super(gateio, self).describe(), {
'id': 'gateio',
'name': 'Gate.io',
'countries': 'CN',
'version': '2',
'rateLimit': 1000,
'has': {
'CORS': False,
'createMarketOrder': False,
'fetchTickers': True,
'withdraw': True,
'createDepositAddress': True,
'fetchDepositAddress': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
'api': {
'public': 'https://data.gate.io/api',
'private': 'https://data.gate.io/api',
},
'www': 'https://gate.io/',
'doc': 'https://gate.io/api2',
'fees': [
'https://gate.io/fee',
'https://support.gate.io/hc/en-us/articles/115003577673',
],
},
'api': {
'public': {
'get': [
'pairs',
'marketinfo',
'marketlist',
'tickers',
'ticker/{id}',
'orderBook/{id}',
'trade/{id}',
'tradeHistory/{id}',
'tradeHistory/{id}/{tid}',
],
},
'private': {
'post': [
'balances',
'depositAddress',
'newAddress',
'depositsWithdrawals',
'buy',
'sell',
'cancelOrder',
'cancelAllOrders',
'getOrder',
'openOrders',
'tradeHistory',
'withdraw',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'maker': 0.002,
'taker': 0.002,
},
},
})
async def fetch_markets(self):
response = await self.publicGetMarketinfo()
markets = self.safe_value(response, 'pairs')
if not markets:
raise ExchangeError(self.id + ' fetchMarkets got an unrecognized response')
result = []
for i in range(0, len(markets)):
market = markets[i]
keys = list(market.keys())
id = keys[0]
details = market[id]
base, quote = id.split('_')
base = base.upper()
quote = quote.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': details['decimal_places'],
'price': details['decimal_places'],
}
amountLimits = {
'min': details['min_amount'],
'max': None,
}
priceLimits = {
'min': None,
'max': None,
}
limits = {
'amount': amountLimits,
'price': priceLimits,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'maker': details['fee'] / 100,
'taker': details['fee'] / 100,
'precision': precision,
'limits': limits,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balance = await self.privatePostBalances()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
code = self.common_currency_code(currency)
account = self.account()
if 'available' in balance:
if currency in balance['available']:
account['free'] = float(balance['available'][currency])
if 'locked' in balance:
if currency in balance['locked']:
account['used'] = float(balance['locked'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
orderbook = await self.publicGetOrderBookId(self.extend({
'id': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
last = float(ticker['last'])
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high24hr']),
'low': float(ticker['low24hr']),
'bid': float(ticker['highestBid']),
'bidVolume': None,
'ask': float(ticker['lowestAsk']),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': float(ticker['percentChange']),
'percentage': None,
'average': None,
'baseVolume': float(ticker['quoteVolume']),
'quoteVolume': float(ticker['baseVolume']),
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTickers(params)
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
ticker = tickers[id]
market = None
if symbol in self.markets:
market = self.markets[symbol]
if id in self.markets_by_id:
market = self.markets_by_id[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTickerId(self.extend({
'id': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
        # exchange reports local time (UTC+8)
timestamp = self.parse8601(trade['date']) - 8 * 60 * 60 * 1000
return {
'id': trade['tradeID'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['type'],
'price': float(trade['rate']),
'amount': self.safe_float(trade, 'amount'),
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetTradeHistoryId(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response['data'], market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
method = 'privatePost' + self.capitalize(side)
order = {
'currencyPair': self.market_id(symbol),
'rate': price,
'amount': amount,
}
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['orderNumber'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privatePostCancelOrder({'orderNumber': id})
async def query_deposit_address(self, method, currency, params={}):
method = 'privatePost' + method + 'Address'
response = await getattr(self, method)(self.extend({
'currency': currency,
}, params))
address = None
if 'addr' in response:
address = self.safe_string(response, 'addr')
return {
'currency': currency,
'address': address,
'status': 'ok' if (address is not None) else 'none',
'info': response,
}
async def create_deposit_address(self, currency, params={}):
return await self.query_deposit_address('New', currency, params)
async def fetch_deposit_address(self, currency, params={}):
return await self.query_deposit_address('Deposit', currency, params)
async def withdraw(self, currency, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
response = await self.privatePostWithdraw(self.extend({
'currency': currency.lower(),
'amount': amount,
            'address': address,  # Address must exist in your AddressBook in security settings
}, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
prefix = (api + '/') if (api == 'private') else ''
url = self.urls['api'][api] + self.version + '/1/' + prefix + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {'nonce': nonce}
body = self.urlencode(self.extend(request, query))
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
headers = {
'Key': self.apiKey,
'Sign': signature,
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
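    # Sketch of the private-request signing implemented above (descriptive only,
    # derived from the code in sign()): the POST body is the urlencoded nonce
    # plus the remaining query params, and the headers carry the API key and an
    # HMAC-SHA512 digest of that body:
    #   Key: <apiKey>
    #   Sign: hmac_sha512(body, secret)
    #   Content-Type: application/x-www-form-urlencoded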
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'result' in response:
result = response['result']
message = self.id + ' ' + self.json(response)
if result is None:
raise ExchangeError(message)
if isinstance(result, basestring):
if result != 'true':
raise ExchangeError(message)
elif not result:
raise ExchangeError(message)
return response
| 37.006061
| 126
| 0.492385
|
4a1717dcd5d2969bbe259c3c79a93f412266db13
| 20
|
py
|
Python
|
Machine_Learning/Design_Tutorials/01-caffe_cats_vs_dogs/files/caffe/code/config/__init__.py
|
mkolod/Vitis-Tutorials
|
33d6cf9686398ef1179778dc0da163291c68b465
|
[
"Apache-2.0"
] | 49
|
2017-11-27T13:17:16.000Z
|
2021-07-21T15:26:39.000Z
|
Machine_Learning/Design_Tutorials/01-caffe_cats_vs_dogs/files/caffe/code/config/__init__.py
|
mkolod/Vitis-Tutorials
|
33d6cf9686398ef1179778dc0da163291c68b465
|
[
"Apache-2.0"
] | 17
|
2017-10-30T08:15:27.000Z
|
2021-05-14T11:12:42.000Z
|
Machine_Learning/Design_Tutorials/01-caffe_cats_vs_dogs/files/caffe/code/config/__init__.py
|
mkolod/Vitis-Tutorials
|
33d6cf9686398ef1179778dc0da163291c68b465
|
[
"Apache-2.0"
] | 2
|
2019-02-12T06:38:34.000Z
|
2020-05-15T13:42:16.000Z
|
# this file is empty
| 20
| 20
| 0.75
|
4a1718d272b2f15ff4e05f3942378cc903886692
| 511
|
py
|
Python
|
run.py
|
subalterngames/mishnahbot
|
6bc1583ece82f2d2817e4eb62894e9de8786dcf3
|
[
"MIT"
] | null | null | null |
run.py
|
subalterngames/mishnahbot
|
6bc1583ece82f2d2817e4eb62894e9de8786dcf3
|
[
"MIT"
] | null | null | null |
run.py
|
subalterngames/mishnahbot
|
6bc1583ece82f2d2817e4eb62894e9de8786dcf3
|
[
"MIT"
] | null | null | null |
import re
import os
from pathlib import Path
from mishnabot.bot import Bot
"""
Run the bot.
Usage:
```
python3 run.py
```
"""
if __name__ == "__main__":
dir_path = os.path.dirname(os.path.realpath(__file__))
secrets = Path(dir_path).joinpath("bot_secrets.txt").read_text()
# Get the bot token.
token = re.search(r"token=(.*)", secrets).group(1)
# Get the Discord channel.
channel = re.search(r"channel=(.*)", secrets).group(1)
bot = Bot(channel=int(channel))
bot.run(token)
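# Illustrative layout of bot_secrets.txt expected by the regexes above
# (placeholder values, not real credentials):
#   token=YOUR_DISCORD_BOT_TOKEN
#   channel=123456789012345678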
| 19.653846
| 68
| 0.655577
|
4a1718d8475d27628e559cb19f7a17bd3546f5b8
| 196
|
py
|
Python
|
PYTHON/Numpy/inner_and_outer.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Numpy/inner_and_outer.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Numpy/inner_and_outer.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import numpy
n = numpy.array(list(map(int, input().split())))
m = numpy.array(list(map(int, input().split())))
print(numpy.inner(n, m))
print(numpy.outer(n, m))
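# Worked example (hypothetical input, for illustration only):
#   line 1: 0 1
#   line 2: 2 3
#   numpy.inner([0, 1], [2, 3]) -> 3            # 0*2 + 1*3
#   numpy.outer([0, 1], [2, 3]) -> [[0, 0],
#                                   [2, 3]]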
| 21.777778
| 48
| 0.673469
|
4a1719357d5289f241fdc4c7703eef923820eaf4
| 927
|
py
|
Python
|
catalog/bindings/ows/telephone_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/ows/telephone_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/ows/telephone_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "http://www.opengis.net/ows/2.0"
@dataclass
class TelephoneType:
"""
Telephone numbers for contacting the responsible individual or
organization.
:ivar voice: Telephone number by which individuals can speak to the
responsible organization or individual.
:ivar facsimile: Telephone number of a facsimile machine for the
responsible organization or individual.
"""
voice: List[str] = field(
default_factory=list,
metadata={
"name": "Voice",
"type": "Element",
"namespace": "http://www.opengis.net/ows/2.0",
},
)
facsimile: List[str] = field(
default_factory=list,
metadata={
"name": "Facsimile",
"type": "Element",
"namespace": "http://www.opengis.net/ows/2.0",
},
)
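# Minimal usage sketch (not part of the original binding; the number below is
# a made-up placeholder):
#   phone = TelephoneType(voice=["+47 00 00 00 00"], facsimile=[])
#   assert phone.voice == ["+47 00 00 00 00"]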
| 26.485714
| 71
| 0.604099
|
4a17199423af205d214efc035a8b73900ae90d4a
| 5,995
|
py
|
Python
|
PROCESSOR/MachineLearning.py
|
maxwells8/Heptet
|
21cb272cb2eead72e85724a8bbcbb81a3c7206d6
|
[
"MIT"
] | 1
|
2020-04-29T11:45:29.000Z
|
2020-04-29T11:45:29.000Z
|
PROCESSOR/MachineLearning.py
|
maxwells8/Heptet
|
21cb272cb2eead72e85724a8bbcbb81a3c7206d6
|
[
"MIT"
] | null | null | null |
PROCESSOR/MachineLearning.py
|
maxwells8/Heptet
|
21cb272cb2eead72e85724a8bbcbb81a3c7206d6
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
from MAIN.Basics import Processor, Space
from operator import itemgetter
class StateSpace(Processor, Space):
def __init__(self, agent):
self.agent = agent
super().__init__(agent.config['StateSpaceState'])
def process(self):
self.agent.data['NETWORK_STATE'] = self._get_network_input()
self.agent.data['ENGINE_STATE' ] = self._get_engine_input()
def _get_network_input(self):
method = self.agent.config['StateSpaceNetworkSampleType']
state = self.get_random_sample(method)
return state
def _get_engine_input(self):
method = self.agent.config['StateSpaceEngineSampleConversion']
state = self.agent.data['NETWORK_STATE']
state = self.convert(state, method)
return state
class ActionSpace(Processor, Space):
def __init__(self, agent):
self.agent = agent
super().__init__(agent.config['ActionSpaceAction'])
def process(self):
self.agent.data['NETWORK_ACTION'] = self._get_network_input()
self.agent.data['ENGINE_ACTION' ] = self._get_engine_input()
def _get_network_input(self):
method = self.agent.config['ActionSpaceNetworkSampleType']
if method == 'exploration':
self.agent.exploration.process()
action = self.agent.data['EXPLORATION_ACTION']
else:
action = self.get_random_sample(method)
return action
def _get_engine_input(self):
method = self.agent.config['ActionSpaceEngineSampleConversion']
index = self.agent.data['EXPLORATION_ACTION']
action = self.convert(index, method)
return action
class RewardEngine(Processor):
def __init__(self, agent, engine):
self.engine = engine
self.agent = agent
def process(self):
reward, record = self._get_reward()
self.agent.data['ENGINE_REWARD'] = reward
self.agent.data['ENGINE_RECORD'] = record
def _get_reward(self):
state = self.agent.data['ENGINE_STATE']
action = self.agent.data['ENGINE_ACTION']
self.engine.process(**state, **action)
return self.engine.reward, self.engine.record
class Exploration(Processor):
def __init__(self, agent):
self.agent = agent
self.method = agent.config['ExplorationMethod']
self.counter = agent.counters[agent.config['ExplorationCounter']]
self.func = self.get_func(self.method)
if self.method == 'boltzmann':
self.target_attr = getattr(self.agent, self.agent.config['ExplorationBoltzmannProbAttribute'])
def process(self):
self.agent.data['EXPLORATION_ACTION'] = self.func()
def get_func(self, method):
method = '_' + method
return getattr(self, method)
def _random(self):
n_action = self.agent.action_space.n_combination
action_idx = random.randrange(n_action)
return action_idx
def _greedy(self):
self.agent.feed_dict[self.agent.input_layer] = [self.agent.data['NETWORK_STATE']]
q_value = self.agent.session.run(self.agent.output_layer, feed_dict=self.agent.feed_dict)
q_value = q_value.reshape(-1,)
action_idx = np.argmax(q_value)
return action_idx
def _e_greedy(self):
e = self.counter.value
action_idx = self._random() if random.random() < e else self._greedy()
self.counter.step()
return action_idx
def _boltzmann(self):
self.agent.data['BOLTZMANN_TEMP'] = self.counter.value
self.agent.feed_dict[self.agent.input_layer] = [self.agent.data['NETWORK_STATE']]
self.agent.feed_dict[self.agent.temp ] = [self.agent.data['BOLTZMANN_TEMP']]
prob = self.agent.session.run(self.target_attr, feed_dict=self.agent.feed_dict)
action_idx = np.random.choice(self.agent.action_space.n_combination, p=prob)
self.counter.step()
return action_idx
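    # Summary of the exploration strategies above (descriptive only):
    #   _random    - uniform choice over the action space
    #   _greedy    - argmax of the network's Q-values for the current state
    #   _e_greedy  - with probability counter.value pick _random, else _greedy
    #   _boltzmann - sample an action index from the probability vector read
    #                from the configured network attribute at the current temperature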
class ExperienceBuffer(Processor):
def __init__(self, agent):
buffer_size = int(agent.config['ExperienceBufferBufferSize'])
self.agent = agent
self.buffer = []
self.buffer_size = buffer_size
def process(self, method):
if method == 'add':
self._add_sample(self.agent.data['SAMPLE'])
elif method == 'get':
self.agent.data['EXPERIENCE_BUFFER_SAMPLE'] = self._get_sample()
else:
raise ValueError("Error: method name should be add/get.")
    def _add_sample(self, sample):
        sample_length = len(sample)
        buffer_length = len(self.buffer)
        total_length = buffer_length + sample_length
        # Cap the buffer at self.buffer_size: evict the oldest entries first
        # when the incoming samples would overflow it.
        if total_length > self.buffer_size:
            idx_start = total_length - self.buffer_size
            self.buffer = self.buffer[idx_start:]
        self.buffer.extend(sample)
def _get_sample(self):
size = int(self.agent.config['ExperienceBufferSamplingSize'])
sample = itemgetter(*np.random.randint(len(self.buffer), size=size))(self.buffer)
return sample
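    # Illustrative flow (hypothetical agent wiring, for reference only):
    #   agent.data['SAMPLE'] = [(s, a, r, s_next)]
    #   buf.process('add')   # appends the sample(s) to the internal buffer
    #   buf.process('get')   # puts a random batch into agent.data['EXPERIENCE_BUFFER_SAMPLE']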
class Recorder(Processor):
def __init__(self, agent):
self.data_field = agent.config['RecorderDataField']
self.record_freq = agent.config['RecorderRecordFreq']
self.agent = agent
if self.data_field is not None:
self.record = {key: [] for key in self.data_field}
def process(self):
if self.data_field is not None:
if (self.agent.epoch_counter.n_step % self.record_freq) == 0:
for key in self.record.keys():
self.record[key].append(self.agent.data[key])
| 34.653179
| 106
| 0.648374
|
4a171ad99f544865878ab6f9f8a7cd290ce4c422
| 18,916
|
py
|
Python
|
tools/utils.py
|
ganadist/r8
|
850b5a4725954b677103a3a575239d0f330c0b0f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tools/utils.py
|
ganadist/r8
|
850b5a4725954b677103a3a575239d0f330c0b0f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tools/utils.py
|
ganadist/r8
|
850b5a4725954b677103a3a575239d0f330c0b0f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2016, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Different utility functions used accross scripts
import hashlib
import json
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import zipfile
import defines
import jdk
ANDROID_JAR_DIR = 'third_party/android_jar/lib-v{api}'
ANDROID_JAR = os.path.join(ANDROID_JAR_DIR, 'android.jar')
TOOLS_DIR = defines.TOOLS_DIR
REPO_ROOT = defines.REPO_ROOT
THIRD_PARTY = defines.THIRD_PARTY
ANDROID_SDK = os.path.join(THIRD_PARTY, 'android_sdk')
MEMORY_USE_TMP_FILE = 'memory_use.tmp'
DEX_SEGMENTS_RESULT_PATTERN = re.compile('- ([^:]+): ([0-9]+)')
BUILD = os.path.join(REPO_ROOT, 'build')
BUILD_DEPS_DIR = os.path.join(BUILD, 'deps')
BUILD_MAIN_DIR = os.path.join(BUILD, 'classes', 'main')
BUILD_TEST_DIR = os.path.join(BUILD, 'classes', 'test')
LIBS = os.path.join(BUILD, 'libs')
GENERATED_LICENSE_DIR = os.path.join(BUILD, 'generatedLicense')
SRC_ROOT = os.path.join(REPO_ROOT, 'src', 'main', 'java')
TEST_ROOT = os.path.join(REPO_ROOT, 'src', 'test', 'java')
REPO_SOURCE = 'https://r8.googlesource.com/r8'
D8 = 'd8'
R8 = 'r8'
R8LIB = 'r8lib'
R8LIB_NO_DEPS = 'r8LibNoDeps'
R8_SRC = 'sourceJar'
LIBRARY_DESUGAR_CONVERSIONS = 'buildLibraryDesugarConversions'
D8_JAR = os.path.join(LIBS, 'd8.jar')
R8_JAR = os.path.join(LIBS, 'r8.jar')
R8LIB_JAR = os.path.join(LIBS, 'r8lib.jar')
R8LIB_MAP = os.path.join(LIBS, 'r8lib.jar.map')
R8_SRC_JAR = os.path.join(LIBS, 'r8-src.jar')
R8LIB_EXCLUDE_DEPS_JAR = os.path.join(LIBS, 'r8lib-exclude-deps.jar')
R8_FULL_EXCLUDE_DEPS_JAR = os.path.join(LIBS, 'r8-full-exclude-deps.jar')
MAVEN_ZIP = os.path.join(LIBS, 'r8.zip')
MAVEN_ZIP_LIB = os.path.join(LIBS, 'r8lib.zip')
LIBRARY_DESUGAR_CONVERSIONS_ZIP = os.path.join(LIBS, 'library_desugar_conversions.zip')
DESUGAR_CONFIGURATION = os.path.join(
'src', 'library_desugar', 'desugar_jdk_libs.json')
DESUGAR_CONFIGURATION_MAVEN_ZIP = os.path.join(
LIBS, 'desugar_jdk_libs_configuration.zip')
GENERATED_LICENSE = os.path.join(GENERATED_LICENSE_DIR, 'LICENSE')
RT_JAR = os.path.join(REPO_ROOT, 'third_party/openjdk/openjdk-rt-1.8/rt.jar')
R8LIB_KEEP_RULES = os.path.join(REPO_ROOT, 'src/main/keep.txt')
CF_SEGMENTS_TOOL = os.path.join(THIRD_PARTY, 'cf_segments')
PINNED_R8_JAR = os.path.join(REPO_ROOT, 'third_party/r8/r8.jar')
PINNED_PGR8_JAR = os.path.join(REPO_ROOT, 'third_party/r8/r8-pg6.0.1.jar')
SAMPLE_LIBRARIES_SHA_FILE = os.path.join(
THIRD_PARTY, 'sample_libraries.tar.gz.sha1')
OPENSOURCE_APPS_SHA_FILE = os.path.join(
THIRD_PARTY, 'opensource_apps.tar.gz.sha1')
OPENSOURCE_APPS_FOLDER = os.path.join(THIRD_PARTY, 'opensource_apps')
BAZEL_SHA_FILE = os.path.join(THIRD_PARTY, 'bazel.tar.gz.sha1')
BAZEL_TOOL = os.path.join(THIRD_PARTY, 'bazel')
JAVA8_SHA_FILE = os.path.join(THIRD_PARTY, 'openjdk', 'jdk8', 'linux-x86.tar.gz.sha1')
ANDROID_HOME_ENVIROMENT_NAME = "ANDROID_HOME"
ANDROID_TOOLS_VERSION_ENVIRONMENT_NAME = "ANDROID_TOOLS_VERSION"
USER_HOME = os.path.expanduser('~')
R8_TEST_RESULTS_BUCKET = 'r8-test-results'
def archive_file(name, gs_dir, src_file):
gs_file = '%s/%s' % (gs_dir, name)
upload_file_to_cloud_storage(src_file, gs_file, public_read=False)
def archive_value(name, gs_dir, value):
with TempDir() as temp:
tempfile = os.path.join(temp, name);
with open(tempfile, 'w') as f:
f.write(str(value))
archive_file(name, gs_dir, tempfile)
def getAndroidHome():
return os.environ.get(
ANDROID_HOME_ENVIROMENT_NAME, os.path.join(USER_HOME, 'Android', 'Sdk'))
def getAndroidBuildTools():
version = os.environ.get(ANDROID_TOOLS_VERSION_ENVIRONMENT_NAME, '28.0.3')
return os.path.join(getAndroidHome(), 'build-tools', version)
def Print(s, quiet=False):
if quiet:
return
print(s)
def Warn(message):
CRED = '\033[91m'
CEND = '\033[0m'
print(CRED + message + CEND)
def PrintCmd(cmd, env=None, quiet=False):
if quiet:
return
if type(cmd) is list:
cmd = ' '.join(cmd)
if env:
env = ' '.join(['{}=\"{}\"'.format(x, y) for x, y in env.iteritems()])
print('Running: {} {}'.format(env, cmd))
else:
print('Running: {}'.format(cmd))
# I know this will hit os on windows eventually if we don't do this.
sys.stdout.flush()
class ProgressLogger(object):
CLEAR_LINE = '\033[K'
UP = '\033[F'
def __init__(self, quiet=False):
self._count = 0
self._has_printed = False
self._quiet = quiet
def log(self, text):
if self._quiet:
if self._has_printed:
sys.stdout.write(ProgressLogger.UP + ProgressLogger.CLEAR_LINE)
if len(text) > 140:
text = text[0:140] + '...'
print(text)
self._has_printed = True
def done(self):
if self._quiet and self._has_printed:
sys.stdout.write(ProgressLogger.UP + ProgressLogger.CLEAR_LINE)
print('')
sys.stdout.write(ProgressLogger.UP)
def RunCmd(cmd, env_vars=None, quiet=False, fail=True, logging=True):
PrintCmd(cmd, env=env_vars, quiet=quiet)
env = os.environ.copy()
if env_vars:
env.update(env_vars)
process = subprocess.Popen(
cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = []
logger = ProgressLogger(quiet=quiet) if logging else None
failed = False
while True:
line = process.stdout.readline()
if line != b'':
stripped = line.rstrip()
stdout.append(stripped)
if logger:
logger.log(stripped)
# TODO(christofferqa): r8 should fail with non-zero exit code.
if ('AssertionError:' in stripped
or 'CompilationError:' in stripped
or 'CompilationFailedException:' in stripped
or 'Compilation failed' in stripped
or 'FAILURE:' in stripped
or 'org.gradle.api.ProjectConfigurationException' in stripped
or 'BUILD FAILED' in stripped):
failed = True
else:
if logger:
logger.done()
exit_code = process.poll()
if exit_code or failed:
for line in stdout:
Warn(line)
if fail:
raise subprocess.CalledProcessError(
exit_code or -1, cmd, output='\n'.join(stdout))
return stdout
def RunGradlew(
args, clean=True, stacktrace=True, use_daemon=False, env_vars=None,
quiet=False, fail=True, logging=True):
cmd = ['./gradlew']
if clean:
assert 'clean' not in args
cmd.append('clean')
if stacktrace:
assert '--stacktrace' not in args
cmd.append('--stacktrace')
if not use_daemon:
assert '--no-daemon' not in args
cmd.append('--no-daemon')
cmd.extend(args)
return RunCmd(cmd, env_vars=env_vars, quiet=quiet, fail=fail, logging=logging)
def IsWindows():
return defines.IsWindows()
def IsLinux():
return defines.IsLinux()
def IsOsX():
return defines.IsOsX()
def EnsureDepFromGoogleCloudStorage(dep, tgz, sha1, msg):
if not os.path.exists(dep) or os.path.getmtime(tgz) < os.path.getmtime(sha1):
DownloadFromGoogleCloudStorage(sha1)
# Update the mtime of the tar file to make sure we do not run again unless
# there is an update.
os.utime(tgz, None)
else:
print 'Ensure cloud dependency:', msg, 'present'
def DownloadFromX20(sha1_file):
download_script = os.path.join(REPO_ROOT, 'tools', 'download_from_x20.py')
cmd = [download_script, sha1_file]
PrintCmd(cmd)
subprocess.check_call(cmd)
def DownloadFromGoogleCloudStorage(sha1_file, bucket='r8-deps', auth=False):
suffix = '.bat' if IsWindows() else ''
download_script = 'download_from_google_storage%s' % suffix
cmd = [download_script]
if not auth:
cmd.append('-n')
cmd.extend(['-b', bucket, '-u', '-s', sha1_file])
PrintCmd(cmd)
subprocess.check_call(cmd)
def get_sha1(filename):
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
def is_master():
remotes = subprocess.check_output(['git', 'branch', '-r', '--contains',
'HEAD'])
return 'origin/master' in remotes
def get_HEAD_sha1():
return get_HEAD_sha1_for_checkout(REPO_ROOT)
def get_HEAD_sha1_for_checkout(checkout):
cmd = ['git', 'rev-parse', 'HEAD']
PrintCmd(cmd)
with ChangedWorkingDirectory(checkout):
return subprocess.check_output(cmd).strip()
def makedirs_if_needed(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def upload_dir_to_cloud_storage(directory, destination, is_html=False, public_read=True):
# Upload and make the content encoding right for viewing directly
cmd = ['gsutil.py', '-m', 'cp']
if is_html:
cmd += ['-z', 'html']
if public_read:
cmd += ['-a', 'public-read']
cmd += ['-R', directory, destination]
PrintCmd(cmd)
subprocess.check_call(cmd)
def upload_file_to_cloud_storage(source, destination, public_read=True):
cmd = ['gsutil.py', 'cp']
if public_read:
cmd += ['-a', 'public-read']
cmd += [source, destination]
PrintCmd(cmd)
subprocess.check_call(cmd)
def delete_file_from_cloud_storage(destination):
cmd = ['gsutil.py', 'rm', destination]
PrintCmd(cmd)
subprocess.check_call(cmd)
def ls_files_on_cloud_storage(destination):
cmd = ['gsutil.py', 'ls', destination]
PrintCmd(cmd)
return subprocess.check_output(cmd)
def cat_file_on_cloud_storage(destination, ignore_errors=False):
cmd = ['gsutil.py', 'cat', destination]
PrintCmd(cmd)
try:
return subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
if ignore_errors:
return ''
else:
raise e
def file_exists_on_cloud_storage(destination):
cmd = ['gsutil.py', 'ls', destination]
PrintCmd(cmd)
return subprocess.call(cmd) == 0
def download_file_from_cloud_storage(source, destination):
cmd = ['gsutil.py', 'cp', source, destination]
PrintCmd(cmd)
subprocess.check_call(cmd)
def create_archive(name, sources=None):
if not sources:
sources = [name]
tarname = '%s.tar.gz' % name
with tarfile.open(tarname, 'w:gz') as tar:
for source in sources:
tar.add(source)
return tarname
def extract_dir(filename):
return filename[0:len(filename) - len('.tar.gz')]
def unpack_archive(filename):
dest_dir = extract_dir(filename)
if os.path.exists(dest_dir):
print 'Deleting existing dir %s' % dest_dir
shutil.rmtree(dest_dir)
dirname = os.path.dirname(os.path.abspath(filename))
with tarfile.open(filename, 'r:gz') as tar:
tar.extractall(path=dirname)
def check_prodacces():
subprocess.check_call(['prodaccess'])
# Note that gcs is eventually consistent with regards to list operations.
# This is not a problem in our case, but don't ever use this method
# for synchronization.
def cloud_storage_exists(destination):
cmd = ['gsutil.py', 'ls', destination]
PrintCmd(cmd)
exit_code = subprocess.call(cmd)
return exit_code == 0
class TempDir(object):
def __init__(self, prefix='', delete=True):
self._temp_dir = None
self._prefix = prefix
self._delete = delete
def __enter__(self):
self._temp_dir = tempfile.mkdtemp(self._prefix)
return self._temp_dir
def __exit__(self, *_):
if self._delete:
shutil.rmtree(self._temp_dir, ignore_errors=True)
class ChangedWorkingDirectory(object):
def __init__(self, working_directory, quiet=False):
self._quiet = quiet
self._working_directory = working_directory
def __enter__(self):
self._old_cwd = os.getcwd()
if not self._quiet:
print 'Enter directory:', self._working_directory
os.chdir(self._working_directory)
def __exit__(self, *_):
if not self._quiet:
print 'Enter directory:', self._old_cwd
os.chdir(self._old_cwd)
# Reading Android CTS test_result.xml
class CtsModule(object):
def __init__(self, module_name):
self.name = module_name
class CtsTestCase(object):
def __init__(self, test_case_name):
self.name = test_case_name
class CtsTest(object):
def __init__(self, test_name, outcome):
self.name = test_name
self.outcome = outcome
# Generator yielding CtsModule, CtsTestCase or CtsTest from
# reading through a CTS test_result.xml file.
def read_cts_test_result(file_xml):
re_module = re.compile('<Module name="([^"]*)"')
re_test_case = re.compile('<TestCase name="([^"]*)"')
re_test = re.compile('<Test result="(pass|fail)" name="([^"]*)"')
with open(file_xml) as f:
for line in f:
m = re_module.search(line)
if m:
yield CtsModule(m.groups()[0])
continue
m = re_test_case.search(line)
if m:
yield CtsTestCase(m.groups()[0])
continue
m = re_test.search(line)
if m:
outcome = m.groups()[0]
assert outcome in ['fail', 'pass']
yield CtsTest(m.groups()[1], outcome == 'pass')
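# Illustrative shape of the test_result.xml lines matched above (element and
# attribute names taken from the regexes; values are placeholders):
#   <Module name="CtsExampleTestCases">
#   <TestCase name="ExampleTest">
#   <Test result="pass" name="testSomething">
# Hypothetical consumption:
#   for item in read_cts_test_result('test_result.xml'):
#     if isinstance(item, CtsTest):
#       print item.name, item.outcome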
def grep_memoryuse(logfile):
re_vmhwm = re.compile('^VmHWM:[ \t]*([0-9]+)[ \t]*([a-zA-Z]*)')
result = None
with open(logfile) as f:
for line in f:
m = re_vmhwm.search(line)
if m:
groups = m.groups()
s = len(groups)
if s >= 1:
result = int(groups[0])
if s >= 2:
unit = groups[1]
if unit == 'kB':
result *= 1024
elif unit != '':
raise Exception('Unrecognized unit in memory usage log: {}'
.format(unit))
if result is None:
raise Exception('No memory usage found in log: {}'.format(logfile))
return result
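# Example of a status-log line the regex above matches (illustrative):
#   VmHWM:      123456 kB
# grep_memoryuse would return 123456 * 1024 bytes for that line.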
# Return a dictionary: {segment_name -> segments_size}
def getDexSegmentSizes(dex_files):
assert len(dex_files) > 0
cmd = [jdk.GetJavaExecutable(), '-jar', R8_JAR, 'dexsegments']
cmd.extend(dex_files)
PrintCmd(cmd)
output = subprocess.check_output(cmd)
matches = DEX_SEGMENTS_RESULT_PATTERN.findall(output)
if matches is None or len(matches) == 0:
raise Exception('DexSegments failed to return any output for' \
' these files: {}'.format(dex_files))
result = {}
for match in matches:
result[match[0]] = int(match[1])
return result
# Return a dictionary: {segment_name -> segments_size}
def getCfSegmentSizes(cfFile):
cmd = [jdk.GetJavaExecutable(),
'-cp',
CF_SEGMENTS_TOOL,
'com.android.tools.r8.cf_segments.MeasureLib',
cfFile]
PrintCmd(cmd)
output = subprocess.check_output(cmd)
matches = DEX_SEGMENTS_RESULT_PATTERN.findall(output)
if matches is None or len(matches) == 0:
raise Exception('CfSegments failed to return any output for' \
' the file: ' + cfFile)
result = {}
for match in matches:
result[match[0]] = int(match[1])
return result
def get_maven_path(artifact, version):
return os.path.join('com', 'android', 'tools', artifact, version)
def print_cfsegments(prefix, cf_files):
for cf_file in cf_files:
for segment_name, size in getCfSegmentSizes(cf_file).items():
print('{}-{}(CodeSize): {}'
.format(prefix, segment_name, size))
def print_dexsegments(prefix, dex_files):
for segment_name, size in getDexSegmentSizes(dex_files).items():
print('{}-{}(CodeSize): {}'
.format(prefix, segment_name, size))
# Ensure that we are not benchmarking with a google jvm.
def check_java_version():
cmd= [jdk.GetJavaExecutable(), '-version']
output = subprocess.check_output(cmd, stderr = subprocess.STDOUT)
m = re.search('openjdk version "([^"]*)"', output)
if m is None:
raise Exception("Can't check java version: no version string in output"
" of 'java -version': '{}'".format(output))
version = m.groups(0)[0]
m = re.search('google', version)
if m is not None:
raise Exception("Do not use google JVM for benchmarking: " + version)
def get_android_jar_dir(api):
return os.path.join(REPO_ROOT, ANDROID_JAR_DIR.format(api=api))
def get_android_jar(api):
return os.path.join(REPO_ROOT, ANDROID_JAR.format(api=api))
def get_android_optional_jars(api):
android_optional_jars_dir = os.path.join(get_android_jar_dir(api), 'optional')
android_optional_jars = [
os.path.join(android_optional_jars_dir, 'android.test.base.jar'),
os.path.join(android_optional_jars_dir, 'android.test.mock.jar'),
os.path.join(android_optional_jars_dir, 'android.test.runner.jar'),
os.path.join(android_optional_jars_dir, 'org.apache.http.legacy.jar')
]
return [
android_optional_jar for android_optional_jar in android_optional_jars
if os.path.isfile(android_optional_jar)]
def is_bot():
return 'SWARMING_BOT_ID' in os.environ
def uncompressed_size(path):
return sum(z.file_size for z in zipfile.ZipFile(path).infolist())
def getR8Version(path):
cmd = [jdk.GetJavaExecutable(), '-cp', path, 'com.android.tools.r8.R8',
'--version']
output = subprocess.check_output(cmd, stderr = subprocess.STDOUT)
# output is of the form 'R8 <version> (with additional info)'
# so we split on '('; clean up tailing spaces; and strip off 'R8 '.
return output.split('(')[0].strip()[3:]
def desugar_configuration_version():
with open(DESUGAR_CONFIGURATION, 'r') as f:
configuration_json = json.loads(f.read())
configuration_format_version = \
configuration_json.get('configuration_format_version')
version = configuration_json.get('version')
if not version:
raise Exception(
'No "version" found in ' + utils.DESUGAR_CONFIGURATION)
check_basic_semver_version(version, 'in ' + DESUGAR_CONFIGURATION)
return version
class SemanticVersion:
def __init__(self, major, minor, patch):
self.major = major
self.minor = minor
self.patch = patch
# Build metadata currently not suppported
def larger_than(self, other):
if self.major > other.major:
return True
if self.major == other.major and self.minor > other.minor:
return True
if self.patch:
return (self.major == other.major
and self.minor == other.minor
and self.patch > other.patch)
else:
return False
# Check that the passed string is formatted as a basic semver version (x.y.z)
# See https://semver.org/.
def check_basic_semver_version(version, error_context = '', components = 3):
regexp = '^'
for x in range(components):
regexp += '([0-9]+)'
if x < components - 1:
regexp += '\\.'
regexp += '$'
reg = re.compile(regexp)
match = reg.match(version)
if not match:
raise Exception("Invalid version '"
+ version
+ "'"
+ (' ' + error_context) if len(error_context) > 0 else '')
if components == 2:
return SemanticVersion(int(match.group(1)), int(match.group(2)), None)
elif components == 3:
return SemanticVersion(
int(match.group(1)), int(match.group(2)), int(match.group(3)))
else:
raise Exception('Argument "components" must be 2 or 3')
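# Illustrative calls (not part of the original module):
#   check_basic_semver_version('1.2.3')              -> SemanticVersion(1, 2, 3)
#   check_basic_semver_version('1.2', components=2)  -> SemanticVersion(1, 2, None)
#   check_basic_semver_version('1.2.x')              -> raises Exception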
| 31.632107
| 89
| 0.685504
|
4a171bc93fbfcc0783209ba764573a80d93fa93b
| 44
|
py
|
Python
|
data_structures/__init__.py
|
noorzeea/python_tools
|
1c83f6161badb4de4eb7d1d2de2ed3a5b8c69b5e
|
[
"MIT"
] | null | null | null |
data_structures/__init__.py
|
noorzeea/python_tools
|
1c83f6161badb4de4eb7d1d2de2ed3a5b8c69b5e
|
[
"MIT"
] | null | null | null |
data_structures/__init__.py
|
noorzeea/python_tools
|
1c83f6161badb4de4eb7d1d2de2ed3a5b8c69b5e
|
[
"MIT"
] | null | null | null |
from .tuples import tupleToList, listToTuple
| 44
| 44
| 0.863636
|
4a171be5cb2fc632fade2d22b1eb0abd75a5b0fd
| 2,504
|
py
|
Python
|
Analysis_tools/counter.py
|
miniaoshi/Pele_scripts
|
375a948911a75396ccd47c98de8ef2f73d4f44fa
|
[
"Apache-2.0"
] | 1
|
2021-06-07T19:12:33.000Z
|
2021-06-07T19:12:33.000Z
|
Analysis_tools/counter.py
|
miniaoshi/Pele_scripts
|
375a948911a75396ccd47c98de8ef2f73d4f44fa
|
[
"Apache-2.0"
] | 1
|
2018-09-27T10:03:41.000Z
|
2018-09-27T10:03:41.000Z
|
Analysis_tools/counter.py
|
miniaoshi/Pele_scripts
|
375a948911a75396ccd47c98de8ef2f73d4f44fa
|
[
"Apache-2.0"
] | 1
|
2018-06-07T16:00:31.000Z
|
2018-06-07T16:00:31.000Z
|
#/usr/bin/python2.7
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
import numpy as np
import os
import errno
import argparse
import pandas as pd
import glob
import re
import sys
import interactivePlot as ip
"""
Description: Plot a histogram of how many values in each range of values
For any problem do not hesitate to contact us through the email address written below.
"""
__author__ = "Daniel Soler Viladrich"
__email__ = "daniel.soler@nostrumbiodiscovery.com"
OUTPUT_FOLDER = 'Counts'
def main(criteria, bin, output=OUTPUT_FOLDER, numfolders=False):
reports = ip.find_reports(os.getcwd(), numfolders)
crit_name = get_column_names(reports, criteria)
values = parse_values(reports, criteria, crit_name)
    plot_histogram(values, bin, crit_name)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("crit", type=int, help="Number of column report we want to create the plot with. i.e 6")
parser.add_argument("bin", type=int, help="Bin/Interval of the histogram. i.e 1")
parser.add_argument("--out", "-o", type=str, help="Output Path. i.e: BindingEnergies_apo", default=OUTPUT_FOLDER)
parser.add_argument("--numfolders", "-nm", action="store_true", help="Not to parse non numerical folders")
args = parser.parse_args()
return args.crit, args.bin, args.out, args.numfolders
def parse_values(reports, criteria, crit_name):
"""
    Description: Parse the 'reports' and collect the values of the
    chosen column into a single DataFrame.
"""
INITIAL_DATA = [
(crit_name, [])
]
    # pd.DataFrame.from_items was removed in pandas 1.0; build the same
    # empty single-column frame from a dict instead.
    min_values = pd.DataFrame(dict(INITIAL_DATA))
for file in reports:
data = pd.read_csv(file, sep=' ', engine='python')
selected_data = data.iloc[:, [criteria-1]]
min_values = pd.concat([min_values, selected_data])
return min_values
def plot_histogram(values, bin, criteria):
values_list = values[criteria].tolist()
plt.hist(values_list, bin, rwidth=0.98)
plt.title('Histogram')
plt.xlabel(criteria)
plt.ylabel('Counts')
plt.show()
def get_column_names(reports, criteria):
data = pd.read_csv(reports[0], sep=' ', engine='python')
data = list(data)
return data[criteria-1]
if __name__ == '__main__':
crit, bin, out, num = parse_args()
main(crit, bin, out, num)
| 29.116279
| 117
| 0.695288
|
4a171c3d5c01e0a5ea6e77f2295d174ad28d0da0
| 64,598
|
py
|
Python
|
se/se_epub_lint.py
|
loftwah/tools
|
24f5d69d24e4a217da226b7c5f35a84d8fc0eb4a
|
[
"CC0-1.0"
] | null | null | null |
se/se_epub_lint.py
|
loftwah/tools
|
24f5d69d24e4a217da226b7c5f35a84d8fc0eb4a
|
[
"CC0-1.0"
] | null | null | null |
se/se_epub_lint.py
|
loftwah/tools
|
24f5d69d24e4a217da226b7c5f35a84d8fc0eb4a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
"""
Contains the LintMessage class and the Lint function, which is broken out of
the SeEpub class for readability and maintainability.
Strictly speaking, the lint() function should be a class member of SeEpub. But
the function is very big and it makes editing easier to put in a separate file.
"""
import filecmp
import glob
import html
import io
import os
import unicodedata
from pathlib import Path
from typing import Dict, List
import importlib_resources
import lxml.cssselect
import lxml.etree as etree
import regex
import roman
from bs4 import BeautifulSoup, NavigableString
import se
import se.easy_xml
import se.formatting
import se.images
class LintMessage:
"""
An object representing an output message for the lint function.
Contains information like message text, severity, and the epub filename that generated the message.
"""
text = ""
filename = ""
message_type = se.MESSAGE_TYPE_WARNING
is_submessage = False
def __init__(self, text: str, message_type=se.MESSAGE_TYPE_WARNING, filename: str = "", is_submessage: bool = False):
self.text = text.strip()
self.filename = filename
self.message_type = message_type
self.is_submessage = is_submessage
def _get_malformed_urls(xhtml: str) -> list:
"""
Helper function used in self.lint()
Get a list of URLs in the epub that do not match SE standards.
INPUTS
xhtml: A string of XHTML to check
OUTPUTS
A list of strings representing any malformed URLs in the XHTML string
"""
messages = []
# Check for non-https URLs
if "http://gutenberg.org" in xhtml or "https://gutenberg.org" in xhtml:
messages.append(LintMessage("gutenberg.org URL missing leading www.", se.MESSAGE_TYPE_ERROR))
if "http://www.gutenberg.org" in xhtml:
messages.append(LintMessage("Non-https gutenberg.org URL.", se.MESSAGE_TYPE_ERROR))
if "http://www.pgdp.net" in xhtml:
messages.append(LintMessage("Non-https pgdp.net URL.", se.MESSAGE_TYPE_ERROR))
if "http://catalog.hathitrust.org" in xhtml:
messages.append(LintMessage("Non-https hathitrust.org URL.", se.MESSAGE_TYPE_ERROR))
if "http://archive.org" in xhtml:
messages.append(LintMessage("Non-https archive.org URL.", se.MESSAGE_TYPE_ERROR))
if "www.archive.org" in xhtml:
messages.append(LintMessage("archive.org URL should not have leading www.", se.MESSAGE_TYPE_ERROR))
if "http://en.wikipedia.org" in xhtml:
messages.append(LintMessage("Non-https en.wikipedia.org URL.", se.MESSAGE_TYPE_ERROR))
# Check for malformed canonical URLs
if regex.search(r"books\.google\.com/books\?id=.+?[&#]", xhtml):
messages.append(LintMessage("Non-canonical Google Books URL. Google Books URLs must look exactly like https://books.google.com/books?id=<BOOK-ID>"))
if "babel.hathitrust.org" in xhtml:
messages.append(LintMessage("Non-canonical HathiTrust URL. HathiTrust URLs must look exactly like https://catalog.hathitrust.org/Record/<BOOK-ID>"))
if ".gutenberg.org/files/" in xhtml:
messages.append(LintMessage("Non-canonical Project Gutenberg URL. Project Gutenberg URLs must look exactly like https://www.gutenberg.org/ebooks/<BOOK-ID>"))
if "archive.org/stream" in xhtml:
messages.append(LintMessage("Non-canonical archive.org URL. Internet Archive URLs must look exactly like https://archive.org/details/<BOOK-ID>"))
return messages
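# Hypothetical usage sketch: _get_malformed_urls() is a pure function of its
# input string, so it can be exercised on a small XHTML fragment directly.
def _example_check_urls() -> None:
    sample = '<p>See <a href="http://www.gutenberg.org/ebooks/84">the source</a>.</p>'
    for message in _get_malformed_urls(sample):
        # The sample above triggers the "Non-https gutenberg.org URL." error.
        print(message.text)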
def _get_unused_selectors(self) -> set:
"""
Helper function used in self.lint(); merge directly into lint()?
Get a list of CSS selectors that do not actually select HTML in the epub.
INPUTS
None
OUTPUTS
A list of strings representing CSS selectors that do not actually select HTML in the epub.
"""
try:
with open(self.path / "src" / "epub" / "css" / "local.css", encoding="utf-8") as file:
css = file.read()
except Exception:
raise FileNotFoundError("Couldn't open {}".format(self.path / "src" / "epub" / "css" / "local.css"))
# Remove @supports directives, as the parser can't handle them
css = regex.sub(r"^@supports\(.+?\){(.+?)}\s*}", "\\1}", css, flags=regex.MULTILINE | regex.DOTALL)
# Remove actual content of css selectors
css = regex.sub(r"{[^}]+}", "", css)
# Remove trailing commas
css = regex.sub(r",", "", css)
# Remove comments
css = regex.sub(r"/\*.+?\*/", "", css, flags=regex.DOTALL)
# Remove @ defines
css = regex.sub(r"^@.+", "", css, flags=regex.MULTILINE)
# Construct a dictionary of selectors
selectors = {line for line in css.splitlines() if line != ""}
unused_selectors = set(selectors)
# Get a list of .xhtml files to search
filenames = glob.glob(str(self.path / "src" / "epub" / "text" / "*.xhtml"))
# Now iterate over each CSS selector and see if it's used in any of the files we found
for selector in selectors:
try:
sel = lxml.cssselect.CSSSelector(selector, translator="html", namespaces=se.XHTML_NAMESPACES)
except lxml.cssselect.ExpressionError:
# This gets thrown if we use pseudo-elements, which lxml doesn't support
unused_selectors.remove(selector)
continue
except lxml.cssselect.SelectorSyntaxError as ex:
raise se.InvalidCssException(f"Couldn't parse CSS in or near this line: {selector}\n{ex}")
for filename in filenames:
if not filename.endswith("titlepage.xhtml") and not filename.endswith("imprint.xhtml") and not filename.endswith("uncopyright.xhtml"):
# We have to remove the default namespace declaration from our document, otherwise
# xpath won't find anything at all. See http://stackoverflow.com/questions/297239/why-doesnt-xpath-work-when-processing-an-xhtml-document-with-lxml-in-python
with open(filename, "r", encoding="utf-8") as file:
xhtml = file.read().replace(" xmlns=\"http://www.w3.org/1999/xhtml\"", "")
try:
tree = etree.fromstring(str.encode(xhtml))
except etree.XMLSyntaxError as ex:
raise se.InvalidXhtmlException("Couldn't parse XHTML in file: {}, error: {}".format(filename, str(ex)))
except Exception:
raise se.InvalidXhtmlException(f"Couldn't parse XHTML in file: {filename}")
if tree.xpath(sel.path, namespaces=se.XHTML_NAMESPACES):
unused_selectors.remove(selector)
break
return unused_selectors
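# Minimal illustrative sketch of the core check above (hypothetical helper):
# compile a CSS selector to XPath with lxml.cssselect and test it against an
# XHTML string whose default namespace declaration has been stripped.
def _example_selector_matches(selector: str, xhtml: str) -> bool:
    """Return True if `selector` selects at least one element in `xhtml`."""
    sel = lxml.cssselect.CSSSelector(selector, translator="html", namespaces=se.XHTML_NAMESPACES)
    tree = etree.fromstring(str.encode(xhtml.replace(' xmlns="http://www.w3.org/1999/xhtml"', "")))
    return bool(tree.xpath(sel.path, namespaces=se.XHTML_NAMESPACES))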
def lint(self, metadata_xhtml) -> list:
"""
Check this ebook for some common SE style errors.
INPUTS
None
OUTPUTS
A list of LintMessage objects.
"""
messages = []
has_halftitle = False
has_frontmatter = False
has_cover_source = False
cover_svg_title = ""
titlepage_svg_title = ""
xhtml_css_classes: Dict[str, int] = {}
headings: List[tuple] = []
# Get the ebook language, for later use
language = regex.search(r"<dc:language>([^>]+?)</dc:language>", metadata_xhtml).group(1)
# Check local.css for various items, for later use
abbr_elements: List[str] = []
css = ""
with open(self.path / "src" / "epub" / "css" / "local.css", "r", encoding="utf-8") as file:
css = file.read()
local_css_has_subtitle_style = "span[epub|type~=\"subtitle\"]" in css
abbr_styles = regex.findall(r"abbr\.[a-z]+", css)
matches = regex.findall(r"^h[0-6]\s*,?{?", css, flags=regex.MULTILINE)
if matches:
messages.append(LintMessage("Do not directly select h[0-6] elements, as they are used in template files; use more specific selectors.", se.MESSAGE_TYPE_ERROR, "local.css"))
# Check for presence of ./dist/ folder
if (self.path / "dist").exists():
messages.append(LintMessage("Illegal ./dist/ folder. Do not commit compiled versions of the source.", se.MESSAGE_TYPE_ERROR, "./dist/"))
# Check if there are non-typogrified quotes or em-dashes in metadata descriptions
if regex.search(r"#description\">[^<]+?(['\"]|\-\-)[^<]+?</meta>", metadata_xhtml.replace("\">", "").replace("=\"", "")) is not None:
messages.append(LintMessage("Non-typogrified \", ', or -- detected in metadata long description", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check if there are non-typogrified quotes or em-dashes in the title.
# The open-ended start and end of the regex also catches title-sort
if regex.search(r"title\">[^<]+?(['\"]|\-\-)[^<]+?<", metadata_xhtml) is not None:
messages.append(LintMessage("Non-typogrified \", ', or -- detected in metadata title", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for malformed long description HTML
long_description = regex.findall(r"<meta id=\"long-description\".+?>(.+?)</meta>", metadata_xhtml, flags=regex.DOTALL)
if long_description:
long_description = "<?xml version=\"1.0\"?><html xmlns=\"http://www.w3.org/1999/xhtml\">" + html.unescape(long_description[0]) + "</html>"
try:
etree.parse(io.StringIO(long_description))
except lxml.etree.XMLSyntaxError as ex:
messages.append(LintMessage("Metadata long description is not valid HTML. LXML says: " + str(ex), se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for double spacing
regex_string = fr"[{se.NO_BREAK_SPACE}{se.HAIR_SPACE} ]{{2,}}"
matches = regex.findall(regex_string, metadata_xhtml)
if matches:
messages.append(LintMessage("Double spacing detected in file. Sentences should be single-spaced.", se.MESSAGE_TYPE_ERROR, "content.opf"))
if regex.search(r"<dc:description id=\"description\">[^<]+?(['\"]|\-\-)[^<]+?</dc:description>", metadata_xhtml) is not None:
messages.append(LintMessage("Non-typogrified \", ', or -- detected in metadata dc:description.", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for punctuation outside quotes. We don't check single quotes because contractions are too common.
matches = regex.findall(r"[a-zA-Z][”][,.]", metadata_xhtml)
if matches:
messages.append(LintMessage("Comma or period outside of double quote. Generally punctuation should go within single and double quotes.", se.MESSAGE_TYPE_WARNING, "content.opf"))
# Make sure long-description is escaped HTML
if "<meta id=\"long-description\" property=\"se:long-description\" refines=\"#description\">\n\t\t\t<p>" not in metadata_xhtml:
messages.append(LintMessage("Long description must be escaped HTML.", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for HTML entities in long-description, but allow &amp;
if regex.search(r"&[a-z]+?;", metadata_xhtml.replace("&amp;", "")):
messages.append(LintMessage("HTML entities detected in metadata. Use Unicode equivalents instead.", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for illegal em-dashes in <dc:subject>
if regex.search(r"<dc:subject id=\"[^\"]+?\">[^<]+?—[^<]+?</dc:subject>", metadata_xhtml) is not None:
messages.append(LintMessage("Illegal em-dash detected in dc:subject; use --", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for empty production notes
if "<meta property=\"se:production-notes\">Any special notes about the production of this ebook for future editors/producers? Remove this element if not.</meta>" in metadata_xhtml:
messages.append(LintMessage("Empty production-notes element in metadata.", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for illegal VCS URLs
matches = regex.findall(r"<meta property=\"se:url\.vcs\.github\">([^<]+?)</meta>", metadata_xhtml)
if matches:
for match in matches:
if not match.startswith("https://github.com/standardebooks/"):
messages.append(LintMessage(f"Illegal se:url.vcs.github. VCS URLs must begin with https://github.com/standardebooks/: {match}", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for HathiTrust scan URLs instead of actual record URLs
if "babel.hathitrust.org" in metadata_xhtml or "hdl.handle.net" in metadata_xhtml:
messages.append(LintMessage("Use HathiTrust record URLs, not page scan URLs, in metadata, imprint, and colophon. Record URLs look like: https://catalog.hathitrust.org/Record/<RECORD-ID>", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for illegal se:subject tags
matches = regex.findall(r"<meta property=\"se:subject\">([^<]+?)</meta>", metadata_xhtml)
if matches:
for match in matches:
if match not in se.SE_GENRES:
messages.append(LintMessage(f"Illegal se:subject: {match}", se.MESSAGE_TYPE_ERROR, "content.opf"))
else:
messages.append(LintMessage("No se:subject <meta> tag found.", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check for CDATA tags
if "<![CDATA[" in metadata_xhtml:
messages.append(LintMessage("<![CDATA[ detected. Run `clean` to canonicalize <![CDATA[ sections.", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check that our provided identifier matches the generated identifier
identifier = regex.sub(r"<.+?>", "", regex.findall(r"<dc:identifier id=\"uid\">.+?</dc:identifier>", metadata_xhtml)[0])
if identifier != self.generated_identifier:
messages.append(LintMessage(f"<dc:identifier> does not match expected: {self.generated_identifier}", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check that the GitHub repo URL is as expected
if ("<meta property=\"se:url.vcs.github\">" + self.generated_github_repo_url + "</meta>") not in metadata_xhtml:
messages.append(LintMessage(f"GitHub repo URL does not match expected: {self.generated_github_repo_url}", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Check if se:name.person.full-name matches their titlepage name
matches = regex.findall(r"<meta property=\"se:name\.person\.full-name\" refines=\"#([^\"]+?)\">([^<]*?)</meta>", metadata_xhtml)
duplicate_names = []
for match in matches:
name_matches = regex.findall(r"<([a-z:]+)[^<]+?id=\"{}\"[^<]*?>([^<]*?)</\1>".format(match[0]), metadata_xhtml)
for name_match in name_matches:
if name_match[1] == match[1]:
duplicate_names.append(name_match[1])
if duplicate_names:
messages.append(LintMessage("se:name.person.full-name property identical to regular name. If the two are identical the full name <meta> element must be removed.", se.MESSAGE_TYPE_ERROR, "content.opf"))
for duplicate_name in duplicate_names:
messages.append(LintMessage(duplicate_name, se.MESSAGE_TYPE_ERROR, "", True))
# Check for malformed URLs
for message in _get_malformed_urls(metadata_xhtml):
message.filename = "content.opf"
messages.append(message)
if regex.search(r"id\.loc\.gov/authorities/names/[^\.]+\.html", metadata_xhtml):
messages.append(LintMessage("id.loc.gov URL ending with illegal .html", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Does the manifest match the generated manifest?
for manifest in regex.findall(r"<manifest>.*?</manifest>", metadata_xhtml, flags=regex.DOTALL):
manifest = regex.sub(r"[\n\t]", "", manifest)
expected_manifest = regex.sub(r"[\n\t]", "", self.generate_manifest())
if manifest != expected_manifest:
messages.append(LintMessage("<manifest> does not match expected structure.", se.MESSAGE_TYPE_ERROR, "content.opf"))
# Make sure some static files are unchanged
try:
with importlib_resources.path("se.data.templates", "LICENSE.md") as license_file_path:
if not filecmp.cmp(license_file_path, self.path / "LICENSE.md"):
messages.append(LintMessage(f"LICENSE.md does not match {license_file_path}", se.MESSAGE_TYPE_ERROR, "LICENSE.md"))
except Exception:
messages.append(LintMessage("Missing ./LICENSE.md", se.MESSAGE_TYPE_ERROR, "LICENSE.md"))
with importlib_resources.path("se.data.templates", "core.css") as core_css_file_path:
if not filecmp.cmp(core_css_file_path, self.path / "src" / "epub" / "css" / "core.css"):
messages.append(LintMessage(f"core.css does not match {core_css_file_path}", se.MESSAGE_TYPE_ERROR, "core.css"))
with importlib_resources.path("se.data.templates", "logo.svg") as logo_svg_file_path:
if not filecmp.cmp(logo_svg_file_path, self.path / "src" / "epub" / "images" / "logo.svg"):
messages.append(LintMessage(f"logo.svg does not match {logo_svg_file_path}", se.MESSAGE_TYPE_ERROR, "logo.svg"))
with importlib_resources.path("se.data.templates", "uncopyright.xhtml") as uncopyright_file_path:
if not filecmp.cmp(uncopyright_file_path, self.path / "src" / "epub" / "text" / "uncopyright.xhtml"):
messages.append(LintMessage(f"uncopyright.xhtml does not match {uncopyright_file_path}", se.MESSAGE_TYPE_ERROR, "uncopyright.xhtml"))
# Check for unused selectors
unused_selectors = _get_unused_selectors(self)
if unused_selectors:
messages.append(LintMessage("Unused CSS selectors:", se.MESSAGE_TYPE_ERROR, "local.css"))
for selector in unused_selectors:
messages.append(LintMessage(selector, se.MESSAGE_TYPE_ERROR, "", True))
# Now iterate over individual files for some checks
for root, _, filenames in os.walk(self.path):
for filename in sorted(filenames, key=se.natural_sort_key):
if ".git" in str(Path(root) / filename):
continue
if filename.startswith("cover.source."):
has_cover_source = True
if filename != "LICENSE.md" and regex.findall(r"[A-Z]", filename):
messages.append(LintMessage("Illegal uppercase letter in filename", se.MESSAGE_TYPE_ERROR, filename))
if "-0" in filename:
messages.append(LintMessage("Illegal leading 0 in filename", se.MESSAGE_TYPE_ERROR, filename))
if filename.endswith(tuple(se.BINARY_EXTENSIONS)) or filename.endswith("core.css"):
continue
if filename.startswith(".") or filename.startswith("README"):
if filename == ".gitignore":
# .gitignore is optional, because our standard gitignore ignores itself.
# So if it's present, it must match our template.
with importlib_resources.path("se.data.templates", "gitignore") as gitignore_file_path:
if not filecmp.cmp(gitignore_file_path, str(self.path / ".gitignore")):
messages.append(LintMessage(f".gitignore does not match {gitignore_file_path}", se.MESSAGE_TYPE_ERROR, ".gitignore"))
continue
else:
messages.append(LintMessage(f"Illegal {filename} file detected in {root}", se.MESSAGE_TYPE_ERROR))
continue
with open(Path(root) / filename, "r", encoding="utf-8") as file:
try:
file_contents = file.read()
except UnicodeDecodeError:
# This is more to help developers find weird files that might choke 'lint', hopefully unnecessary for end users
messages.append(LintMessage("Problem decoding file as utf-8", se.MESSAGE_TYPE_ERROR, filename))
continue
if "http://standardebooks.org" in file_contents:
messages.append(LintMessage("Non-HTTPS Standard Ebooks URL detected.", se.MESSAGE_TYPE_ERROR, filename))
if "UTF-8" in file_contents:
messages.append(LintMessage("String \"UTF-8\" must always be lowercase.", se.MESSAGE_TYPE_ERROR, filename))
if filename == "halftitle.xhtml":
has_halftitle = True
if "<title>Half Title</title>" not in file_contents:
messages.append(LintMessage("Half title <title> tag must contain exactly: \"Half Title\".", se.MESSAGE_TYPE_ERROR, filename))
if filename == "colophon.xhtml":
if "<a href=\"{}\">{}</a>".format(self.generated_identifier.replace("url:", ""), self.generated_identifier.replace("url:https://", "")) not in file_contents:
messages.append(LintMessage(f"Unexpected SE identifier in colophon. Expected: {self.generated_identifier}", se.MESSAGE_TYPE_ERROR, filename))
if ">trl<" in metadata_xhtml and "translated from" not in file_contents:
messages.append(LintMessage("Translator detected in metadata, but no 'translated from LANG' block in colophon", se.MESSAGE_TYPE_ERROR, filename))
# Check if we forgot to fill any variable slots
matches = regex.findall(r"([ >\"][A-Z_]{3,}[0-9]*[ <\"])", file_contents)
for match in matches:
messages.append(LintMessage("Missing data in colophon: {}".format(match[1:-1]), se.MESSAGE_TYPE_ERROR, filename))
# Are the sources represented correctly?
# We don't have a standard yet for more than two sources (transcription and scan) so just ignore that case for now.
matches = regex.findall(r"<dc:source>([^<]+?)</dc:source>", metadata_xhtml)
if len(matches) <= 2:
for link in matches:
if "gutenberg.org" in link and f"<a href=\"{link}\">Project Gutenberg</a>" not in file_contents:
messages.append(LintMessage(f"Source not represented in colophon.xhtml. It should read: <a href=\"{link}\">Project Gutenberg</a>", se.MESSAGE_TYPE_WARNING, filename))
if "hathitrust.org" in link and f"the<br/>\n\t\t\t<a href=\"{link}\">HathiTrust Digital Library</a>" not in file_contents:
messages.append(LintMessage(f"Source not represented in colophon.xhtml. It should read: the<br/> <a href=\"{link}\">HathiTrust Digital Library</a>", se.MESSAGE_TYPE_WARNING, filename))
if "archive.org" in link and f"the<br/>\n\t\t\t<a href=\"{link}\">Internet Archive</a>" not in file_contents:
messages.append(LintMessage(f"Source not represented in colophon.xhtml. It should read: the<br/> <a href=\"{link}\">Internet Archive</a>", se.MESSAGE_TYPE_WARNING, filename))
if "books.google.com" in link and f"<a href=\"{link}\">Google Books</a>" not in file_contents:
messages.append(LintMessage(f"Source not represented in colophon.xhtml. It should read: <a href=\"{link}\">Google Books</a>", se.MESSAGE_TYPE_WARNING, filename))
if filename == "titlepage.xhtml":
if "<title>Titlepage</title>" not in file_contents:
messages.append(LintMessage("Titlepage <title> tag must contain exactly: \"Titlepage\".", se.MESSAGE_TYPE_ERROR, filename))
if filename.endswith(".svg"):
# Check for fill: #000 which should simply be removed
matches = regex.findall(r"fill=\"\s*#000", file_contents) + regex.findall(r"style=\"[^\"]*?fill:\s*#000", file_contents)
if matches:
messages.append(LintMessage("Illegal style=\"fill: #000\" or fill=\"#000\".", se.MESSAGE_TYPE_ERROR, filename))
# Check for illegal height or width on root <svg> element
if filename != "logo.svg": # Do as I say, not as I do...
matches = regex.findall(r"<svg[^>]*?(height|width)=[^>]*?>", file_contents)
if matches:
messages.append(LintMessage("Illegal height or width on root <svg> element. Size SVGs using the viewbox attribute only.", se.MESSAGE_TYPE_ERROR, filename))
# Check for illegal transform attribute
matches = regex.findall(r"<[a-z]+[^>]*?transform=[^>]*?>", file_contents)
if matches:
messages.append(LintMessage("Illegal transform attribute. SVGs should be optimized to remove use of transform. Try using Inkscape to save as an \"optimized SVG\".", se.MESSAGE_TYPE_ERROR, filename))
if os.sep + "src" + os.sep not in root:
# Check that cover and titlepage images are in all caps
if filename == "cover.svg":
matches = regex.findall(r"<text[^>]+?>.*[a-z].*</text>", file_contents)
if matches:
messages.append(LintMessage("Lowercase letters in cover. Cover text must be all uppercase.", se.MESSAGE_TYPE_ERROR, filename))
# Save for later comparison with titlepage
matches = regex.findall(r"<title>(.*?)</title>", file_contents)
for match in matches:
cover_svg_title = match.replace("The cover for ", "")
if filename == "titlepage.svg":
matches = regex.findall(r"<text[^>]+?>(.*[a-z].*)</text>", html.unescape(file_contents))
for match in matches:
if match not in ("translated by", "illustrated by", "and"):
messages.append(LintMessage("Lowercase letters in titlepage. Titlepage text must be all uppercase except \"translated by\" and \"illustrated by\".", se.MESSAGE_TYPE_ERROR, filename))
# For later comparison with cover
matches = regex.findall(r"<title>(.*?)</title>", file_contents)
for match in matches:
titlepage_svg_title = match.replace("The titlepage for ", "")
if filename.endswith(".css"):
# Check CSS style
# First remove @supports selectors and normalize indentation within them
matches = regex.findall(r"^@supports\(.+?\){.+?}\s*}", file_contents, flags=regex.MULTILINE | regex.DOTALL)
for match in matches:
processed_match = regex.sub(r"^@supports\(.+?\){\s*(.+?)\s*}\s*}", "\\1", match.replace("\n\t", "\n") + "\n}", flags=regex.MULTILINE | regex.DOTALL)
file_contents = file_contents.replace(match, processed_match)
# Remove comments that are on their own line
file_contents = regex.sub(r"^/\*.+?\*/\n", "", file_contents, flags=regex.MULTILINE | regex.DOTALL)
# Check for unneeded white-space nowrap in abbr selectors
matches = regex.findall(r"abbr.+?{[^}]*?white-space:\s*nowrap;[^}]*?}", css)
if matches:
messages.append(LintMessage("abbr selector does not need white-space: nowrap; as it inherits it from core.css.", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Don't specify border color
matches = regex.findall(r"(?:border|color).+?(?:#[a-f0-9]{0,6}|black|white|red)", file_contents, flags=regex.IGNORECASE)
if matches:
messages.append(LintMessage("Don't specify border colors, so that reading systems can adjust for night mode.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# If we select on the xml namespace, make sure we define the namespace in the CSS, otherwise the selector won't work
matches = regex.findall(r"\[\s*xml\s*\|", file_contents)
if matches and "@namespace xml \"http://www.w3.org/XML/1998/namespace\";" not in file_contents:
messages.append(LintMessage("[xml|attr] selector in CSS, but no XML namespace declared (@namespace xml \"http://www.w3.org/XML/1998/namespace\";).", se.MESSAGE_TYPE_ERROR, filename))
if filename.endswith(".xhtml"):
for message in _get_malformed_urls(file_contents):
message.filename = filename
messages.append(message)
# Check if this is a frontmatter file
if filename not in ("titlepage.xhtml", "imprint.xhtml", "toc.xhtml"):
matches = regex.findall(r"epub:type=\"[^\"]*?frontmatter[^\"]*?\"", file_contents)
if matches:
has_frontmatter = True
# Add new CSS classes to global list
if filename not in se.IGNORED_FILENAMES:
matches = regex.findall(r"(?:class=\")[^\"]+?(?:\")", file_contents)
for match in matches:
for css_class in match.replace("class=", "").replace("\"", "").split():
if css_class in xhtml_css_classes:
xhtml_css_classes[css_class] += 1
else:
xhtml_css_classes[css_class] = 1
#xhtml_css_classes = xhtml_css_classes + match.replace("class=", "").replace("\"", "").split()
# Read file contents into a DOM for querying
dom = BeautifulSoup(file_contents, "lxml")
# Store all headings to check for ToC references later
if filename != "toc.xhtml":
for match in dom.select("h1,h2,h3,h4,h5,h6"):
# Remove any links to the endnotes
endnote_ref = match.find("a", attrs={"epub:type": regex.compile("^.*noteref.*$")})
if endnote_ref:
endnote_ref.extract()
# Decide whether to remove subheadings based on the following logic:
# If the closest parent <section> is a part or division, then keep subtitle
# Else, if the closest parent <section> is a halftitlepage, then discard subtitle
# Else, if the first child of the heading is not z3998:roman, then also discard subtitle
# Else, keep the subtitle.
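# For example, a heading whose first <span> is a z3998:roman numeral keeps its
# subtitle, while a heading in the halftitlepage, or one whose first <span> is
# not a Roman numeral, has its subtitle stripped before the heading text is
# compared against the ToC below.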
heading_subtitle = match.find(attrs={"epub:type": regex.compile("^.*subtitle.*$")})
if heading_subtitle:
# If an <h#> tag has a subtitle, the non-subtitle text must also be wrapped in a <span>.
# This invocation of match.find() returns the first direct text node. The heading should contain no loose text outside of <span>s,
# so if a non-empty text node is found we know we're missing a <span> somewhere.
if match.find(text=True, recursive=False).strip():
messages.append(LintMessage(f"<{match.name}> tag has subtitle <span>, but first line is not wrapped in a <span>. See semantics manual for structure of headers with subtitles.", se.MESSAGE_TYPE_ERROR, filename))
# OK, move on with processing headers.
parent_section = match.find_parents("section")
# Sometimes we might not have a parent <section>, like in Keats' Poetry
if not parent_section:
parent_section = match.find_parents("body")
closest_section_epub_type = parent_section[0].get("epub:type") or ""
heading_first_child_epub_type = match.find("span", recursive=False).get("epub:type") or ""
if regex.findall(r"^.*(part|division|volume).*$", closest_section_epub_type) and not regex.findall(r"^.*se:short-story.*$", closest_section_epub_type):
remove_subtitle = False
elif regex.findall(r"^.*halftitlepage.*$", closest_section_epub_type):
remove_subtitle = True
elif not regex.findall(r"^.*z3998:roman.*$", heading_first_child_epub_type):
remove_subtitle = True
else:
remove_subtitle = False
if remove_subtitle:
heading_subtitle.extract()
normalized_text = " ".join(match.get_text().split())
headings = headings + [(normalized_text, filename)]
# Check for direct z3998:roman spans that should have their semantic pulled into the parent element
matches = regex.findall(r"<([a-z0-9]+)[^>]*?>\s*(<span epub:type=\"z3998:roman\">[^<]+?</span>)\s*</\1>", file_contents, flags=regex.DOTALL)
if matches:
messages.append(LintMessage("If <span> exists only for the z3998:roman semantic, then z3998:roman should be pulled into parent tag instead.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match[1], se.MESSAGE_TYPE_WARNING, filename, True))
# Check for "Hathi Trust" instead of "HathiTrust"
if "Hathi Trust" in file_contents:
messages.append(LintMessage("\"Hathi Trust\" should be \"HathiTrust\"", se.MESSAGE_TYPE_ERROR, filename))
# Check for uppercase letters in IDs or classes
matches = dom.select("[id],[class]")
for match in matches:
if match.has_attr("id"):
normalized_id = unicodedata.normalize("NFKD", match["id"])
uppercase_matches = regex.findall(r"[A-Z]", normalized_id)
for _ in uppercase_matches:
messages.append(LintMessage("Uppercase ID attribute: {}. Attribute values must be all lowercase.".format(match["id"]), se.MESSAGE_TYPE_ERROR, filename))
number_matches = regex.findall(r"^[0-9]", normalized_id)
for _ in number_matches:
messages.append(LintMessage("ID starting with a number is illegal XHTML: {}".format(match["id"]), se.MESSAGE_TYPE_ERROR, filename))
if match.has_attr("class"):
for css_class in match["class"]:
uppercase_matches = regex.findall(r"[A-Z]", unicodedata.normalize("NFKD", css_class))
for _ in uppercase_matches:
messages.append(LintMessage(f"Uppercase class attribute: {css_class}. Attribute values must be all lowercase.", se.MESSAGE_TYPE_ERROR, filename))
matches = [x for x in dom.select("section") if not x.has_attr("id")]
if matches:
messages.append(LintMessage("<section> element without id attribute.", se.MESSAGE_TYPE_ERROR, filename))
# Check for empty title tags
if "<title/>" in file_contents or "<title></title>" in file_contents:
messages.append(LintMessage("Empty <title> tag.", se.MESSAGE_TYPE_ERROR, filename))
# Check for numeric entities
matches = regex.findall(r"&#[0-9]+?;", file_contents)
if matches:
messages.append(LintMessage("Illegal numeric entity (like Α) in file.", se.MESSAGE_TYPE_ERROR, filename))
# Check for <hr> tags before the end of a section, which is a common PG artifact
matches = regex.findall(r"<hr[^>]*?/?>\s*</section>", file_contents, flags=regex.DOTALL)
if matches:
messages.append(LintMessage("Illegal <hr/> before the end of a section.", se.MESSAGE_TYPE_ERROR, filename))
# Check for double greater-than at the end of a tag
matches = regex.findall(r"(>>|&gt;&gt;)", file_contents)
if matches:
messages.append(LintMessage("Tags should end with a single >.", se.MESSAGE_TYPE_WARNING, filename))
# Check for nbsp before ampersand (&)
matches = regex.findall(fr"[^{se.NO_BREAK_SPACE}]\&", file_contents)
if matches:
messages.append(LintMessage("Required nbsp not found before &", se.MESSAGE_TYPE_WARNING, filename))
# Check for nbsp after ampersand (&)
matches = regex.findall(fr"\&[^{se.NO_BREAK_SPACE}]", file_contents)
if matches:
messages.append(LintMessage("Required nbsp not found after &", se.MESSAGE_TYPE_WARNING, filename))
# Check for nbsp before times
matches = regex.findall(fr"[0-9]+[^{se.NO_BREAK_SPACE}]<abbr class=\"time", file_contents)
if matches:
messages.append(LintMessage("Required nbsp not found before <abbr class=\"time\">", se.MESSAGE_TYPE_WARNING, filename))
# Check for low-hanging misquoted fruit
matches = regex.findall(r"[A-Za-z]+[“‘]", file_contents)
if matches:
messages.append(LintMessage("Possible mis-curled quotation mark.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# Check that times have colons and not periods
matches = regex.findall(r"[0-9]\.[0-9]+\s<abbr class=\"time", file_contents) + regex.findall(r"at [0-9]\.[0-9]+", file_contents)
if matches:
messages.append(LintMessage("Times must be separated by colons (:) not periods (.)", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Check for leading 0 in IDs
matches = regex.findall(r"id=\"[^\"]+?\-0[0-9]+[^\"]*?\"", file_contents)
if matches:
messages.append(LintMessage("Illegal leading 0 in ID attribute", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Check for stage direction that ends in ?! but also has a trailing period
matches = regex.findall(r"<i epub:type=\"z3998:stage-direction\">(?:(?!<i).)*?\.</i>[,:;!?]", file_contents)
if matches:
messages.append(LintMessage("Stage direction ending in period next to other punctuation. Remove trailing periods in stage direction.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# Check for ending punctuation inside italics
matches = regex.findall(r"(<([ib]) epub:type=\"se:[^\"]+?\">[^<]+?[\.,\!\?]</\2>)", file_contents)
if matches:
messages.append(LintMessage("Ending punctuation inside italics.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match[0], se.MESSAGE_TYPE_WARNING, filename, True))
# Check for money not separated by commas
matches = regex.findall(r"[£\$][0-9]{4,}", file_contents)
if matches:
messages.append(LintMessage("Numbers not grouped by commas. Separate numbers greater than 1,000 with commas at every three numerals.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# Check for trailing commas inside <i> tags at the close of dialog
if ",</i>”" in file_contents:
messages.append(LintMessage("Comma inside <i> tag before closing dialog. (Search for ,</i>”)", se.MESSAGE_TYPE_WARNING, filename))
# Check for period following Roman numeral, which is an old-timey style we must fix
# But ignore the numeral if it's the first item in a <p> tag, as that suggests it might be a kind of list item.
matches = regex.findall(r"(?<!<p[^>]*?>)<span epub:type=\"z3998:roman\">[^<]+?</span>\.\s+[a-z]", file_contents)
if matches:
messages.append(LintMessage("Roman numeral followed by a period. When in mid-sentence Roman numerals must not be followed by a period.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# Check for two em dashes in a row
matches = regex.findall(fr"—{se.WORD_JOINER}*—+", file_contents)
if matches:
messages.append(LintMessage("Two or more em-dashes in a row detected. Elided words should use the two- or three-em-dash Unicode character, and dialog ending in em-dashes should only end in a single em-dash.", se.MESSAGE_TYPE_ERROR, filename))
# Check for <abbr class="name"> that does not contain spaces
matches = regex.findall(r"<abbr class=\"name\">[^<]*?[A-Z]\.[A-Z]\.[^<]*?</abbr>", file_contents)
if matches:
messages.append(LintMessage("Initials in <abbr class=\"name\"> not separated by spaces.", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Check for bare <h2> tags missing the epub:type="title" attribute
if "<h2>" in file_contents:
messages.append(LintMessage("<h2> tag without epub:type=\"title\" attribute.", se.MESSAGE_TYPE_WARNING, filename))
# Check for a common typo
if "z3998:nonfiction" in file_contents:
messages.append(LintMessage("Typo: z3998:nonfiction should be z3998:non-fiction", se.MESSAGE_TYPE_ERROR, filename))
# Check for empty <p> tags
matches = regex.findall(r"<p>\s*</p>", file_contents)
if "<p/>" in file_contents or matches:
messages.append(LintMessage("Empty <p> tag. Use <hr/> for scene breaks if appropriate.", se.MESSAGE_TYPE_ERROR, filename))
# Check for <p> tags that end with <br/>
matches = regex.findall(r"(\s*<br/?>\s*)+</p>", file_contents)
if matches:
messages.append(LintMessage("<br/> tag found before closing </p> tag.", se.MESSAGE_TYPE_ERROR, filename))
# Check for single words that are in italics, but that have closing punctuation outside italics
# Outer wrapping match is so that .findall returns the entire match and not the subgroup
# The first regex also matches the first few characters before the first double quote; we use those for more sophisticated
# checks below, to give fewer false positives like `with its downy red hairs and its “<i xml:lang="fr">doigts de faune</i>.”`
matches = regex.findall(r"((?:.{1,2}\s)?“<(i|em)[^>]*?>[^<]+?</\2>[\!\?\.])", file_contents) + regex.findall(r"([\.\!\?] <(i|em)[^>]*?>[^<]+?</\2>[\!\?\.])", file_contents)
# But, if we've matched a name of something, don't include that as an error. For example, `He said, “<i epub:type="se:name.publication.book">The Decameron</i>.”`
# We also exclude the match from the list if:
# 1. The double quote is directly preceded by a lowercase letter and a space: `with its downy red hairs and its “<i xml:lang="fr">doigts de faune</i>.”`
# 2. The double quote is directly preceded by a lowercase letter, a comma, and a space, and the first letter within the double quote is lowercase: In the original, “<i xml:lang="es">que era un Conde de Irlos</i>.”
matches = [x for x in matches if "epub:type=\"se:name." not in x[0] and "epub:type=\"z3998:taxonomy" not in x[0] and not regex.match(r"^[a-z’]+\s“", x[0]) and not regex.match(r"^[a-z’]+,\s“[a-z]", se.formatting.remove_tags(x[0]))]
if matches:
messages.append(LintMessage("When a complete clause is italicized, ending punctuation EXCEPT commas must be within containing italics.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match[0], se.MESSAGE_TYPE_WARNING, filename, True))
# Check for foreign phrases with italics going *outside* quotes
matches = regex.findall(r"<i[^>]*?>“.+?\b", file_contents) + regex.findall(r"”</i>", file_contents)
if matches:
messages.append(LintMessage("When italicizing language in dialog, italics go INSIDE quotation marks.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# Check for style attributes
matches = regex.findall(r"<.+?style=\"", file_contents)
if matches:
messages.append(LintMessage("Illegal style attribute. Do not use inline styles, any element can be targeted with a clever enough selector.", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Check for uppercase HTML tags
if regex.findall(r"<[A-Z]+", file_contents):
messages.append(LintMessage("One or more uppercase HTML tags.", se.MESSAGE_TYPE_ERROR, filename))
# Check for nbsp within <abbr class="name">, which is redundant
matches = regex.findall(fr"<abbr[^>]+?class=\"name\"[^>]*?>[^<]*?{se.NO_BREAK_SPACE}[^<]*?</abbr>", file_contents)
if matches:
messages.append(LintMessage("No-break space detected in <abbr class=\"name\">. This is redundant.", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Check for Roman numerals in <title> tag
if regex.findall(r"<title>[Cc]hapter [XxIiVv]+", file_contents):
messages.append(LintMessage("No Roman numerals allowed in <title> tag; use decimal numbers.", se.MESSAGE_TYPE_ERROR, filename))
# If the chapter has a number and no subtitle, check the <title> tag...
matches = regex.findall(r"<h([0-6]) epub:type=\"title z3998:roman\">([^<]+)</h\1>", file_contents, flags=regex.DOTALL)
# ...But only make the correction if there's one <h#> tag. If there's more than one, then the xhtml file probably requires an overarching title
if matches and len(regex.findall(r"<h(?:[0-6])", file_contents)) == 1:
try:
chapter_number = roman.fromRoman(matches[0][1].upper())
regex_string = fr"<title>(Chapter|Section|Part) {chapter_number}"
if not regex.findall(regex_string, file_contents):
messages.append(LintMessage(f"<title> tag doesn't match expected value; should be \"Chapter {chapter_number}\". (Beware hidden Unicode characters!)", se.MESSAGE_TYPE_ERROR, filename))
except Exception:
messages.append(LintMessage("<h#> tag is marked with z3998:roman, but is not a Roman numeral", se.MESSAGE_TYPE_ERROR, filename))
# If the chapter has a number and subtitle, check the <title> tag...
matches = regex.findall(r"<h([0-6]) epub:type=\"title\">\s*<span epub:type=\"z3998:roman\">([^<]+)</span>\s*<span epub:type=\"subtitle\">(.+?)</span>\s*</h\1>", file_contents, flags=regex.DOTALL)
# ...But only make the correction if there's one <h#> tag. If there's more than one, then the xhtml file probably requires an overarching title
if matches and len(regex.findall(r"<h(?:[0-6])", file_contents)) == 1:
chapter_number = roman.fromRoman(matches[0][1].upper())
# First, remove endnotes in the subtitle, then remove all other tags (but not tag contents)
chapter_title = regex.sub(r"<a[^<]+?epub:type=\"noteref\"[^<]*?>[^<]+?</a>", "", matches[0][2]).strip()
chapter_title = regex.sub(r"<[^<]+?>", "", chapter_title)
regex_string = r"<title>(Chapter|Section|Part) {}: {}".format(chapter_number, regex.escape(chapter_title))
if not regex.findall(regex_string, file_contents):
messages.append(LintMessage(f"<title> tag doesn't match expected value; should be \"Chapter {chapter_number}: {chapter_title}\". (Beware hidden Unicode characters!)", se.MESSAGE_TYPE_ERROR, filename))
# Check for missing subtitle styling
if "epub:type=\"subtitle\"" in file_contents and not local_css_has_subtitle_style:
messages.append(LintMessage("Subtitles detected, but no subtitle style detected in local.css.", se.MESSAGE_TYPE_ERROR, filename))
# Check for whitespace before noteref
matches = regex.findall(r"\s+<a href=\"endnotes\.xhtml#note-[0-9]+?\" id=\"noteref-[0-9]+?\" epub:type=\"noteref\">[0-9]+?</a>", file_contents)
if matches:
messages.append(LintMessage("Illegal white space before noteref.", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# Check for <li> elements that don't have a direct block child
if filename != "toc.xhtml":
matches = regex.findall(r"<li(?:\s[^>]*?>|>)\s*[^\s<]", file_contents)
if matches:
messages.append(LintMessage("<li> without direct block-level child.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# Check for IDs on <h#> tags
matches = regex.findall(r"<h[0-6][^>]*?id=[^>]*?>", file_contents, flags=regex.DOTALL)
if matches:
messages.append(LintMessage("<h#> tag with id attribute. <h#> tags should be wrapped in <section> tags, which should hold the id attribute.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# Check to see if <h#> tags are correctly titlecased
matches = regex.finditer(r"<h([0-6])([^>]*?)>(.*?)</h\1>", file_contents, flags=regex.DOTALL)
for match in matches:
if "z3998:roman" not in match.group(2):
title = match.group(3).strip()
# Remove leading roman numerals first
title = regex.sub(r"^<span epub:type=\"[^\"]*?z3998:roman[^\"]*?\">(.*?)</span>", "", title, flags=regex.DOTALL)
# Remove leading leftover spacing and punctuation
title = regex.sub(r"^[\s\.\,\!\?\:\;]*", "", title)
# Remove endnotes
title = regex.sub(r"<a[^>]*?epub:type=\"noteref\"[^>]*?>[0-9]+</a>", "", title)
# Normalize whitespace
title = regex.sub(r"\s+", " ", title, flags=regex.DOTALL).strip()
# Remove nested <span>s in subtitles, which might trip up the next regex block
title = regex.sub(r"(<span epub:type=\"subtitle\">[^<]*?)<span[^>]*?>([^<]*?</span>)", r"\1\2", title, flags=regex.DOTALL)
title = regex.sub(r"(<span epub:type=\"subtitle\">[^<]*?)</span>([^<]*?</span>)", r"\1\2", title, flags=regex.DOTALL)
# Do we have a subtitle? If so the first letter of that must be capitalized, so we pull that out
subtitle_matches = regex.findall(r"(.*?)<span epub:type=\"subtitle\">(.*?)</span>(.*?)", title, flags=regex.DOTALL)
if subtitle_matches:
for title_header, subtitle, title_footer in subtitle_matches:
title_header = se.formatting.titlecase(se.formatting.remove_tags(title_header).strip())
subtitle = se.formatting.titlecase(se.formatting.remove_tags(subtitle).strip())
title_footer = se.formatting.titlecase(se.formatting.remove_tags(title_footer).strip())
titlecased_title = title_header + " " + subtitle + " " + title_footer
titlecased_title = titlecased_title.strip()
title = se.formatting.remove_tags(title).strip()
if title != titlecased_title:
messages.append(LintMessage(f"Title \"{title}\" not correctly titlecased. Expected: {titlecased_title}", se.MESSAGE_TYPE_WARNING, filename))
# No subtitle? Much more straightforward
else:
titlecased_title = se.formatting.remove_tags(se.formatting.titlecase(title))
title = se.formatting.remove_tags(title)
if title != titlecased_title:
messages.append(LintMessage(f"Title \"{title}\" not correctly titlecased. Expected: {titlecased_title}", se.MESSAGE_TYPE_WARNING, filename))
# Check for <figure> tags without id attributes
matches = regex.findall(r"<img[^>]*?id=\"[^>]+?>", file_contents)
if matches:
messages.append(LintMessage("<img> tag with ID attribute. ID attributes go on parent <figure> tags.", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Check for closing dialog without comma
matches = regex.findall(r"[a-z]+?” [a-zA-Z]+? said", file_contents)
if matches:
messages.append(LintMessage("Dialog without ending comma.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_WARNING, filename, True))
# Check for non-typogrified img alt attributes
matches = regex.findall(r"alt=\"[^\"]*?('|--|")[^\"]*?\"", file_contents)
if matches:
messages.append(LintMessage("Non-typogrified ', \" (as "), or -- in image alt attribute.", se.MESSAGE_TYPE_ERROR, filename))
# Check alt attributes not ending in punctuation
if filename not in se.IGNORED_FILENAMES:
matches = regex.findall(r"alt=\"[^\"]*?[a-zA-Z]\"", file_contents)
if matches:
messages.append(LintMessage("Alt attribute doesn't appear to end with punctuation. Alt attributes must be composed of complete sentences ending in appropriate punctuation.", se.MESSAGE_TYPE_ERROR, filename))
# Check alt attributes match image titles
images = dom.select("img[src$=svg]")
for image in images:
alt_text = image["alt"]
title_text = ""
image_ref = image["src"].split("/").pop()
try:
with open(self.path / "src" / "epub" / "images" / image_ref, "r", encoding="utf-8") as image_source:
try:
title_text = BeautifulSoup(image_source, "lxml").title.get_text()
except Exception:
messages.append(LintMessage(f"{image_ref} missing <title> element.", se.MESSAGE_TYPE_ERROR, image_ref))
if title_text != "" and alt_text != "" and title_text != alt_text:
messages.append(LintMessage(f"The <title> of {image_ref} doesn’t match the alt text in {filename}", se.MESSAGE_TYPE_ERROR, filename))
except FileNotFoundError:
messages.append(LintMessage(f"The image {image_ref} doesn’t exist", se.MESSAGE_TYPE_ERROR, filename))
# Check for punctuation after endnotes
regex_string = fr"<a[^>]*?epub:type=\"noteref\"[^>]*?>[0-9]+</a>[^\s<–\]\)—{se.WORD_JOINER}]"
matches = regex.findall(regex_string, file_contents)
if matches:
messages.append(LintMessage("Endnote links must be outside of punctuation, including quotation marks.", se.MESSAGE_TYPE_WARNING, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Check for nbsp in measurements, for example: 90 mm
matches = regex.findall(r"[0-9]+[\- ][mck][mgl]\b", file_contents)
if matches:
messages.append(LintMessage("Measurements must be separated by a no-break space, not a dash or regular space.", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Check for line breaks after <br/> tags
matches = regex.findall(r"<br\s*?/>[^\n]", file_contents)
if matches:
messages.append(LintMessage("<br/> tags must be followed by a newline, and subsequent content must be indented to the same level.", se.MESSAGE_TYPE_ERROR, filename))
for match in matches:
messages.append(LintMessage(match, se.MESSAGE_TYPE_ERROR, filename, True))
# Check for <pre> tags
if "<pre" in file_contents:
messages.append(LintMessage("Illegal <pre> tag.", se.MESSAGE_TYPE_ERROR, filename))
# Check for double spacing
regex_string = fr"[{se.NO_BREAK_SPACE}{se.HAIR_SPACE} ]{{2,}}"
matches = regex.findall(regex_string, file_contents)
if matches:
messages.append(LintMessage("Double spacing detected in file. Sentences should be single-spaced. (Note that double spaces might include Unicode no-break spaces!)", se.MESSAGE_TYPE_ERROR, filename))
# Check for punctuation outside quotes. We don't check single quotes because contractions are too common.
matches = regex.findall(r"[a-zA-Z][”][,.]", file_contents)
if matches:
messages.append(LintMessage("Comma or period outside of double quote. Generally punctuation should go within single and double quotes.", se.MESSAGE_TYPE_WARNING, filename))
# Did someone use colons instead of dots for SE identifiers? e.g. se:name:vessel:ship
matches = regex.findall(r"\bse:[a-z]+:(?:[a-z]+:?)*", file_contents)
if matches:
messages.append(LintMessage(f"Illegal colon (:) detected in SE identifier. SE identifiers are separated by dots (.) not colons (:). Identifier: {matches}", se.MESSAGE_TYPE_ERROR, filename))
# Check for leftover asterisms
matches = regex.findall(r"\*\s*(\*\s*)+", file_contents)
if matches:
messages.append(LintMessage("Illegal asterism (***) detected. Section/scene breaks must be defined by an <hr/> tag.", se.MESSAGE_TYPE_ERROR, filename))
# Check for space before endnote backlinks
if filename == "endnotes.xhtml":
# Do we have to replace Ibid.?
matches = regex.findall(r"\bibid\b", file_contents, flags=regex.IGNORECASE)
if matches:
messages.append(LintMessage("Illegal \"Ibid\" in endnotes. \"Ibid\" means \"The previous reference\" which is meaningless with popup endnotes, and must be replaced by the actual thing \"Ibid\" refers to.", se.MESSAGE_TYPE_ERROR, filename))
endnote_referrers = dom.select("li[id^=note-] a")
bad_referrers = []
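# For example, a backlink preceded by exactly one space ("…the note text. <a …>")
# passes, while no space ("…text.<a …>") or two spaces ("…text.  <a …>") is
# flagged below.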
for referrer in endnote_referrers:
# We check against the attr value here because I couldn't figure out how to select an XML-namespaced attribute using BS4
if "epub:type" in referrer.attrs and referrer.attrs["epub:type"] == "backlink":
is_first_sib = True
for sib in referrer.previous_siblings:
if is_first_sib:
is_first_sib = False
if isinstance(sib, NavigableString):
if sib == "\n": # Referrer preceded by newline. Check if all previous sibs are tags.
continue
if sib == " " or str(sib) == se.NO_BREAK_SPACE or regex.search(r"[^\s] $", str(sib)): # Referrer preceded by a single space; we're OK
break
# Referrer preceded by a string that is not a newline and does not end with a single space
bad_referrers.append(referrer)
break
else:
# We got here because the first sib was a newline, or not a string. So, check all previous sibs.
if isinstance(sib, NavigableString) and sib != "\n":
bad_referrers.append(referrer)
break
if bad_referrers:
messages.append(LintMessage("Endnote referrer link not preceded by exactly one space, or a newline if all previous siblings are elements.", se.MESSAGE_TYPE_WARNING, filename))
for referrer in bad_referrers:
messages.append(LintMessage(str(referrer), se.MESSAGE_TYPE_WARNING, filename, True))
# If we're in the imprint, are the sources represented correctly?
# We don't have a standard yet for more than two sources (transcription and scan) so just ignore that case for now.
if filename == "imprint.xhtml":
matches = regex.findall(r"<dc:source>([^<]+?)</dc:source>", metadata_xhtml)
if len(matches) <= 2:
for link in matches:
if "gutenberg.org" in link and f"<a href=\"{link}\">Project Gutenberg</a>" not in file_contents:
messages.append(LintMessage(f"Source not represented in imprint.xhtml. It should read: <a href=\"{link}\">Project Gutenberg</a>", se.MESSAGE_TYPE_WARNING, filename))
if "hathitrust.org" in link and f"the <a href=\"{link}\">HathiTrust Digital Library</a>" not in file_contents:
messages.append(LintMessage(f"Source not represented in imprint.xhtml. It should read: the <a href=\"{link}\">HathiTrust Digital Library</a>", se.MESSAGE_TYPE_WARNING, filename))
if "archive.org" in link and f"the <a href=\"{link}\">Internet Archive</a>" not in file_contents:
messages.append(LintMessage(f"Source not represented in imprint.xhtml. It should read: the <a href=\"{link}\">Internet Archive</a>", se.MESSAGE_TYPE_WARNING, filename))
if "books.google.com" in link and f"<a href=\"{link}\">Google Books</a>" not in file_contents:
messages.append(LintMessage(f"Source not represented in imprint.xhtml. It should read: <a href=\"{link}\">Google Books</a>", se.MESSAGE_TYPE_WARNING, filename))
# Collect abbr elements for later check
result = regex.findall("<abbr[^<]+?>", file_contents)
result = [item.replace("eoc", "").replace(" \"", "").strip() for item in result]
abbr_elements = list(set(result + abbr_elements))
# Check if language tags in individual files match the language in content.opf
if filename not in se.IGNORED_FILENAMES:
file_language = regex.search(r"<html[^<]+xml\:lang=\"([^\"]+)\"", file_contents).group(1)
if language != file_language:
messages.append(LintMessage(f"File language is {file_language}, but content.opf language is {language}", se.MESSAGE_TYPE_ERROR, filename))
# Check LoI descriptions to see if they match associated figcaptions
if filename == "loi.xhtml":
illustrations = dom.select("li > a")
for illustration in illustrations:
figure_ref = illustration["href"].split("#")[1]
chapter_ref = regex.findall(r"(.*?)#.*", illustration["href"])[0]
figcaption_text = ""
loi_text = illustration.get_text()
with open(self.path / "src" / "epub" / "text" / chapter_ref, "r", encoding="utf-8") as chapter:
try:
figure = BeautifulSoup(chapter, "lxml").select("#" + figure_ref)[0]
except Exception:
messages.append(LintMessage(f"#{figure_ref} not found in file {chapter_ref}", se.MESSAGE_TYPE_ERROR, 'loi.xhtml'))
continue
if figure.img:
figure_img_alt = figure.img.get('alt')
if figure.figcaption:
figcaption_text = figure.figcaption.get_text()
if (figcaption_text != "" and loi_text != "" and figcaption_text != loi_text) and (figure_img_alt != "" and loi_text != "" and figure_img_alt != loi_text):
messages.append(LintMessage(f"The <figcaption> tag of {figure_ref} doesn’t match the text in its LoI entry", se.MESSAGE_TYPE_WARNING, chapter_ref))
# Check for missing MARC relators
if filename == "introduction.xhtml" and ">aui<" not in metadata_xhtml and ">win<" not in metadata_xhtml:
messages.append(LintMessage("introduction.xhtml found, but no MARC relator 'aui' (Author of introduction, but not the chief author) or 'win' (Writer of introduction)", se.MESSAGE_TYPE_WARNING, filename))
if filename == "preface.xhtml" and ">wpr<" not in metadata_xhtml:
messages.append(LintMessage("preface.xhtml found, but no MARC relator 'wpr' (Writer of preface)", se.MESSAGE_TYPE_WARNING, filename))
if filename == "afterword.xhtml" and ">aft<" not in metadata_xhtml:
messages.append(LintMessage("afterword.xhtml found, but no MARC relator 'aft' (Author of colophon, afterword, etc.)", se.MESSAGE_TYPE_WARNING, filename))
if filename == "endnotes.xhtml" and ">ann<" not in metadata_xhtml:
messages.append(LintMessage("endnotes.xhtml found, but no MARC relator 'ann' (Annotator)", se.MESSAGE_TYPE_WARNING, filename))
if filename == "loi.xhtml" and ">ill<" not in metadata_xhtml:
messages.append(LintMessage("loi.xhtml found, but no MARC relator 'ill' (Illustrator)", se.MESSAGE_TYPE_WARNING, filename))
# Check for wrong semantics in frontmatter/backmatter
if filename in se.FRONTMATTER_FILENAMES and "frontmatter" not in file_contents:
messages.append(LintMessage("No frontmatter semantic inflection for what looks like a frontmatter file", se.MESSAGE_TYPE_WARNING, filename))
if filename in se.BACKMATTER_FILENAMES and "backmatter" not in file_contents:
messages.append(LintMessage("No backmatter semantic inflection for what looks like a backmatter file", se.MESSAGE_TYPE_WARNING, filename))
if cover_svg_title != titlepage_svg_title:
messages.append(LintMessage("cover.svg and titlepage.svg <title> tags don't match", se.MESSAGE_TYPE_ERROR))
if has_frontmatter and not has_halftitle:
messages.append(LintMessage("Frontmatter found, but no halftitle. Halftitle is required when frontmatter is present.", se.MESSAGE_TYPE_ERROR, "content.opf"))
if not has_cover_source:
messages.append(LintMessage("./images/cover.source.jpg not found", se.MESSAGE_TYPE_ERROR, "cover.source.jpg"))
single_use_css_classes = []
for css_class in xhtml_css_classes:
if css_class not in se.IGNORED_CLASSES:
if "." + css_class not in css:
messages.append(LintMessage(f"class “{css_class}” found in xhtml, but no style in local.css", se.MESSAGE_TYPE_ERROR, "local.css"))
if xhtml_css_classes[css_class] == 1 and css_class not in se.IGNORED_CLASSES and not regex.match(r"^i[0-9]$", css_class):
# Don't count ignored classes OR i[0-9] which are used for poetry styling
single_use_css_classes.append(css_class)
if single_use_css_classes:
messages.append(LintMessage("CSS class only used once. Can a clever selector be crafted instead of a single-use class? When possible classes should not be single-use style hooks.", se.MESSAGE_TYPE_WARNING, "local.css"))
for css_class in single_use_css_classes:
messages.append(LintMessage(css_class, se.MESSAGE_TYPE_WARNING, "local.css", True))
headings = list(set(headings))
with open(self.path / "src" / "epub" / "toc.xhtml", "r", encoding="utf-8") as file:
toc = BeautifulSoup(file.read(), "lxml")
landmarks = toc.find("nav", attrs={"epub:type": "landmarks"})
toc = toc.find("nav", attrs={"epub:type": "toc"})
# Depth first search using recursiveChildGenerator to get the headings in order
toc_entries = []
for child in toc.recursiveChildGenerator():
if getattr(child, "name") == "a":
toc_entries.append(child)
# Match ToC headings against text headings
# Unlike main headings, ToC entries have a ‘:’ before the subheading so we need to strip these for comparison
toc_headings = []
for index, entry in enumerate(toc_entries):
entry_text = " ".join(entry.get_text().replace(":", "").split())
entry_file = regex.sub(r"^text\/(.*?\.xhtml).*$", r"\1", entry.get("href"))
toc_headings.append((entry_text, entry_file))
for heading in headings:
# Occasionally we find a heading with a colon, but as we’ve stripped our
# ToC-only colons above we also need to do that here for the comparison.
heading_without_colons = (heading[0].replace(":", ""), heading[1])
if heading_without_colons not in toc_headings:
messages.append(LintMessage("Heading “{}” found, but not present for that file in the ToC.".format(heading[0]), se.MESSAGE_TYPE_ERROR, heading[1]))
# Check our ordered ToC entries against the spine
# To cover all possibilities, we combine the toc and the landmarks to get the full set of entries
with open(self.path / "src" / "epub" / "content.opf", "r", encoding="utf-8") as content_opf:
toc_files = []
for index, entry in enumerate(landmarks.find_all("a", attrs={"epub:type": regex.compile("^.*(frontmatter|bodymatter).*$")})):
entry_file = regex.sub(r"^text\/(.*?\.xhtml).*$", r"\1", entry.get("href"))
toc_files.append(entry_file)
for index, entry in enumerate(toc_entries):
entry_file = regex.sub(r"^text\/(.*?\.xhtml).*$", r"\1", entry.get("href"))
toc_files.append(entry_file)
unique_toc_files: List[str] = []
for toc_file in toc_files:
if toc_file not in unique_toc_files:
unique_toc_files.append(toc_file)
toc_files = unique_toc_files
spine_entries = BeautifulSoup(content_opf.read(), "lxml").find("spine").find_all("itemref")
if len(toc_files) != len(spine_entries):
messages.append(LintMessage("The number of elements in the spine ({}) does not match the number of elements in the ToC and landmarks ({}).".format(len(toc_files), len(spine_entries)), se.MESSAGE_TYPE_ERROR, "content.opf"))
for index, entry in enumerate(spine_entries):
if toc_files[index] != entry.attrs["idref"]:
messages.append(LintMessage("The spine order does not match the order of the ToC and landmarks. Expected {}, found {}.".format(entry.attrs["idref"], toc_files[index]), se.MESSAGE_TYPE_ERROR, "content.opf"))
break
for element in abbr_elements:
try:
css_class = regex.search(r"class=\"([^\"]+?)\"", element).group(1)
except Exception:
continue
if css_class and css_class in ("temperature", "era", "acronym") and "abbr." + css_class not in abbr_styles:
messages.append(LintMessage(f"abbr.{css_class} element found, but no required style in local.css (See typgoraphy manual for style)", se.MESSAGE_TYPE_ERROR, "local.css"))
return messages
| 55.832325
| 248
| 0.695966
|
4a171cf49882c0424e4931ea5b112b59e7823a70
| 31,703
|
py
|
Python
|
aea/decision_maker/gop.py
|
bryanchriswhite/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 126
|
2019-09-07T09:32:44.000Z
|
2022-03-29T14:28:41.000Z
|
aea/decision_maker/gop.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 1,814
|
2019-08-24T10:08:07.000Z
|
2022-03-31T14:28:36.000Z
|
aea/decision_maker/gop.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 46
|
2019-09-03T22:13:58.000Z
|
2022-03-22T01:25:16.000Z
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the decision maker class."""
import copy
import logging
from enum import Enum
from typing import Any, Dict, List, Optional, cast
from aea.common import Address
from aea.crypto.wallet import Wallet
from aea.decision_maker.base import DecisionMakerHandler as BaseDecisionMakerHandler
from aea.decision_maker.base import OwnershipState as BaseOwnershipState
from aea.decision_maker.base import Preferences as BasePreferences
from aea.exceptions import enforce
from aea.helpers.preference_representations.base import (
linear_utility,
logarithmic_utility,
)
from aea.helpers.transaction.base import SignedMessage, SignedTransaction, Terms
from aea.identity.base import Identity
from aea.protocols.base import Message
from aea.protocols.dialogue.base import Dialogue as BaseDialogue
CurrencyHoldings = Dict[str, int] # a map from identifier to quantity
GoodHoldings = Dict[str, int] # a map from identifier to quantity
UtilityParams = Dict[str, float] # a map from identifier to quantity
ExchangeParams = Dict[str, float] # a map from identifier to quantity
_default_logger = logging.getLogger(__name__)
class GoalPursuitReadiness:
"""The goal pursuit readiness."""
__slots__ = ("_status",)
class Status(Enum):
"""
The enum of the readiness status.
In particular, it can be one of the following:
- Status.READY: when the agent is ready to pursuit its goal
- Status.NOT_READY: when the agent is not ready to pursuit its goal
"""
READY = "ready"
NOT_READY = "not_ready"
def __init__(self) -> None:
"""Instantiate the goal pursuit readiness."""
self._status = GoalPursuitReadiness.Status.NOT_READY
@property
def is_ready(self) -> bool:
"""Get the readiness."""
return self._status.value == GoalPursuitReadiness.Status.READY.value
def update(self, new_status: Status) -> None:
"""
Update the goal pursuit readiness.
:param new_status: the new status
"""
self._status = new_status
class OwnershipState(BaseOwnershipState):
"""Represent the ownership state of an agent (can proxy a ledger)."""
__slots__ = ("_amount_by_currency_id", "_quantities_by_good_id")
def __init__(self) -> None:
"""Instantiate an ownership state object."""
self._amount_by_currency_id = None # type: Optional[CurrencyHoldings]
self._quantities_by_good_id = None # type: Optional[GoodHoldings]
def set( # pylint: disable=arguments-differ
self,
amount_by_currency_id: CurrencyHoldings = None,
quantities_by_good_id: GoodHoldings = None,
**kwargs: Any,
) -> None:
"""
Set values on the ownership state.
:param amount_by_currency_id: the currency endowment of the agent in this state.
:param quantities_by_good_id: the good endowment of the agent in this state.
:param kwargs: the keyword arguments.
"""
if amount_by_currency_id is None: # pragma: nocover
raise ValueError("Must provide amount_by_currency_id.")
if quantities_by_good_id is None: # pragma: nocover
raise ValueError("Must provide quantities_by_good_id.")
enforce(
not self.is_initialized,
"Cannot apply state update, current state is already initialized!",
)
self._amount_by_currency_id = copy.copy(amount_by_currency_id)
self._quantities_by_good_id = copy.copy(quantities_by_good_id)
def apply_delta( # pylint: disable=arguments-differ
self,
delta_amount_by_currency_id: Dict[str, int] = None,
delta_quantities_by_good_id: Dict[str, int] = None,
**kwargs: Any,
) -> None:
"""
Apply a state update to the ownership state.
This method is used to apply a raw state update without a transaction.
:param delta_amount_by_currency_id: the delta in the currency amounts
:param delta_quantities_by_good_id: the delta in the quantities by good
:param kwargs: the keyword arguments
"""
if delta_amount_by_currency_id is None: # pragma: nocover
raise ValueError("Must provide delta_amount_by_currency_id.")
if delta_quantities_by_good_id is None: # pragma: nocover
raise ValueError("Must provide delta_quantities_by_good_id.")
if self._amount_by_currency_id is None or self._quantities_by_good_id is None:
raise ValueError( # pragma: nocover
"Cannot apply state update, current state is not initialized!"
)
enforce(
all(
[
key in self._amount_by_currency_id
for key in delta_amount_by_currency_id.keys()
]
),
"Invalid keys present in delta_amount_by_currency_id.",
)
enforce(
all(
[
key in self._quantities_by_good_id
for key in delta_quantities_by_good_id.keys()
]
),
"Invalid keys present in delta_quantities_by_good_id.",
)
for currency_id, amount_delta in delta_amount_by_currency_id.items():
self._amount_by_currency_id[currency_id] += amount_delta
for good_id, quantity_delta in delta_quantities_by_good_id.items():
self._quantities_by_good_id[good_id] += quantity_delta
@property
def is_initialized(self) -> bool:
"""Get the initialization status."""
return (
self._amount_by_currency_id is not None
and self._quantities_by_good_id is not None
)
@property
def amount_by_currency_id(self) -> CurrencyHoldings:
"""Get currency holdings in this state."""
if self._amount_by_currency_id is None:
raise ValueError("amount_by_currency_id is not set!")
return copy.copy(self._amount_by_currency_id)
@property
def quantities_by_good_id(self) -> GoodHoldings:
"""Get good holdings in this state."""
if self._quantities_by_good_id is None:
raise ValueError("quantities_by_good_id is not set!")
return copy.copy(self._quantities_by_good_id)
def is_affordable_transaction(self, terms: Terms) -> bool:
"""
Check if the transaction is affordable (and consistent).
E.g. check that the agent state has enough money if it is a buyer or enough holdings if it is a seller.
Note, the agent is the sender of the transaction message by design.
:param terms: the transaction terms
:return: True if the transaction is legal wrt the current state, false otherwise.
"""
if all(amount == 0 for amount in terms.amount_by_currency_id.values()) and all(
quantity == 0 for quantity in terms.quantities_by_good_id.values()
):
# reject the transaction when there is no wealth exchange
result = False
elif all(
amount <= 0 for amount in terms.amount_by_currency_id.values()
) and all(quantity >= 0 for quantity in terms.quantities_by_good_id.values()):
# check if the agent has the money to cover the sender_amount (the agent=sender is the buyer)
result = all(
self.amount_by_currency_id[currency_id] >= -amount
for currency_id, amount in terms.amount_by_currency_id.items()
)
elif all(
amount >= 0 for amount in terms.amount_by_currency_id.values()
) and all(quantity <= 0 for quantity in terms.quantities_by_good_id.values()):
# check if the agent has the goods (the agent=sender is the seller).
result = all(
self.quantities_by_good_id[good_id] >= -quantity
for good_id, quantity in terms.quantities_by_good_id.items()
)
else:
result = False
return result
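    # Illustrative sketch (not part of the original module): with holdings of
    # {"FET": 10}, terms with amount_by_currency_id={"FET": -5} and
    # quantities_by_good_id={"good_1": 1} fall into the "buyer" branch above and
    # are affordable because 10 >= 5; the same terms with holdings of {"FET": 3}
    # would make is_affordable_transaction return False.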
def is_affordable(self, terms: Terms) -> bool:
"""
Check if the tx is affordable.
:param terms: the transaction terms
:return: whether the transaction is affordable or not
"""
if self.is_initialized:
is_affordable = self.is_affordable_transaction(terms)
else:
_default_logger.debug(
"Cannot verify whether transaction is affordable as ownership state is not initialized. Assuming it is!"
)
is_affordable = True
return is_affordable
def update(self, terms: Terms) -> None:
"""
Update the agent state from a transaction.
:param terms: the transaction terms
"""
if self._amount_by_currency_id is None or self._quantities_by_good_id is None:
raise ValueError( # pragma: nocover
"Cannot apply state update, current state is not initialized!"
)
for currency_id, amount_delta in terms.amount_by_currency_id.items():
self._amount_by_currency_id[currency_id] += amount_delta
for good_id, quantity_delta in terms.quantities_by_good_id.items():
self._quantities_by_good_id[good_id] += quantity_delta
def apply_transactions(self, list_of_terms: List[Terms]) -> "OwnershipState":
"""
Apply a list of transactions to (a copy of) the current state.
:param list_of_terms: the sequence of transaction terms.
:return: the final state.
"""
new_state = copy.copy(self)
for terms in list_of_terms:
new_state.update(terms)
return new_state
def __copy__(self) -> "OwnershipState":
"""Copy the object."""
state = OwnershipState()
if self.is_initialized:
state._amount_by_currency_id = self.amount_by_currency_id
state._quantities_by_good_id = self.quantities_by_good_id
return state
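# Usage sketch (illustrative, not from the original source): initialize an
# OwnershipState and apply a raw delta.
#
#     state = OwnershipState()
#     state.set(amount_by_currency_id={"FET": 100}, quantities_by_good_id={"g1": 2})
#     state.apply_delta(delta_amount_by_currency_id={"FET": -10},
#                       delta_quantities_by_good_id={"g1": 1})
#     state.amount_by_currency_id  # -> {"FET": 90}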
class Preferences(BasePreferences):
"""Class to represent the preferences."""
__slots__ = ("_exchange_params_by_currency_id", "_utility_params_by_good_id")
def __init__(self) -> None:
"""Instantiate an agent preference object."""
self._exchange_params_by_currency_id = None # type: Optional[ExchangeParams]
self._utility_params_by_good_id = None # type: Optional[UtilityParams]
def set( # pylint: disable=arguments-differ
self,
exchange_params_by_currency_id: ExchangeParams = None,
utility_params_by_good_id: UtilityParams = None,
**kwargs: Any,
) -> None:
"""
Set values on the preferences.
:param exchange_params_by_currency_id: the exchange params.
:param utility_params_by_good_id: the utility params for every asset.
:param kwargs: the keyword arguments.
"""
if exchange_params_by_currency_id is None: # pragma: nocover
raise ValueError("Must provide exchange_params_by_currency_id.")
if utility_params_by_good_id is None: # pragma: nocover
raise ValueError("Must provide utility_params_by_good_id.")
enforce(
not self.is_initialized,
"Cannot apply preferences update, preferences already initialized!",
)
self._exchange_params_by_currency_id = copy.copy(exchange_params_by_currency_id)
self._utility_params_by_good_id = copy.copy(utility_params_by_good_id)
@property
def is_initialized(self) -> bool:
"""
Get the initialization status.
:return: True if exchange_params_by_currency_id and utility_params_by_good_id are not None.
"""
return (self._exchange_params_by_currency_id is not None) and (
self._utility_params_by_good_id is not None
)
@property
def exchange_params_by_currency_id(self) -> ExchangeParams:
"""Get exchange parameter for each currency."""
if self._exchange_params_by_currency_id is None:
raise ValueError("ExchangeParams not set!")
return self._exchange_params_by_currency_id
@property
def utility_params_by_good_id(self) -> UtilityParams:
"""Get utility parameter for each good."""
if self._utility_params_by_good_id is None:
raise ValueError("UtilityParams not set!")
return self._utility_params_by_good_id
def logarithmic_utility(self, quantities_by_good_id: GoodHoldings) -> float:
"""
Compute agent's utility given her utility function params and a good bundle.
:param quantities_by_good_id: the good holdings (dictionary) with the identifier (key) and quantity (value) for each good
:return: utility value
"""
enforce(self.is_initialized, "Preferences params not set!")
result = logarithmic_utility(
self.utility_params_by_good_id, quantities_by_good_id,
)
return result
def linear_utility(self, amount_by_currency_id: CurrencyHoldings) -> float:
"""
Compute agent's utility given her utility function params and a currency bundle.
:param amount_by_currency_id: the currency holdings (dictionary) with the identifier (key) and quantity (value) for each currency
:return: utility value
"""
enforce(self.is_initialized, "Preferences params not set!")
result = linear_utility(
self.exchange_params_by_currency_id, amount_by_currency_id
)
return result
def utility(
self,
quantities_by_good_id: GoodHoldings,
amount_by_currency_id: CurrencyHoldings,
) -> float:
"""
Compute the utility given the good and currency holdings.
:param quantities_by_good_id: the good holdings
:param amount_by_currency_id: the currency holdings
:return: the utility value.
"""
enforce(self.is_initialized, "Preferences params not set!")
goods_score = self.logarithmic_utility(quantities_by_good_id)
currency_score = self.linear_utility(amount_by_currency_id)
score = goods_score + currency_score
return score
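    # Illustrative sketch (not part of the original module): the score is simply the
    # sum of the two component utilities computed above, e.g.
    #
    #     preferences = Preferences()
    #     preferences.set(exchange_params_by_currency_id={"FET": 1.0},
    #                     utility_params_by_good_id={"g1": 1.0})
    #     score = preferences.utility(quantities_by_good_id={"g1": 2},
    #                                 amount_by_currency_id={"FET": 100})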
def marginal_utility( # pylint: disable=arguments-differ
self,
ownership_state: BaseOwnershipState,
delta_quantities_by_good_id: Optional[GoodHoldings] = None,
delta_amount_by_currency_id: Optional[CurrencyHoldings] = None,
**kwargs: Any,
) -> float:
"""
Compute the marginal utility.
:param ownership_state: the ownership state against which to compute the marginal utility.
:param delta_quantities_by_good_id: the change in good holdings
:param delta_amount_by_currency_id: the change in money holdings
:param kwargs: the keyword arguments
:return: the marginal utility score
"""
enforce(self.is_initialized, "Preferences params not set!")
ownership_state = cast(OwnershipState, ownership_state)
current_goods_score = self.logarithmic_utility(
ownership_state.quantities_by_good_id
)
current_currency_score = self.linear_utility(
ownership_state.amount_by_currency_id
)
new_goods_score = current_goods_score
new_currency_score = current_currency_score
if delta_quantities_by_good_id is not None:
new_quantities_by_good_id = {
good_id: quantity + delta_quantities_by_good_id[good_id]
for good_id, quantity in ownership_state.quantities_by_good_id.items()
}
new_goods_score = self.logarithmic_utility(new_quantities_by_good_id)
if delta_amount_by_currency_id is not None:
new_amount_by_currency_id = {
currency: amount + delta_amount_by_currency_id[currency]
for currency, amount in ownership_state.amount_by_currency_id.items()
}
new_currency_score = self.linear_utility(new_amount_by_currency_id)
marginal_utility = (
new_goods_score
+ new_currency_score
- current_goods_score
- current_currency_score
)
return marginal_utility
def utility_diff_from_transaction(
self, ownership_state: BaseOwnershipState, terms: Terms
) -> float:
"""
Simulate a transaction and get the resulting utility difference (taking into account the fee).
:param ownership_state: the ownership state against which to apply the transaction.
:param terms: the transaction terms.
:return: the score.
"""
enforce(self.is_initialized, "Preferences params not set!")
ownership_state = cast(OwnershipState, ownership_state)
current_score = self.utility(
quantities_by_good_id=ownership_state.quantities_by_good_id,
amount_by_currency_id=ownership_state.amount_by_currency_id,
)
new_ownership_state = ownership_state.apply_transactions([terms])
new_score = self.utility(
quantities_by_good_id=new_ownership_state.quantities_by_good_id,
amount_by_currency_id=new_ownership_state.amount_by_currency_id,
)
score_difference = new_score - current_score
return score_difference
def is_utility_enhancing(
self, ownership_state: BaseOwnershipState, terms: Terms
) -> bool:
"""
Check if the tx is utility enhancing.
:param ownership_state: the ownership state against which to apply the transaction.
:param terms: the transaction terms
:return: whether the transaction is utility enhancing or not
"""
if self.is_initialized and ownership_state.is_initialized:
is_utility_enhancing = (
self.utility_diff_from_transaction(ownership_state, terms) >= 0.0
)
else:
_default_logger.debug(
"Cannot verify whether transaction improves utility as preferences are not initialized. Assuming it does!"
)
is_utility_enhancing = True
return is_utility_enhancing
def __copy__(self) -> "Preferences":
"""Copy the object."""
preferences = Preferences()
if self.is_initialized:
preferences._exchange_params_by_currency_id = (
self.exchange_params_by_currency_id
)
preferences._utility_params_by_good_id = self.utility_params_by_good_id
return preferences
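# Usage sketch (illustrative, not from the original source): given an initialized
# OwnershipState and Preferences and a Terms object built elsewhere, the decision
# maker accepts a transaction only if it is both utility enhancing and affordable,
# mirroring _is_acceptable_for_signing below:
#
#     ok = (preferences.is_utility_enhancing(ownership_state, terms)
#           and ownership_state.is_affordable(terms))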
class DecisionMakerHandler(BaseDecisionMakerHandler):
"""This class implements the decision maker."""
# pylint: disable=import-outside-toplevel
from packages.fetchai.protocols.signing.dialogues import ( # noqa: F811
SigningDialogue,
)
from packages.fetchai.protocols.signing.dialogues import ( # noqa: F811
SigningDialogues as BaseSigningDialogues,
)
from packages.fetchai.protocols.signing.message import SigningMessage # noqa: F811
from packages.fetchai.protocols.state_update.dialogues import ( # noqa: F811
StateUpdateDialogue,
)
from packages.fetchai.protocols.state_update.dialogues import ( # noqa: F811
StateUpdateDialogues as BaseStateUpdateDialogues,
)
from packages.fetchai.protocols.state_update.message import ( # noqa: F811
StateUpdateMessage,
)
signing_dialogues_class = SigningDialogue
signing_msg_class = SigningMessage
state_update_dialogue_class = StateUpdateDialogue
state_update_msg_class = StateUpdateMessage
class SigningDialogues(BaseSigningDialogues):
"""This class keeps track of all oef_search dialogues."""
def __init__(self, self_address: Address, **kwargs: Any) -> None:
"""
Initialize dialogues.
:param self_address: the address of the entity for whom dialogues are maintained
:param kwargs: the keyword arguments
"""
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> BaseDialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
from packages.fetchai.protocols.signing.dialogues import ( # pylint: disable=import-outside-toplevel
SigningDialogue,
)
return SigningDialogue.Role.DECISION_MAKER
# pylint: disable=import-outside-toplevel
from packages.fetchai.protocols.signing.dialogues import (
SigningDialogues as BaseSigningDialogues,
)
BaseSigningDialogues.__init__(
self,
self_address=self_address,
role_from_first_message=role_from_first_message,
**kwargs,
)
class StateUpdateDialogues(BaseStateUpdateDialogues):
"""This class keeps track of all oef_search dialogues."""
def __init__(self, self_address: Address, **kwargs: Any) -> None:
"""
Initialize dialogues.
:param self_address: the address of the entity for whom dialogues are maintained
:param kwargs: the keyword arguments
"""
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> BaseDialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
from packages.fetchai.protocols.state_update.dialogues import ( # noqa: F811 # pylint: disable=import-outside-toplevel
StateUpdateDialogue,
)
return StateUpdateDialogue.Role.DECISION_MAKER
# pylint: disable=import-outside-toplevel
from packages.fetchai.protocols.state_update.dialogues import ( # noqa: F401
StateUpdateDialogues as BaseStateUpdateDialogues,
)
BaseStateUpdateDialogues.__init__(
self,
self_address=self_address,
role_from_first_message=role_from_first_message,
**kwargs,
)
__slots__ = ("signing_dialogues", "state_update_dialogues")
def __init__(
self, identity: Identity, wallet: Wallet, config: Dict[str, Any]
) -> None:
"""
Initialize the decision maker.
:param identity: the identity
:param wallet: the wallet
:param config: the user defined configuration of the handler
"""
kwargs: Dict[str, Any] = {
"goal_pursuit_readiness": GoalPursuitReadiness(),
"ownership_state": OwnershipState(),
"preferences": Preferences(),
}
super().__init__(identity=identity, wallet=wallet, config=config, **kwargs)
self.signing_dialogues = DecisionMakerHandler.SigningDialogues(
self.self_address
)
self.state_update_dialogues = DecisionMakerHandler.StateUpdateDialogues(
self.self_address
)
def handle(self, message: Message) -> None:
"""
Handle an internal message from the skills.
:param message: the internal message
"""
if isinstance(message, self.signing_msg_class):
self._handle_signing_message(message)
elif isinstance(message, self.state_update_msg_class):
self._handle_state_update_message(message)
else: # pragma: no cover
self.logger.error(
"[{}]: cannot handle message={} of type={}".format(
self.agent_name, message, type(message)
)
)
def _handle_signing_message(self, signing_msg: SigningMessage) -> None:
"""
Handle a signing message.
:param signing_msg: the transaction message
"""
if not self.context.goal_pursuit_readiness.is_ready:
self.logger.debug(
"[{}]: Preferences and ownership state not initialized!".format(
self.agent_name
)
)
signing_dialogue = self.signing_dialogues.update(signing_msg)
if signing_dialogue is None or not isinstance(
signing_dialogue, self.signing_dialogues_class
): # pragma: no cover
self.logger.error(
"[{}]: Could not construct signing dialogue. Aborting!".format(
self.agent_name
)
)
return
# check if the transaction is acceptable and process it accordingly
if signing_msg.performative == self.signing_msg_class.Performative.SIGN_MESSAGE:
self._handle_message_signing(signing_msg, signing_dialogue)
elif (
signing_msg.performative
== self.signing_msg_class.Performative.SIGN_TRANSACTION
):
self._handle_transaction_signing(signing_msg, signing_dialogue)
else: # pragma: no cover
self.logger.error(
"[{}]: Unexpected transaction message performative".format(
self.agent_name
)
)
def _handle_message_signing(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle a message for signing.
:param signing_msg: the signing message
:param signing_dialogue: the signing dialogue
"""
performative = self.signing_msg_class.Performative.ERROR
kwargs = {
"error_code": self.signing_msg_class.ErrorCode.UNSUCCESSFUL_MESSAGE_SIGNING,
} # type: Dict[str, Any]
if self._is_acceptable_for_signing(signing_msg):
signed_message = self.wallet.sign_message(
signing_msg.raw_message.ledger_id,
signing_msg.raw_message.body,
signing_msg.raw_message.is_deprecated_mode,
)
if signed_message is not None:
performative = self.signing_msg_class.Performative.SIGNED_MESSAGE
kwargs.pop("error_code")
kwargs["signed_message"] = SignedMessage(
signing_msg.raw_message.ledger_id,
signed_message,
signing_msg.raw_message.is_deprecated_mode,
)
signing_msg_response = signing_dialogue.reply(
performative=performative, target_message=signing_msg, **kwargs,
)
self.message_out_queue.put(signing_msg_response)
def _handle_transaction_signing(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle a transaction for signing.
:param signing_msg: the signing message
:param signing_dialogue: the signing dialogue
"""
performative = self.signing_msg_class.Performative.ERROR
kwargs = {
"error_code": self.signing_msg_class.ErrorCode.UNSUCCESSFUL_TRANSACTION_SIGNING,
} # type: Dict[str, Any]
if self._is_acceptable_for_signing(signing_msg):
signed_tx = self.wallet.sign_transaction(
signing_msg.raw_transaction.ledger_id, signing_msg.raw_transaction.body
)
if signed_tx is not None:
performative = self.signing_msg_class.Performative.SIGNED_TRANSACTION
kwargs.pop("error_code")
kwargs["signed_transaction"] = SignedTransaction(
signing_msg.raw_transaction.ledger_id, signed_tx
)
signing_msg_response = signing_dialogue.reply(
performative=performative, target_message=signing_msg, **kwargs,
)
self.message_out_queue.put(signing_msg_response)
def _is_acceptable_for_signing(self, signing_msg: SigningMessage) -> bool:
"""
Check if the tx message is acceptable for signing.
:param signing_msg: the transaction message
:return: whether the transaction is acceptable or not
"""
result = self.context.preferences.is_utility_enhancing(
self.context.ownership_state, signing_msg.terms
) and self.context.ownership_state.is_affordable(signing_msg.terms)
return result
def _handle_state_update_message(
self, state_update_msg: StateUpdateMessage
) -> None:
"""
Handle a state update message.
:param state_update_msg: the state update message
"""
state_update_dialogue = self.state_update_dialogues.update(state_update_msg)
if state_update_dialogue is None or not isinstance(
state_update_dialogue, self.state_update_dialogue_class
): # pragma: no cover
self.logger.error(
"[{}]: Could not construct state_update dialogue. Aborting!".format(
self.agent_name
)
)
return
if (
state_update_msg.performative
== self.state_update_msg_class.Performative.INITIALIZE
):
self.logger.info(
"[{}]: Applying ownership_state and preferences initialization!".format(
self.agent_name
)
)
self.context.ownership_state.set(
amount_by_currency_id=state_update_msg.amount_by_currency_id,
quantities_by_good_id=state_update_msg.quantities_by_good_id,
)
self.context.preferences.set(
exchange_params_by_currency_id=state_update_msg.exchange_params_by_currency_id,
utility_params_by_good_id=state_update_msg.utility_params_by_good_id,
)
self.context.goal_pursuit_readiness.update(
GoalPursuitReadiness.Status.READY
)
elif (
state_update_msg.performative
== self.state_update_msg_class.Performative.APPLY
):
self.logger.info("[{}]: Applying state update!".format(self.agent_name))
self.context.ownership_state.apply_delta(
delta_amount_by_currency_id=state_update_msg.amount_by_currency_id,
delta_quantities_by_good_id=state_update_msg.quantities_by_good_id,
)
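# Usage sketch (illustrative, not from the original source): the handler is built by
# the framework from an Identity, a Wallet and a config dict,
#
#     handler = DecisionMakerHandler(identity=identity, wallet=wallet, config={})
#
# and internal messages from skills are dispatched through handler.handle(message),
# which routes SigningMessage and StateUpdateMessage instances as shown above.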
| 40.13038
| 137
| 0.645933
|
4a171d9cbdbe17a79cbf29a11ec5396b7188eb63
| 1,829
|
py
|
Python
|
local-cli/generator-ubuntu/templates/snap/parts/plugins/x-nodejs.py
|
Abdulhafiz-Yusuf/react-native
|
98e0ce38cdcb8c489a064c436a353be754e95f89
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 1,079
|
2016-08-02T16:20:28.000Z
|
2022-03-22T20:40:37.000Z
|
local-cli/generator-ubuntu/templates/snap/parts/plugins/x-nodejs.py
|
daisty/react-native-1
|
98e0ce38cdcb8c489a064c436a353be754e95f89
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 3
|
2016-08-04T07:57:48.000Z
|
2020-05-21T05:02:52.000Z
|
local-cli/generator-ubuntu/templates/snap/parts/plugins/x-nodejs.py
|
daisty/react-native-1
|
98e0ce38cdcb8c489a064c436a353be754e95f89
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 80
|
2016-08-03T09:34:32.000Z
|
2021-04-22T16:57:11.000Z
|
import os
import platform
import shutil  # needed by clean_pull()
import snapcraft
from snapcraft import sources
_NODEJS_BASE = 'node-v{version}-linux-{arch}'
_NODEJS_VERSION = '4.2.6'
_NODEJS_TMPL = 'https://nodejs.org/dist/v{version}/{base}.tar.gz'
_NODEJS_ARCHES = {
'i686': 'x86',
'x86_64': 'x64',
'armv7l': 'armv7l',
}
class NodeJsPlugin(snapcraft.BasePlugin):
@classmethod
def schema(cls):
schema = super().schema()
schema['properties']['node_version'] = {
'type': 'string',
'default': _NODEJS_VERSION
}
if 'required' in schema:
del schema['required']
return schema
def __init__(self, name, options, project):
super().__init__(name, options, project)
self._nodejs_dir = os.path.join(self.partdir, 'node')
self._nodejs_tar = sources.Tar(get_nodejs_release(
self.options.node_version), self._nodejs_dir)
def pull(self):
super().pull()
os.makedirs(self._nodejs_dir, exist_ok=True)
self._nodejs_tar.download()
def clean_pull(self):
super().clean_pull()
if os.path.exists(self._nodejs_dir):
shutil.rmtree(self._nodejs_dir)
def build(self):
super().build()
self._nodejs_tar.provision(
self.installdir, clean_target=False, keep_tarball=True)
def snap_fileset(self):
return (['-bin/npm', '-CHANGELOG.md', '-LICENSE', '-README.md', '-lib', '-include', '-share'])
def get_nodejs_release(node_version):
return _NODEJS_TMPL.format(version=node_version,
base=_get_nodejs_base(node_version))
def _get_nodejs_base(node_version):
machine = platform.machine()
if machine not in _NODEJS_ARCHES:
raise EnvironmentError('architecture not supported ({})'.format(
machine))
return _NODEJS_BASE.format(version=node_version,
arch=_NODEJS_ARCHES[machine])
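# Example (illustrative, not part of the original plugin): with the default
# node_version '4.2.6' on an x86_64 machine, get_nodejs_release() returns
# 'https://nodejs.org/dist/v4.2.6/node-v4.2.6-linux-x64.tar.gz'.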
| 25.760563
| 98
| 0.666484
|
4a171dfd0a41fe8f6e7516c77e48e8440697e549
| 11,093
|
py
|
Python
|
WebApp/main/views.py
|
georg-wenzel/ml-data-smell-detection
|
7dddd401ca1f1a830dfd8b00760659911e5b1086
|
[
"MIT"
] | 1
|
2022-03-29T14:46:40.000Z
|
2022-03-29T14:46:40.000Z
|
WebApp/main/views.py
|
georg-wenzel/ml-data-smell-detection
|
7dddd401ca1f1a830dfd8b00760659911e5b1086
|
[
"MIT"
] | null | null | null |
WebApp/main/views.py
|
georg-wenzel/ml-data-smell-detection
|
7dddd401ca1f1a830dfd8b00760659911e5b1086
|
[
"MIT"
] | 1
|
2021-06-13T08:24:46.000Z
|
2021-06-13T08:24:46.000Z
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.http import HttpResponse
from main.utility.DjangoUtility import dict_contains_all
from .models import Agent, Dataset, Column, Smell
from main.utility.DatabaseUtility import safe_get
from main.utility import GensimUtility
from main.utility.AgentUtility import agent_handlers
from django.core.cache import cache
from main.forms import AddAgentForm
import pandas as pd
from django.utils.html import escape
# Generic error template used by the model/settings download views below (assumed wording;
# the constant is referenced there but not defined or imported in this module).
ERR_UNAUTHORIZED = "Could not find the requested {} or you are not authorized to access it."
#views for non dataset- and agent-specific calls
#render base logged in page if logged in, otherwise redirect to user login
def home(request):
if request.user.is_authenticated:
smells = Smell.objects.all
form = AddAgentForm()
return render(request, 'main/baseloggedin.html', {"username": request.user.username, "smells": smells, "form": form})
else:
return redirect('/user/login')
#analyze page (validate trained agent against uploaded dataset)
def analyze(request):
if not request.user.is_authenticated:
return redirect('/user/login')
#if not post request
if request.method != "POST":
#get all agents for this user that have a settings file or have an external training set (i.e. are trained)
agents = [x for x in Agent.objects.filter(user=request.user) if x.settings.name or x.external_prepared]
#get all datasets for this user
datasets = Dataset.objects.filter(user=request.user)
#display in form
return render(request, 'main/analyze.html', {"username": request.user.username, "agents": agents, "datasets": datasets})
#if post
if request.method == "POST":
#kwargs will contain arguments which are not required for every type of agent
kwargs = dict()
#make sure all required parameters are passed (agent and dataset)
if not dict_contains_all(request.POST, ["dataset", "agent"]):
messages.add_message(request, messages.ERROR, "Not all required fields were filled.")
return redirect('/analyze')
dataset = safe_get(Dataset, id=request.POST['dataset'])
agent = safe_get(Agent, id=request.POST['agent'])
if not agent or not dataset:
messages.add_message(request, messages.ERROR, "Could not find agent or dataset.")
return redirect('/analyze/')
#make sure column is present for non-dedupe agents
if 'column' in request.POST:
column = safe_get(Column, id=request.POST['column'])
if not column:
messages.add_message(request, messages.ERROR, "Could not find specified column.")
return redirect('/analyze/')
kwargs['column'] = column
else:
if agent.agent_type.id != 1: #if not dedupe agent, error
messages.add_message(request, messages.ERROR, "Column must be specified for non-Dedupe agents.")
return redirect('/analyze')
#make sure rcsSum, rcsThreshold and rcsSecondaryAgent are present for gensim agents
if dict_contains_all(request.POST, ['rcsThreshold', 'rcsNum', 'rcsSecondaryAgent']):
kwargs['rcsNum'] = int(request.POST['rcsNum'])
kwargs['rcsThreshold'] = float(request.POST['rcsThreshold'])
if(request.POST['rcsSecondaryAgent'] != '-1'): kwargs['rcsSecondaryAgent'] = int(request.POST['rcsSecondaryAgent'])
else:
if agent.agent_type.id == 2:
messages.add_message(request, messages.ERROR, "RCS value and threshold must be specified for gensim agents.")
return redirect('/analyze')
#pass to agent based handling if we got this far
return agent_handlers[agent.agent_type.id-1].analyze(request, agent, dataset, **kwargs)
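# Example request (illustrative, not from the original source): the analyze form posts
# at least 'dataset' and 'agent' ids; non-Dedupe agents additionally require 'column',
# and gensim agents require 'rcsNum', 'rcsThreshold' and 'rcsSecondaryAgent'
# (where '-1' means no secondary agent), e.g.
#
#     {"dataset": "3", "agent": "7", "column": "12",
#      "rcsNum": "10", "rcsThreshold": "0.5", "rcsSecondaryAgent": "-1"}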
#duplicates page (called via post from gensim analyze screen to give details on synonymous pairs of words)
def duplicates(request):
#authentication
if not request.user.is_authenticated:
return redirect('/user/login')
#only allow post requests
if request.method != "POST":
messages.add_message(request, messages.ERROR, "This link must be called via POST.")
return redirect('/analyze')
#get dataset and column provided in the form
if not dict_contains_all(request.POST, ["dataset", "column"]):
messages.add_message(request, messages.ERROR, "Not all required fields were provided.")
return redirect('/analyze')
dataset = safe_get(Dataset, id=request.POST['dataset'])
column = safe_get(Column, id=request.POST['column'])
if not column or not dataset or not (dataset.user == request.user) or not (column.dataset == dataset):
messages.add_message(request, messages.ERROR, "An error occured during authorization or fetching the requested data.")
return redirect('/analyze')
#each pair key is in the form word1,word2,rcs
#out of this key, we build the corresponding word pair
pairs = []
for key in request.POST:
if len(key.split(",")) == 3:
word1 = key.split(",")[0]
word2 = key.split(",")[1]
rcs = float(key.split(",")[2])
#append the word pair
pairs.append((word1, word2, rcs))
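    # Example (illustrative, not from the original source): a posted key of
    # "car,automobile,0.87" yields the pair ("car", "automobile", 0.87).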
# this is simply dictionary preparation to avoid key errors
    # the occurences dictionary is keyed by the pair index
# at each index is another dictionary containing "word1", "word2" with the respective word of the word pair
# and "word1_occurences" and "word2_occurences" containing every sentence in the corresponding column which contains this word.
# finally, we simply propagate the RCS again
# word1_examples and word2_examples contain up to 5 examples of escaped strings with a <strong> tag around the desired word.
# These are not shown in the newly created dataset, but passed on as examples to the template.
occurences = dict()
for i, (word1, word2, rcs) in enumerate(pairs):
occurences[i] = dict()
occurences[i]["word1"] = word1
occurences[i]["word2"] = word2
occurences[i]["word1_occurences"] = []
occurences[i]["word2_occurences"] = []
occurences[i]["rcs"] = rcs
occurences[i]["word1_examples"] = []
occurences[i]["word2_examples"] = []
#here we get the sentences in an in-memory array
sentences = GensimUtility.get_unfiltered(dataset.upload.path, column.name)
#we iterate over the sentences and find all word occurences and add them to the corresponding dictionary entry
for sentence in sentences:
for i, (word1, word2, _) in enumerate(pairs):
if " " + word1 + " " in sentence:
occurences[i]["word1_occurences"].append(sentence)
if len(occurences[i]["word1_examples"]) < 5:
occurences[i]["word1_examples"].append(escape(sentence).replace(word1, "<strong>" + word1 + "</strong>"))
elif " " + word2 + " " in sentence:
occurences[i]["word2_occurences"].append(sentence)
if len(occurences[i]["word2_examples"]) < 5:
occurences[i]["word2_examples"].append(escape(sentence).replace(word2, "<strong>" + word2 + "</strong>"))
rows = []
#iterate over all word pairs and append occurences as well as ratio to each word pair, build new dataset
for i in occurences:
rows.append([occurences[i]["word1"], occurences[i]["word2"], occurences[i]["rcs"], len(occurences[i]["word1_occurences"]),
len(occurences[i]["word2_occurences"]),
round(len(occurences[i]["word1_occurences"])/len(occurences[i]["word2_occurences"]), 4) if len(occurences[i]["word2_occurences"]) > 0 else None])
#store new dataset in cache
df = pd.DataFrame(rows, columns=["word", "synonym", "rcs", "Word 1 occurences", "Word 2 occurences", "Occurence Ratio"])
cache.set(request.user.username + "_dataset", df)
#display the results
return render(request, 'main/duplicates.html', {"dataset": dataset, "column": column, "matches": occurences, "username": request.user.username})
#view which is called when the user clicks a download button
def download(request):
#authentication
if not request.user.is_authenticated:
return redirect('/user/login')
#get the cache value for this user
csv = cache.get(request.user.username + "_dataset")
#check that the cache file exists and is a pandas dataframe
if (type(csv) == pd.DataFrame):
# return this object as a filestream
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;filename="results.csv"'
csv.to_csv(response, index=False)
return response
#otherwise return to the analyze page with an error
messages.add_message(request, messages.ERROR, "No file to download was found for this user.")
return redirect('/analyze')
#view which is called when the user requests to download an agent's model file
def download_model(request, id):
#authentication
if not request.user.is_authenticated:
return redirect('/user/login')
agent = safe_get(Agent, id=id)
#make sure agent exists and belongs to this user
if not agent:
messages.add_message(request, messages.ERROR, ERR_UNAUTHORIZED.format("agent"))
return redirect('/agents')
if agent.user != request.user:
messages.add_message(request, messages.ERROR, ERR_UNAUTHORIZED.format("agent"))
return redirect('/agents')
#make sure the agent has a model file
if not agent.model:
messages.add_message(request, messages.ERROR, "Requested agent does not have a model file.")
return redirect('/agents/' + id)
# return the file as a filestream
with open(agent.model.name, 'rb') as f:
response = HttpResponse(f.read())
response['Content-Disposition'] = 'attachment;filename="model"'
return response
#view which is called when the user requests to download an agent's settings file
def download_settings(request, id):
#authentication
if not request.user.is_authenticated:
return redirect('/user/login')
agent = safe_get(Agent, id=id)
#make sure agent exists and belongs to this user
if not agent:
messages.add_message(request, messages.ERROR, ERR_UNAUTHORIZED.format("agent"))
return redirect('/agents')
if agent.user != request.user:
messages.add_message(request, messages.ERROR, ERR_UNAUTHORIZED.format("agent"))
return redirect('/agents')
#make sure the agent has a settings file
if not agent.settings:
messages.add_message(request, messages.ERROR, "Requested agent does not have a settings file.")
return redirect('/agents/' + id)
# return the file as a filestream
with open(agent.settings.name, 'rb') as f:
response = HttpResponse(f.read())
response['Content-Disposition'] = 'attachment;filename="settings"'
return response
| 47.004237
| 169
| 0.669161
|
4a171f5a883bccf544f861c574df6856268acbf4
| 13,089
|
py
|
Python
|
pytype/tools/analyze_project/pytype_runner.py
|
Jrryy/pytype
|
2d2855dc97d5ccee22ad233a83524616c17c44c9
|
[
"Apache-2.0"
] | 3,882
|
2015-03-22T12:17:15.000Z
|
2022-03-31T17:13:20.000Z
|
pytype/tools/analyze_project/pytype_runner.py
|
Jrryy/pytype
|
2d2855dc97d5ccee22ad233a83524616c17c44c9
|
[
"Apache-2.0"
] | 638
|
2015-11-03T06:34:44.000Z
|
2022-03-31T23:41:48.000Z
|
pytype/tools/analyze_project/pytype_runner.py
|
Jrryy/pytype
|
2d2855dc97d5ccee22ad233a83524616c17c44c9
|
[
"Apache-2.0"
] | 301
|
2015-08-14T10:21:17.000Z
|
2022-03-08T11:03:40.000Z
|
"""Use pytype to analyze and infer types for an entire project."""
import logging
import os
import subprocess
import sys
from typing import Iterable, Sequence, Tuple
from pytype import file_utils
from pytype import module_utils
from pytype.tools.analyze_project import config
# Generate a default pyi for builtin and system dependencies.
DEFAULT_PYI = """
from typing import Any
def __getattr__(name) -> Any: ...
"""
class Action:
CHECK = 'check'
INFER = 'infer'
GENERATE_DEFAULT = 'generate default'
class Stage:
SINGLE_PASS = 'single pass'
FIRST_PASS = 'first pass'
SECOND_PASS = 'second pass'
FIRST_PASS_SUFFIX = '-1'
def _get_executable(binary, module=None):
"""Get the path to the executable with the given name."""
if binary == 'pytype-single':
custom_bin = os.path.join('out', 'bin', 'pytype')
if sys.argv[0] == custom_bin:
# The Travis type-check step uses custom binaries in pytype/out/bin/.
return [os.path.join(os.path.abspath(os.path.dirname(custom_bin)),
'pytype-single')]
if sys.executable is not None:
return [sys.executable, '-m', module or binary]
else:
return [binary]
PYTYPE_SINGLE = _get_executable('pytype-single', 'pytype.single')
def resolved_file_to_module(f):
"""Turn an importlab ResolvedFile into a pytype Module."""
full_path = f.path
target = f.short_path
path = full_path[:-len(target)]
name = f.module_name
# We want to preserve __init__ in the module_name for pytype.
if os.path.basename(full_path) == '__init__.py':
name += '.__init__'
return module_utils.Module(
path=path, target=target, name=name, kind=f.__class__.__name__)
def deps_from_import_graph(import_graph):
"""Construct PytypeRunner args from an importlab.ImportGraph instance.
Kept as a separate function so PytypeRunner can be tested independently of
importlab.
Args:
import_graph: An importlab.ImportGraph instance.
Returns:
List of (tuple of source modules, tuple of direct deps) in dependency order.
"""
def get_filenames(node):
if isinstance(node, str):
return (node,)
else:
# Make the build as deterministic as possible to minimize rebuilds.
return tuple(sorted(node.nodes))
def make_module(filename):
return resolved_file_to_module(import_graph.provenance[filename])
modules = []
for node, deps in reversed(import_graph.deps_list()):
files = tuple(
make_module(f) for f in get_filenames(node) if not _is_type_stub(f))
# flatten and dedup
seen = set()
final_deps = []
for dep in deps:
for d in get_filenames(dep):
if d in seen:
continue
seen.add(d)
if not _is_type_stub(d):
final_deps.append(make_module(d))
if files:
modules.append((files, tuple(final_deps)))
return modules
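# Shape sketch (illustrative, not from the original source): each entry pairs a tuple
# of source modules with a tuple of their direct, non-stub dependencies, roughly
#
#     [((<Module foo.py>,), ()),
#      ((<Module bar.py>, <Module baz.py>), (<Module foo.py>,))]
#
# where bar.py and baz.py form an import cycle and both depend on foo.py.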
def _is_type_stub(f):
_, ext = os.path.splitext(f)
return ext in ('.pyi', '.pytd')
def _module_to_output_path(mod):
"""Convert a module to an output path."""
path, _ = os.path.splitext(mod.target)
if path.replace(os.path.sep, '.').endswith(mod.name):
# Preferentially use the short path.
return path[-len(mod.name):]
else:
# Fall back to computing the output path from the name, which is a last
# resort because it messes up hidden files. Since such files aren't valid
# python packages anyway, we preserve any leading '.' in order to not
# create a file directly in / (which would likely cause a crash with a
# permission error) and let the rest of the path be mangled.
return mod.name[0] + mod.name[1:].replace('.', os.path.sep)
def get_imports_map(deps, module_to_imports_map, module_to_output):
"""Get a short path -> full path map for the given deps."""
imports_map = {}
for m in deps:
if m in module_to_imports_map:
imports_map.update(module_to_imports_map[m])
imports_map[_module_to_output_path(m)] = module_to_output[m]
return imports_map
class PytypeRunner:
"""Runs pytype over an import graph."""
def __init__(self, conf, sorted_sources):
self.filenames = set(conf.inputs) # files to type-check
# all source modules as a sequence of (module, direct_deps)
self.sorted_sources = sorted_sources
self.python_version = conf.python_version
self.pyi_dir = os.path.join(conf.output, 'pyi')
self.imports_dir = os.path.join(conf.output, 'imports')
self.ninja_file = os.path.join(conf.output, 'build.ninja')
self.custom_options = [
(k, getattr(conf, k)) for k in set(conf.__slots__) - set(config.ITEMS)]
self.keep_going = conf.keep_going
self.jobs = conf.jobs
def set_custom_options(self, flags_with_values, binary_flags):
"""Merge self.custom_options into flags_with_values and binary_flags."""
for dest, value in self.custom_options:
arg_info = config.get_pytype_single_item(dest).arg_info
if arg_info.to_command_line:
value = arg_info.to_command_line(value)
if isinstance(value, bool):
if value:
binary_flags.add(arg_info.flag)
else:
binary_flags.discard(arg_info.flag)
elif value:
flags_with_values[arg_info.flag] = str(value)
def get_pytype_command_for_ninja(self, report_errors):
"""Get the command line for running pytype."""
exe = PYTYPE_SINGLE
flags_with_values = {
'--imports_info': '$imports',
'-V': self.python_version,
'-o': '$out',
'--module-name': '$module',
}
binary_flags = {
'--quick',
'--analyze-annotated' if report_errors else '--no-report-errors',
'--nofail',
}
if report_errors:
self.set_custom_options(flags_with_values, binary_flags)
# Order the flags so that ninja recognizes commands across runs.
return (
exe +
list(sum(sorted(flags_with_values.items()), ())) +
sorted(binary_flags) +
['$in']
)
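  # Example (illustrative): for report_errors=True and python_version 3.9, the
  # assembled rule command looks roughly like
  #
  #     pytype-single --imports_info $imports --module-name $module -V 3.9 -o $out \
  #         --analyze-annotated --nofail --quick $in
  #
  # with any custom per-project options merged in before the flags are sorted.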
def make_imports_dir(self):
try:
file_utils.makedirs(self.imports_dir)
except OSError:
logging.error('Could not create imports directory: %s', self.imports_dir)
return False
return True
def write_default_pyi(self):
"""Write a default pyi file."""
output = os.path.join(self.imports_dir, 'default.pyi')
with open(output, 'w') as f:
f.write(DEFAULT_PYI)
return output
def write_imports(self, module_name, imports_map, suffix):
"""Write a .imports file."""
output = os.path.join(self.imports_dir, module_name + '.imports' + suffix)
with open(output, 'w') as f:
for item in imports_map.items():
f.write('%s %s\n' % item)
return output
def get_module_action(self, module):
"""Get the action for the given module.
Args:
module: A module_utils.Module object.
Returns:
An Action object, or None for a non-Python file.
"""
f = module.full_path
# Report errors for files we are analysing directly.
if f in self.filenames:
action = Action.CHECK
report = logging.warning
else:
action = Action.INFER
report = logging.info
# For builtin and system files not in pytype's own pytype_extensions
# library, do not attempt to generate a pyi.
if (not module.name.startswith('pytype_extensions.') and
module.kind in ('Builtin', 'System')):
action = Action.GENERATE_DEFAULT
report('%s: %s module %s', action, module.kind, module.name)
return action
def yield_sorted_modules(self) -> Iterable[
Tuple[module_utils.Module, str, Sequence[module_utils.Module], str]]:
"""Yield modules from our sorted source files."""
for group, deps in self.sorted_sources:
modules = []
for module in group:
action = self.get_module_action(module)
if action:
modules.append((module, action))
if len(modules) == 1:
# TODO(b/73562531): Remove the pytype disable once the bug is fixed.
yield modules[0] + (deps, Stage.SINGLE_PASS) # pytype: disable=bad-return-type
else:
# If we have a cycle we run pytype over the files twice. So that we
# don't fail on missing dependencies, we'll ignore errors the first
# time and add the cycle itself to the dependencies the second time.
second_pass_deps = []
for module, action in modules:
second_pass_deps.append(module)
if action == Action.CHECK:
action = Action.INFER
yield module, action, deps, Stage.FIRST_PASS
deps += tuple(second_pass_deps)
for module, action in modules:
# We don't need to run generate_default twice
if action != Action.GENERATE_DEFAULT:
yield module, action, deps, Stage.SECOND_PASS
def write_ninja_preamble(self):
"""Write out the pytype-single commands that the build will call."""
with open(self.ninja_file, 'w') as f:
for action, report_errors in ((Action.INFER, False),
(Action.CHECK, True)):
command = ' '.join(
self.get_pytype_command_for_ninja(report_errors=report_errors))
logging.info('%s command: %s', action, command)
f.write(
'rule {action}\n'
' command = {command}\n'
' description = {action} $module\n'.format(
action=action, command=command)
)
def write_build_statement(self, module, action, deps, imports, suffix):
"""Write a build statement for the given module.
Args:
module: A module_utils.Module object.
action: An Action object.
deps: The module's dependencies.
imports: An imports file.
suffix: An output file suffix.
Returns:
The expected output of the build statement.
"""
output = os.path.join(self.pyi_dir,
_module_to_output_path(module) + '.pyi' + suffix)
logging.info('%s %s\n imports: %s\n deps: %s\n output: %s',
action, module.name, imports, deps, output)
with open(self.ninja_file, 'a') as f:
f.write('build {output}: {action} {input}{deps}\n'
' imports = {imports}\n'
' module = {module}\n'.format(
output=output,
action=action,
input=module.full_path,
deps=' | ' + ' '.join(deps) if deps else '',
imports=imports,
module=module.name))
return output
def setup_build(self):
"""Write out the full build.ninja file.
Returns:
All files with build statements.
"""
if not self.make_imports_dir():
return set()
default_output = self.write_default_pyi()
self.write_ninja_preamble()
files = set()
module_to_imports_map = {}
module_to_output = {}
for module, action, deps, stage in self.yield_sorted_modules():
if files >= self.filenames:
logging.info('skipped: %s %s (%s)', action, module.name, stage)
continue
if action == Action.GENERATE_DEFAULT:
module_to_output[module] = default_output
continue
if stage == Stage.SINGLE_PASS:
files.add(module.full_path)
suffix = ''
elif stage == Stage.FIRST_PASS:
suffix = FIRST_PASS_SUFFIX
else:
assert stage == Stage.SECOND_PASS
files.add(module.full_path)
suffix = ''
imports_map = module_to_imports_map[module] = get_imports_map(
deps, module_to_imports_map, module_to_output)
imports = self.write_imports(module.name, imports_map, suffix)
# Don't depend on default.pyi, since it's regenerated every time.
deps = tuple(module_to_output[m] for m in deps
if module_to_output[m] != default_output)
module_to_output[module] = self.write_build_statement(
module, action, deps, imports, suffix)
return files
def build(self):
"""Execute the build.ninja file."""
# -k N keep going until N jobs fail (0 means infinity)
# -C DIR change to DIR before doing anything else
# -j N run N jobs in parallel (0 means infinity)
# -v show all command lines while building
k = '0' if self.keep_going else '1'
# relpath() prevents possibly sensitive directory info from appearing in
# ninja's "Entering directory" message.
c = os.path.relpath(os.path.dirname(self.ninja_file))
command = _get_executable('ninja') + [
'-k', k, '-C', c, '-j', str(self.jobs)]
if logging.getLogger().isEnabledFor(logging.INFO):
command.append('-v')
ret = subprocess.call(command)
print('Leaving directory %r' % c)
return ret
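  # Example (illustrative): with keep_going=False and jobs=4 the invocation is roughly
  # `ninja -k 1 -C <output dir> -j 4`, plus `-v` when INFO logging is enabled.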
def run(self):
"""Run pytype over the project."""
logging.info('------------- Starting pytype run. -------------')
files_to_analyze = self.setup_build()
num_sources = len(self.filenames & files_to_analyze)
print('Analyzing %d sources with %d local dependencies' %
(num_sources, len(files_to_analyze) - num_sources))
ret = self.build()
if not ret:
print('Success: no errors found')
return ret
| 34.904
| 87
| 0.648942
|
4a171fe3d2594c8c5e42f6813346337941f4c980
| 263
|
py
|
Python
|
3.py
|
maruf212000/Python_Assignment_3
|
dfedb06ea5f73475c51467577622cb63f8f3888e
|
[
"MIT"
] | null | null | null |
3.py
|
maruf212000/Python_Assignment_3
|
dfedb06ea5f73475c51467577622cb63f8f3888e
|
[
"MIT"
] | null | null | null |
3.py
|
maruf212000/Python_Assignment_3
|
dfedb06ea5f73475c51467577622cb63f8f3888e
|
[
"MIT"
] | null | null | null |
#GitHub.com/tanujdey7
try:
num1 = int(input("Enter Number 1: "))
num2 = int(input("Enter Number 2: "))
ans = num1/num2
print(ans)
except ValueError:
print("Enter Numeric Value")
except Exception as e:
print("Can't Divide by Zero "+str(e))
| 23.909091
| 41
| 0.642586
|
4a172013014bc7bdf41f2acd48041f76b67b0048
| 679
|
py
|
Python
|
src/users/models.py
|
BikoCodes/movie-api-clone
|
cc7b11c44df041e05f5b47deb29f56542b7a3089
|
[
"MIT"
] | null | null | null |
src/users/models.py
|
BikoCodes/movie-api-clone
|
cc7b11c44df041e05f5b47deb29f56542b7a3089
|
[
"MIT"
] | null | null | null |
src/users/models.py
|
BikoCodes/movie-api-clone
|
cc7b11c44df041e05f5b47deb29f56542b7a3089
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
"""Default user for movie-api."""
#: First and last name do not cover name patterns around the globe
name = CharField(_("Name of User"), blank=True, max_length=255)
first_name = None # type: ignore
last_name = None # type: ignore
def get_absolute_url(self):
"""Get url for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"username": self.username})
| 29.521739
| 74
| 0.681885
|