#!/usr/local/bin/python
#############################################################
# Simple MD of Lennard Jones charged or uncharged particles #
# Alexandre Bonvin, Aalt Jan van Dijk, Utrecht University #
# Updated to a singular script with a modern look by Douwe #
# Schulte, Utrecht University (2022) #
# #
# Adapted from a script from Patrick Fuchs, Uni. Paris VI #
#############################################################
##################
# Import modules #
##################
from math import sqrt,log,sin,cos,pi
from random import random,seed
from enum import Enum
from tkinter import Tk, Canvas, DoubleVar, StringVar
from tkinter.ttk import Label, Button, Style, Frame, Notebook, Entry
import sys
#####################
# Define parameters #
#####################
nAtoms = 20 # Number of atoms
Radius = 25.0 # Beware that Radius must be in a good range (according to nAtoms)
# In order to be able to place all atoms
Mass = 10.0 # Atom mass
Rmin = 2.24 * Radius # Distance at which the LJ potential is minimal
BoxDim = [500,500] # Box dimension
Atom_Coord = [] # List of the form : [nAtoms][2]
Epsilon = 2 * Radius # Well depth
Dielec = 1.0 # Dielectric constant
qat = 2 * Radius # Atom absolute charge
frac_neg = 0.5 # Fraction negative charges
OverlapFr = 0.0 # Fraction of overlap allowed
CutOff = 250 # Non-bonded cutoff
CutOffSquare = CutOff**2 # Precalculated square
speed = 5 # Canvas update speed
cstboltz = 0.00198722 # Boltzmann's constant in kcal/mol/K
cstboltz = 1000*cstboltz/4.18 # In kJ/mol/K
Seed = 42 # Random number seed
# Steepest Descent parameters
drinit = 1.00 # dr from EM
drmin = 0.00001 # Minimum dr value to step EM
drmax = 5.00 # Maximum dr
alpha = 1.05 # Scaling factor for dr if Enew < Eold
beta = 0.90 # Scaling factor for dr if Enew > Eold
deltaE = 0.001 # Energy difference threshold to stop EM
normFmin = 0.001 # Minimum force norm to step EM
# Verlet parameters
Temperature = 300.0 # Temperature in K
timestep = 5.0E-3 # MD time step
# Set specific behaviour for practical session 4
CalculateEnergyPeriodic = True # Practical #4 part 1
ShowOtherEnergyCutoffResults = False # Practical #4 part 2
# Additional program specific parameters
Minimizers = Enum("Minimizers", "SteepestDescent Verlet")
Minimizer = Minimizers.Verlet
drstep = drinit
Iterations = 0
canvas_event = None
Color = []
ATOM = []
##############################
# Steepest descent minimizer #
##############################
def steepest_descent(atom_coord,drstep,forces):
"""
    This function gets as input parameters:
    - atom_coord, a vector containing the x and y positions and the charge of the atoms
    - drstep, the displacement for the minimizer
    - forces, a vector containing the x and y components of the force on the atoms
    The function returns a list (vector) containing the new positions
    Implement the steepest descent algorithm in the loop below over all atoms
    A few hints:
    - powers in python are given by **, e.g.: x to the square is x**2
    - squared root of x: sqrt(x)
    - avoid dividing by zero
"""
new_positions=[]
# 1) First calculate the norm of the total force vector
normf = 0.0
for force in forces:
normf=normf+force[0]**2.0+force[1]**2.0
normf=sqrt(normf)
    if normf == 0: return atom_coord, normf # nothing to move, and avoids dividing by zero below
# 2) Then move the particles
for (coord, force) in zip(atom_coord, forces):
r0x=coord[0] # Coordinates
r0y=coord[1]
# Insert below the lines defining the new coordinates based on the old ones + forces + drstep.
#
# Forces are contained in force[0] for the x force component and force[1] for the y force.
# The step size for the move is given by drstep.
#
# ====>>>>>
sx=force[0]/normf
sy=force[1]/normf
r0xnew=r0x+drstep*sx
r0ynew=r0y+drstep*sy
# <<<<<====
new_positions.append([r0xnew,r0ynew,coord[2]])
return new_positions,normf
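# A minimal sketch (never called by the program) of a single steepest descent
# step using the function above; coordinates and forces are made-up numbers.
def _example_steepest_descent_step():
    coords = [[0.0, 0.0, 1.0], [10.0, 0.0, -1.0]]  # [x, y, charge]
    forces = [[-3.0, 4.0], [3.0, -4.0]]            # total force norm = sqrt(50)
    # Each atom moves by drstep along its force, scaled by 1/norm of the total force
    new_coords, normf = steepest_descent(coords, 1.0, forces)
    return new_coords, normf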
#####################
# Verlet integrator #
#####################
def verlet(atom_coord,forces,dtstep,old_atom_coord,mass):
"""
    This function gets as input parameters:
    - `atom_coord`, a vector containing the x and y positions and the charge of the atoms
    - `old_atom_coord`, a vector containing the x and y positions from the previous MD step
    - `forces`, a vector containing the x and y components of the force on the atoms
    - `dtstep`, the integration time step
    The function returns a list containing the new positions.
    Implement the Verlet MD algorithm in the loop below, between the arrows.
    A few hints:
    - Powers in python are given by **, e.g.: x to the square is `x**2`
    - Squared root of x: `sqrt(x)`
    - Indents are important in python
"""
new_positions=[]
for coord,old_coord,force in zip(atom_coord, old_atom_coord, forces):
r0x=coord[0] # Coordinates
r0y=coord[1]
old_r0x=old_coord[0] # Old coordinates
old_r0y=old_coord[1]
# Insert below the lines defining the new x and y positions based on the old ones + forces + mass + dtstep.
#
# Forces are contained in force[0] for the x force component and force[1] for the y force.
# The step size for the move is given by dtstep.
#
# ====>>>>>
new_r0x = 2*r0x - old_r0x + force[0]/mass * dtstep**2
new_r0y = 2*r0y - old_r0y + force[1]/mass * dtstep**2
# <<<<<====
new_positions.append([new_r0x,new_r0y,coord[2]])
return new_positions
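# The update above is the position (Stoermer-Verlet) recurrence
#   r(t+dt) = 2*r(t) - r(t-dt) + (F/m)*dt**2
# obtained by adding the Taylor expansions of r(t+dt) and r(t-dt): the
# odd-order terms (velocity, dt**3) cancel, leaving an O(dt**4) local error.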
def verlet_step1(atom_coord,velocity,forces,dtstep,mass):
"""The first step for Verlet"""
global Ene,EneLJ,EneCoul,ELJ2,ELJ4
new_positions=[]
for coord, vel, force in zip(atom_coord, velocity, forces):
r0x=coord[0]+dtstep*vel[0]+0.5*dtstep**2*force[0]/mass
r0y=coord[1]+dtstep*vel[1]+0.5*dtstep**2*force[1]/mass
new_positions.append([r0x,r0y,coord[2]])
Ene,EneLJ,EneCoul,ELJ2,ELJ4 = calculate_energy(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
return new_positions
def calculate_velocities(old_atom_coord,atom_coord,dtstep):
"""Calculate velocities based on old and new positions"""
velocities=[]
for coord, old_coord in zip(atom_coord, old_atom_coord):
v0x=(coord[0]-old_coord[0])/(2*dtstep)
v0y=(coord[1]-old_coord[1])/(2*dtstep)
velocities.append([v0x,v0y])
return velocities
##########################
# Move particles with MD #
##########################
def simulate():
"""Execute the simulation"""
global Atom_Coord,Radius,Mass,BoxDim,Epsilon,Rmin,CutOffSquare,Iterations,Ene,Old_Atom_Coord
global Velocity,timestep,report_var_total,report_var_subenergies, drstep, Ene_prev
global Color,report_var_time,Dielec,root,atom_canvas,speed,canvas_event
Force = calculate_force(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
tmp=Atom_Coord
if Minimizer == Minimizers.SteepestDescent:
if Iterations == 0: Ene_prev=Ene
Atom_Coord, normF=steepest_descent(Atom_Coord,drstep,Force)
if Minimizer == Minimizers.Verlet:
if Iterations == 0:
Old_Atom_Coord=Atom_Coord
Atom_Coord=verlet_step1(Atom_Coord,Velocity,Force,timestep,Mass)
Atom_Coord=verlet(Atom_Coord,Force,timestep,Old_Atom_Coord,Mass)
Velocity=calculate_velocities(Old_Atom_Coord,Atom_Coord,timestep)
Old_Atom_Coord=tmp
Ene,EneLJ,EneCoul,ELJ2,ELJ4 = calculate_energy(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
Kin,temperature=calculate_temperature(Velocity,nAtoms,cstboltz,Mass)
# Update drstep
if Minimizer == Minimizers.SteepestDescent:
if Ene < Ene_prev:
drstep = min(drmax, drstep * alpha)
else:
drstep = drstep * beta
# Update top labels
report_var_time.set("Step: %d Time: %8.3f" % (Iterations,float(Iterations)*timestep))
report_var_total.set("Etot: %6.1f Ekin: %6.1f Epot: %6.1f" % (Ene+Kin,Kin,Ene))
if ShowOtherEnergyCutoffResults:
report_var_subenergies.set("Elj: %6.2f Elj2: %6.2f Elj4: %6.2f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,ELJ2,ELJ4,EneCoul,temperature))
else:
report_var_subenergies.set("Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,EneCoul,temperature))
# Apply boundary conditions
for coord, old_coord in zip(Atom_Coord, Old_Atom_Coord):
for i in range(2): # i=0 -> case x coordinate ; i=1 -> case y coordinate
if coord[i] < 0:
coord[i] += BoxDim[i]
old_coord[i] += BoxDim[i]
if coord[i] > BoxDim[i]:
coord[i] -= BoxDim[i]
old_coord[i] -= BoxDim[i]
# Draw new canvas coordinates
for atom, coord in zip(ATOM, Atom_Coord):
x, y = coord[0], coord[1]
atom_canvas.coords(atom, x + Radius, y + Radius, x - Radius, y - Radius)
# Print to terminal window
if Iterations % 20 == 0:
if ShowOtherEnergyCutoffResults:
print("Step: %4d Time: %8.3f Etot: %6.1f Ekin: %6.1f Epot: %6.1f Elj: %6.1f Elj2: %6.1f Elj4: %6.1f Ecoul: %6.1f Temp: %6.1f" % (Iterations,float(Iterations)*timestep,Ene+Kin,Kin,Ene,EneLJ,ELJ2,ELJ4,EneCoul,temperature))
else:
print("Step: %4d Time: %8.3f Etot: %6.1f Ekin: %6.1f Epot: %6.1f Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (Iterations,float(Iterations)*timestep,Ene+Kin,Kin,Ene,EneLJ,EneCoul,temperature))
# Stopping conditions
if Minimizer == Minimizers.SteepestDescent and (abs(Ene - Ene_prev) < deltaE or drstep < drmin or normF < normFmin):
print("STOPPING... deltaE <",deltaE,", or drstep <",drmin,", or normF <",normFmin)
outtext="Step: %4d Epot: %6.1f Elj: %6.1f Ecoul: %6.1f deltaE: %10.6f <normF>: %8.6f dr: %8.6f" % (Iterations,Ene,EneLJ,EneCoul,Ene - Ene_prev,normF,drstep)
print(outtext)
elif temperature > 1000000:
print("The system is exploding !!!")
print("Step: %4d Time: %8.3f" % (Iterations,float(Iterations)*timestep))
print("Etot: %6.1f Ekin: %6.1f Epot: %6.1f" % (Ene+Kin,Kin,Ene))
print("Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,EneCoul,temperature))
print("Emergency stop")
else:
Ene_prev=Ene
Iterations=Iterations+1
canvas_event=atom_canvas.after(speed,simulate)
####################
# Energy functions #
####################
# Calculate Lennard Jones from the squared distance
def LJ2(r2, epsilon, sigma6):
# Uncomment the following lines to get a more obvious mathematical implementation
# r = sqrt(r2)
# sigma = sigma6**(1/6)
# return epsilon*((sigma/r)**12 - (sigma/r)**6)
# The following implementation is significantly faster so this is the default
Z = (1/r2)**3 * sigma6
return epsilon * Z * (Z-1)
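# Sanity sketch (never called): with Z = (sigma/r)**6 the fast form above equals
# the textbook Lennard-Jones expression epsilon*((sigma/r)**12 - (sigma/r)**6).
def _check_lj2_forms(r2=2.0, epsilon=1.0, sigma6=1.0):
    r = sqrt(r2)
    sigma = sigma6**(1/6)
    obvious = epsilon*((sigma/r)**12 - (sigma/r)**6)
    return abs(obvious - LJ2(r2, epsilon, sigma6)) < 1e-12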
# Classical Coulomb from the squared distance
def Coulomb2(r2,dielec,qa,qb):
    return qa*qb/(dielec*sqrt(r2))
# Calculate energy Evdw + Ecoulomb (used squared distance)
def calculate_energy(coord,epsilon,rmin,dielec,cutoffsquare,boxdim,elec=1):
global CalculateEnergyPeriodic
    cutoff2=2.0*rmin; cutoff2sq=cutoff2**2 # Extra LJ cutoff at 2*rmin (practical #4 part 2)
    cutoff4=4.0*rmin; cutoff4sq=cutoff4**2 # Extra LJ cutoff at 4*rmin (practical #4 part 2)
Ene = 0.0; distsquare = 0
ELJ = 0.0; ECoul=0.0
ELJ2 = 0.0; ELJ4 = 0.0
rmin_exp6 = rmin**6
# Doubly nested loop over all particle pairs
for i in range(len(coord)-1):
for j in range(i+1,len(coord)):
# Calculate the squared atomic distance
distsquare = 0
for k in range(2):
tmp = coord[j][k] - coord[i][k]
# Chooses the nearest image
if CalculateEnergyPeriodic:
halfbox = boxdim[k]/2
tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
distsquare += tmp**2
# Compute vdw and Coulomb energy
if distsquare < cutoffsquare:
qa = coord[i][2]
qb = coord[j][2]
vdw = LJ2(distsquare, epsilon, rmin_exp6)
Ene += vdw
ELJ += vdw
if elec:
CC = Coulomb2(distsquare,dielec,qa,qb)
Ene+=CC
ECoul+=CC
if distsquare < cutoff4sq:
ELJ4 += vdw
if distsquare < cutoff2sq:
ELJ2 += vdw
return Ene,ELJ,ECoul,ELJ2,ELJ4
# Calculate kinetic energy and temperature
def calculate_temperature(vel,nat,k,mass):
v2=0.0
for velocity in vel:
v2=v2+velocity[0]**2+velocity[1]**2
    kin=0.5*mass*v2 # Kinetic energy equals 0.5*m*v**2
    temp=kin/(nat*k) # N*k*T = Kinetic Energy
return kin,temp
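# Equipartition: each of the N atoms carries two quadratic (x and y) velocity
# degrees of freedom at k*T/2 each, so in 2D <Ekin> = N*k*T and T = Ekin/(N*k).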
###################
# Force functions #
###################
# Force LJ (use squared distance)
def calculate_lennard_jones(distsquare, epsilon, rmin_exp6,xi):
rij=sqrt(distsquare)
Z = (1/distsquare)**3 * rmin_exp6
dedz=epsilon*(2*Z-1)
dzdr=rmin_exp6*(-6.0/rij**(7.0))
drdx=xi/rij
return dedz*dzdr*drdx
# Force Coulomb (use squared distance)
def calculate_coulomb(distsquare,dielec,qa,qb,xi):
rij=sqrt(distsquare)
dedr=-1.0*(qa*qb/dielec)*(1/distsquare)
drdx=xi/rij
return dedr*drdx
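# Note on signs: both routines above return dE/dr * (xj-xi)/r for atom i.
# Since dr/dx_i = -(xj-xi)/r, this already equals the force component
# F_i = -dE/dx_i; no extra minus sign is needed in calculate_force below.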
# Calculate force from Evdw + Ecoulomb (uses squared distance)
def calculate_force(coord,epsilon,rmin,dielec,cutoffsquare,boxdim):
Force=[] ; distsquare = 0
rmin_exp6 = rmin**6
# Doubly nested loop over all particle pairs
for i in range(len(coord)):
tmpforce=[0.0,0.0]
for j in range(len(coord)):
if not i==j:
# Calculate the squared atomic distance
distsquare = 0
for k in range(2):
tmp = coord[j][k] - coord[i][k]
# Chooses the nearest image
halfbox = boxdim[k]/2
tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
distsquare += tmp**2
# Compute vdw force
if distsquare < cutoffsquare:
qa = coord[i][2]
qb = coord[j][2]
for k in range(2):
tmp = coord[j][k] - coord[i][k]
ff = calculate_lennard_jones(distsquare, epsilon, rmin_exp6,tmp)
ff += calculate_coulomb(distsquare,dielec,qa,qb,tmp)
tmpforce[k]+=ff
Force.append(tmpforce)
return Force
###################
# Other functions #
###################
# Normal Distance
def dist(A,B):
return sqrt((A[0]-B[0])**2+(A[1]-B[1])**2)
# Change sign
def SignR(a,b):
if b > 0:
return a
else:
return -a
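# SignR implements the minimum image convention used in the energy and force
# loops: for a box of length L,
#   tmp - SignR(L/2, tmp - L/2) - SignR(L/2, tmp + L/2)
# maps a raw separation tmp into (-L/2, L/2]. E.g. with L = 500 a separation
# of 400 becomes -100, the nearer periodic image.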
# Color particles based on charge
def charge_color(charge,qat):
if charge == qat:
return "white"
else:
return "#333333"
##################
# Initialization #
##################
# Generate random coordinates
def InitConf(n,dim,radius,qat,frac_neg):
global Seed
seed(Seed)
print("Initializing box, please wait...", end='')
tmp_coord = []
ntrial = 0
i = 1
# Fix first atom
x = random()*(dim[0]-2*radius)+radius
y = random()*(dim[1]-2*radius)+radius
nneg = int(float(n) * frac_neg)
charge = -qat
if nneg == 0: charge = qat
tmp_coord.append([x,y,charge])
    for negative in [-1, 1]:
        while (negative == -1 and i < nneg) or (negative == 1 and i < n):
x = random()*(dim[0]-2*radius)+radius
y = random()*(dim[1]-2*radius)+radius
            # Check whether the new particle overlaps with an existing one
OVERLAP = False
for j in range(i):
if dist(tmp_coord[j],[x,y]) < (1-OverlapFr)*2*radius:
OVERLAP = True
if not OVERLAP:
tmp_coord.append([x,y,negative * qat])
i += 1
ntrial = ntrial + 1
if ntrial > 100000:
print('error')
print("Initialisation failed")
print("==> Reduce radius or number of atoms")
sys.exit()
print("done")
return tmp_coord
# Generate random charges
def InitCharge(n,qat,frac_neg):
global Atom_Coord
print("Initializing charges, please wait...", end='')
i = 0
nneg = int(float(n) * frac_neg)
charge = -qat
if nneg == 0: charge = qat
Atom_Coord[i][2]=charge
i += 1
while i < nneg:
Atom_Coord[i][2]=-qat
i += 1
while i < n:
Atom_Coord[i][2]=qat
i += 1
print("done")
# Generates initial velocities according to Maxwell distribution
def InitVel(n,temperature,cstboltz,mass):
global Seed
seed(Seed)
stdev=sqrt(cstboltz*temperature/mass)
print("Initializing velocities, please wait...", end='')
tmp_vel=[]
for i in range(n):
# Generate random numbers according to Gaussian:
        r1=random()
        r2=random()
        x1=sqrt(-2.0*log(r1))*cos(2.0*pi*r2)*stdev
        x2=sqrt(-2.0*log(r1))*sin(2.0*pi*r2)*stdev
tmp_vel.append([x1,x2])
# Remove overall motion
vxt=0.0
vyt=0.0
for item in tmp_vel:
vxt+=item[0]
vyt+=item[1]
for item in tmp_vel:
item[0] -= vxt/float(n)
item[1] -= vyt/float(n)
# Scaling factor is used to get temperature exactly equal to desired temperature
kin,tt=calculate_temperature(tmp_vel,n,cstboltz,mass)
scaling=sqrt(temperature/tt)
vel=[]
for item in tmp_vel:
vx=item[0]*scaling
vy=item[1]*scaling
vel.append([vx,vy])
print("done")
return vel
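# The loop above draws each velocity component from a Gaussian of standard
# deviation sqrt(k*T/m) via the Box-Muller transform, removes the overall
# centre-of-mass drift, and finally rescales so that the instantaneous
# temperature matches the requested one exactly.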
########################################
# Various functions for input + layout #
########################################
# Setup system
def set_up_atoms(repack=1):
global Iterations,Velocity,Temperature,Mass,cstboltz,atom_canvas,ATOM,Atom_Coord,Color
ATOM = []
if repack==1:
Atom_Coord = InitConf(nAtoms,BoxDim,Radius,qat,frac_neg)
Color = []
for i in range(nAtoms):
Color.append(charge_color(Atom_Coord[i][2],qat))
Velocity=InitVel(nAtoms,Temperature,cstboltz,Mass)
if repack==2:
InitCharge(nAtoms,qat,frac_neg)
Color = []
for i in range(nAtoms):
Color.append(charge_color(Atom_Coord[i][2],qat))
for (color, atom) in zip(Color, Atom_Coord):
x, y = atom[0], atom[1]
ATOM.append(atom_canvas.create_oval(x + Radius,y + Radius,x - Radius,y - Radius,fill=color))
update_energy()
# Set number of particles
def set_r(event):
global nAtoms
nAtoms=int(r.get())
update_canvas()
# Set atom Radius
def set_size(event):
global Radius,Rmin
Radius=int(size.get())
    Rmin = 2.24 * Radius
update_canvas()
# Set epsilon for Lennard-Jones
def set_vdw1(event):
global Epsilon
Epsilon=int(vdw1.get())
update_canvas(0)
# Set sigma for Lennard-Jones
def set_vdw2(event):
global Rmin
Rmin=int(vdw2.get())
update_canvas(0)
# Set charge fraction
def set_frac(event):
global frac_neg
frac_neg=float(frac.get())
update_canvas(2)
# Set particle charge
def set_q(event):
global qat
qat=float(q.get())
update_canvas(2)
# Set dielectric constant
def set_diel(event):
global Dielec
Dielec=float(diel.get())
update_canvas(0)
# Set Temperature
def set_temp(event):
global Temperature
Temperature=float(temp.get())
update_canvas(0)
# Set integration time step
def set_tstep(event):
global timestep,Velocity,nAtoms,Temperature,cstboltz,Mass
timestep=float(tstep.get())
update_canvas(0)
Velocity=InitVel(nAtoms,Temperature,cstboltz,Mass)
# Set minimum Force norm difference for stop condition
def set_dFmin(event):
global normFmin
normFmin=float(Fmin.get())
update_canvas(0)
# Set minimum Energy difference for stop condition
def set_deltaE(event):
global deltaE
deltaE=float(Emin.get())
update_canvas(0)
# Set initial displacement for minimizer
def set_dxstep(event):
global drinit
drinit=float(dxstep.get())
update_canvas(0)
# Set alpha factor for increasing dr
def set_alpha(event):
global alpha
alpha=float(alphafactor.get())
update_canvas(0)
# Set beta factor for decreasing dr
def set_beta(event):
global beta
beta=float(betafactor.get())
update_canvas(0)
# Update energy
def update_energy():
global Atom_Coord,BoxDim,Epsilon,Rmin,CutOffSquare,Iterations,Ene
global Dielec,Velocity,Mass,cstboltz
Ene,EneLJ,EneCoul,ELJ2,ELJ4 = calculate_energy(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
Kin,temperature=calculate_temperature(Velocity,nAtoms,cstboltz,Mass)
report_var_time.set("Step: %d Time: %8.3f" % (Iterations,float(Iterations)*timestep))
report_var_total.set("Etot: %6.1f Ekin: %6.1f Epot: %6.1f" % (Ene+Kin,Kin,Ene))
if ShowOtherEnergyCutoffResults:
report_var_subenergies.set("Elj: %6.2f Elj2: %6.2f Elj4: %6.2f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,ELJ2,ELJ4,EneCoul,temperature))
else:
report_var_subenergies.set("Elj: %6.1f Ecoul: %6.1f Temp: %6.1f" % (EneLJ,EneCoul,temperature))
def update_canvas(repack=1):
global Iterations, atom_canvas, Atom_Coord, ATOM, Color
atom_canvas.delete("all")
set_up_atoms(repack)
update_energy()
Iterations = 0
################
# MAIN PROGRAM #
################
def die(event=None):
    sys.exit()
def select_minimizer(*args):
global Minimizer, minimizer_selector
reset()
if minimizer_selector.index('current') == 0: # First tab is Steepest Descent
Minimizer = Minimizers.SteepestDescent
selected_method_text.set("Active method: Steepest Descent")
else:
Minimizer = Minimizers.Verlet
selected_method_text.set("Active method: Verlet")
def start():
global canvas_event
if canvas_event==None:
simulate()
def stop():
global canvas_event
if canvas_event != None: atom_canvas.after_cancel(canvas_event)
canvas_event = None
update_canvas(0)
def reset():
global canvas_event
if canvas_event != None: atom_canvas.after_cancel(canvas_event)
canvas_event = None
update_canvas()
root = Tk()
root.winfo_toplevel().title("MolMod Practical")
root.bind("<Escape>", die)
root.bind('<Control-c>', die)
top=Frame(root)
top.pack(side='top')
title=Frame(top)
title.pack(side='top')
labels=Frame(top)
labels.pack(side='top')
buttons=Frame(top)
buttons.pack(side='bottom')
atom_canvas=Canvas(root, width=BoxDim[0], height=BoxDim[1],bg="#ccddff")
atom_canvas.pack()
minimizer_selector=Notebook(root)
minimizer_selector.pack(side='bottom')
steepest_descent_pack=Frame(minimizer_selector)
steepest_descent_pack.pack(side='top')
verlet_pack=Frame(minimizer_selector)
verlet_pack.pack(side='top')
Style().configure("Notebook", foreground="black")
minimizer_selector.add(steepest_descent_pack, text="Steepest Descent")
minimizer_selector.add(verlet_pack, text="Verlet")
minimizer_selector.bind("<<NotebookTabChanged>>", select_minimizer)
minimizer_selector.select(1)
selected_method=Frame(root)
selected_method.pack(side='bottom')
low2=Frame(root)
low2.pack(side='bottom')
low1=Frame(root)
low1.pack(side='bottom')
r=DoubleVar()
size=DoubleVar()
vdw1=DoubleVar()
vdw2=DoubleVar()
frac=DoubleVar()
diel=DoubleVar()
q=DoubleVar()
temp=DoubleVar()
tstep=DoubleVar()
Emin=DoubleVar()
Fmin=DoubleVar()
alphafactor=DoubleVar()
betafactor=DoubleVar()
dxstep=DoubleVar()
# Create an entry with a label
def create_entry(pack, text, var, bound_var, callback):
Label(pack,text=text + " =").pack(side='left')
var.set(bound_var)
    entry=Entry(pack,width=6,textvariable=var)
    entry.pack(side='left')
    entry.bind('<Return>', callback)
    entry.bind('<FocusOut>', callback)
# Set up the general parameters
create_entry(low1, "Atoms", r, nAtoms, set_r)
create_entry(low1, "VDW radius", size, Radius, set_size)
create_entry(low1, "VDW ε", vdw1, Epsilon, set_vdw1)
create_entry(low1, "VDW σ", vdw2, Rmin, set_vdw2)
create_entry(low2, "Coulomb param: fraction negative", frac, frac_neg, set_frac)
create_entry(low2, "Charge", q, qat, set_q)
create_entry(low2, "Dielec", diel, Dielec, set_diel)
# Steepest Descent Parameters
create_entry(steepest_descent_pack, "DeltaE threshold", Emin, deltaE, set_deltaE)
create_entry(steepest_descent_pack, "dFmin", Fmin, normFmin, set_dFmin)
create_entry(steepest_descent_pack, "dr init", dxstep, drinit, set_dxstep)
create_entry(steepest_descent_pack, "α", alphafactor, alpha, set_alpha)
create_entry(steepest_descent_pack, "β", betafactor, beta, set_beta)
# Verlet Parameters
create_entry(verlet_pack, "T (K)", temp, Temperature, set_temp)
create_entry(verlet_pack, "Timestep", tstep, timestep, set_tstep)
# Set up title
Label(title,text="EM & MD",foreground='blue',font='times 18 bold').pack(side='left')
# Set up reporting labels
report_var_time = StringVar()
Label(labels,textvariable=report_var_time).pack(side='top')
report_var_total = StringVar()
Label(labels,textvariable=report_var_total).pack(side='top')
report_var_subenergies = StringVar()
Label(labels,textvariable=report_var_subenergies).pack(side='top')
selected_method_text = StringVar()
Label(selected_method,textvariable=selected_method_text).pack(side='top')
# Set up buttons
Style().configure("TButton", padding=1, relief="flat")
Style().configure("Start.TButton", foreground='blue')
Style().configure("Stop.TButton", foreground='red')
Style().configure("Reset.TButton", foreground='green')
Button(buttons,text='Start',command=start,style="Start.TButton").pack(side='left',fill='x')
Button(buttons,text='Stop',command=stop,style="Stop.TButton").pack(side='left')
Button(buttons,text='Reset',command=reset,style="Reset.TButton").pack(side='left')
# Set up the positions of the atoms and start the simulation
set_up_atoms()
print("Click on 'Start' to go ahead")
print("Use <ESC> or 'X' to quit")
root.mainloop()
|
import os
from setuptools import setup, find_packages
version = '0.2.5.2'
description = "A command-line interface to the GitHub Issues API v2."
cur_dir = os.path.dirname(__file__)
try:
long_description = open(os.path.join(cur_dir, 'README.rst')).read()
except IOError:
long_description = description
setup(
name = "github-cli",
version = version,
url = 'http://jsmits.github.com/github-cli',
license = 'BSD',
description = description,
long_description = long_description,
author = 'Sander Smits',
author_email = 'jhmsmits@gmail.com',
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'simplejson'],
entry_points="""
[console_scripts]
ghi = github.issues:main
""",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development :: Bug Tracking',
],
test_suite = 'nose.collector',
)
|
# Generated by Django 3.1.4 on 2021-01-04 04:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='ordermodel',
name='is_shipped',
field=models.BooleanField(default=False),
),
]
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dftfe(CMakePackage):
"""Real-space DFT calculations using Finite Elements"""
homepage = "https://sites.google.com/umich.edu/dftfe/"
url = "https://github.com/dftfeDevelopers/dftfe/archive/0.5.1.tar.gz"
maintainers = ['rmsds']
version('0.6.0', sha256='66b633a3aae2f557f241ee45b2faa41aa179e4a0bdf39c4ae2e679a2970845a1')
version('0.5.2', sha256='9dc4fa9f16b00be6fb1890d8af4a1cd3e4a2f06a2539df999671a09f3d26ec64')
version('0.5.1', sha256='e47272d3783cf675dcd8bc31da07765695164110bfebbbab29f5815531f148c1')
version('0.5.0', sha256='9aadb9a9b059f98f88c7756b417423dc67d02f1cdd2ed7472ba395fcfafc6dcb')
variant('scalapack', default=True, description='Use ScaLAPACK, strongly recommended for problem sizes >5000 electrons')
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release'))
depends_on('mpi')
depends_on('dealii+p4est+petsc+slepc+int64+scalapack+mpi')
depends_on('dealii+p4est+petsc+slepc+int64+scalapack+mpi@9.0.0:', when='@0.5.1:')
depends_on('scalapack', when='+scalapack')
depends_on('alglib')
depends_on('libxc')
depends_on('spglib')
depends_on('libxml2')
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_C_COMPILER={0}'.format(spec['mpi'].mpicc),
'-DCMAKE_CXX_COMPILER={0}'.format(spec['mpi'].mpicxx),
'-DALGLIB_DIR={0}'.format(spec['alglib'].prefix),
'-DLIBXC_DIR={0}'.format(spec['libxc'].prefix),
'-DXML_LIB_DIR={0}/lib'.format(spec['libxml2'].prefix),
'-DXML_INCLUDE_DIR={0}/include'.format(spec['libxml2'].prefix),
'-DSPGLIB_DIR={0}'.format(spec['spglib'].prefix),
]
if spec.satisfies('^intel-mkl'):
args.append('-DWITH_INTEL_MKL=ON')
else:
args.append('-DWITH_INTEL_MKL=OFF')
if spec.satisfies('%gcc'):
args.append('-DCMAKE_C_FLAGS=-fpermissive')
args.append('-DCMAKE_CXX_FLAGS=-fpermissive')
return args
@when('@:0.5.2')
def install(self, spec, prefix):
mkdirp(prefix.bin)
mkdirp(prefix.lib64)
install(join_path(self.build_directory, 'main'),
join_path(prefix.bin, 'dftfe'))
install(join_path(self.build_directory, 'libdftfe.so'),
prefix.lib64)
|
#! /usr/bin/env python
# This file is part of the dvbobjects library.
#
# Copyright 2000-2001, GMD, Sankt Augustin
# -- German National Research Center for Information Technology
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
import string
from dvbobjects.utils import *
from dvbobjects.DSMCC.Message import *
import dvbobjects.DSMCC.BIOP
from Loops import *
######################################################################
class SuperGroup(DownloadServerInitiate):
def __init__(self, PATH, transactionId, version):
self.groups = []
self.PATH = PATH
self.transactionId = transactionId
self.version_number = version
def addGroup(self, group):
self.groups.append(group)
def generate(self, output_dir, SRG_IOR = None):
for group in self.groups:
group.generate(output_dir)
        if SRG_IOR != None:
self.privateData = dvbobjects.DSMCC.BIOP.ServiceGatewayInfo(
srg_ior = SRG_IOR,
)
else:
self.privateData = GroupInfoIndication(self)
sec_file = open("%s/DSI.sec" % output_dir, "wb")
sec_file.write(self.pack())
sec_file.close()
######################################################################
class Group(DownloadInfoIndication):
def __init__(self, PATH, transactionId, version, downloadId, blockSize):
self.modules = []
self.descriptors = []
self.PATH = PATH
self.downloadId = downloadId
self.transactionId = transactionId
self.version_number = version
self.blockSize = blockSize
def addModule(self, mod):
self.modules.append(mod)
def getGroupInfo(self):
return GroupInfo(
groupId = self.transactionId,
groupSize = self.groupSize,
groupCompatibility = self.compatibilityDescriptor,
descriptors = self.descriptors,
)
def generate(self, output_dir):
self.groupSize = 0
for module in self.modules:
module.downloadId = self.downloadId
module.generate(
output_dir,
self.transactionId,
self.blockSize,
)
self.groupSize = self.groupSize + module.moduleSize
self.__mii = mii = ModuleInfoIndication(self)
self.moduleInfoIndication = mii
path = os.path.basename(self.PATH)
stem = os.path.splitext(path)[0]
sec_file = open("%s/%s.sec" % (output_dir, stem), "wb")
sec_file.write(self.pack())
sec_file.close()
######################################################################
class Module(DVBobject):
assocTag = None # i.e. NO Object Carousel
descriptors = []
def generate(self, output_dir, transactionId, blockSize):
lastBlockNumber = -1
self.moduleSize = os.stat(self.INPUT)[6]
        last_section_number = ((self.moduleSize - 1) // blockSize) % 256
        tmp = ((((self.moduleSize - 1) // blockSize) // 256) * 256) - 1
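        # Blocks of blockSize bytes are numbered consecutively, but the DSMCC
        # section_number field is only 8 bits, so blocks are grouped in runs
        # of 256: every full run advertises last_section_number 0xFF below,
        # while blocks past `tmp` (the last index covered by the full runs)
        # belong to the final, possibly partial run and advertise the true
        # last_section_number computed above.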
input = open(self.INPUT, "rb")
while 1:
data = input.read(blockSize)
if not data:
break
lastBlockNumber = lastBlockNumber + 1
if tmp >= lastBlockNumber:
block = DownloadDataBlock(
moduleId = self.moduleId,
moduleVersion = self.moduleVersion,
downloadId = self.downloadId,
data_block = data,
blockNumber = lastBlockNumber,
section_number = lastBlockNumber % 256,
last_section_number = 0xFF,
)
else:
block = DownloadDataBlock(
moduleId = self.moduleId,
moduleVersion = self.moduleVersion,
downloadId = self.downloadId,
data_block = data,
blockNumber = lastBlockNumber,
section_number = lastBlockNumber % 256,
last_section_number = last_section_number,
)
ofn = self.__outputFilename(
self.INPUT,
output_dir,
block.blockNumber)
open(ofn, "wb").write(block.pack())
def __outputFilename(self, input_path, output_dir, blockNumber):
input_basename = os.path.basename(input_path)
input_stem = os.path.splitext(input_basename)[0]
output_basename = "%s_%06d.%s" % (
input_stem,
blockNumber,
"sec",
)
output_path = os.path.join(
output_dir, output_basename)
return output_path
def getModuleInfo(self):
        if self.assocTag != None:
moduleInfo = dvbobjects.DSMCC.BIOP.ModuleInfo(
assocTag = self.assocTag,
userInfo = self.descriptors,
)
else:
moduleInfo = ""
modInfo = ModuleInfo(
moduleId = self.moduleId,
moduleSize = self.moduleSize,
moduleVersion = self.moduleVersion,
descriptors = self.descriptors,
moduleInfo = moduleInfo,
)
return modInfo
|
"""
Copyright 2019-present NAVER Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#-*- coding: utf-8 -*-
def load_label(label_path):
char2index = dict() # [ch] = id
index2char = dict() # [id] = ch
    with open(label_path, 'r') as f: # 'with' automatically closes the file once the block ends
for no, line in enumerate(f):
if line[0] == '#':
continue
index, char, freq = line.strip().split('\t')
char = char.strip()
if len(char) == 0:
char = ' '
char2index[char] = int(index)
index2char[int(index)] = char
return char2index, index2char
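# A hypothetical snippet of the expected label file: tab-separated
# "index<TAB>char<TAB>freq" lines, with '#' comment lines skipped, e.g.
#   0	_	100000
#   1	가	54321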
|
import os
from unittest import TestCase
from django.template import Engine
from .utils import TEMPLATE_DIR
class OriginTestCase(TestCase):
def setUp(self):
self.engine = Engine(dirs=[TEMPLATE_DIR])
def test_origin_compares_equal(self):
a = self.engine.get_template('index.html')
b = self.engine.get_template('index.html')
self.assertEqual(a.origin, b.origin)
# Use assertIs() to test __eq__/__ne__.
self.assertIs(a.origin == b.origin, True)
self.assertIs(a.origin != b.origin, False)
def test_origin_compares_not_equal(self):
a = self.engine.get_template('first/test.html')
b = self.engine.get_template('second/test.html')
self.assertNotEqual(a.origin, b.origin)
# Use assertIs() to test __eq__/__ne__.
self.assertIs(a.origin == b.origin, False)
self.assertIs(a.origin != b.origin, True)
def test_repr(self):
a = self.engine.get_template('index.html')
name = os.path.join(TEMPLATE_DIR, 'index.html')
self.assertEqual(repr(a.origin), '<Origin name=%r>' % name)
|
print("NAME: Maurya Sharma \nE-MAIL: mauryasharma2001@gmail.com \nSLACK USERNAME: @Maurya \nBIOSTACK: Drug Development \nTwitter Handle: @Maurya")
def hamming_distance(a,b):
count=0
for i in range(len(a)):
if a[i] != b[i]:
count +=1
return count
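# Note: hamming_distance assumes both strings have the same length,
# e.g. hamming_distance('abc', 'abd') == 1.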
print(hamming_distance('@Maurya','@Maurya'))
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json, datetime
from frappe import _, scrub
import frappe.desk.query_report
from frappe.utils import cint
from frappe.model.document import Document
from frappe.modules.export_file import export_to_files
from frappe.modules import make_boilerplate
from frappe.core.doctype.page.page import delete_custom_role
from frappe.core.doctype.custom_role.custom_role import get_custom_allowed_roles
from frappe.desk.reportview import append_totals_row
from six import iteritems
from frappe.utils.safe_exec import safe_exec
class Report(Document):
def validate(self):
"""only administrator can save standard report"""
if not self.module:
self.module = frappe.db.get_value("DocType", self.ref_doctype, "module")
if not self.is_standard:
self.is_standard = "No"
if frappe.session.user=="Administrator" and getattr(frappe.local.conf, 'developer_mode',0)==1:
self.is_standard = "Yes"
if self.is_standard == "No":
# allow only script manager to edit scripts
if frappe.session.user!="Administrator":
frappe.only_for('Script Manager', True)
if frappe.db.get_value("Report", self.name, "is_standard") == "Yes":
frappe.throw(_("Cannot edit a standard report. Please duplicate and create a new report"))
if self.is_standard == "Yes" and frappe.session.user!="Administrator":
frappe.throw(_("Only Administrator can save a standard report. Please rename and save."))
if self.report_type == "Report Builder":
self.update_report_json()
def before_insert(self):
self.set_doctype_roles()
def on_update(self):
self.export_doc()
def on_trash(self):
delete_custom_role('report', self.name)
def set_doctype_roles(self):
if not self.get('roles') and self.is_standard == 'No':
meta = frappe.get_meta(self.ref_doctype)
roles = [{'role': d.role} for d in meta.permissions if d.permlevel==0]
self.set('roles', roles)
def is_permitted(self):
"""Returns true if Has Role is not set or the user is allowed."""
from frappe.utils import has_common
allowed = [d.role for d in frappe.get_all("Has Role", fields=["role"],
filters={"parent": self.name})]
custom_roles = get_custom_allowed_roles('report', self.name)
allowed.extend(custom_roles)
if not allowed:
return True
		if has_common(frappe.get_roles(), allowed):
			return True
		return False
def update_report_json(self):
if not self.json:
self.json = '{}'
def export_doc(self):
if frappe.flags.in_import:
return
if self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:
export_to_files(record_list=[['Report', self.name]],
record_module=self.module, create_init=True)
self.create_report_py()
def create_report_py(self):
if self.report_type == "Script Report":
make_boilerplate("controller.py", self, {"name": self.name})
make_boilerplate("controller.js", self, {"name": self.name})
def execute_script_report(self, filters):
# save the timestamp to automatically set to prepared
threshold = 30
res = []
start_time = datetime.datetime.now()
# The JOB
if self.is_standard == 'Yes':
res = self.execute_module(filters)
else:
res = self.execute_script(filters)
# automatically set as prepared
execution_time = (datetime.datetime.now() - start_time).total_seconds()
if execution_time > threshold and not self.prepared_report:
self.db_set('prepared_report', 1)
frappe.cache().hset('report_execution_time', self.name, execution_time)
return res
def execute_module(self, filters):
# report in python module
module = self.module or frappe.db.get_value("DocType", self.ref_doctype, "module")
method_name = get_report_module_dotted_path(module, self.name) + ".execute"
return frappe.get_attr(method_name)(frappe._dict(filters))
def execute_script(self, filters):
# server script
loc = {"filters": frappe._dict(filters), 'data':[]}
safe_exec(self.report_script, None, loc)
return loc['data']
def get_data(self, filters=None, limit=None, user=None, as_dict=False):
columns = []
out = []
if self.report_type in ('Query Report', 'Script Report', 'Custom Report'):
# query and script reports
data = frappe.desk.query_report.run(self.name, filters=filters, user=user)
for d in data.get('columns'):
if isinstance(d, dict):
col = frappe._dict(d)
if not col.fieldname:
col.fieldname = col.label
columns.append(col)
else:
fieldtype, options = "Data", None
parts = d.split(':')
if len(parts) > 1:
if parts[1]:
fieldtype, options = parts[1], None
if fieldtype and '/' in fieldtype:
fieldtype, options = fieldtype.split('/')
columns.append(frappe._dict(label=parts[0], fieldtype=fieldtype, fieldname=parts[0], options=options))
out += data.get('result')
else:
# standard report
params = json.loads(self.json)
if params.get('fields'):
columns = params.get('fields')
elif params.get('columns'):
columns = params.get('columns')
else:
columns = [['name', self.ref_doctype]]
for df in frappe.get_meta(self.ref_doctype).fields:
if df.in_list_view:
columns.append([df.fieldname, self.ref_doctype])
_filters = params.get('filters') or []
if filters:
for key, value in iteritems(filters):
condition, _value = '=', value
if isinstance(value, (list, tuple)):
condition, _value = value
_filters.append([key, condition, _value])
def _format(parts):
				# sort_by is saved as DocType.fieldname, convert it to sql
return '`tab{0}`.`{1}`'.format(*parts)
if params.get('sort_by'):
order_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')
elif params.get('order_by'):
order_by = params.get('order_by')
else:
order_by = _format([self.ref_doctype, 'modified']) + ' desc'
if params.get('sort_by_next'):
order_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')
result = frappe.get_list(self.ref_doctype,
fields = [_format([c[1], c[0]]) for c in columns],
filters=_filters,
order_by = order_by,
as_list=True,
limit=limit,
user=user)
_columns = []
for (fieldname, doctype) in columns:
meta = frappe.get_meta(doctype)
if meta.get_field(fieldname):
field = meta.get_field(fieldname)
else:
field = frappe._dict(fieldname=fieldname, label=meta.get_label(fieldname))
# since name is the primary key for a document, it will always be a Link datatype
if fieldname == "name":
field.fieldtype = "Link"
field.options = doctype
_columns.append(field)
columns = _columns
out = out + [list(d) for d in result]
if params.get('add_totals_row'):
out = append_totals_row(out)
if as_dict:
data = []
for row in out:
if isinstance(row, (list, tuple)):
_row = frappe._dict()
for i, val in enumerate(row):
_row[columns[i].get('fieldname')] = val
elif isinstance(row, dict):
# no need to convert from dict to dict
_row = frappe._dict(row)
data.append(_row)
else:
data = out
return columns, data
@Document.whitelist
def toggle_disable(self, disable):
self.db_set("disabled", cint(disable))
@frappe.whitelist()
def is_prepared_report_disabled(report):
return frappe.db.get_value('Report',
report, 'disable_prepared_report') or 0
def get_report_module_dotted_path(module, report_name):
return frappe.local.module_app[scrub(module)] + "." + scrub(module) \
+ ".report." + scrub(report_name) + "." + scrub(report_name)
|
#!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import unittest
try:
from pxr import UsdMaya
except ImportError:
from pixar import UsdMaya
from maya import cmds
from maya import standalone
class testUsdMayaBlockSceneModificationContext(unittest.TestCase):
@classmethod
def setUpClass(cls):
standalone.initialize('usd')
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
def _AssertSceneIsModified(self, modified):
isSceneModified = cmds.file(query=True, modified=True)
self.assertEqual(isSceneModified, modified)
def setUp(self):
cmds.file(new=True, force=True)
self._AssertSceneIsModified(False)
def testPreserveSceneModified(self):
"""
Tests that making scene modifications using a
UsdMayaBlockSceneModificationContext on a scene that has already been
modified correctly maintains the modification status after the context
exits.
"""
# Create a cube to dirty the scene.
cmds.polyCube()
self._AssertSceneIsModified(True)
with UsdMaya.BlockSceneModificationContext():
# Create a cube inside the context manager.
cmds.polyCube()
# The scene should still be modified.
self._AssertSceneIsModified(True)
def testPreserveSceneNotModified(self):
"""
Tests that making scene modifications using a
UsdMayaBlockSceneModificationContext on a scene that has not been
modified correctly maintains the modification status after the context
exits.
"""
with UsdMaya.BlockSceneModificationContext():
# Create a cube inside the context manager.
cmds.polyCube()
# The scene should NOT be modified.
self._AssertSceneIsModified(False)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
def qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp, data_dtype="uint8"):
# all x, y and golden outputs should be of the same length
assert len(x_datas) == len(y_datas)
assert len(y_datas) == len(golden_outputs)
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
lhs_scale = relay.const(scale_and_zp["lhs_scale"], "float32")
lhs_zp = relay.const(scale_and_zp["lhs_zp"], "int32")
rhs_scale = relay.const(scale_and_zp["rhs_scale"], "float32")
rhs_zp = relay.const(scale_and_zp["rhs_zp"], "int32")
output_scale = relay.const(scale_and_zp["output_scale"], "float32")
output_zp = relay.const(scale_and_zp["output_zp"], "int32")
z = relay.qnn.op.subtract(
lhs=x,
rhs=y,
lhs_scale=lhs_scale,
lhs_zero_point=lhs_zp,
rhs_scale=rhs_scale,
rhs_zero_point=rhs_zp,
output_scale=output_scale,
output_zero_point=output_zp,
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
for i in range(0, len(x_datas)):
x_data = x_datas[i]
y_data = y_datas[i]
golden_output = golden_outputs[i]
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), golden_output)
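def reference_qnn_subtract(x, y, p):
    # Reference sketch (assumed semantics, not used by the tests) of how the
    # golden outputs can be reproduced: dequantize both operands with
    # real = scale * (q - zero_point), subtract, requantize with the output
    # params and saturate to the uint8 range.
    real = p["lhs_scale"] * (x - p["lhs_zp"]) - p["rhs_scale"] * (y - p["rhs_zp"])
    q = np.round(real / p["output_scale"]) + p["output_zp"]
    return np.clip(q, 0, 255).astype("uint8")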
def test_tflite_same_io_qnn_params():
scale_and_zp = {
"lhs_scale": 0.00784314,
"lhs_zp": 127,
"rhs_scale": 0.00784314,
"rhs_zp": 127,
"output_scale": 0.00784314,
"output_zp": 127,
}
x_datas = [
np.array((140, 153, 165, 178)).reshape((1, 4)),
np.array((25, 153, 178, 216)).reshape((1, 4)),
np.array((25, 153, 216, 165)).reshape((1, 4)),
]
y_datas = [
np.array((204, 178, 165, 140)).reshape((1, 4)),
np.array((204, 178, 191, 25)).reshape((1, 4)),
np.array((204, 178, 25, 191)).reshape((1, 4)),
]
golden_outputs = [
np.array((63, 102, 127, 165)).reshape((1, 4)),
np.array((0, 102, 114, 255)).reshape((1, 4)),
np.array((0, 102, 255, 101)).reshape((1, 4)),
]
qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_tflite_different_io_qnn_params():
scale_and_zp = {
"lhs_scale": 0.0156863,
"lhs_zp": 127,
"rhs_scale": 0.0117647,
"rhs_zp": 85,
"output_scale": 0.0235294,
"output_zp": 128,
}
x_datas = [
np.array((76, 140, 153, 172)).reshape((1, 4)),
np.array((133, 140, 146, 153)).reshape((1, 4)),
np.array((76, 140, 172, 146)).reshape((1, 4)),
]
y_datas = [
np.array((136, 119, 128, 17)).reshape((1, 4)),
np.array((136, 119, 111, 94)).reshape((1, 4)),
np.array((136, 119, 17, 128)).reshape((1, 4)),
]
golden_outputs = [
np.array((68, 120, 123, 192)).reshape((1, 4)),
np.array((106, 120, 128, 140)).reshape((1, 4)),
np.array((68, 120, 192, 119)).reshape((1, 4)),
]
qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)
def test_saturation():
# Same params
scale_and_zp = {
"lhs_scale": 0.125,
"lhs_zp": 0,
"rhs_scale": 0.125,
"rhs_zp": 0,
"output_scale": 0.125,
"output_zp": 0,
}
x_data = [np.array((255, 1, 1, 0)).reshape((1, 4))]
y_data = [np.array((255, 255, 128, 0)).reshape((1, 4))]
golden_output = [np.array((0, 0, 0, 0)).reshape((1, 4))]
qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
# Same params, different scale
scale_and_zp = {
"lhs_scale": 0.125,
"lhs_zp": 0,
"rhs_scale": 0.125,
"rhs_zp": 0,
"output_scale": 0.25,
"output_zp": 0,
}
x_data = [np.array((255, 1, 200, 0)).reshape((1, 4))]
y_data = [np.array((255, 255, 127, 0)).reshape((1, 4))]
golden_output = [np.array((0, 0, 36, 0)).reshape((1, 4))]
qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
# All params different
scale_and_zp = {
"lhs_scale": 0.5,
"lhs_zp": 0,
"rhs_scale": 0.25,
"rhs_zp": 0,
"output_scale": 0.125,
"output_zp": 0,
}
x_data = [np.array((255, 0, 1, 0)).reshape((1, 4))]
y_data = [np.array((0, 128, 64, 0)).reshape((1, 4))]
golden_output = [np.array((255, 0, 0, 0)).reshape((1, 4))]
qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
if __name__ == "__main__":
test_tflite_same_io_qnn_params()
test_tflite_different_io_qnn_params()
test_saturation()
|
import os
import tempfile
from pathlib import Path
from PIL import Image
from google.cloud import storage
from retrying import retry
THUMBNAIL_SIZE = int(os.getenv('THUMBNAIL_SIZE', '128'))
THUMBNAIL_MAX_DIM = THUMBNAIL_SIZE, THUMBNAIL_SIZE
THUMBNAIL_SUFFIX = f'_thumb{THUMBNAIL_SIZE}'
SUPPORTED_FILE_EXTENSIONS = {'jpg', 'jpeg', 'png'}
def receive_event(event, context):
"""Triggered by a change to a Cloud Storage bucket.
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
file_extension = Path(event['name']).suffix.lstrip('.')
if not is_event_supported_image(event, file_extension):
return
print(event, context)
bucket = storage.Client().get_bucket(event['bucket'])
with tempfile.NamedTemporaryFile() as temp_image_file, tempfile.NamedTemporaryFile() as temp_thumb_file:
get_image_file(event, temp_image_file, bucket)
image_format = generate_and_save_thumbnail(temp_image_file.name, temp_thumb_file.name)
upload_thumbnail_to_bucket(bucket, temp_thumb_file, get_thumbnail_name(event['name'], file_extension),
image_format)
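# A hypothetical minimal payload for exercising the function locally:
# receive_event({'name': 'photos/cat.jpg', 'bucket': 'my-bucket',
#                'contentType': 'image/jpeg'}, None)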
def is_file_extension_supported(file_extension):
return file_extension.lower() in SUPPORTED_FILE_EXTENSIONS
def is_event_supported_image(event, file_extension):
return (event['contentType'].startswith('image') and
THUMBNAIL_SUFFIX not in event['name'] and
is_file_extension_supported(file_extension))
def get_thumbnail_name(image_name, file_extension):
return f'{Path(image_name).stem}{THUMBNAIL_SUFFIX}.{file_extension}'
def generate_and_save_thumbnail(image_file_name, thumbnail_file_name):
image = Image.open(image_file_name)
image_format = image.format
    image.thumbnail(THUMBNAIL_MAX_DIM, Image.LANCZOS)  # ANTIALIAS is a deprecated alias for LANCZOS
image.save(thumbnail_file_name, format=image_format)
return image_format
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def upload_thumbnail_to_bucket(bucket, temp_thumb_file, thumbnail_filename, image_format):
bucket.blob(thumbnail_filename).upload_from_filename(temp_thumb_file.name,
content_type=f'image/{image_format.lower()}')
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
def get_image_file(event, destination_file, bucket):
blob = bucket.get_blob(event['name'])
blob.download_to_file(destination_file)
|
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import copy
import os
import pkgutil
import pipes
import platform
import re
import shlex
import shutil
import sys
from libcxx.compiler import CXXCompiler
from libcxx.test.target_info import make_target_info
import libcxx.util
import libcxx.test.features
import libcxx.test.newconfig
import libcxx.test.params
import lit
def loadSiteConfig(lit_config, config, param_name, env_name):
# We haven't loaded the site specific configuration (the user is
# probably trying to run on a test file directly, and either the site
# configuration hasn't been created by the build system, or we are in an
# out-of-tree build situation).
site_cfg = lit_config.params.get(param_name,
os.environ.get(env_name))
if not site_cfg:
lit_config.warning('No site specific configuration file found!'
' Running the tests in the default configuration.')
elif not os.path.isfile(site_cfg):
lit_config.fatal(
"Specified site configuration file does not exist: '%s'" %
site_cfg)
else:
lit_config.note('using site specific configuration at %s' % site_cfg)
ld_fn = lit_config.load_config
# Null out the load_config function so that lit.site.cfg doesn't
# recursively load a config even if it tries.
# TODO: This is one hell of a hack. Fix it.
def prevent_reload_fn(*args, **kwargs):
pass
lit_config.load_config = prevent_reload_fn
ld_fn(config, site_cfg)
lit_config.load_config = ld_fn
# Extract the value of a numeric macro such as __cplusplus or a feature-test
# macro.
def intMacroValue(token):
return int(token.rstrip('LlUu'))
class Configuration(object):
# pylint: disable=redefined-outer-name
def __init__(self, lit_config, config):
self.lit_config = lit_config
self.config = config
self.cxx = None
self.cxx_is_clang_cl = None
self.cxx_stdlib_under_test = None
self.project_obj_root = None
self.libcxx_src_root = None
self.libcxx_obj_root = None
self.cxx_library_root = None
self.cxx_runtime_root = None
self.abi_library_root = None
self.link_shared = self.get_lit_bool('enable_shared', default=True)
self.debug_build = self.get_lit_bool('debug_build', default=False)
self.exec_env = dict()
self.use_clang_verify = False
def get_lit_conf(self, name, default=None):
val = self.lit_config.params.get(name, None)
if val is None:
val = getattr(self.config, name, None)
if val is None:
val = default
return val
def get_lit_bool(self, name, default=None, env_var=None):
def check_value(value, var_name):
if value is None:
return default
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise TypeError('expected bool or string')
if value.lower() in ('1', 'true'):
return True
if value.lower() in ('', '0', 'false'):
return False
self.lit_config.fatal(
"parameter '{}' should be true or false".format(var_name))
conf_val = self.get_lit_conf(name)
if env_var is not None and env_var in os.environ and \
os.environ[env_var] is not None:
val = os.environ[env_var]
if conf_val is not None:
self.lit_config.warning(
'Environment variable %s=%s is overriding explicit '
'--param=%s=%s' % (env_var, val, name, conf_val))
return check_value(val, env_var)
return check_value(conf_val, name)
def make_static_lib_name(self, name):
"""Return the full filename for the specified library name"""
if self.target_info.is_windows() and not self.target_info.is_mingw():
assert name == 'c++' # Only allow libc++ to use this function for now.
return 'lib' + name + '.lib'
else:
return 'lib' + name + '.a'
def configure(self):
self.target_info = make_target_info(self)
self.executor = self.get_lit_conf('executor')
self.configure_cxx()
self.configure_src_root()
self.configure_obj_root()
self.cxx_stdlib_under_test = self.get_lit_conf('cxx_stdlib_under_test', 'libc++')
self.cxx_library_root = self.get_lit_conf('cxx_library_root', self.libcxx_obj_root)
self.abi_library_root = self.get_lit_conf('abi_library_root') or self.cxx_library_root
self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root', self.cxx_library_root)
self.abi_runtime_root = self.get_lit_conf('abi_runtime_root', self.abi_library_root)
self.configure_compile_flags()
self.configure_link_flags()
self.configure_env()
self.configure_coverage()
self.configure_substitutions()
self.configure_features()
libcxx.test.newconfig.configure(
libcxx.test.params.DEFAULT_PARAMETERS,
libcxx.test.features.DEFAULT_FEATURES,
self.config,
self.lit_config
)
self.lit_config.note("All available features: {}".format(self.config.available_features))
def print_config_info(self):
if self.cxx.use_modules:
self.lit_config.note('Using modules flags: %s' %
self.cxx.modules_flags)
if len(self.cxx.warning_flags):
self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
show_env_vars = {}
for k,v in self.exec_env.items():
if k not in os.environ or os.environ[k] != v:
show_env_vars[k] = v
self.lit_config.note('Adding environment variables: %r' % show_env_vars)
self.lit_config.note("Linking against the C++ Library at {}".format(self.cxx_library_root))
self.lit_config.note("Running against the C++ Library at {}".format(self.cxx_runtime_root))
self.lit_config.note("Linking against the ABI Library at {}".format(self.abi_library_root))
self.lit_config.note("Running against the ABI Library at {}".format(self.abi_runtime_root))
sys.stderr.flush() # Force flushing to avoid broken output on Windows
def get_test_format(self):
from libcxx.test.format import LibcxxTestFormat
return LibcxxTestFormat(
self.cxx,
self.use_clang_verify,
self.executor,
exec_env=self.exec_env)
def configure_cxx(self):
# Gather various compiler parameters.
cxx = self.get_lit_conf('cxx_under_test')
self.cxx_is_clang_cl = cxx is not None and \
os.path.basename(cxx).startswith('clang-cl')
# If no specific cxx_under_test was given, attempt to infer it as
# clang++.
if cxx is None or self.cxx_is_clang_cl:
search_paths = self.config.environment['PATH']
if cxx is not None and os.path.isabs(cxx):
search_paths = os.path.dirname(cxx)
clangxx = libcxx.util.which('clang++', search_paths)
if clangxx:
cxx = clangxx
self.lit_config.note(
"inferred cxx_under_test as: %r" % cxx)
elif self.cxx_is_clang_cl:
self.lit_config.fatal('Failed to find clang++ substitution for'
' clang-cl')
if not cxx:
self.lit_config.fatal('must specify user parameter cxx_under_test '
'(e.g., --param=cxx_under_test=clang++)')
self.cxx = CXXCompiler(self, cxx) if not self.cxx_is_clang_cl else \
self._configure_clang_cl(cxx)
self.cxx.compile_env = dict(os.environ)
def _configure_clang_cl(self, clang_path):
def _split_env_var(var):
return [p.strip() for p in os.environ.get(var, '').split(';') if p.strip()]
def _prefixed_env_list(var, prefix):
from itertools import chain
return list(chain.from_iterable((prefix, path) for path in _split_env_var(var)))
assert self.cxx_is_clang_cl
flags = []
compile_flags = []
link_flags = _prefixed_env_list('LIB', '-L')
return CXXCompiler(self, clang_path, flags=flags,
compile_flags=compile_flags,
link_flags=link_flags)
def configure_src_root(self):
self.libcxx_src_root = self.get_lit_conf(
'libcxx_src_root', os.path.dirname(self.config.test_source_root))
def configure_obj_root(self):
self.project_obj_root = self.get_lit_conf('project_obj_root')
self.libcxx_obj_root = self.get_lit_conf('libcxx_obj_root')
if not self.libcxx_obj_root and self.project_obj_root is not None:
possible_roots = [
os.path.join(self.project_obj_root, 'libcxx'),
os.path.join(self.project_obj_root, 'projects', 'libcxx'),
os.path.join(self.project_obj_root, 'runtimes', 'libcxx'),
]
for possible_root in possible_roots:
if os.path.isdir(possible_root):
self.libcxx_obj_root = possible_root
break
else:
self.libcxx_obj_root = self.project_obj_root
def configure_features(self):
if self.target_info.is_windows():
if self.cxx_stdlib_under_test == 'libc++':
# LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the
# initial Windows failures until they can be properly diagnosed
# and fixed. This allows easier detection of new test failures
# and regressions. Note: New failures should not be suppressed
# using this feature. (Also see llvm.org/PR32730)
self.config.available_features.add('LIBCXX-WINDOWS-FIXME')
def configure_compile_flags(self):
self.configure_default_compile_flags()
# Configure extra flags
compile_flags_str = self.get_lit_conf('compile_flags', '')
self.cxx.compile_flags += shlex.split(compile_flags_str)
if self.target_info.is_windows():
self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
# Don't warn about using common but nonstandard unprefixed functions
# like chdir, fileno.
self.cxx.compile_flags += ['-D_CRT_NONSTDC_NO_WARNINGS']
# Build the tests in the same configuration as libcxx itself,
# to avoid mismatches if linked statically.
self.cxx.compile_flags += ['-D_CRT_STDIO_ISO_WIDE_SPECIFIERS']
# Required so that tests using min/max don't fail on Windows,
# and so that those tests don't have to be changed to tolerate
# this insanity.
self.cxx.compile_flags += ['-DNOMINMAX']
additional_flags = self.get_lit_conf('test_compiler_flags')
if additional_flags:
self.cxx.compile_flags += shlex.split(additional_flags)
def configure_default_compile_flags(self):
# Configure include paths
self.configure_compile_flags_header_includes()
self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
self.target_info.add_cxx_flags(self.cxx.flags)
# Use verbose output for better errors
self.cxx.flags += ['-v']
sysroot = self.get_lit_conf('sysroot')
if sysroot:
self.cxx.flags += ['--sysroot=' + sysroot]
gcc_toolchain = self.get_lit_conf('gcc_toolchain')
if gcc_toolchain:
self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
        # NOTE: the _DEBUG definition must precede the triple check because for
# the Windows build of libc++, the forced inclusion of a header requires
# that _DEBUG is defined. Incorrect ordering will result in -target
# being elided.
if self.target_info.is_windows() and self.debug_build:
self.cxx.compile_flags += ['-D_DEBUG']
# Add includes for support headers used in the tests.
support_path = os.path.join(self.libcxx_src_root, 'test/support')
self.cxx.compile_flags += ['-I' + support_path]
# On GCC, the libc++ headers cause errors due to throw() decorators
# on operator new clashing with those from the test suite, so we
# don't enable warnings in system headers on GCC.
if self.cxx.type != 'gcc':
self.cxx.compile_flags += ['-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER']
# Add includes for the PSTL headers
pstl_src_root = self.get_lit_conf('pstl_src_root')
pstl_obj_root = self.get_lit_conf('pstl_obj_root')
if pstl_src_root is not None and pstl_obj_root is not None:
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
self.config.available_features.add('parallel-algorithms')
def configure_compile_flags_header_includes(self):
support_path = os.path.join(self.libcxx_src_root, 'test', 'support')
if self.cxx_stdlib_under_test != 'libstdc++' and \
not self.target_info.is_windows() and \
not self.target_info.is_zos():
self.cxx.compile_flags += [
'-include', os.path.join(support_path, 'nasty_macros.h')]
if self.cxx_stdlib_under_test == 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'msvc_stdlib_force_include.h')]
if self.target_info.is_windows() and self.debug_build and \
self.cxx_stdlib_under_test != 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'set_windows_crt_report_mode.h')
]
cxx_headers = self.get_lit_conf('cxx_headers')
if cxx_headers is None and self.cxx_stdlib_under_test != 'libc++':
self.lit_config.note('using the system cxx headers')
return
self.cxx.compile_flags += ['-nostdinc++']
if not os.path.isdir(cxx_headers):
self.lit_config.fatal("cxx_headers='{}' is not a directory.".format(cxx_headers))
(path, version) = os.path.split(cxx_headers)
(path, cxx) = os.path.split(path)
triple = self.get_lit_conf('target_triple', None)
if triple is not None:
cxx_target_headers = os.path.join(path, triple, cxx, version)
if os.path.isdir(cxx_target_headers):
self.cxx.compile_flags += ['-I' + cxx_target_headers]
self.cxx.compile_flags += ['-I' + cxx_headers]
if self.libcxx_obj_root is not None:
cxxabi_headers = os.path.join(self.libcxx_obj_root, 'include',
'c++build')
if os.path.isdir(cxxabi_headers):
self.cxx.compile_flags += ['-I' + cxxabi_headers]
def configure_link_flags(self):
# Configure library path
self.configure_link_flags_cxx_library_path()
self.configure_link_flags_abi_library_path()
# Configure libraries
if self.cxx_stdlib_under_test == 'libc++':
if self.target_info.is_mingw():
self.cxx.link_flags += ['-nostdlib++']
else:
self.cxx.link_flags += ['-nodefaultlibs']
# FIXME: Handle MSVCRT as part of the ABI library handling.
if self.target_info.is_windows() and not self.target_info.is_mingw():
self.cxx.link_flags += ['-nostdlib']
self.configure_link_flags_cxx_library()
self.configure_link_flags_abi_library()
self.configure_extra_library_flags()
elif self.cxx_stdlib_under_test == 'libstdc++':
self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
elif self.cxx_stdlib_under_test == 'msvc':
# FIXME: Correctly setup debug/release flags here.
pass
elif self.cxx_stdlib_under_test == 'cxx_default':
self.cxx.link_flags += ['-pthread']
else:
self.lit_config.fatal('invalid stdlib under test')
link_flags_str = self.get_lit_conf('link_flags', '')
self.cxx.link_flags += shlex.split(link_flags_str)
def configure_link_flags_cxx_library_path(self):
if self.cxx_library_root:
self.cxx.link_flags += ['-L' + self.cxx_library_root]
if self.target_info.is_windows() and self.link_shared:
self.add_path(self.cxx.compile_env, self.cxx_library_root)
if self.cxx_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' +
self.cxx_runtime_root]
elif self.target_info.is_windows() and self.link_shared:
self.add_path(self.exec_env, self.cxx_runtime_root)
additional_flags = self.get_lit_conf('test_linker_flags')
if additional_flags:
self.cxx.link_flags += shlex.split(additional_flags)
def configure_link_flags_abi_library_path(self):
# Configure ABI library paths.
if self.abi_library_root:
self.cxx.link_flags += ['-L' + self.abi_library_root]
if self.abi_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' + self.abi_runtime_root]
else:
self.add_path(self.exec_env, self.abi_runtime_root)
def configure_link_flags_cxx_library(self):
if self.link_shared:
self.cxx.link_flags += ['-lc++']
else:
if self.cxx_library_root:
libname = self.make_static_lib_name('c++')
abs_path = os.path.join(self.cxx_library_root, libname)
                assert os.path.exists(abs_path), \
                    "static libc++ library does not exist"
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++']
def configure_link_flags_abi_library(self):
cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
if cxx_abi == 'libstdc++':
self.cxx.link_flags += ['-lstdc++']
elif cxx_abi == 'libsupc++':
self.cxx.link_flags += ['-lsupc++']
elif cxx_abi == 'libcxxabi':
# If the C++ library requires explicitly linking to libc++abi, or
# if we're testing libc++abi itself (the test configs are shared),
# then link it.
testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi'
if self.target_info.allow_cxxabi_link() or testing_libcxxabi:
libcxxabi_shared = self.get_lit_bool('libcxxabi_shared', default=True)
if libcxxabi_shared:
self.cxx.link_flags += ['-lc++abi']
else:
if self.abi_library_root:
libname = self.make_static_lib_name('c++abi')
abs_path = os.path.join(self.abi_library_root, libname)
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++abi']
elif cxx_abi == 'libcxxrt':
self.cxx.link_flags += ['-lcxxrt']
elif cxx_abi == 'vcruntime':
debug_suffix = 'd' if self.debug_build else ''
# This matches the set of libraries linked in the toplevel
# libcxx CMakeLists.txt if building targeting msvc.
self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in
['vcruntime', 'ucrt', 'msvcrt', 'msvcprt']]
# The compiler normally links in oldnames.lib too, but we've
# specified -nostdlib above, so we need to specify it manually.
self.cxx.link_flags += ['-loldnames']
elif cxx_abi == 'none' or cxx_abi == 'default':
if self.target_info.is_windows():
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix]
else:
self.lit_config.fatal(
'C++ ABI setting %s unsupported for tests' % cxx_abi)
def configure_extra_library_flags(self):
if self.get_lit_bool('cxx_ext_threads', default=False):
self.cxx.link_flags += ['-lc++external_threads']
self.target_info.add_cxx_link_flags(self.cxx.link_flags)
def configure_coverage(self):
self.generate_coverage = self.get_lit_bool('generate_coverage', False)
if self.generate_coverage:
self.cxx.flags += ['-g', '--coverage']
self.cxx.compile_flags += ['-O0']
def quote(self, s):
if platform.system() == 'Windows':
return lit.TestRunner.quote_windows_command([s])
return pipes.quote(s)
def configure_substitutions(self):
sub = self.config.substitutions
sub.append(('%{cxx}', self.quote(self.cxx.path)))
flags = self.cxx.flags + (self.cxx.modules_flags if self.cxx.use_modules else [])
compile_flags = self.cxx.compile_flags + (self.cxx.warning_flags if self.cxx.use_warnings else [])
sub.append(('%{flags}', ' '.join(map(self.quote, flags))))
sub.append(('%{compile_flags}', ' '.join(map(self.quote, compile_flags))))
sub.append(('%{link_flags}', ' '.join(map(self.quote, self.cxx.link_flags))))
codesign_ident = self.get_lit_conf('llvm_codesign_identity', '')
env_vars = ' '.join('%s=%s' % (k, self.quote(v)) for (k, v) in self.exec_env.items())
exec_args = [
'--execdir %T',
'--codesign_identity "{}"'.format(codesign_ident),
'--env {}'.format(env_vars)
]
sub.append(('%{exec}', '{} {} -- '.format(self.executor, ' '.join(exec_args))))
def configure_env(self):
self.config.environment = dict(os.environ)
def add_path(self, dest_env, new_path):
self.target_info.add_path(dest_env, new_path)
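# --- Editor's illustrative sketch (not part of the original module) ---
# A lit site config typically drives the configuration class above roughly
# like this; the exact wiring lives in the project's lit.cfg and varies
# across LLVM versions ('configuration' is an instance of the class above):
#
#   configuration.configure()
#   configuration.print_config_info()
#   config.test_format = configuration.get_test_format()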
|
#
# Copyright 2015 NEC Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_service import service
from aodh.evaluator import event
from aodh import messaging
from aodh import storage
LOG = log.getLogger(__name__)
OPTS = [
cfg.StrOpt('event_alarm_topic',
default='alarm.all',
deprecated_group='DEFAULT',
help='The topic that aodh uses for event alarm evaluation.'),
cfg.IntOpt('batch_size',
default=1,
help='Number of notification messages to wait before '
'dispatching them.'),
cfg.IntOpt('batch_timeout',
default=None,
help='Number of seconds to wait before dispatching samples '
'when batch_size is not reached (None means indefinitely).'),
]
class EventAlarmEndpoint(object):
def __init__(self, evaluator):
self.evaluator = evaluator
def sample(self, notifications):
LOG.debug('Received %s messages in batch.', len(notifications))
for notification in notifications:
self.evaluator.evaluate_events(notification['payload'])
class EventAlarmEvaluationService(service.Service):
def __init__(self, conf):
super(EventAlarmEvaluationService, self).__init__()
self.conf = conf
def start(self):
super(EventAlarmEvaluationService, self).start()
self.storage_conn = storage.get_connection_from_config(self.conf)
self.evaluator = event.EventAlarmEvaluator(self.conf)
self.listener = messaging.get_batch_notification_listener(
messaging.get_transport(self.conf),
[oslo_messaging.Target(
topic=self.conf.listener.event_alarm_topic)],
[EventAlarmEndpoint(self.evaluator)], False,
self.conf.listener.batch_size,
self.conf.listener.batch_timeout)
self.listener.start()
# Add a dummy thread to have wait() working
self.tg.add_timer(604800, lambda: None)
def stop(self):
if getattr(self, 'listener', None):
self.listener.stop()
self.listener.wait()
super(EventAlarmEvaluationService, self).stop()
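# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal way to launch the evaluator, assuming the OPTS above are
# registered under the 'listener' group (the code above reads
# conf.listener.*); aodh's real entry point does this through its own CLI
# plumbing, so the wiring below is illustrative only:
#
#   from oslo_config import cfg
#   from oslo_service import service as os_service
#
#   conf = cfg.ConfigOpts()
#   conf.register_opts(OPTS, group='listener')
#   os_service.launch(conf, EventAlarmEvaluationService(conf)).wait()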
|
"""
test_analyze.py
Testing analyze.py
"""
import os
from collections import defaultdict
import numpy as np
import seekr2.modules.common_analyze as common_analyze
import seekr2.modules.mmvt_analyze as mmvt_analyze
import seekr2.analyze as analyze
import seekr2.modules.common_base as base
import seekr2.modules.mmvt_base as mmvt_base
import seekr2.tests.smoluchowski_system as smoluchowski
this_dir = os.path.dirname(os.path.realpath(__file__))
test_output_filename = os.path.join(this_dir, "test_analyze_outputfile.txt")
test_statistics_filename = os.path.join(this_dir, "test_analyze_statistics.txt")
def test_read_output_file():
N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, \
R_i_alpha_std_dev, R_i_alpha_total, N_alpha_beta, \
T_alpha_list, T_alpha_average, T_alpha_std_dev, \
T_alpha_total, existing_lines \
= mmvt_analyze.openmm_read_output_file_list(
[test_output_filename])
N_i_j_alpha_dict1 = N_i_j_alpha
R_i_alpha_dict1 = R_i_alpha_total
N_alpha_beta_dict1 = N_alpha_beta
T_alpha1 = T_alpha_total
#N_i_j_alpha_dict1, R_i_alpha_dict1, N_alpha_beta_dict1, T_alpha1 = \
# analyze.openmm_read_output_file_list([test_output_filename])
N_i_j_alpha_dict2 = {(1, 2): 52, (2, 1): 52}
R_i_alpha_dict2 = {1: 1658.696, 2: 198.912}
N_alpha_beta_dict2 = {1: 2423, 2: 98}
T_alpha2 = 1954.760
for key in N_i_j_alpha_dict1:
assert key in N_i_j_alpha_dict2
assert np.isclose(N_i_j_alpha_dict1[key], N_i_j_alpha_dict2[key])
for key in R_i_alpha_dict1:
assert key in R_i_alpha_dict2
assert np.isclose(R_i_alpha_dict1[key], R_i_alpha_dict2[key])
for key in N_alpha_beta_dict1:
assert key in N_alpha_beta_dict2
assert np.isclose(N_alpha_beta_dict1[key], N_alpha_beta_dict2[key])
assert np.isclose(T_alpha1, T_alpha2)
N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, \
R_i_alpha_std_dev, R_i_alpha_total, N_alpha_beta, \
T_alpha_list, T_alpha_average, T_alpha_std_dev, \
T_alpha_total, existing_lines \
= mmvt_analyze.openmm_read_output_file_list([test_output_filename,
test_output_filename],
skip_restart_check=True)
N_i_j_alpha_dict1 = N_i_j_alpha
R_i_alpha_dict1 = R_i_alpha_total
N_alpha_beta_dict1 = N_alpha_beta
T_alpha1 = T_alpha_total
#N_i_j_alpha_dict1, R_i_alpha_dict1, N_alpha_beta_dict1, T_alpha = \
# analyze.openmm_read_output_file_list([test_output_filename,
# test_output_filename])
for key in N_i_j_alpha_dict1:
assert key in N_i_j_alpha_dict2
assert np.isclose(N_i_j_alpha_dict1[key], 2*N_i_j_alpha_dict2[key],
rtol=0.01)
for key in N_alpha_beta_dict1:
assert key in N_alpha_beta_dict2
assert np.isclose(N_alpha_beta_dict1[key], 2*N_alpha_beta_dict2[key],
rtol=0.01)
return
def test_minor2d():
A = np.array([[1,2,3],[4,5,6],[7,8,9]])
B = np.array([[1,3],[7,9]])
C = np.array([[1,2],[4,5]])
    D = np.array([[2,3],[8,9]])
    assert (common_analyze.minor2d(A, 1, 1) == B).all()
    assert (common_analyze.minor2d(A, 2, 2) == C).all()
    assert (common_analyze.minor2d(A, 1, 0) == D).all()
return
def test_minor1d():
A = np.array([1,2,3])
B = np.array([1,3])
C = np.array([2,3])
D = np.array([1,2])
    assert (common_analyze.minor1d(A, 1) == B).all()
    assert (common_analyze.minor1d(A, 0) == C).all()
    assert (common_analyze.minor1d(A, 2) == D).all()
return
def test_pretty_string_value_error():
mystr = common_analyze.pretty_string_value_error(
5.6e-2, 2.0e-3, error_digits=1, use_unicode=False)
expectedstr = "5.6 +/- 0.2 * 10^-02"
assert(mystr == expectedstr)
mystr = common_analyze.pretty_string_value_error(
5.6e-2, 2.0e-1, error_digits=1, use_unicode=False)
expectedstr = "5.6 +/- 20.0 * 10^-02"
assert(mystr == expectedstr)
mystr = common_analyze.pretty_string_value_error(
1.23456789e8, 4.5678e5, error_digits=2, use_unicode=False)
expectedstr = "1.2346 +/- 0.0046 * 10^+08"
assert(mystr == expectedstr)
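    # Note: "5.6 +/- 0.2 * 10^-02" encodes (5.6 +/- 0.2) x 10^-2, i.e.
    # 0.056 +/- 0.002 for the first case above (5.6e-2 with error 2.0e-3).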
def make_fake_output_file_osc(anchor, tmp_path, timestep=1.0):
num_steps = 50
mmvt_output_filename = os.path.join(
tmp_path, anchor.name, "prod",
"%s%d.%s" % (mmvt_base.OPENMMVT_BASENAME, 1,
mmvt_base.OPENMMVT_EXTENSION))
with open(mmvt_output_filename, "w") as f:
if anchor.index == 0:
for i in range(num_steps+1):
line = "%d,%d,%f\n" % (1, i, i*timestep)
f.write(line)
else:
for i in range(num_steps+1):
if (i % 2) == 0:
line = "%d,%d,%f\n" % (2, i, i*timestep)
f.write(line)
else:
line = "%d,%d,%f\n" % (1, i, i*timestep)
f.write(line)
return
def make_fake_output_file2(anchor, tmp_path, ups=1, downs=9, timestep=1.0):
num_steps = 50
total = ups + downs
mmvt_output_filename = os.path.join(
tmp_path, anchor.name, "prod",
"%s%d.%s" % (mmvt_base.OPENMMVT_BASENAME, 1,
mmvt_base.OPENMMVT_EXTENSION))
with open(mmvt_output_filename, "w") as f:
if anchor.index == 0:
for i in range(num_steps+1):
line = "%d,%d,%f\n" % (1, i, i*timestep)
f.write(line)
else:
for i in range(num_steps+1):
if (i % total) < ups:
line = "%d,%d,%f\n" % (2, i, i*timestep)
f.write(line)
else:
line = "%d,%d,%f\n" % (1, i, i*timestep)
f.write(line)
return
def test_solve_rate_matrix():
Q = np.array(
[[-0.5, 0.5, 0.0, 0.0],
[0.1, -0.3, 0.2, 0.0],
[0.0, 0.15, -0.3, 0.15],
[0.0, 0.0, 0.3, -0.4]])
K = np.zeros(Q.shape, dtype=np.longdouble)
for i in range(Q.shape[0]):
for j in range(Q.shape[0]):
if i == j:
K[i,j] = 0.0
else:
K[i,j] = -Q[i,j] / Q[i,i]
for i in range(K.shape[0]-1):
my_sum = sum(K[i,:])
for j in range(K.shape[0]):
K[i,j] = K[i,j] / my_sum
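    # K above is the jump-probability matrix derived from Q (off-diagonal
    # rates normalized per state); it is computed for reference only and is
    # never asserted against. The check below verifies solve_rate_matrix(Q)
    # against directly solving the linear system Q t = -1.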
test_times_1 = common_analyze.solve_rate_matrix(Q)
one_vector = np.ones((Q.shape[0]))
test_times_2 = np.linalg.solve(Q, -one_vector)
error = np.linalg.norm(test_times_2 - test_times_1)
assert error < 1e-8
return
"""
def make_smol_calculation(tmp_path, func=None):
num_anchors = 10
D = 0.01
interval = 1.0
n = 101
intervals = []
for i in range(num_anchors):
intervals.append(interval)
if func is None:
func = smoluchowski.expW_constant
q_s = np.zeros(num_anchors)
mymodel = smoluchowski.make_smol_model(tmp_path, num_anchors, intervals)
my_analysis = analyze.Analysis(mymodel)
elberN_ij = defaultdict(float)
elberR_i = defaultdict(float)
smols = []
for i, anchor in enumerate(mymodel.anchors[:-1]):
a = interval*i
b = interval*(i+1)
smol = smoluchowski.Smoluchowski(a, b, func, n=n, D=D)
q_s[i] = smol.expWq
if i == 0:
smol.reflect_lower = True
k_backwards, k_forwards, T_alpha, N_backwards, N_forwards, \
R_i_backwards, R_i_forwards, N_ij_backwards, N_ij_forwards \
= smol.compute_MMVT_kinetics_quantities()
N_i_j_alpha_dict = defaultdict(int)
R_i_alpha_dict = defaultdict(float)
N_alpha_beta_dict = defaultdict(int)
new_time_factor = (R_i_forwards + R_i_backwards) / T_alpha
new_T_alpha = new_time_factor * T_alpha
if i == 0:
N_alpha_beta_dict[1] = new_time_factor
R_i_alpha_dict[1] = new_T_alpha
else:
N_i_j_alpha_dict[(1, 2)] = N_ij_forwards
N_i_j_alpha_dict[(2, 1)] = N_ij_backwards
R_i_alpha_dict[1] = R_i_forwards
R_i_alpha_dict[2] = R_i_backwards
N_alpha_beta_dict[1] = N_backwards * new_time_factor
N_alpha_beta_dict[2] = N_forwards * new_time_factor
anchor_stats = mmvt_analyze.MMVT_anchor_statistics(alpha=i)
anchor_stats.N_i_j_alpha = N_i_j_alpha_dict
anchor_stats.R_i_alpha_total = R_i_alpha_dict
anchor_stats.R_i_alpha_std_dev = R_i_alpha_dict
anchor_stats.R_i_alpha_list = {}
for key in anchor_stats.R_i_alpha_total:
anchor_stats.R_i_alpha_list[key] = []
anchor_stats.N_alpha_beta = N_alpha_beta_dict
anchor_stats.T_alpha_total = new_T_alpha
anchor_stats.T_alpha_std_dev = new_T_alpha
for key in N_alpha_beta_dict:
anchor_stats.k_alpha_beta[key] = N_alpha_beta_dict[key] \
/ new_T_alpha
# N_i_j_alpha_dict, R_i_alpha_dict, N_alpha_beta_dict, new_T_alpha,
# alpha=i)
        # Fill out values here...
my_analysis.anchor_stats_list.append(anchor_stats)
smols.append(smol)
for i, anchor in enumerate(mymodel.anchors[:-1]):
smol1 = smols[i]
if i == 0:
smol2 = smols[i+1]
elberN_ij[(0,1)] = 1.0
# need to make sure that u and exp(-beta*W) match up
# on the edge.
smol1_edge_value = smol1.expWfunc(smol1.b, q=smol1.expWq)
elberR_i[0] = (smol2.u_q_forward + (1.0/smol1_edge_value)) / (smol2.J_forward)
elif i == mymodel.num_milestones-1:
elberN_ij[(mymodel.num_milestones-1,mymodel.num_milestones-2)] = 1.0
elberR_i[mymodel.num_milestones-1] = (smol1.u_q_backward) / (smol1.J_backward)
else:
smol2 = smols[i+1]
elberN_ij[(i,i+1)] = smol2.J_forward / (smol2.J_forward + smol1.J_backward)
elberN_ij[(i,i-1)] = smol1.J_backward / (smol2.J_forward + smol1.J_backward)
elberR_i[i] = (smol2.u_q_forward + smol1.u_q_backward) / (smol2.J_forward + smol1.J_backward)
my_analysis.mmvt_check_anchor_stats()
#my_analyze._calculate_equilibrium_probability()
#my_analyze._calculate_overall_statistics()
#my_analysis.extract_data()
my_analysis.fill_out_data_samples()
my_analysis.main_data_sample.pi_alpha = np.zeros(mymodel.num_anchors)
for i, anchor in enumerate(mymodel.anchors[:-1]):
my_analysis.main_data_sample.pi_alpha[i] = q_s[i] / np.sum(q_s)
my_analysis.fill_out_data_samples()
my_analysis.process_data_samples()
my_analysis.main_data_sample.Q = np.zeros((mymodel.num_milestones,
mymodel.num_milestones), dtype=np.longdouble)
elberQ = np.zeros((mymodel.num_milestones,
mymodel.num_milestones), dtype=np.longdouble)
for i in range(mymodel.num_milestones):
for j in range(mymodel.num_milestones):
if my_analysis.main_data_sample.R_i[i] == 0.0:
my_analysis.main_data_sample.Q[i,j] = 0.0
else:
my_analysis.main_data_sample.Q[i,j] \
= my_analysis.main_data_sample.N_ij[i,j] \
/ my_analysis.main_data_sample.R_i[i]
if elberR_i[i] > 0.0:
elberQ[i,j] = elberN_ij[i,j] / elberR_i[i]
for i in range(mymodel.num_milestones):
my_analysis.main_data_sample.Q[i][i] = \
-np.sum(my_analysis.main_data_sample.Q[i])
elberQ[i][i] = -np.sum(elberQ[i])
#my_analyze._rate_mat_to_prob_mat()
#print("my_analyze.Q:", my_analyze.Q)
#print("elberQ:", elberQ)
#print("my_analyze.K:", my_analyze.K)
#my_analyze.calculate_kinetics()
my_analysis.main_data_sample.calculate_kinetics()
mmvt_time = my_analysis.main_data_sample.MFPTs[(0,"bulk")]
#print("mmvt_time:", mmvt_time)
my_analysis.main_data_sample.Q = elberQ
my_analysis.main_data_sample.calculate_kinetics()
elber_time = my_analysis.main_data_sample.MFPTs[(0,"bulk")]
#print("elber_time:", elber_time)
a1 = 0.0
b1 = interval
a2 = interval
b2 = interval*num_anchors
smol1 = smoluchowski.Smoluchowski(a1, b1, func, n=n, D=D)
smol2 = smoluchowski.Smoluchowski(a2, b2, func, n=n, D=D)
q1 = smol1.expWq
q2 = smol2.expWq
k_backwards, k_forwards, T_alpha, N_backwards, N_forwards, R_i_backwards, \
R_i_forwards, N_ij_backwards, N_ij_forwards \
= smol2.compute_MMVT_kinetics_quantities()
J2 = q2 / (R_i_forwards + R_i_backwards)
correct_time = R_i_forwards + q1/J2
#print("correct_time:", correct_time)
print("Time predicted by Elber:", elber_time, "Time predicted by MMVT:",
mmvt_time, "Exact time:", correct_time)
""
x_s = np.arange(0.0, num_anchors, interval)
func_vals1 = np.zeros(num_anchors)
func_vals2 = np.zeros(num_anchors)
print("q_s:", q_s)
for i, x in enumerate(x_s):
print("i:", i, "my_analyze.pi_alpha[i]:", my_analyze.pi_alpha[i], "q_s[i]:", q_s[i] / np.sum(q_s))
func_vals1[i] = my_analyze.pi_alpha[i]
func_vals2[i] = q_s[i] / np.sum(q_s)
plt.plot(x_s, func_vals1, "g", x_s, func_vals2, "r")
plt.show()
""
return mmvt_time, elber_time, correct_time
def test_smoluchowski_solution_flat_1(tmp_path):
print("Constant PMF:")
mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path)
assert np.isclose(mmvt_time, true_time, rtol=0.001)
assert np.isclose(elber_time, true_time, rtol=0.001)
print("linear PMF:")
func = smoluchowski.expW_linear
mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path, func)
assert np.isclose(mmvt_time, true_time, rtol=0.001)
assert np.isclose(elber_time, true_time, rtol=0.001)
print("quadratic PMF:")
func = smoluchowski.expW_quadratic
mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path, func)
assert np.isclose(mmvt_time, true_time, rtol=0.001)
assert np.isclose(elber_time, true_time, rtol=0.001)
"""
|
#!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy
import oamap.dataset
import oamap.database
class NumpyFileBackend(oamap.database.FilesystemBackend):
def __init__(self, directory):
super(NumpyFileBackend, self).__init__(directory, arraysuffix=".npy")
def instantiate(self, partitionid):
return NumpyArrays(lambda name: self.fullname(partitionid, name, create=False),
lambda name: self.fullname(partitionid, name, create=True))
class NumpyArrays(object):
def __init__(self, loadname, storename):
self._loadname = loadname
self._storename = storename
def __getitem__(self, name):
return numpy.load(self._loadname(name))
def __setitem__(self, name, value):
numpy.save(self._storename(name), value)
class NumpyFileDatabase(oamap.database.FilesystemDatabase):
def __init__(self, directory, namespace=""):
super(NumpyFileDatabase, self).__init__(directory, backends={namespace: NumpyFileBackend(directory)}, namespace=namespace)
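# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal round trip through NumpyArrays alone, using the same callable
# for both the load and store paths (directory and names are hypothetical):
if __name__ == "__main__":
    import tempfile
    _tmpdir = tempfile.mkdtemp()
    _name2path = lambda name: os.path.join(_tmpdir, name + ".npy")
    _arrays = NumpyArrays(_name2path, _name2path)
    _arrays["example"] = numpy.arange(10)          # stored via numpy.save
    assert (_arrays["example"] == numpy.arange(10)).all()  # loaded via numpy.load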
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.mo_base_complex_type import MoBaseComplexType
from intersight.model.softwarerepository_appliance_upload import SoftwarerepositoryApplianceUpload
from intersight.model.softwarerepository_cifs_server import SoftwarerepositoryCifsServer
from intersight.model.softwarerepository_http_server import SoftwarerepositoryHttpServer
from intersight.model.softwarerepository_local_machine import SoftwarerepositoryLocalMachine
from intersight.model.softwarerepository_nfs_server import SoftwarerepositoryNfsServer
globals()['MoBaseComplexType'] = MoBaseComplexType
globals()['SoftwarerepositoryApplianceUpload'] = SoftwarerepositoryApplianceUpload
globals()['SoftwarerepositoryCifsServer'] = SoftwarerepositoryCifsServer
globals()['SoftwarerepositoryHttpServer'] = SoftwarerepositoryHttpServer
globals()['SoftwarerepositoryLocalMachine'] = SoftwarerepositoryLocalMachine
globals()['SoftwarerepositoryNfsServer'] = SoftwarerepositoryNfsServer
class SoftwarerepositoryFileServer(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
      allowed_values (dict): The key is the tuple path to the attribute;
          for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute;
          for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'ACCESS.ADDRESSTYPE': "access.AddressType",
'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig",
'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings",
'ADAPTER.ETHSETTINGS': "adapter.EthSettings",
'ADAPTER.FCSETTINGS': "adapter.FcSettings",
'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings",
'APPLIANCE.APISTATUS': "appliance.ApiStatus",
'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase",
'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair",
'APPLIANCE.STATUSCHECK': "appliance.StatusCheck",
'ASSET.ADDRESSINFORMATION': "asset.AddressInformation",
'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential",
'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential",
'ASSET.CLOUDCONNECTION': "asset.CloudConnection",
'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage",
'ASSET.CONTRACTINFORMATION': "asset.ContractInformation",
'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation",
'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo",
'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo",
'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation",
'ASSET.DEVICEINFORMATION': "asset.DeviceInformation",
'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics",
'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction",
'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate",
'ASSET.HTTPCONNECTION': "asset.HttpConnection",
'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection",
'ASSET.METERINGTYPE': "asset.MeteringType",
'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential",
'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential",
'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential",
'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential",
'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions",
'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService",
'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature",
'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential",
'ASSET.PRODUCTINFORMATION': "asset.ProductInformation",
'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential",
'ASSET.SSHCONNECTION': "asset.SshConnection",
'ASSET.SUDIINFO': "asset.SudiInfo",
'ASSET.TARGETKEY': "asset.TargetKey",
'ASSET.TARGETSIGNATURE': "asset.TargetSignature",
'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails",
'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions",
'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential",
'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions",
'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService",
'ASSET.VMHOST': "asset.VmHost",
'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions",
'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions",
'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions",
'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions",
'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService",
'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions",
'BOOT.BOOTLOADER': "boot.Bootloader",
'BOOT.ISCSI': "boot.Iscsi",
'BOOT.LOCALCDD': "boot.LocalCdd",
'BOOT.LOCALDISK': "boot.LocalDisk",
'BOOT.NVME': "boot.Nvme",
'BOOT.PCHSTORAGE': "boot.PchStorage",
'BOOT.PXE': "boot.Pxe",
'BOOT.SAN': "boot.San",
'BOOT.SDCARD': "boot.SdCard",
'BOOT.UEFISHELL': "boot.UefiShell",
'BOOT.USB': "boot.Usb",
'BOOT.VIRTUALMEDIA': "boot.VirtualMedia",
'BULK.HTTPHEADER': "bulk.HttpHeader",
'BULK.RESTRESULT': "bulk.RestResult",
'BULK.RESTSUBREQUEST': "bulk.RestSubRequest",
'CAPABILITY.PORTRANGE': "capability.PortRange",
'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits",
'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits",
'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits",
'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability",
'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc",
'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone",
'CLOUD.BILLINGUNIT': "cloud.BillingUnit",
'CLOUD.CLOUDREGION': "cloud.CloudRegion",
'CLOUD.CLOUDTAG': "cloud.CloudTag",
'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes",
'CLOUD.IMAGEREFERENCE': "cloud.ImageReference",
'CLOUD.INSTANCETYPE': "cloud.InstanceType",
'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig",
'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress",
'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment",
'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment",
'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule",
'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables",
'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment",
'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment",
'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo",
'CLOUD.VOLUMETYPE': "cloud.VolumeType",
'CMRF.CMRF': "cmrf.CmRf",
'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock",
'COMM.IPV4INTERFACE': "comm.IpV4Interface",
'COMM.IPV6INTERFACE': "comm.IpV6Interface",
'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary",
'COMPUTE.IPADDRESS': "compute.IpAddress",
'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule",
'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation",
'COMPUTE.SERVERCONFIG': "compute.ServerConfig",
'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus",
'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation",
'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive",
'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation",
'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive",
'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation",
'COND.ALARMSUMMARY': "cond.AlarmSummary",
'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage",
'CONNECTOR.COMMANDCONTROLMESSAGE': "connector.CommandControlMessage",
'CONNECTOR.COMMANDTERMINALSTREAM': "connector.CommandTerminalStream",
'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt",
'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage",
'CONNECTOR.FILECHECKSUM': "connector.FileChecksum",
'CONNECTOR.FILEMESSAGE': "connector.FileMessage",
'CONNECTOR.HTTPREQUEST': "connector.HttpRequest",
'CONNECTOR.SSHCONFIG': "connector.SshConfig",
'CONNECTOR.SSHMESSAGE': "connector.SshMessage",
'CONNECTOR.STARTSTREAM': "connector.StartStream",
'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice",
'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge",
'CONNECTOR.STREAMINPUT': "connector.StreamInput",
'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive",
'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage",
'CONNECTOR.URL': "connector.Url",
'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest",
'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage",
'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate",
'CONTENT.COMPLEXTYPE': "content.ComplexType",
'CONTENT.PARAMETER': "content.Parameter",
'CONTENT.TEXTPARAMETER': "content.TextParameter",
'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary",
'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary",
'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary",
'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty",
'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity",
'FABRIC.LLDPSETTINGS': "fabric.LldpSettings",
'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings",
'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier",
'FABRIC.QOSCLASS': "fabric.QosClass",
'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings",
'FABRIC.UDLDSETTINGS': "fabric.UdldSettings",
'FABRIC.VLANSETTINGS': "fabric.VlanSettings",
'FCPOOL.BLOCK': "fcpool.Block",
'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData",
'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact",
'FIRMWARE.CIFSSERVER': "firmware.CifsServer",
'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact",
'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta",
'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload",
'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact",
'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory",
'FIRMWARE.HTTPSERVER': "firmware.HttpServer",
'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType",
'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare",
'FIRMWARE.NFSSERVER': "firmware.NfsServer",
'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact",
'FORECAST.MODEL': "forecast.Model",
'HCL.CONSTRAINT': "hcl.Constraint",
'HCL.FIRMWARE': "hcl.Firmware",
'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile",
'HCL.PRODUCT': "hcl.Product",
'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary",
'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint",
'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings",
'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo",
'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference",
'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack",
'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry",
'HYPERFLEX.FILEPATH': "hyperflex.FilePath",
'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo",
'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt",
'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt",
'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt",
'HYPERFLEX.HXNETWORKADDRESSDT': "hyperflex.HxNetworkAddressDt",
'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt",
'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt",
'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt",
'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt",
'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt",
'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt",
'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt",
'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange",
'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone",
'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange",
'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo",
'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint",
'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk",
'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan",
'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan",
'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap",
'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo",
'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule",
'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo",
'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore",
'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair",
'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule",
'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus",
'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus",
'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo",
'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry",
'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles",
'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief",
'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint",
'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus",
'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo",
'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo",
'HYPERFLEX.SUMMARY': "hyperflex.Summary",
'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk",
'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile",
'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine",
'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo",
'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage",
'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange",
'I18N.MESSAGE': "i18n.Message",
'I18N.MESSAGEPARAM': "i18n.MessageParam",
'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo",
'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo",
'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps",
'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions",
'IAM.CLIENTMETA': "iam.ClientMeta",
'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties",
'IAM.FEATUREDEFINITION': "iam.FeatureDefinition",
'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles",
'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties",
'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters",
'IAM.PERMISSIONREFERENCE': "iam.PermissionReference",
'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles",
'IAM.RULE': "iam.Rule",
'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection",
'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes",
'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage",
'INFRA.HARDWAREINFO': "infra.HardwareInfo",
'INFRA.METADATA': "infra.MetaData",
'INVENTORY.INVENTORYMO': "inventory.InventoryMo",
'INVENTORY.UEMINFO': "inventory.UemInfo",
'IPPOOL.IPV4BLOCK': "ippool.IpV4Block",
'IPPOOL.IPV4CONFIG': "ippool.IpV4Config",
'IPPOOL.IPV6BLOCK': "ippool.IpV6Block",
'IPPOOL.IPV6CONFIG': "ippool.IpV6Config",
'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock",
'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo",
'KUBERNETES.ADDON': "kubernetes.Addon",
'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration",
'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo",
'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig",
'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration",
'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig",
'KUBERNETES.CONFIGURATION': "kubernetes.Configuration",
'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus",
'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus",
'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon",
'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig",
'KUBERNETES.ETHERNET': "kubernetes.Ethernet",
'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher",
'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig",
'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus",
'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails",
'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config",
'KUBERNETES.KEYVALUE': "kubernetes.KeyValue",
'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer",
'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec",
'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress",
'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel",
'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint",
'KUBERNETES.NODEINFO': "kubernetes.NodeInfo",
'KUBERNETES.NODESPEC': "kubernetes.NodeSpec",
'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus",
'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta",
'KUBERNETES.OVSBOND': "kubernetes.OvsBond",
'KUBERNETES.PODSTATUS': "kubernetes.PodStatus",
'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig",
'KUBERNETES.ROUTE': "kubernetes.Route",
'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus",
'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus",
'KUBERNETES.TAINT': "kubernetes.Taint",
'MACPOOL.BLOCK': "macpool.Block",
'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal",
'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity",
'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace",
'META.ACCESSPRIVILEGE': "meta.AccessPrivilege",
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
'META.IDENTITYDEFINITION': "meta.IdentityDefinition",
'META.PROPDEFINITION': "meta.PropDefinition",
'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition",
'MO.MOREF': "mo.MoRef",
'MO.TAG': "mo.Tag",
'MO.VERSIONCONTEXT': "mo.VersionContext",
'NIAAPI.DETAIL': "niaapi.Detail",
'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail",
'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo",
'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex",
'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform",
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus",
'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo",
'NIATELEMETRY.INTERFACE': "niatelemetry.Interface",
'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement",
'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail",
'NIATELEMETRY.LOGICALLINK': "niatelemetry.LogicalLink",
'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters",
'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni",
'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn",
'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp",
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus",
'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition",
'NOTIFICATION.SENDEMAIL': "notification.SendEmail",
'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer",
'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage",
'ONPREM.SCHEDULE': "onprem.Schedule",
'ONPREM.UPGRADENOTE': "onprem.UpgradeNote",
'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase",
'OPRS.KVPAIR': "oprs.Kvpair",
'OS.ANSWERS': "os.Answers",
'OS.GLOBALCONFIG': "os.GlobalConfig",
'OS.IPV4CONFIGURATION': "os.Ipv4Configuration",
'OS.IPV6CONFIGURATION': "os.Ipv6Configuration",
'OS.PHYSICALDISK': "os.PhysicalDisk",
'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse",
'OS.PLACEHOLDER': "os.PlaceHolder",
'OS.SERVERCONFIG': "os.ServerConfig",
'OS.VALIDATIONINFORMATION': "os.ValidationInformation",
'OS.VIRTUALDRIVE': "os.VirtualDrive",
'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse",
'OS.VMWAREPARAMETERS': "os.VmwareParameters",
'OS.WINDOWSPARAMETERS': "os.WindowsParameters",
'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName",
'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec",
'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec",
'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm",
'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName",
'POLICY.ACTIONPARAM': "policy.ActionParam",
'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier",
'POLICY.CONFIGCHANGE': "policy.ConfigChange",
'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext",
'POLICY.CONFIGCONTEXT': "policy.ConfigContext",
'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext",
'POLICY.QUALIFIER': "policy.Qualifier",
'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo",
'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule",
'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector",
'RESOURCE.SELECTOR': "resource.Selector",
'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources",
'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder",
'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters",
'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters",
'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics",
'SDCARD.DRIVERS': "sdcard.Drivers",
'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility",
'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem",
'SDCARD.PARTITION': "sdcard.Partition",
'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility",
'SDCARD.USERPARTITION': "sdcard.UserPartition",
'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType",
'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType",
'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger",
'SNMP.TRAP': "snmp.Trap",
'SNMP.USER': "snmp.User",
'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload",
'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer",
'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels",
'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer",
'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult",
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer",
'STORAGE.AUTOMATICDRIVEGROUP': "storage.AutomaticDriveGroup",
'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization",
'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity",
'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator",
'STORAGE.INITIATOR': "storage.Initiator",
'STORAGE.KEYSETTING': "storage.KeySetting",
'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting",
'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig",
'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup",
'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag",
'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan",
'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule",
'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability",
'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage",
'STORAGE.NETAPPPORT': "storage.NetAppPort",
'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency",
'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization",
'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization",
'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization",
'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization",
'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout",
'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization",
'STORAGE.R0DRIVE': "storage.R0Drive",
'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting",
'STORAGE.SPANDRIVES': "storage.SpanDrives",
'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus",
'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization",
'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration",
'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy",
'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization",
'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient",
'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient",
'TAM.ACTION': "tam.Action",
'TAM.APIDATASOURCE': "tam.ApiDataSource",
'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails",
'TAM.EOLSEVERITY': "tam.EolSeverity",
'TAM.IDENTIFIERS': "tam.Identifiers",
'TAM.MILESTONE': "tam.Milestone",
'TAM.PSIRTSEVERITY': "tam.PsirtSeverity",
'TAM.QUERYENTRY': "tam.QueryEntry",
'TAM.S3DATASOURCE': "tam.S3DataSource",
'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails",
'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource",
'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam",
'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam",
'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam",
'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage",
'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource",
'TERRAFORM.RUNSTATE': "terraform.Runstate",
'UCSD.CONNECTORPACK': "ucsd.ConnectorPack",
'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters",
'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage",
'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock",
'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo",
'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration",
'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration",
'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration",
'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration",
'VIRTUALIZATION.BONDSTATE': "virtualization.BondState",
'VIRTUALIZATION.CLOUDINITCONFIG': "virtualization.CloudInitConfig",
'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity",
'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation",
'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo",
'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus",
'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec",
'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration",
'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec",
'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration",
'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration",
'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration",
'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration",
'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo",
'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration",
'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo",
'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation",
'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface",
'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort",
'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo",
'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity",
'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig",
'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig",
'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk",
'VIRTUALIZATION.VMDISK': "virtualization.VmDisk",
'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk",
'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface",
'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo",
'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption",
'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo",
'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover",
'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange",
'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo",
'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo",
'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo",
'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo",
'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo",
'VMEDIA.MAPPING': "vmedia.Mapping",
'VNIC.ARFSSETTINGS': "vnic.ArfsSettings",
'VNIC.CDN': "vnic.Cdn",
'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings",
'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings",
'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings",
'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings",
'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings",
'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings",
'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings",
'VNIC.FLOGISETTINGS': "vnic.FlogiSettings",
'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile",
'VNIC.LUN': "vnic.Lun",
'VNIC.NVGRESETTINGS': "vnic.NvgreSettings",
'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings",
'VNIC.PLOGISETTINGS': "vnic.PlogiSettings",
'VNIC.ROCESETTINGS': "vnic.RoceSettings",
'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings",
'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings",
'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings",
'VNIC.USNICSETTINGS': "vnic.UsnicSettings",
'VNIC.VIFSTATUS': "vnic.VifStatus",
'VNIC.VLANSETTINGS': "vnic.VlanSettings",
'VNIC.VMQSETTINGS': "vnic.VmqSettings",
'VNIC.VSANSETTINGS': "vnic.VsanSettings",
'VNIC.VXLANSETTINGS': "vnic.VxlanSettings",
'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition",
'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType",
'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles",
'WORKFLOW.CLICOMMAND': "workflow.CliCommand",
'WORKFLOW.COMMENTS': "workflow.Comments",
'WORKFLOW.CONSTRAINTS': "workflow.Constraints",
'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem",
'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty",
'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType",
'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties",
'WORKFLOW.DECISIONCASE': "workflow.DecisionCase",
'WORKFLOW.DECISIONTASK': "workflow.DecisionTask",
'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue",
'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta",
'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList",
'WORKFLOW.ENUMENTRY': "workflow.EnumEntry",
'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt",
'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask",
'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp",
'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations",
'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp",
'WORKFLOW.FILETRANSFER': "workflow.FileTransfer",
'WORKFLOW.FORKTASK': "workflow.ForkTask",
'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext",
'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties",
'WORKFLOW.JOINTASK': "workflow.JoinTask",
'WORKFLOW.LOOPTASK': "workflow.LoopTask",
'WORKFLOW.MESSAGE': "workflow.Message",
'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem",
'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType",
'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty",
'WORKFLOW.PARAMETERSET': "workflow.ParameterSet",
'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem",
'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty",
'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType",
'WORKFLOW.PROPERTIES': "workflow.Properties",
'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler",
'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask",
'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask",
'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty",
'WORKFLOW.SSHCMD': "workflow.SshCmd",
'WORKFLOW.SSHCONFIG': "workflow.SshConfig",
'WORKFLOW.SSHSESSION': "workflow.SshSession",
'WORKFLOW.STARTTASK': "workflow.StartTask",
'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask",
'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask",
'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext",
'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType",
'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty",
'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints",
'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo",
'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter",
'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError",
'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation",
'WORKFLOW.WAITTASK': "workflow.WaitTask",
'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt",
'WORKFLOW.WEBAPI': "workflow.WebApi",
'WORKFLOW.WORKERTASK': "workflow.WorkerTask",
'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx",
'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties",
'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties",
'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties",
'WORKFLOW.XMLAPI': "workflow.XmlApi",
'X509.CERTIFICATE': "x509.Certificate",
},
('object_type',): {
'ACCESS.ADDRESSTYPE': "access.AddressType",
'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig",
'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings",
'ADAPTER.ETHSETTINGS': "adapter.EthSettings",
'ADAPTER.FCSETTINGS': "adapter.FcSettings",
'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings",
'APPLIANCE.APISTATUS': "appliance.ApiStatus",
'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase",
'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair",
'APPLIANCE.STATUSCHECK': "appliance.StatusCheck",
'ASSET.ADDRESSINFORMATION': "asset.AddressInformation",
'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential",
'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential",
'ASSET.CLOUDCONNECTION': "asset.CloudConnection",
'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage",
'ASSET.CONTRACTINFORMATION': "asset.ContractInformation",
'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation",
'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo",
'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo",
'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation",
'ASSET.DEVICEINFORMATION': "asset.DeviceInformation",
'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics",
'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction",
'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate",
'ASSET.HTTPCONNECTION': "asset.HttpConnection",
'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection",
'ASSET.METERINGTYPE': "asset.MeteringType",
'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential",
'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential",
'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential",
'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential",
'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions",
'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService",
'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature",
'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential",
'ASSET.PRODUCTINFORMATION': "asset.ProductInformation",
'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential",
'ASSET.SSHCONNECTION': "asset.SshConnection",
'ASSET.SUDIINFO': "asset.SudiInfo",
'ASSET.TARGETKEY': "asset.TargetKey",
'ASSET.TARGETSIGNATURE': "asset.TargetSignature",
'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails",
'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions",
'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions",
'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential",
'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions",
'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService",
'ASSET.VMHOST': "asset.VmHost",
'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions",
'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions",
'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions",
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions",
'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions",
'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions",
'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService",
'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions",
'BOOT.BOOTLOADER': "boot.Bootloader",
'BOOT.ISCSI': "boot.Iscsi",
'BOOT.LOCALCDD': "boot.LocalCdd",
'BOOT.LOCALDISK': "boot.LocalDisk",
'BOOT.NVME': "boot.Nvme",
'BOOT.PCHSTORAGE': "boot.PchStorage",
'BOOT.PXE': "boot.Pxe",
'BOOT.SAN': "boot.San",
'BOOT.SDCARD': "boot.SdCard",
'BOOT.UEFISHELL': "boot.UefiShell",
'BOOT.USB': "boot.Usb",
'BOOT.VIRTUALMEDIA': "boot.VirtualMedia",
'BULK.HTTPHEADER': "bulk.HttpHeader",
'BULK.RESTRESULT': "bulk.RestResult",
'BULK.RESTSUBREQUEST': "bulk.RestSubRequest",
'CAPABILITY.PORTRANGE': "capability.PortRange",
'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits",
'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits",
'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits",
'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability",
'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc",
'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone",
'CLOUD.BILLINGUNIT': "cloud.BillingUnit",
'CLOUD.CLOUDREGION': "cloud.CloudRegion",
'CLOUD.CLOUDTAG': "cloud.CloudTag",
'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes",
'CLOUD.IMAGEREFERENCE': "cloud.ImageReference",
'CLOUD.INSTANCETYPE': "cloud.InstanceType",
'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig",
'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress",
'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment",
'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment",
'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule",
'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables",
'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment",
'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment",
'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo",
'CLOUD.VOLUMETYPE': "cloud.VolumeType",
'CMRF.CMRF': "cmrf.CmRf",
'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock",
'COMM.IPV4INTERFACE': "comm.IpV4Interface",
'COMM.IPV6INTERFACE': "comm.IpV6Interface",
'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary",
'COMPUTE.IPADDRESS': "compute.IpAddress",
'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule",
'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation",
'COMPUTE.SERVERCONFIG': "compute.ServerConfig",
'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus",
'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation",
'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive",
'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation",
'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive",
'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation",
'COND.ALARMSUMMARY': "cond.AlarmSummary",
'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage",
'CONNECTOR.COMMANDCONTROLMESSAGE': "connector.CommandControlMessage",
'CONNECTOR.COMMANDTERMINALSTREAM': "connector.CommandTerminalStream",
'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt",
'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage",
'CONNECTOR.FILECHECKSUM': "connector.FileChecksum",
'CONNECTOR.FILEMESSAGE': "connector.FileMessage",
'CONNECTOR.HTTPREQUEST': "connector.HttpRequest",
'CONNECTOR.SSHCONFIG': "connector.SshConfig",
'CONNECTOR.SSHMESSAGE': "connector.SshMessage",
'CONNECTOR.STARTSTREAM': "connector.StartStream",
'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice",
'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge",
'CONNECTOR.STREAMINPUT': "connector.StreamInput",
'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive",
'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage",
'CONNECTOR.URL': "connector.Url",
'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest",
'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage",
'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate",
'CONTENT.COMPLEXTYPE': "content.ComplexType",
'CONTENT.PARAMETER': "content.Parameter",
'CONTENT.TEXTPARAMETER': "content.TextParameter",
'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary",
'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary",
'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary",
'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty",
'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity",
'FABRIC.LLDPSETTINGS': "fabric.LldpSettings",
'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings",
'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier",
'FABRIC.QOSCLASS': "fabric.QosClass",
'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings",
'FABRIC.UDLDSETTINGS': "fabric.UdldSettings",
'FABRIC.VLANSETTINGS': "fabric.VlanSettings",
'FCPOOL.BLOCK': "fcpool.Block",
'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData",
'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact",
'FIRMWARE.CIFSSERVER': "firmware.CifsServer",
'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact",
'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta",
'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload",
'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact",
'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory",
'FIRMWARE.HTTPSERVER': "firmware.HttpServer",
'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType",
'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare",
'FIRMWARE.NFSSERVER': "firmware.NfsServer",
'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact",
'FORECAST.MODEL': "forecast.Model",
'HCL.CONSTRAINT': "hcl.Constraint",
'HCL.FIRMWARE': "hcl.Firmware",
'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile",
'HCL.PRODUCT': "hcl.Product",
'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary",
'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint",
'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings",
'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo",
'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference",
'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack",
'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry",
'HYPERFLEX.FILEPATH': "hyperflex.FilePath",
'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo",
'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt",
'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt",
'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt",
'HYPERFLEX.HXNETWORKADDRESSDT': "hyperflex.HxNetworkAddressDt",
'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt",
'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt",
'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt",
'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt",
'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt",
'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt",
'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt",
'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange",
'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone",
'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange",
'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo",
'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint",
'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk",
'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan",
'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan",
'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap",
'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo",
'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule",
'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo",
'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore",
'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair",
'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule",
'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus",
'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus",
'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo",
'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry",
'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles",
'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief",
'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint",
'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus",
'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo",
'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo",
'HYPERFLEX.SUMMARY': "hyperflex.Summary",
'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk",
'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile",
'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine",
'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo",
'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage",
'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange",
'I18N.MESSAGE': "i18n.Message",
'I18N.MESSAGEPARAM': "i18n.MessageParam",
'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo",
'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo",
'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps",
'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions",
'IAM.CLIENTMETA': "iam.ClientMeta",
'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties",
'IAM.FEATUREDEFINITION': "iam.FeatureDefinition",
'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles",
'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties",
'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters",
'IAM.PERMISSIONREFERENCE': "iam.PermissionReference",
'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles",
'IAM.RULE': "iam.Rule",
'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection",
'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes",
'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage",
'INFRA.HARDWAREINFO': "infra.HardwareInfo",
'INFRA.METADATA': "infra.MetaData",
'INVENTORY.INVENTORYMO': "inventory.InventoryMo",
'INVENTORY.UEMINFO': "inventory.UemInfo",
'IPPOOL.IPV4BLOCK': "ippool.IpV4Block",
'IPPOOL.IPV4CONFIG': "ippool.IpV4Config",
'IPPOOL.IPV6BLOCK': "ippool.IpV6Block",
'IPPOOL.IPV6CONFIG': "ippool.IpV6Config",
'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock",
'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo",
'KUBERNETES.ADDON': "kubernetes.Addon",
'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration",
'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo",
'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig",
'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration",
'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig",
'KUBERNETES.CONFIGURATION': "kubernetes.Configuration",
'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus",
'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus",
'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon",
'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig",
'KUBERNETES.ETHERNET': "kubernetes.Ethernet",
'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher",
'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig",
'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus",
'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails",
'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config",
'KUBERNETES.KEYVALUE': "kubernetes.KeyValue",
'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer",
'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec",
'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress",
'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel",
'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint",
'KUBERNETES.NODEINFO': "kubernetes.NodeInfo",
'KUBERNETES.NODESPEC': "kubernetes.NodeSpec",
'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus",
'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta",
'KUBERNETES.OVSBOND': "kubernetes.OvsBond",
'KUBERNETES.PODSTATUS': "kubernetes.PodStatus",
'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig",
'KUBERNETES.ROUTE': "kubernetes.Route",
'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus",
'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus",
'KUBERNETES.TAINT': "kubernetes.Taint",
'MACPOOL.BLOCK': "macpool.Block",
'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal",
'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity",
'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace",
'META.ACCESSPRIVILEGE': "meta.AccessPrivilege",
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
'META.IDENTITYDEFINITION': "meta.IdentityDefinition",
'META.PROPDEFINITION': "meta.PropDefinition",
'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition",
'MO.MOREF': "mo.MoRef",
'MO.TAG': "mo.Tag",
'MO.VERSIONCONTEXT': "mo.VersionContext",
'NIAAPI.DETAIL': "niaapi.Detail",
'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail",
'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo",
'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex",
'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform",
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus",
'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo",
'NIATELEMETRY.INTERFACE': "niatelemetry.Interface",
'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement",
'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail",
'NIATELEMETRY.LOGICALLINK': "niatelemetry.LogicalLink",
'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters",
'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni",
'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn",
'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp",
'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense",
'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus",
'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition",
'NOTIFICATION.SENDEMAIL': "notification.SendEmail",
'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer",
'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage",
'ONPREM.SCHEDULE': "onprem.Schedule",
'ONPREM.UPGRADENOTE': "onprem.UpgradeNote",
'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase",
'OPRS.KVPAIR': "oprs.Kvpair",
'OS.ANSWERS': "os.Answers",
'OS.GLOBALCONFIG': "os.GlobalConfig",
'OS.IPV4CONFIGURATION': "os.Ipv4Configuration",
'OS.IPV6CONFIGURATION': "os.Ipv6Configuration",
'OS.PHYSICALDISK': "os.PhysicalDisk",
'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse",
'OS.PLACEHOLDER': "os.PlaceHolder",
'OS.SERVERCONFIG': "os.ServerConfig",
'OS.VALIDATIONINFORMATION': "os.ValidationInformation",
'OS.VIRTUALDRIVE': "os.VirtualDrive",
'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse",
'OS.VMWAREPARAMETERS': "os.VmwareParameters",
'OS.WINDOWSPARAMETERS': "os.WindowsParameters",
'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName",
'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec",
'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec",
'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm",
'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName",
'POLICY.ACTIONPARAM': "policy.ActionParam",
'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier",
'POLICY.CONFIGCHANGE': "policy.ConfigChange",
'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext",
'POLICY.CONFIGCONTEXT': "policy.ConfigContext",
'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext",
'POLICY.QUALIFIER': "policy.Qualifier",
'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo",
'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule",
'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector",
'RESOURCE.SELECTOR': "resource.Selector",
'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources",
'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder",
'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters",
'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters",
'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics",
'SDCARD.DRIVERS': "sdcard.Drivers",
'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility",
'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem",
'SDCARD.PARTITION': "sdcard.Partition",
'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility",
'SDCARD.USERPARTITION': "sdcard.UserPartition",
'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType",
'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType",
'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger",
'SNMP.TRAP': "snmp.Trap",
'SNMP.USER': "snmp.User",
'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload",
'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer",
'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels",
'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer",
'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult",
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer",
'STORAGE.AUTOMATICDRIVEGROUP': "storage.AutomaticDriveGroup",
'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization",
'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity",
'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator",
'STORAGE.INITIATOR': "storage.Initiator",
'STORAGE.KEYSETTING': "storage.KeySetting",
'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting",
'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig",
'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup",
'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag",
'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan",
'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule",
'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability",
'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage",
'STORAGE.NETAPPPORT': "storage.NetAppPort",
'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency",
'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization",
'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization",
'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization",
'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization",
'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout",
'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization",
'STORAGE.R0DRIVE': "storage.R0Drive",
'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting",
'STORAGE.SPANDRIVES': "storage.SpanDrives",
'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus",
'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization",
'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration",
'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy",
'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization",
'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient",
'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient",
'TAM.ACTION': "tam.Action",
'TAM.APIDATASOURCE': "tam.ApiDataSource",
'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails",
'TAM.EOLSEVERITY': "tam.EolSeverity",
'TAM.IDENTIFIERS': "tam.Identifiers",
'TAM.MILESTONE': "tam.Milestone",
'TAM.PSIRTSEVERITY': "tam.PsirtSeverity",
'TAM.QUERYENTRY': "tam.QueryEntry",
'TAM.S3DATASOURCE': "tam.S3DataSource",
'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails",
'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource",
'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam",
'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam",
'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam",
'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage",
'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource",
'TERRAFORM.RUNSTATE': "terraform.Runstate",
'UCSD.CONNECTORPACK': "ucsd.ConnectorPack",
'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters",
'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage",
'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock",
'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo",
'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration",
'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration",
'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration",
'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration",
'VIRTUALIZATION.BONDSTATE': "virtualization.BondState",
'VIRTUALIZATION.CLOUDINITCONFIG': "virtualization.CloudInitConfig",
'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity",
'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation",
'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo",
'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus",
'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec",
'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration",
'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec",
'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration",
'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration",
'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration",
'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration",
'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo",
'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration",
'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo",
'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation",
'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface",
'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort",
'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo",
'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity",
'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig",
'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig",
'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk",
'VIRTUALIZATION.VMDISK': "virtualization.VmDisk",
'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk",
'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface",
'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo",
'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption",
'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo",
'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover",
'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange",
'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo",
'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo",
'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo",
'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo",
'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo",
'VMEDIA.MAPPING': "vmedia.Mapping",
'VNIC.ARFSSETTINGS': "vnic.ArfsSettings",
'VNIC.CDN': "vnic.Cdn",
'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings",
'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings",
'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings",
'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings",
'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings",
'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings",
'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings",
'VNIC.FLOGISETTINGS': "vnic.FlogiSettings",
'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile",
'VNIC.LUN': "vnic.Lun",
'VNIC.NVGRESETTINGS': "vnic.NvgreSettings",
'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings",
'VNIC.PLOGISETTINGS': "vnic.PlogiSettings",
'VNIC.ROCESETTINGS': "vnic.RoceSettings",
'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings",
'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings",
'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings",
'VNIC.USNICSETTINGS': "vnic.UsnicSettings",
'VNIC.VIFSTATUS': "vnic.VifStatus",
'VNIC.VLANSETTINGS': "vnic.VlanSettings",
'VNIC.VMQSETTINGS': "vnic.VmqSettings",
'VNIC.VSANSETTINGS': "vnic.VsanSettings",
'VNIC.VXLANSETTINGS': "vnic.VxlanSettings",
'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition",
'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType",
'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles",
'WORKFLOW.CLICOMMAND': "workflow.CliCommand",
'WORKFLOW.COMMENTS': "workflow.Comments",
'WORKFLOW.CONSTRAINTS': "workflow.Constraints",
'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem",
'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty",
'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType",
'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties",
'WORKFLOW.DECISIONCASE': "workflow.DecisionCase",
'WORKFLOW.DECISIONTASK': "workflow.DecisionTask",
'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue",
'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta",
'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList",
'WORKFLOW.ENUMENTRY': "workflow.EnumEntry",
'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt",
'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask",
'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp",
'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations",
'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp",
'WORKFLOW.FILETRANSFER': "workflow.FileTransfer",
'WORKFLOW.FORKTASK': "workflow.ForkTask",
'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext",
'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties",
'WORKFLOW.JOINTASK': "workflow.JoinTask",
'WORKFLOW.LOOPTASK': "workflow.LoopTask",
'WORKFLOW.MESSAGE': "workflow.Message",
'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem",
'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType",
'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty",
'WORKFLOW.PARAMETERSET': "workflow.ParameterSet",
'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem",
'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty",
'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType",
'WORKFLOW.PROPERTIES': "workflow.Properties",
'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler",
'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask",
'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask",
'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty",
'WORKFLOW.SSHCMD': "workflow.SshCmd",
'WORKFLOW.SSHCONFIG': "workflow.SshConfig",
'WORKFLOW.SSHSESSION': "workflow.SshSession",
'WORKFLOW.STARTTASK': "workflow.StartTask",
'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask",
'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask",
'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext",
'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType",
'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty",
'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints",
'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo",
'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter",
'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError",
'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation",
'WORKFLOW.WAITTASK': "workflow.WaitTask",
'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt",
'WORKFLOW.WEBAPI': "workflow.WebApi",
'WORKFLOW.WORKERTASK': "workflow.WorkerTask",
'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx",
'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties",
'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties",
'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties",
'WORKFLOW.XMLAPI': "workflow.XmlApi",
'X509.CERTIFICATE': "x509.Certificate",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self; it must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; it must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'softwarerepository.ApplianceUpload': SoftwarerepositoryApplianceUpload,
'softwarerepository.CifsServer': SoftwarerepositoryCifsServer,
'softwarerepository.HttpServer': SoftwarerepositoryHttpServer,
'softwarerepository.LocalMachine': SoftwarerepositoryLocalMachine,
'softwarerepository.NfsServer': SoftwarerepositoryNfsServer,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, class_id, object_type, *args, **kwargs): # noqa: E501
"""SoftwarerepositoryFileServer - a model defined in OpenAPI
Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data. The enum values provide the list of concrete types that can be instantiated from this abstract type.
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property. The enum values provide the list of concrete types that can be instantiated from this abstract type.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
MoBaseComplexType,
],
'oneOf': [
],
}
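# Illustrative sketch (not part of the generated SDK): how the discriminator map returned
# by `discriminator()` above is typically consumed during deserialization. The payload's
# ClassId value selects the concrete model class; the names below are hypothetical:
#
#   concrete_cls = discriminator_map['class_id'][payload['ClassId']]
#   # e.g. a payload with ClassId 'softwarerepository.NfsServer' resolves to
#   # SoftwarerepositoryNfsServer, which is then instantiated with the payload fields.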
|
"""
Test that the build parameters for external modules with dependencies are computed correctly.
"""
load("@bazel_skylib//lib:unittest.bzl", "analysistest", "asserts")
load("//:providers.bzl", "HaxeLibraryInfo")
load("//:utils.bzl", "determine_source_root")
def _haxe_executable_dependency_test_impl(ctx):
env = analysistest.begin(ctx)
target_under_test = analysistest.target_under_test(env)
hxml = target_under_test[HaxeLibraryInfo].hxml
asserts.equals(env, 2, len(hxml["source_files"]))
asserts.equals(env, "module-bin", hxml["output_dir"])
asserts.equals(env, "bazel-out/x64_windows-fastbuild/bin/external/test-module-dist/module-bin", target_under_test[HaxeLibraryInfo].lib.path)
asserts.equals(env, "external/test-module-a/", determine_source_root(hxml["source_files"][0]))
# The directory portion 'external/dist-test/' in this test comes from the fact that the test is
# loaded via a dependent module (see the target_under_test parameter below). When the test is run
# from the test directory itself, the value is correct and does not include 'external/dist-test/'.
asserts.equals(env, "bazel-out/x64_windows-fastbuild/bin/external/dist-test/module-bin/dist-test", target_under_test[HaxeLibraryInfo].hxml["build_file"])
return analysistest.end(env)
haxe_executable_dependency_test = analysistest.make(_haxe_executable_dependency_test_impl)
def test_haxe_executable_dependency():
haxe_executable_dependency_test(
name = "haxe_executable_dependency_test",
target_under_test = "@test-module-dist//:module-bin",
size = "small",
)
|
from typing import List
from dirty_models import ArrayField, BooleanField, ModelField, StringField, StringIdField
from . import BaseCollectionManager, BaseModelManager
from ..models import BaseModel, DateTimeField
from ..results import Result
class Participant(BaseModel):
is_admin = BooleanField(default=False)
"""
Whether the participant is a group administrator or not.
"""
is_super_admin = BooleanField()
"""
Whether the participant is a group super administrator or not. ¿?
"""
class GroupMetadata(BaseModel):
"""
Group metadata model.
"""
announce = StringIdField()
"""
¿?
"""
creation = DateTimeField()
"""
Group creation timestamp.
"""
desc = StringField()
"""
Group description.
"""
desc_owner = StringIdField()
"""
Who last changed the group description.
"""
desc_time = DateTimeField()
"""
When the group description was last changed.
"""
owner = StringIdField()
"""
Who created the group.
"""
participants = ArrayField(field_type=ModelField(model_class=Participant))
"""
List of participants.
"""
restrict = StringIdField()
"""
¿?
"""
group_invite_link = StringIdField()
"""
Group link to invite people.
"""
invite_code = StringIdField()
"""
Group code to invite people.
"""
class ParticipantManager(BaseModelManager[Participant]):
"""
Participant manager.
"""
MODEL_CLASS = Participant
class ParticipantCollectionManager(BaseCollectionManager[ParticipantManager]):
"""
Participant collection manager. It allows managing group participants.
"""
MODEL_MANAGER_CLASS = ParticipantManager
def add_participants(self, contact_ids: List[str]) -> Result[None]:
return self._execute_command('addParticipants', {'contactIds': contact_ids})
def can_add(self, contact_id: str) -> Result[bool]:
return self._execute_command('canAdd', {'contactId': contact_id})
def remove_participants(self, contact_ids: List[str]) -> Result[None]:
return self._execute_command('removeParticipants', {'contactIds': contact_ids})
def can_remove(self, contact_id: str) -> Result[bool]:
return self._execute_command('canRemove', {'contactId': contact_id})
def promote_participants(self, contact_ids: List[str]) -> Result[None]:
return self._execute_command('promoteParticipants', {'contactIds': contact_ids})
def can_promote(self, contact_id: str) -> Result[bool]:
return self._execute_command('canPromote', {'contactId': contact_id})
def demote_participants(self, contact_ids: List[str]) -> Result[None]:
return self._execute_command('demoteParticipants', {'contactIds': contact_ids})
def can_demote(self, contact_id: str) -> Result[bool]:
return self._execute_command('canDemote', {'contactId': contact_id})
class GroupMetadataManager(BaseModelManager[GroupMetadata]):
"""
Group metadata manager. It allows managing groups, beyond what a plain chat offers.
.. attribute:: participants
:class:`~whalesong.managers.group_metadata.ParticipantCollectionManager`
Group's participants collection manager.
"""
MODEL_CLASS = GroupMetadata
def __init__(self, driver, manager_path=''):
super(GroupMetadataManager, self).__init__(driver=driver, manager_path=manager_path)
self.add_submanager('participants', ParticipantCollectionManager(
driver=self._driver,
manager_path=self._build_command('participants')
))
def group_invite_code(self) -> Result[None]:
return self._execute_command('groupInviteCode')
def revoke_group_invite(self) -> Result[None]:
return self._execute_command('revokeGroupInvite')
class GroupMetadataCollectionManager(BaseCollectionManager[GroupMetadataManager]):
MODEL_MANAGER_CLASS = GroupMetadataManager
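# Minimal usage sketch (illustrative only; assumes a connected whalesong driver -- the
# `driver` object, the ids, and the collection accessor `get_item_by_id` below are
# hypothetical). Each manager method wraps _execute_command and returns an awaitable Result:
#
#   manager = GroupMetadataCollectionManager(driver=driver, manager_path='groupMetadata')
#   group = manager.get_item_by_id('123456789-987654321@g.us')
#   await group.participants.add_participants(['34666666666@c.us'])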
|
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class QueueProperty(Base):
"""The QueueProperty class encapsulates a required queueProperty node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the QueueProperty property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'queueProperty'
def __init__(self, parent):
super(QueueProperty, self).__init__(parent)
@property
def MinimumDataRateGuaranteed(self):
"""If true, indicates that a minimum data rate is guaranteed.
Returns:
bool
"""
return self._get_attribute('minimumDataRateGuaranteed')
@MinimumDataRateGuaranteed.setter
def MinimumDataRateGuaranteed(self, value):
self._set_attribute('minimumDataRateGuaranteed', value)
# Note: the underlying SDM attribute is named 'none'; the Python property is exposed as
# `None_` because `None` is a reserved word in Python and cannot be used as a method name.
@property
def None_(self):
"""If true, indicates that no property is defined for the queue.
Returns:
bool
"""
return self._get_attribute('none')
@None_.setter
def None_(self, value):
self._set_attribute('none', value)
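# Minimal usage sketch (illustrative only; assumes an ixnetwork_restpy session where a
# parent node exposing QueueProperty has already been obtained -- the `parent` object
# below is hypothetical). Reads and writes delegate to _get_attribute/_set_attribute:
#
#   qp = QueueProperty(parent)
#   qp.MinimumDataRateGuaranteed = True
#   print(qp.MinimumDataRateGuaranteed)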
|
import os
from . import pre, csr
import imageio
from tqdm import tqdm
import numpy as np
from skimage import morphology
import pandas as pd
from .image_stats import image_summary
from skimage.feature import shape_index
from concurrent.futures import ThreadPoolExecutor, as_completed
import multiprocessing as mp
CPU_COUNT = int(os.environ.get('CPU_COUNT', mp.cpu_count()))
def _get_scale(image, md_path_or_scale):
"""Get a valid scale from an image and a metadata path or scale.
Parameters
----------
image : np.ndarray
The input image.
md_path_or_scale : float or image filename
The path to the file containing the metadata, or the scale.
Returns
-------
scale : float
"""
scale = None
try:
scale = float(md_path_or_scale)
except (TypeError, ValueError):  # md_path_or_scale may be a metadata path string or None
pass
if md_path_or_scale is not None and scale is None:
md_path = md_path_or_scale.split(sep='/')
meta = image.meta
for key in md_path:
meta = meta[key]
scale = float(meta)
else:
if scale is None:
scale = 1 # measurements will be in pixel units
return scale
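# Worked example for the metadata-path branch above (hypothetical metadata layout): with
# md_path_or_scale = 'Scan/PixelHeight' and image.meta == {'Scan': {'PixelHeight': '2.5e-08'}},
# the loop drills down through the nested dicts and the function returns 2.5e-08.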
def process_single_image(filename, image_format, scale_metadata_path,
threshold_radius, smooth_radius,
brightness_offset, crop_radius, smooth_method):
image = imageio.imread(filename, format=image_format)
scale = _get_scale(image, scale_metadata_path)
if crop_radius > 0:
c = crop_radius
image = image[c:-c, c:-c]
pixel_threshold_radius = int(np.ceil(threshold_radius / scale))
pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
thresholded = pre.threshold(image, sigma=pixel_smoothing_radius,
radius=pixel_threshold_radius,
offset=brightness_offset,
smooth_method=smooth_method)
quality = shape_index(image, sigma=pixel_smoothing_radius,
mode='reflect')
skeleton = morphology.skeletonize(thresholded) * quality
framedata = csr.summarise(skeleton, spacing=scale)
framedata['squiggle'] = np.log2(framedata['branch-distance'] /
framedata['euclidean-distance'])
framedata['scale'] = scale
framedata.rename(columns={'mean pixel value': 'mean shape index'},
inplace=True)
framedata['filename'] = filename
return image, thresholded, skeleton, framedata
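# Worked example of the unit conversion above (hypothetical numbers): with scale = 1e-08
# (metres per pixel) and threshold_radius = 5e-08 m, pixel_threshold_radius =
# ceil(5e-08 / 1e-08) = 5 pixels, and with smooth_radius = 0.1 the smoothing sigma
# becomes 0.1 * 5 = 0.5 pixels.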
def process_images(filenames, image_format, threshold_radius,
smooth_radius, brightness_offset, scale_metadata_path,
crop_radius=0, smooth_method='Gaussian',
num_threads=CPU_COUNT):
"""Full pipeline from images to skeleton stats with local median threshold.
Parameters
----------
filenames : list of string
The list of input filenames.
image_format : string
The format of the files. 'auto' is automatically determined by the
imageio library. See imageio documentation for valid image formats.
threshold_radius : float
The radius for median thresholding.
smooth_radius : float in [0, 1]
The value of sigma with which to Gaussian-smooth the image,
**relative to `threshold_radius`**.
brightness_offset : float
The standard brightness value with which to threshold is the local
median, `m(x, y)`. Use this value to offset from there: the threshold
used will be `m(x, y) + brightness_offset`.
scale_metadata_path : string
The path in the image dictionary to find the metadata on pixel scale,
separated by forward slashes ('/').
crop_radius : int, optional
Crop `crop_radius` pixels from each margin of the image before
processing.
smooth_method : {'Gaussian', 'TV', 'NL'}, optional
Which method to use for smoothing.
num_threads : int, optional
How many threads to use for computation. This should generally be
set to the number of CPU cores available to you.
Returns
-------
results : generator
The pipeline yields individual image results in the form of a tuple
of ``(filename, image, thresholded_image, skeleton, data_frame)``.
Finally, after all the images have been processed, the pipeline yields
a DataFrame containing all the collated branch-level results.
"""
image_format = None if image_format == 'auto' else image_format
results = []
image_results = []
with ThreadPoolExecutor(max_workers=num_threads) as ex:
future_data = {ex.submit(process_single_image, filename,
image_format, scale_metadata_path,
threshold_radius, smooth_radius,
brightness_offset, crop_radius,
smooth_method): filename
for filename in filenames}
for completed_data in tqdm(as_completed(future_data)):
image, thresholded, skeleton, framedata = completed_data.result()
filename = future_data[completed_data]
results.append(framedata)
image_stats = image_summary(skeleton,
spacing=framedata['scale'][0])
image_stats['filename'] = filename
image_stats['branch density'] = (framedata.shape[0] /
image_stats['area'])
j2j = framedata[framedata['branch-type'] == 2]
image_stats['mean J2J branch distance'] = (
j2j['branch-distance'].mean())
image_results.append(image_stats)
yield filename, image, thresholded, skeleton, framedata
yield pd.concat(results), pd.concat(image_results)
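# Minimal usage sketch (illustrative only; filenames and parameter values are hypothetical).
# The generator yields one tuple per image, then a final pair of collated DataFrames:
#
#   pipeline = process_images(['image0.tif', 'image1.tif'], 'auto',
#                             threshold_radius=5e-08, smooth_radius=0.1,
#                             brightness_offset=0.075,
#                             scale_metadata_path='Scan/PixelHeight')
#   *per_image_results, (branch_data, image_stats) = list(pipeline)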
|
"""
consolidate_sources.py
Consolidates the various report-description spreadsheets into a single set of
files that is easier for a BI tool to read.
This script works around a number of issues in the source files.
By Alex Cordeiro
"""
import pandas as pd, os
print('Enter the source directory:')
source_path = input()
print('Enter the target directory (press enter to reuse the source directory):')
target_path = input()
if target_path == "":
    target_path = source_path
files = os.listdir(source_path)
print(source_path)
df_reports = pd.DataFrame()
df_rep_kpi = pd.DataFrame()
df_datasources = pd.DataFrame()
df_rep_fields = pd.DataFrame()
for file in files:
full_path = os.path.join(source_path, file)
if os.path.isfile(full_path) and file.endswith('.xlsx') and not file.startswith('consolidate'):
print ("Processando arquivo '{}'".format(file))
df_reports_temp = pd.read_excel (full_path, sheet_name='Relatórios')
df_reports_temp = df_reports_temp.rename(columns={"Relatorio":"report",
"Ferramenta":"current_bi_tool",
"area resp":"resp_department",
"Resp Relatório": "resp_person",
"Criticidade": "criticality",
"Envio por e-mail?":"email_sent",
"Hora atualização - Ideal":"update_time_ideal",
"Hora at. Desejada": "update_time_desired",
"Hora envio - ANS": "update_time_limit",
"Publico": "consumers",
"Quantidade de usuários":"consume_user_quantity",
"Frequencia de uso": "consume_frequency"})
df_datasources_temp = pd.read_excel (full_path, sheet_name='Fontes de dados')
df_datasources_temp = df_datasources_temp.rename(columns={"Fonte de dados":"datasource",
"Sistema Origem":"sourcesystem",
"Descrição fonte":"datasource_desc",
"Tipo":"type",
"Servidor/site":"server_url",
"Tabela/caminho":"table_path",
"Tabela/caminho/ SQL CRC":"table_path_crc",
"Como é carregado?":"how_is_loaded",
"Periodicidade":"frequency",
"Visibilidade":"visibility"})
df_rep_fields_temp = pd.read_excel (full_path, sheet_name='Fontes de dados x Campos')
df_rep_fields_temp = df_rep_fields_temp.rename(columns={"Fontes":"datasource",
"Campo":"field",
"Descrição":"description",
"Chave?":"primary_key",
"Tipo":"type",
"Tamanho":"size",
"Decimal":"decimal",
"Sensível?":"is_sensible"})
# unpivot the per-datasource columns into rows (wide -> long)
df_rep_kpi_temp = pd.read_excel (full_path, sheet_name='Relatórios x Indicadores')
df_rep_kpi_temp = df_rep_kpi_temp.rename(columns={
r'Relatorio': 'report_name',
r'Nome Indicador/ Metrica': 'kpi_name',
r'Descrição': 'kpi_desc',
r'Responsavel por Preencher': 'kpi_resp_filling',
r'Area responsável': 'kpi_resp_area',
r'Responsável negócio': 'kpi_resp_employee'})
df_rep_kpi_temp = df_rep_kpi_temp.melt(id_vars=['report_name',
'kpi_name',
'kpi_desc',
'kpi_resp_filling',
'kpi_resp_area',
'kpi_resp_employee'],
var_name="Datasource",
value_name="Value")
# drop rows with an empty value, then rows whose value contains an N/n (e.g. "No")
df_rep_kpi_temp = df_rep_kpi_temp.drop(df_rep_kpi_temp[df_rep_kpi_temp.Value.isnull()].index)
df_rep_kpi_temp = df_rep_kpi_temp.drop(df_rep_kpi_temp[df_rep_kpi_temp.Value.str.contains('N', case=False)].index)
df_reports = pd.concat([df_reports, df_reports_temp])
df_rep_kpi = pd.concat([df_rep_kpi , df_rep_kpi_temp])
df_datasources = pd.concat([df_datasources, df_datasources_temp])
df_rep_fields = pd.concat([df_rep_fields, df_rep_fields_temp])
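# keep only the first six digits at the start of the user-count value;
# entries that don't begin with six digits become NaN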
df_reports["consume_user_quantity"] = df_reports["consume_user_quantity"].str.extract(r'^(\d{6})', expand=False)
df_reports.to_excel(os.path.join(target_path, 'consolidate_reports.xlsx'), index=False)
df_rep_kpi.to_excel(os.path.join(target_path, 'consolidate_rep_kpi.xlsx'), index=False)
df_datasources.to_excel(os.path.join(target_path, 'consolidate_datasource.xlsx'), index=False)
df_rep_fields.to_excel(os.path.join(target_path, 'consolidate_rep_fields.xlsx'), index=False)
print("Finalizado com sucesso!")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-29 18:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Sharing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('public', models.BooleanField(default=True)),
('sharing_id', models.PositiveIntegerField()),
('sharing_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType')),
],
),
]
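# For orientation, a sketch of the model this initial migration corresponds to,
# assuming the app pairs the (sharing_type, sharing_id) columns with Django's
# contenttypes GenericForeignKey (the helper is inferred, not part of the
# migration itself):
#
#     from django.contrib.contenttypes.fields import GenericForeignKey
#     from django.contrib.contenttypes.models import ContentType
#     from django.db import models
#     import uuid
#
#     class Sharing(models.Model):
#         uuid = models.UUIDField(default=uuid.uuid4, editable=False)
#         public = models.BooleanField(default=True)
#         sharing_id = models.PositiveIntegerField()
#         sharing_type = models.ForeignKey(ContentType, on_delete=models.PROTECT)
#         shared_object = GenericForeignKey('sharing_type', 'sharing_id')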
|
#
#-----------------------------------------------------------------------------
# Copyright 2007-2011 Mentor Graphics Corporation
# Copyright 2007-2011 Cadence Design Systems, Inc.
# Copyright 2010 Synopsys, Inc.
# Copyright 2013 NVIDIA Corporation
# Copyright 2019-2020 Tuomas Poikela (tpoikela)
# All Rights Reserved Worldwide
#
# Licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See
# the License for the specific language governing
# permissions and limitations under the License.
#-----------------------------------------------------------------------------
from typing import Dict, Any
from .sv import sv, sv_obj
from .uvm_misc import UVMStatusContainer
from .uvm_object_globals import (UVM_PRINT, UVM_NONE, UVM_COPY, UVM_COMPARE,
UVM_RECORD, UVM_SETINT, UVM_SETOBJ, UVM_SETSTR, UVM_PACK, UVM_UNPACK)
from .uvm_globals import uvm_report_error, uvm_report_warning, uvm_report_info
from typing import Tuple
class UVMObject(sv_obj):
"""
The `UVMObject` class is the base class for all UVM data and hierarchical classes.
Its primary role is to define a set of methods for such common operations as `create`,
`copy`, `compare`, `print`, and `record`.
Classes deriving from `UVMObject` must implement methods such as
`create` and `get_type_name`.
:ivar str name: Name of the object
:ivar int inst_id: Unique instance ID for this object
Group: Seeding
:cvar bool use_uvm_seeding: This bit enables or disables the UVM seeding
mechanism. It globally affects the operation of the `reseed` method.
When enabled, UVM-based objects are seeded based on their type and full
hierarchical name rather than allocation order. This improves random
stability for objects whose instance names are unique across each type.
The `UVMComponent` class is an example of a type that has a unique
instance name.
"""
# Should be set by uvm_*_utils macro
type_id = None # type: Any
depth = 0
m_inst_count = 0
use_uvm_seeding = True
uvm_global_copy_map = {} # type: Dict['UVMObject', 'UVMObject']
_m_uvm_status_container = UVMStatusContainer()
def __init__(self, name: str):
""" Creates a new uvm_object with the given instance `name`. If `name` is not
supplied, the object is unnamed.
"""
sv_obj.__init__(self)
self.name = name
self.inst_id = UVMObject.m_inst_count
UVMObject.m_inst_count += 1
self.leaf_name = name
def reseed(self) -> None:
"""
Calls `srandom` on the object to reseed the object using the UVM seeding
mechanism, which sets the seed based on type name and instance name instead
of based on instance position in a thread.
If the `use_uvm_seeding` static variable is set to 0, then reseed() does
not perform any function.
"""
if (UVMObject.use_uvm_seeding):
pass
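            # A full implementation would derive a seed from get_type_name()
            # and get_full_name() and call srandom() with it, per the
            # docstring above; this port currently leaves reseeding as a no-op.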
def set_name(self, name: str):
"""
Group: Identification
Sets the instance name of this object, overwriting any previously
given name.
Args:
name:
"""
self.leaf_name = name
def get_name(self) -> str:
"""
Returns the name of the object, as provided by the `name` argument in the
`new` constructor or `set_name` method.
Returns:
str: Name of the object.
"""
return self.leaf_name
def get_full_name(self) -> str:
"""
Objects possessing hierarchy, such as <uvm_components>, override the default
implementation. Other objects might be associated with component hierarchy
but are not themselves components. For example, <uvm_sequence #(REQ,RSP)>
classes are typically associated with a <uvm_sequencer #(REQ,RSP)>. In this
case, it is useful to override get_full_name to return the sequencer's
full name concatenated with the sequence's name. This provides the sequence
a full context, which is useful when debugging.
Returns:
str: The full hierarchical name of this object. The default
implementation is the same as <get_name>, as uvm_objects do not inherently
possess hierarchy.
"""
return self.get_name()
def get_inst_id(self) -> int:
"""
Returns:
int: The object's unique, numeric instance identifier.
"""
return self.inst_id
@classmethod
def get_inst_count(self) -> int:
"""
Returns:
int: The current value of the instance counter, which represents the
total number of uvm_object-based objects that have been allocated in
simulation. The instance counter is used to form a unique numeric instance
identifier.
"""
return UVMObject.m_inst_count
def get_type(self) -> None:
"""
Returns the type-proxy (wrapper) for this object. The `UVMFactory`'s
type-based override and creation methods take arguments of
`uvm_object_wrapper`. This method, if implemented, can be used as convenient
means of supplying those arguments.
The default implementation of this method produces an error and returns
`None`. To enable use of this method, a user's subtype must implement a
version that returns the subtype's wrapper.
For example:
.. code-block:: python
class cmd(UVMObject):
type_id = None
@classmethod
def get_type(cls):
return cls.type_id.get()
Then, to use:
.. code-block:: python
factory.set_type_override(cmd.get_type(), subcmd.get_type())
This function is implemented by the uvm_*_utils functions, if employed.
Returns:
"""
uvm_report_error("NOTYPID", "get_type not implemented in derived class: "
+ str(self), UVM_NONE)
return None
def get_object_type(self) -> Any:
"""
Function: get_object_type
Returns the type-proxy (wrapper) for this object. The `uvm_factory`'s
type-based override and creation methods take arguments of
`uvm_object_wrapper`. This method, if implemented, can be used as convenient
means of supplying those arguments. This method is the same as the static
`get_type` method, but uses an already allocated object to determine
the type-proxy to access (instead of using the static object).
The default implementation of this method does a factory lookup of the
proxy using the return value from `get_type_name`. If the type returned
by `get_type_name` is not registered with the factory, then a `None`
handle is returned.
For example:
.. code-block:: python
class cmd (UVMObject):
type_id = UVMObjectRegistry()
@classmethod
def get_type(cls):
    return cls.type_id.get()
def get_object_type(self):
return cmd.type_id.get()
This function is implemented by the `uvm_*_utils` macros, if employed.
Returns:
"""
from .uvm_coreservice import UVMCoreService
cs = UVMCoreService.get()
factory = cs.get_factory()
if self.get_type_name() == "<unknown>":
return None
return factory.find_wrapper_by_name(self.get_type_name())
def get_type_name(self) -> str:
"""
This function returns the type name of the object, which is typically the
type identifier enclosed in quotes. It is used for various debugging
functions in the library, and it is used by the factory for creating
objects.
This function must be defined in every derived class.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
type_name = "mytype"
def get_type_name(self):
return mytype.type_name
We define the `type_name` class variable to enable access to the type name
without needing an instance of the class, i.e., to enable access directly
via the class, as in `mytype.type_name`.
Returns:
str: Type name of the object.
"""
return "<unknown>"
def create(self, name="") -> 'UVMObject':
"""
Group: Creation
The `create` method allocates a new object of the same type as this object
and returns it via a base uvm_object handle. Every class deriving from
uvm_object, directly or indirectly, must implement the create method.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
def create(self, name=""):
t = mytype(name)
return t
Args:
name (str): Name of the created object.
Returns:
obj: New object.
"""
return UVMObject(name)
def clone(self) -> 'UVMObject':
"""
The `clone` method creates and returns an exact copy of this object.
The default implementation calls `create` followed by `copy`. As clone is
virtual, derived classes may override this implementation if desired.
Returns:
UVMObject: Clone of the object.
"""
tmp = self.create(self.get_name())
if tmp is None:
uvm_report_warning("CRFLD", sv.sformatf(
"The create method failed for %s, object cannot be cloned",
self.get_name()), UVM_NONE)
else:
tmp.copy(self)
return tmp
def print_obj(self, printer=None) -> None:
"""
Group: Printing
Function: print
The `print` method deep-prints this object's properties in a format and
manner governed by the given `printer` argument; if the `printer` argument
is not provided, the global `uvm_default_printer` is used. See
`uvm_printer` for more information on printer output formatting. See also
`uvm_line_printer`, `uvm_tree_printer`, and `uvm_table_printer` for details
on the pre-defined printer "policies," or formatters, provided by the UVM.
The `print` method is not virtual and must not be overloaded. To include
custom information in the `print` and `sprint` operations, derived classes
must override the `do_print` method and use the provided printer policy
class to format the output.
Args:
printer (UVMPrinter): Printer that is used in printing.
"""
if printer is None:
from .uvm_global_vars import uvm_default_printer
printer = uvm_default_printer
if printer is None:
uvm_report_error("NonePRINTER", "uvm_default_printer is None")
sv.fwrite(printer.knobs.mcd, self.sprint(printer))
def sprint(self, printer=None) -> str:
"""
The `sprint` method works just like the `print` method, except the output
is returned in a string rather than displayed.
The `sprint` method is not virtual and must not be overloaded. To include
additional fields in the `print` and `sprint` operation, derived classes
must override the `do_print` method and use the provided printer policy
class to format the output. The printer policy will manage all string
concatenations and provide the string to `sprint` to return to the caller.
Args:
printer (UVMPrinter): Printer that is used in printing.
Returns:
str: String representation of the object.
"""
if printer is None:
from .uvm_global_vars import uvm_default_printer
printer = uvm_default_printer
if not printer.istop():
UVMObject._m_uvm_status_container.printer = printer
self._m_uvm_field_automation(None, UVM_PRINT, "")
self.do_print(printer)
return ""
self._m_uvm_status_container = UVMObject._m_uvm_status_container
printer.print_object(self.get_name(), self)
if printer.m_string != "":
return printer.m_string
return printer.emit()
def do_print(self, printer) -> None:
"""
The `do_print` method is the user-definable hook called by `print` and
`sprint` that allows users to customize what gets printed or sprinted
beyond the field information provided by the `uvm_field_*` macros,
<Utility and Field Macros for Components and Objects>.
The `printer` argument is the policy object that governs the format and
content of the output. To ensure correct `print` and `sprint` operation,
and to ensure a consistent output format, the `printer` must be used
by all `do_print` implementations. That is, instead of using ~$display~ or
string concatenations directly, a `do_print` implementation must call
through the ~printer's~ API to add information to be printed or sprinted.
An example implementation of `do_print` is as follows:
.. code-block:: python
    class mytype (UVMObject):
        ...  # has fields self.data (an object) and self.f1 (an int)
        def do_print(self, printer):
            super().do_print(printer)
            printer.print_field_int("f1", self.f1, 32, UVM_DEC)  # 32 = bit width
            printer.print_object("data", self.data)
Then, to print and sprint the object, you could write:
.. code-block:: python
    t = mytype()
    t.print_obj()
    uvm_report_info("Received", t.sprint())
See `UVMPrinter` for information about the printer API.
Args:
printer (UVMPrinter): Printer that is used in printing.
"""
return
def convert2string(self) -> str:
"""
This virtual function is a user-definable hook, called directly by the
user, that allows users to provide object information in the form of
a string. Unlike `sprint`, there is no requirement to use a `uvm_printer`
policy object. As such, the format and content of the output is fully
customizable, which may be suitable for applications not requiring the
consistent formatting offered by the `print`/`sprint`/`do_print`
API.
Fields declared in <Utility Macros> macros (`uvm_field_*`), if used, will
not automatically appear in calls to convert2string.
An example implementation of convert2string follows.
.. code-block:: python
    class Base(UVMObject):
        field = "foo"
        def convert2string(self):
            return "base_field=" + self.field

    class Obj2(UVMObject):
        field = "bar"
        def convert2string(self):
            return "child_field=" + self.field

    class Obj(Base):
        addr = 0x123
        data = 0x456
        write = 1
        child = Obj2("child")
        def convert2string(self):
            return (super().convert2string() +
                    sv.sformatf(" write=%0d addr=%8h data=%8h ",
                                self.write, self.addr, self.data) +
                    self.child.convert2string())
Then, to display an object, you could write:
.. code-block:: python
o = Obj("o")
uvm_report_info("BusMaster", "Sending:\n " + o.convert2string())
The output will look similar to::
UVM_INFO @ 0: reporter [BusMaster] Sending:
base_field=foo write=1 addr=00000123 data=00000456 child_field=bar
Returns:
str: Object converted into string.
"""
return ""
def _m_uvm_field_automation(self, tmp_data__, what__, str__) -> None:
pass
def record(self, recorder=None) -> None:
"""
Group: Recording
The `record` method deep-records this object's properties according to an
optional `recorder` policy. The method is not virtual and must not be
overloaded. To include additional fields in the record operation, derived
classes should override the `do_record` method.
The optional `recorder` argument specifies the recording policy, which
governs how recording takes place. See
`uvm_recorder` for information.
A simulator's recording mechanism is vendor-specific. By providing access
via a common interface, the uvm_recorder policy provides vendor-independent
access to a simulator's recording capabilities.
Args:
recorder (UVMRecorder):
"""
if recorder is None:
return
UVMObject._m_uvm_status_container.recorder = recorder
recorder.recording_depth += 1
self._m_uvm_field_automation(None, UVM_RECORD, "")
self.do_record(recorder)
recorder.recording_depth -= 1
def do_record(self, recorder) -> None:
"""
The `do_record` method is the user-definable hook called by the `record`
method. A derived class should override this method to include its fields
in a record operation.
The `recorder` argument is policy object for recording this object. A
do_record implementation should call the appropriate recorder methods for
each of its fields. Vendor-specific recording implementations are
encapsulated in the `recorder` policy, thereby insulating user-code from
vendor-specific behavior. See `uvm_recorder` for more information.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
data_obj data
int f1
def do_record (self, recorder):
recorder.record_field("f1", f1, sv.bits(f1), UVM_DEC)
recorder.record_object("data", data)
Args:
recorder (UVMRecorder): Recorder policy object.
"""
return
def copy(self, rhs: 'UVMObject'):
"""
The copy makes this object a copy of the specified object.
The `copy` method is not virtual and should not be overloaded in derived
classes. To copy the fields of a derived class, that class should override
the `do_copy` method.
Args:
rhs (UVMObject): An object to be copied.
"""
# For cycle checking
UVMObject.depth = 0
if (rhs is not None) and rhs in UVMObject.uvm_global_copy_map:
return
if rhs is None:
uvm_report_warning("NoneCP",
"A None object was supplied to copy; copy is ignored", UVM_NONE)
return
UVMObject.uvm_global_copy_map[rhs] = self
UVMObject.depth += 1
self._m_uvm_field_automation(rhs, UVM_COPY, "")
self.do_copy(rhs)
UVMObject.depth -= 1
if UVMObject.depth == 0:
UVMObject.uvm_global_copy_map = {}
def do_copy(self, rhs) -> None:
"""
The `do_copy` method is the user-definable hook called by the `copy` method.
A derived class should override this method to include its fields in a `copy`
operation.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
field_1 = 0
def do_copy(self, rhs):
    super().do_copy(rhs)
    # Optional type checking
    self.field_1 = rhs.field_1
The implementation must call `super().do_copy`, and can optionally do
type checking before copying.
Args:
rhs (UVMObject): Object to be copied.
"""
return
def compare(self, rhs, comparer=None) -> bool:
"""
Deep compares members of this data object with those of the object provided
in the `rhs` (right-hand side) argument, returning 1 on a match, 0 otherwise.
The `compare` method is not virtual and should not be overloaded in derived
classes. To compare the fields of a derived class, that class should
override the `do_compare` method.
The optional `comparer` argument specifies the comparison policy. It allows
you to control some aspects of the comparison operation. It also stores the
results of the comparison, such as field-by-field miscompare information
and the total number of miscompares. If a compare policy is not provided,
then the global `uvm_default_comparer` policy is used. See `uvm_comparer`
for more information.
Args:
rhs (UVMObject): Object to be compared against.
comparer (UVMComparer): Comparer policy object.
Returns:
bool: True if objects match, False otherwise.
"""
# t = 0
dc = 0
#static int style
# style = 0
done = 0
cls = UVMObject
if comparer is not None:
cls._m_uvm_status_container.comparer = comparer
else:
from .uvm_global_vars import uvm_default_comparer
cls._m_uvm_status_container.comparer = uvm_default_comparer
comparer = cls._m_uvm_status_container.comparer
if(not cls._m_uvm_status_container.scope.depth()):
comparer.compare_map.delete()
comparer.result = 0
comparer.miscompares = ""
comparer.scope = cls._m_uvm_status_container.scope
if self.get_name() == "":
cls._m_uvm_status_container.scope.down("<object>")
else:
cls._m_uvm_status_container.scope.down(self.get_name())
if(not done and (rhs is None)):
if(cls._m_uvm_status_container.scope.depth()):
comparer.print_msg_object(self, rhs)
else:
comparer.print_msg_object(self, rhs)
uvm_report_info("MISCMP",
sv.sformatf("%0d Miscompare(s) for object %s@%0d vs. None",
comparer.result,
cls._m_uvm_status_container.scope.get(),
self.get_inst_id()),
cls._m_uvm_status_container.comparer.verbosity)
done = 1
if(not done and comparer.compare_map.exists(rhs)):
if(comparer.compare_map[rhs] != self):
comparer.print_msg_object(self, comparer.compare_map[rhs])
done = 1 # don't do any more work after this case, but do cleanup
if(not done and comparer.check_type and (rhs is not None) and
(self.get_type_name() != rhs.get_type_name())):
cls._m_uvm_status_container.stringv = ("lhs type = \"" + self.get_type_name()
+ "' : rhs type = '" + rhs.get_type_name() + "'")
comparer.print_msg(cls._m_uvm_status_container.stringv)
if not done:
comparer.compare_map[rhs] = self
self._m_uvm_field_automation(rhs, UVM_COMPARE, "")
dc = self.do_compare(rhs, comparer)
if cls._m_uvm_status_container.scope.depth() == 1:
cls._m_uvm_status_container.scope.up()
if rhs is not None:
comparer.print_rollup(self, rhs)
return (comparer.result == 0 and dc == 1)
def do_compare(self, rhs, comparer) -> bool:
"""
The `do_compare` method is the user-definable hook called by the `compare`
method. A derived class should override this method to include its fields
in a compare operation. It should return 1 if the comparison succeeds, 0
otherwise.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
f1 = 0
def do_compare(self, rhs, comparer):
result = super().do_compare(rhs, comparer)
# Optional type checking
result &= comparer.compare_field_int("f1", self.f1, rhs.f1)
return result
A derived class implementation must call `super().do_compare` to ensure its
base class' properties, if any, are included in the comparison. If strict
type matching is required instead of duck typing, the user can implement
that check here as well.
The actual comparison should be implemented using the `UVMComparer` object
rather than direct field-by-field comparison. This enables users of your
class to customize how comparisons are performed and how much miscompare
information is collected. See `UVMComparer` for more details.
Args:
rhs (UVMObject):
comparer (UVMComparer):
Returns:
bool: True if objects match, False otherwise.
"""
return True
# // Group: Packing
# // Function: pack
#
# extern function int pack (ref bit bitstream[],
# input uvm_packer packer=None)
def pack(self, packer=None) -> Tuple[Any, Any]:
packer = self.m_pack(packer)
return packer.get_packed_size(), packer.get_bits()
# // Function: pack_bytes
#
# extern function int pack_bytes (ref byte unsigned bytestream[],
# input uvm_packer packer=None)
def pack_bytes(self, bytestream, packer=None) -> Any:
packer = self.m_pack(packer)
packed_bytes = packer.get_bytes()
for b in packed_bytes:
bytestream.append(b)
return packer.get_packed_size()
# // Function: pack_ints
# //
# // The pack methods bitwise-concatenate this object's properties into an array
# // of bits, bytes, or ints. The methods are not virtual and must not be
# // overloaded. To include additional fields in the pack operation, derived
# // classes should override the <do_pack> method.
# //
# // The optional `packer` argument specifies the packing policy, which governs
# // the packing operation. If a packer policy is not provided, the global
# // <uvm_default_packer> policy is used. See <uvm_packer> for more information.
# //
# // The return value is the total number of bits packed into the given array.
# // Use the array's built-in `size` method to get the number of bytes or ints
# // consumed during the packing process.
#
# extern function int pack_ints (ref int unsigned intstream[],
# input uvm_packer packer=None)
def pack_ints(self, intstream, packer=None) -> Any:
packer = self.m_pack(packer)
ints = packer.get_ints()
for i in ints:
intstream.append(i)
return packer.get_packed_size()
# // Function: do_pack
# //
# // The `do_pack` method is the user-definable hook called by the <pack> methods.
# // A derived class should override this method to include its fields in a pack
# // operation.
# //
# // The `packer` argument is the policy object for packing. The policy object
# // should be used to pack objects.
# //
# // A typical example of an object packing itself is as follows
# //
# //| class mysubtype extends mysupertype
# //| ...
# //| shortint myshort
# //| obj_type myobj
# //| byte myarray[]
# //| ...
# //| function void do_pack (uvm_packer packer)
# //| super.do_pack(packer); // pack mysupertype properties
# //| packer.pack_field_int(myarray.size(), 32)
# //| foreach (myarray)
# //| packer.pack_field_int(myarray[index], 8)
# //| packer.pack_field_int(myshort, $bits(myshort))
# //| packer.pack_object(myobj)
# //| endfunction
# //
# // The implementation must call ~super.do_pack~ so that base class properties
# // are packed as well.
# //
# // If your object contains dynamic data (object, string, queue, dynamic array,
# // or associative array), and you intend to unpack into an equivalent data
# // structure when unpacking, you must include meta-information about the
# // dynamic data when packing as follows.
# //
# // - For queues, dynamic arrays, or associative arrays, pack the number of
# // elements in the array in the 32 bits immediately before packing
# // individual elements, as shown above.
# //
# // - For string data types, append a zero byte after packing the string
# // contents.
# //
# // - For objects, pack 4 bits immediately before packing the object. For `None`
# // objects, pack 4'b0000. For non-`None` objects, pack 4'b0001.
# //
# // When the `uvm_field_* macros are used,
# // <Utility and Field Macros for Components and Objects>,
# // the above meta information is included provided the <uvm_packer::use_metadata>
# // variable is set for the packer.
# //
# // Packing order does not need to match declaration order. However, unpacking
# // order must match packing order.
def do_pack(self, packer) -> None:
return
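    # A hedged Python rendering of the SystemVerilog do_pack example above
    # (mysupertype/myshort/myobj/myarray are the example's own names; the
    # packer calls mirror the pack_* methods referenced in the comments above):
    #
    #     def do_pack(self, packer):
    #         super().do_pack(packer)  # pack base-class properties first
    #         packer.pack_field_int(len(self.myarray), 32)  # element count
    #         for item in self.myarray:
    #             packer.pack_field_int(item, 8)
    #         packer.pack_field_int(self.myshort, 16)  # 16 bits ~ $bits(shortint)
    #         packer.pack_object(self.myobj)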
# // Group: Unpacking
#
# // Function: unpack
#
# extern function int unpack (ref bit bitstream[],
# input uvm_packer packer=None)
def unpack(self, bitstream, packer=None) -> Any:
packer = self.m_unpack_pre(packer)
packer.put_bits(bitstream)
self.m_unpack_post(packer)
packer.set_packed_size()
return packer.get_packed_size()
# // Function: unpack_bytes
#
# extern function int unpack_bytes (ref byte unsigned bytestream[],
# input uvm_packer packer=None)
def unpack_bytes(self, bytestream, packer=None) -> Any:
packer = self.m_unpack_pre(packer)
packer.put_bytes(bytestream)
self.m_unpack_post(packer)
packer.set_packed_size()
return packer.get_packed_size()
# // Function: unpack_ints
# //
# // The unpack methods extract property values from an array of bits, bytes, or
# // ints. The method of unpacking `must` exactly correspond to the method of
# // packing. This is assured if (a) the same `packer` policy is used to pack
# // and unpack, and (b) the order of unpacking is the same as the order of
# // packing used to create the input array.
# //
# // The unpack methods are fixed (non-virtual) entry points that are directly
# // callable by the user. To include additional fields in the <unpack>
# // operation, derived classes should override the <do_unpack> method.
# //
# // The optional `packer` argument specifies the packing policy, which governs
# // both the pack and unpack operation. If a packer policy is not provided,
# // then the global `uvm_default_packer` policy is used. See uvm_packer for
# // more information.
# //
# // The return value is the actual number of bits unpacked from the given array.
#
# extern function int unpack_ints (ref int unsigned intstream[],
# input uvm_packer packer=None)
def unpack_ints(self, intstream, packer=None) -> Any:
packer = self.m_unpack_pre(packer)
packer.put_ints(intstream)
self.m_unpack_post(packer)
packer.set_packed_size()
return packer.get_packed_size()
# // Function: do_unpack
# //
# // The `do_unpack` method is the user-definable hook called by the <unpack>
# // method. A derived class should override this method to include its fields
# // in an unpack operation.
# //
# // The `packer` argument is the policy object for both packing and unpacking.
# // It must be the same packer used to pack the object into bits. Also,
# // do_unpack must unpack fields in the same order in which they were packed.
# // See <uvm_packer> for more information.
# //
# // The following implementation corresponds to the example given in do_pack.
# //
# //| function void do_unpack (uvm_packer packer)
# //| int sz
# //| super.do_unpack(packer); // unpack super's properties
# //| sz = packer.unpack_field_int(myarray.size(), 32)
# //| myarray.delete()
# //| for(int index=0; index<sz; index++)
# //| myarray[index] = packer.unpack_field_int(8)
# //| myshort = packer.unpack_field_int($bits(myshort))
# //| packer.unpack_object(myobj)
# //| endfunction
# //
# // If your object contains dynamic data (object, string, queue, dynamic array,
# // or associative array), and you intend to <unpack> into an equivalent data
# // structure, you must have included meta-information about the dynamic data
# // when it was packed.
# //
# // - For queues, dynamic arrays, or associative arrays, unpack the number of
# // elements in the array from the 32 bits immediately before unpacking
# // individual elements, as shown above.
# //
# // - For string data types, unpack into the new string until a `None` byte is
# // encountered.
# //
# // - For objects, unpack 4 bits into a byte or int variable. If the value
# // is 0, the target object should be set to `None` and unpacking continues to
# // the next property, if any. If the least significant bit is 1, then the
# // target object should be allocated and its properties unpacked.
def do_unpack(self, packer) -> None:
return
def set_int_local(self, field_name: str, value: int, recurse=True):
"""
Group: Configuration
Args:
field_name (str): Variable to set
value: Value for the variable
recurse (bool):
"""
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.bitstream = value
self._m_uvm_field_automation(None, UVM_SETINT, field_name)
if UVMObject._m_uvm_status_container.warning and not self._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s", field_name),UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
def set_string_local(self, field_name: str, value: str, recurse=True):
"""
Function: set_string_local
Args:
field_name (str): Variable to set
value: Value for the variable
recurse (bool): If True, recurse into sub-objects.
"""
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.stringv = value
self._m_uvm_field_automation(None, UVM_SETSTR, field_name)
if UVMObject._m_uvm_status_container.warning and not UVMObject._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s (@%0d)",
field_name, self.get_inst_id()), UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
def set_object_local(self, field_name: str, value: 'UVMObject', clone=1, recurse=1):
"""
These methods provide write access to integral, string, and
uvm_object-based properties indexed by a `field_name` string. The object
designer chooses which, if any, properties will be accessible, and overrides
the appropriate methods depending on the properties' types. For objects,
the optional `clone` argument specifies whether to clone the `value`
argument before assignment.
The global `uvm_is_match` function is used to match the field names, so
`field_name` may contain wildcards.
An example implementation of all three methods is as follows.
.. code-block:: python
class mytype(UVMObject):
def __init__(self, name):
super().__init__(name)
self.myint = 0
self.mybyte = 0
self.myshort = 0
self.mystring = ""
self.myobj = None
# provide access to integral properties
def set_int_local(self, field_name, value):
if (uvm_is_match (field_name, "myint")):
self.myint = value
elif (uvm_is_match (field_name, "mybyte")):
self.mybyte = value
# provide access to string properties
def set_string_local(self, field_name, value):
if (uvm_is_match (field_name, "mystring")):
self.mystring = value
# provide access to sub-objects
def set_object_local(self, field_name, value, clone=1):
    if uvm_is_match(field_name, "myobj"):
        if value is not None:
            # if the provided value is not the correct type, produce an error
            if not isinstance(value, myobj_type):  # myobj_type: expected class of myobj
                uvm_report_error("TYPE", "wrong type for myobj")
            elif clone:
                self.myobj = value.clone()
            else:
                self.myobj = value
        else:
            self.myobj = None  # value is None, so simply assign None to myobj
...
Although the object designer implements these methods to provide outside
access to one or more properties, they are intended for internal use (e.g.,
for command-line debugging and auto-configuration) and should not be called
directly by the user.
Args:
field_name (str): Variable to set
value: Value for the variable
clone (bool):
recurse (bool):
"""
# cc = None # uvm_object cc
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
if clone and (value is not None):
cc = value.clone()
if cc is not None:
cc.set_name(field_name)
value = cc
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.object = value
UVMObject._m_uvm_status_container.clone = clone
self._m_uvm_field_automation(None, UVM_SETOBJ, field_name)
if UVMObject._m_uvm_status_container.warning and not UVMObject._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s", field_name), UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
# //---------------------------------------------------------------------------
# // **** Internal Methods and Properties ***
# // Do not use directly
# //---------------------------------------------------------------------------
#
# extern local function void m_pack (inout uvm_packer packer)
def m_pack(self, packer) -> Any:
if packer is not None:
UVMObject._m_uvm_status_container.packer = packer
else:
from .uvm_global_vars import uvm_default_packer
UVMObject._m_uvm_status_container.packer = uvm_default_packer
packer = UVMObject._m_uvm_status_container.packer
packer.reset()
packer.scope.down(self.get_name())
self._m_uvm_field_automation(None, UVM_PACK, "")
self.do_pack(packer)
packer.set_packed_size()
packer.scope.up()
return packer
# extern local function void m_unpack_pre (inout uvm_packer packer)
def m_unpack_pre(self, packer) -> Any:
if packer is not None:
UVMObject._m_uvm_status_container.packer = packer
else:
from .uvm_global_vars import uvm_default_packer
UVMObject._m_uvm_status_container.packer = uvm_default_packer
packer = UVMObject._m_uvm_status_container.packer
packer.reset()
return packer
# extern local function void m_unpack_post (uvm_packer packer)
def m_unpack_post(self, packer) -> None:
provided_size = packer.get_packed_size()
# Put this object into the hierarchy
packer.scope.down(self.get_name())
self._m_uvm_field_automation(None, UVM_UNPACK, "")
self.do_unpack(packer)
# Scope back up before leaving
packer.scope.up()
if packer.get_packed_size() != provided_size:
uvm_report_warning("BDUNPK", sv.sformatf(
"Unpack operation unsuccessful: unpacked %0d bits from a total of %0d bits",
packer.get_packed_size(), provided_size), UVM_NONE)
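# A minimal end-to-end sketch of subclassing UVMObject (the Packet name and
# its addr/data fields are invented here for illustration):
#
#     class Packet(UVMObject):
#         type_name = "Packet"
#         def __init__(self, name="Packet"):
#             super().__init__(name)
#             self.addr = 0
#             self.data = 0
#         def get_type_name(self):
#             return Packet.type_name
#         def create(self, name=""):
#             return Packet(name)
#         def do_copy(self, rhs):
#             super().do_copy(rhs)
#             self.addr = rhs.addr
#             self.data = rhs.data
#
#     p = Packet("p0")
#     p.addr = 0x10
#     q = p.clone()  # create() followed by copy(), see clone() above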
|
from errors import Request
from .mixins import Identify
class Report(Identify):
def _validate(self, request):
super()._validate(request)
validator = self._application.validator
self.__message = self._get(request, 'message', '').strip()
if validator.isempty(self.__message):
raise Request('message', self.__message)
def _process(self, request):
storage = self._application.storage
mail = self._application.mail
token = self._session.token
task_id = self._task.id
subject = f'Report from {token} about task #{task_id}'
mail.send(subject, self.__message)
storage.push(
self._session.account.uuid,
'''
Thank you for leaving a report.
We're working on your issue.
''',
)
|
#install canvasapi, pyTelegramBotAPI
# Imports
import sys, os
import canvasapi
import telebot
from html.parser import HTMLParser
from canvasapi import Canvas, discussion_topic
#----# CANVAS #----#
#Class handles html to ascii conversion
class HTMLFilter(HTMLParser):
text = ""
def handle_data(self, postContent):
self.text += postContent
#bool for check
new = False
#Canvas API URL
API_URL = "!CANVAS BASE URL!"
#Canvas API key
API_KEY = "!CANVAS USER API KEY!"
#Initialize a new Canvas object
canvas = Canvas(API_URL, API_KEY)
COURSEID = "123456"
#Grab course 123456
course = canvas.get_course(COURSEID)
#Access the course's name
courseName = course.name
#For output
user = "Teacher"
#Opens txt file for check
aCheck = open("latest.txt","r")
aCheckStr = aCheck.read()
#Gets latest announcement
ann = canvas.get_announcements(context_codes=['course_{}'.format(COURSEID)])
# extract the raw HTML message body from the latest announcement
postContent = str(ann[0].__getattribute__("message"))
#Converts post from html to ascii
post = HTMLFilter()
post.feed(postContent)
finalPost = post.text
#Converts to string for following if statement
a = str(ann[0])
# store the announcement so repeat runs don't resend the same message
if a != str(aCheckStr):
new = True
aCheckOverWrite = open("latest.txt","w+")
aCheckOverWrite.write(a)
aCheck.close()
aCheckOverWrite.close()
#---------------------#
#if new = true, use to push message
#---# Telegram #---#
bot = telebot.TeleBot("!TELEGRAM BOT API KEY!")
#Handle commands: /link, /help, & /latest
@bot.message_handler(commands=['link'])
def handle_link(message):
bot.reply_to(message, "Bot message: Here is a direct link to the canvas course. It will only work if you're logged in: https://gastoncs.instructure.com/courses/102829")
@bot.message_handler(commands=['help'])
def handle_help(message):
bot.reply_to(message, "Bot message: The bot is Active. This bot was made in python by the one and only VoluSign. The source code for this bot can be found at https://github.com/VoluSign/CanvasToTelegramBot")
bot.reply_to(message, "Commands: /help, /link, /latest")
@bot.message_handler(commands=['latest'])
def handle_latest(message):
bot.reply_to(message, "Bot message: The following message will contain the most recent post to the Class of 2022 pertaining to scholarships:")
bot.reply_to(message, f"{courseName} - {user}: {finalPost}")
#Bot sends the latest post on start-up when a new announcement was detected.
#reply_to needs an incoming message, so send_message is used instead; the chat
#id below is a placeholder in the same style as the other credentials above.
CHAT_ID = "!TELEGRAM CHAT ID!"
if new:
    bot.send_message(CHAT_ID, f'Latest Announcement: {finalPost}')
#Starts server while script is running
bot.polling()
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ExportTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.bulkexports.v1.exports("resource_type").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://bulkexports.twilio.com/v1/Exports/resource_type',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"resource_type": "Calls",
"url": "https://bulkexports.twilio.com/v1/Exports/Calls",
"links": {
"days": "https://bulkexports.twilio.com/v1/Exports/Calls/Days"
}
}
'''
))
actual = self.client.bulkexports.v1.exports("resource_type").fetch()
self.assertIsNotNone(actual)
|
'''OIDC server example'''
# import datetime
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.sqlite import JSON
# from authlib.integrations.sqla_oauth2 import (
# OAuth2ClientMixin,
# OAuth2TokenMixin,
# OAuth2AuthorizationCodeMixin
# )
from database import Base
from utils import disambiguate_referent
import uuid
class User(Base): # pylint: disable=R0903
'''User class example'''
__tablename__ = "user"
id = Column(Integer, primary_key=True)
uuid = Column(String(100), unique=True)
def get_id(self):
'''Fetch user identifier'''
return self.id
# OIDC Authentication Challenge
# Template for a proof request that will be sent as a challenge to authenticating users
class OIDCProofRequest(Base):
'''OIDC Proof Request class example'''
__tablename__ = 'oidc_proof_request'
# The oidc scope allows a relying party to specify the proof request the OP should challenge the user with
oidc_scope = Column(String(100), primary_key=True)
# Attribute within the proof request that identifies the subject responding to the authentication challenge
subject_identifier = Column(String(100))
proof_request = Column(JSON)
def get_oidc_scope(self):
'''Fetch oidc proof request identifier'''
return self.oidc_scope
def __str__(self):
    return f"{self.oidc_scope}"
def to_json(self):
proof_request = {
"name": self.proof_request.get("name", ""),
"version": self.proof_request.get("version", ""),
"requested_attributes": {},
"requested_predicates": {},
}
for attr in self.proof_request.get("requested_attributes", []):
label = attr.get("label", str(uuid.uuid4()))
if label in proof_request.get("requested_attributes", {}).keys():
label = disambiguate_referent(label)
proof_request["requested_attributes"].update({label: attr})
for attr in self.proof_request.get("requested_predicates", []):
label = attr.get("label", str(uuid.uuid4()))
if label in proof_request.get("requested_predicates", {}).keys():
label = disambiguate_referent(label)
proof_request["requested_predicates"].update({label: attr})
return {"proof_request": proof_request}
|
from unisdk.sms import UniSMS
from unisdk.exception import UniException
def example():
client = UniSMS("your access key id", "your access key secret")
try:
res = client.send({
"to": "your phone number",
"signature": "UniSMS",
"templateId": "login_tmpl",
"templateData": {
"code": 7777
}
})
print(res)
except UniException as e:
print(e)
if __name__ == '__main__':
example()
|
from setuptools import setup, find_packages
from truewho import __version__
def read_file(filename, lines=False):
try:
with open(filename, "r") as f:
if lines:
return [i.strip() for i in f.readlines() if (i.strip())]
return f.read()
    except OSError:
        print("Cannot read file:", filename)
return None
long_description = read_file("README.md")
setup(
name="truewho",
version=__version__,
author="Ibrahim Rafi",
author_email="me@ibrahimrafi.me",
license="MIT",
url="https://github.com/rafiibrahim8/truewho",
download_url="https://github.com/rafiibrahim8/truewho/archive/v{}.tar.gz".format(
__version__
),
install_requires=["phone-iso3166", "requests", "click"],
description="Check a phone number for name with Truecaller in command line.",
long_description=long_description,
long_description_content_type="text/markdown",
keywords=["truewho", "Truecaller", "Spam", "Call"],
packages=find_packages(),
entry_points=dict(console_scripts=["truewho=truewho.truewho:main"]),
platforms=["any"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: End Users/Desktop",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_bigquery`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.google.cloud.operators.bigquery_to_bigquery import BigQueryToBigQueryOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_bigquery`.",
DeprecationWarning, stacklevel=2
)
|
class Protocol:
""" Protocol abstraction"""
def __init__(self, transaction, transport, default_port=None):
self.name = type(self).__name__
self.default_port = default_port
self.transaction = transaction
self.transport = transport
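# A minimal sketch of a concrete subclass (the Http name, port 80, and the
# None arguments are illustrative; the base class prescribes none of them):
#
#     class Http(Protocol):
#         def __init__(self, transaction, transport):
#             super().__init__(transaction, transport, default_port=80)
#
#     proto = Http(transaction=None, transport=None)
#     assert proto.name == "Http" and proto.default_port == 80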
|
# -*- coding: utf-8 -*-
import unittest
from copy import deepcopy
from openprocurement.api.tests.base import test_organization
from openprocurement.tender.openua.tests.base import test_bids
from openprocurement.tender.openuadefense.tests.base import (
BaseTenderUAContentWebTest,
test_tender_data,
test_features_tender_ua_data)
class TenderBidResourceTest(BaseTenderUAContentWebTest):
initial_status = 'active.tendering'
def test_create_tender_bidder_invalid(self):
response = self.app.post_json('/tenders/some_id/bids', {
'data': {'tenderers': [test_organization], "value": {"amount": 500}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
request_path = '/tenders/{}/bids'.format(self.tender_id)
response = self.app.post(request_path, 'data', status=415)
self.assertEqual(response.status, '415 Unsupported Media Type')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description':
u"Content-Type header should be one of ['application/json']", u'location': u'header', u'name': u'Content-Type'}
])
response = self.app.post(
request_path, 'data', content_type='application/json', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'No JSON object could be decoded',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(request_path, 'data', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(
request_path, {'not_data': {}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(request_path, {'data': {
'invalid_field': 'invalid_value'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Rogue field', u'location':
u'body', u'name': u'invalid_field'}
])
response = self.app.post_json(request_path, {
'data': {'tenderers': [{'identifier': 'invalid_value'}]}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': {u'identifier': [
u'Please use a mapping for this field or Identifier instance instead of unicode.']}, u'location': u'body', u'name': u'tenderers'}
])
response = self.app.post_json(request_path, {
'data': {'tenderers': [{'identifier': {}}], 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.']}, u'name': [u'This field is required.'], u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}
])
response = self.app.post_json(request_path, {'data': {'tenderers': [{
'name': 'name', 'identifier': {'uri': 'invalid_value'}}], 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'contactPoint': [u'This field is required.'], u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.'], u'uri': [u'Not a well formed URL.']}, u'address': [u'This field is required.']}], u'location': u'body', u'name': u'tenderers'}
])
response = self.app.post_json(request_path, {'data': {'tenderers': [test_organization], 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'value'}
])
response = self.app.post_json(request_path, {'data': {'tenderers': [test_organization], "value": {"amount": 500, 'valueAddedTaxIncluded': False}, 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of tender'], u'location': u'body', u'name': u'value'}
])
response = self.app.post_json(request_path, {'data': {'tenderers': [test_organization], "value": {"amount": 500, 'currency': "USD"}, 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'currency of bid should be identical to currency of value of tender'], u'location': u'body', u'name': u'value'},
])
response = self.app.post_json(request_path, {'data': {'tenderers': test_organization, "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u"invalid literal for int() with base 10: 'contactPoint'", u'location': u'body', u'name': u'data'},
])
def test_create_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
self.assertEqual(bid['tenderers'][0]['name'], test_organization['name'])
self.assertIn('id', bid)
self.assertIn(bid['id'], response.headers['Location'])
self.set_status('complete')
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't add bid in current (complete) tender status")
def test_patch_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'selfEligible': True, 'selfQualified': True, 'status': 'draft',
'tenderers': [test_organization], "value": {"amount": 500}}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid_token = response.json['access']['token']
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token), {"data": {"value": {"amount": 600}}}, status=200)
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token), {"data": {'status': 'active'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'value of bid should be less than value of tender'], u'location': u'body', u'name': u'value'}
])
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid['id'], bid_token), {"data": {'status': 'active', "value": {"amount": 500}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid['id'], bid_token), {"data": {"value": {"amount": 400}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 400)
self.assertNotEqual(response.json['data']['date'], bid['date'])
response = self.app.patch_json('/tenders/{}/bids/some_id'.format(self.tender_id), {"data": {"value": {"amount": 400}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.patch_json('/tenders/some_id/bids/some_id', {"data": {"value": {"amount": 400}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
self.set_status('complete')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 400)
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(
self.tender_id, bid['id'], bid_token), {"data": {"value": {"amount": 400}}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update bid in current (complete) tender status")
def test_get_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid_token = response.json['access']['token']
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], bid)
self.set_status('active.qualification')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
bid_data = response.json['data']
#self.assertIn(u'participationUrl', bid_data)
#bid_data.pop(u'participationUrl')
self.assertEqual(bid_data, bid)
response = self.app.get('/tenders/{}/bids/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/tenders/some_id/bids/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't delete bid in current (active.qualification) tender status")
def test_delete_tender_bidder(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['id'], bid['id'])
self.assertEqual(response.json['data']['status'], 'deleted')
# deleted bid does not contain bid information
self.assertFalse('value' in response.json['data'])
self.assertFalse('tenderers' in response.json['data'])
self.assertFalse('date' in response.json['data'])
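# the tender's revision history should record both the removal of the bid
# data and the subsequent status change to 'deleted'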
revisions = self.db.get(self.tender_id).get('revisions')
self.assertTrue(any([i for i in revisions[-2][u'changes'] if i['op'] == u'remove' and i['path'] == u'/bids']))
self.assertTrue(any([i for i in revisions[-1][u'changes'] if i['op'] == u'replace' and i['path'] == u'/bids/0/status']))
response = self.app.delete('/tenders/{}/bids/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.delete('/tenders/some_id/bids/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
# finished tender does not show deleted bid info
self.set_status('complete')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['bids']), 1)
bid_data = response.json['data']['bids'][0]
self.assertEqual(bid_data['id'], bid['id'])
self.assertEqual(bid_data['status'], 'deleted')
self.assertFalse('value' in bid_data)
self.assertFalse('tenderers' in bid_data)
self.assertFalse('date' in bid_data)
def test_deleted_bid_is_not_restorable(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid_token = response.json['access']['token']
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['id'], bid['id'])
self.assertEqual(response.json['data']['status'], 'deleted')
# try to restore deleted bid
response = self.app.patch_json('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token), {"data": {
'status': 'active',
}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'][0]["description"], "Can't update bid in (deleted) status")
# check that the deleted bid is not restored
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid['id'], bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['status'], 'deleted')
def test_deleted_bid_does_not_lock_tender_in_state(self):
bids = []
for bid_amount in (400, 405):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": bid_amount}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bids.append(response.json['data'])
# delete first bid
response = self.app.delete('/tenders/{}/bids/{}'.format(self.tender_id, bids[0]['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['id'], bids[0]['id'])
self.assertEqual(response.json['data']['status'], 'deleted')
# try to change tender state
self.set_status('active.qualification')
# check tender status
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active.qualification')
# check bids
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bids[0]['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'deleted')
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bids[1]['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
def test_get_tender_tenderers(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
response = self.app.get('/tenders/{}/bids'.format(self.tender_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bids in current (active.tendering) tender status")
self.set_status('active.qualification')
response = self.app.get('/tenders/{}/bids'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'][0], bid)
response = self.app.get('/tenders/some_id/bids', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
def test_bid_Administrator_change(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
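# switch to the Administrator role: it may correct tenderer details,
# but an attempted change of the bid value is ignored (asserted below)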
self.app.authorization = ('Basic', ('administrator', ''))
response = self.app.patch_json('/tenders/{}/bids/{}'.format(self.tender_id, bid['id']), {"data": {
'tenderers': [{"identifier": {"id": "00000000"}}],
"value": {"amount": 400}
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertNotEqual(response.json['data']["value"]["amount"], 400)
self.assertEqual(response.json['data']["tenderers"][0]["identifier"]["id"], "00000000")
def test_bids_invalidation_on_tender_change(self):
bids_access = {}
# submit bids
for data in test_bids:
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bids_access[response.json['data']['id']] = response.json['access']['token']
# check initial status
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
# update the tender; we can set a value lower than the values in the bids,
# since this request will invalidate them
response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token), {"data":
{"value": {'amount': 300.0}}
})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']["value"]["amount"], 300)
# check bids status
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'invalid')
# check that tender status change does not invalidate bids
# submit one more bid. check for invalid value first
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': test_bids[0]}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'value of bid should be less than value of tender'], u'location': u'body', u'name': u'value'}
])
# and submit valid bid
data = deepcopy(test_bids[0])
data['value']['amount'] = 299
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data})
self.assertEqual(response.status, '201 Created')
valid_bid_id = response.json['data']['id']
# change tender status
self.set_status('active.qualification')
# check tender status
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active.qualification')
# tender should display all bids
self.assertEqual(len(response.json['data']['bids']), 3)
# invalidated bids should show only 'id' and 'status' fields
for bid in response.json['data']['bids']:
if bid['status'] == 'invalid':
self.assertTrue('id' in bid)
self.assertFalse('value' in bid)
self.assertFalse('tenderers' in bid)
self.assertFalse('date' in bid)
# invalidated bids stay invalidated
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, bid_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'invalid')
# invalidated bids display only 'id' and 'status' fields
self.assertFalse('value' in response.json['data'])
self.assertFalse('tenderers' in response.json['data'])
self.assertFalse('date' in response.json['data'])
# and valid bid is not invalidated
response = self.app.get('/tenders/{}/bids/{}'.format(self.tender_id, valid_bid_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
# and displays all of its data
self.assertTrue('value' in response.json['data'])
self.assertTrue('tenderers' in response.json['data'])
self.assertTrue('date' in response.json['data'])
# check bids availability on finished tender
self.set_status('complete')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']['bids']), 3)
for bid in response.json['data']['bids']:
if bid['id'] in bids_access: # previously invalidated bids
self.assertEqual(bid['status'], 'invalid')
self.assertFalse('value' in bid)
self.assertFalse('tenderers' in bid)
self.assertFalse('date' in bid)
else: # valid bid
self.assertEqual(bid['status'], 'active')
self.assertTrue('value' in bid)
self.assertTrue('tenderers' in bid)
self.assertTrue('date' in bid)
def test_bids_activation_on_tender_documents(self):
bids_access = {}
# submit bids
for data in test_bids:
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bids_access[response.json['data']['id']] = response.json['access']['token']
# check initial status
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
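# uploading a new tender document should invalidate every submitted bid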
response = self.app.post('/tenders/{}/documents?acc_token={}'.format(
self.tender_id, self.tender_token), upload_files=[('file', u'укр.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
for bid_id, token in bids_access.items():
response = self.app.get('/tenders/{}/bids/{}?acc_token={}'.format(self.tender_id, bid_id, token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'invalid')
class TenderBidFeaturesResourceTest(BaseTenderUAContentWebTest):
initial_data = test_features_tender_ua_data
initial_status = 'active.tendering'
def test_features_bidder(self):
test_features_bids = [
{
# "status": "active",
"parameters": [
{
"code": i["code"],
"value": 0.1,
}
for i in self.initial_data['features']
],
"tenderers": [
test_organization
],
"value": {
"amount": 469,
"currency": "UAH",
"valueAddedTaxIncluded": True
},
'selfEligible': True,
'selfQualified': True,
},
{
"status": "active",
"parameters": [
{
"code": i["code"],
"value": 0.15,
}
for i in self.initial_data['features']
],
"tenderers": [
test_organization
],
"value": {
"amount": 479,
"currency": "UAH",
"valueAddedTaxIncluded": True
},
'selfEligible': True,
'selfQualified': True,
}
]
for i in test_features_bids:
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': i})
i['status'] = "active"
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid.pop(u'date')
bid.pop(u'id')
self.assertEqual(bid, i)
def test_features_bidder_invalid(self):
data = {
"tenderers": [
test_organization
],
"value": {
"amount": 469,
"currency": "UAH",
"valueAddedTaxIncluded": True
},
'selfEligible': True,
'selfQualified': True,
}
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'parameters'}
])
data["parameters"] = [
{
"code": "OCDS-123454-AIR-INTAKE",
"value": 0.1,
}
]
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'All features parameters is required.'], u'location': u'body', u'name': u'parameters'}
])
data["parameters"].append({
"code": "OCDS-123454-AIR-INTAKE",
"value": 0.1,
})
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'Parameter code should be uniq for all parameters'], u'location': u'body', u'name': u'parameters'}
])
data["parameters"][1]["code"] = "OCDS-123454-YEARS"
data["parameters"][1]["value"] = 0.2
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': data}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [{u'value': [u'value should be one of feature value.']}], u'location': u'body', u'name': u'parameters'}
])
class TenderBidDocumentResourceTest(BaseTenderUAContentWebTest):
initial_status = 'active.tendering'
def setUp(self):
super(TenderBidDocumentResourceTest, self).setUp()
# Create bid
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
bid = response.json['data']
self.bid_id = bid['id']
self.bid_token = response.json['access']['token']
def test_not_found(self):
response = self.app.post('/tenders/some_id/bids/some_id/documents', status=404, upload_files=[
('file', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.post('/tenders/{}/bids/some_id/documents'.format(self.tender_id), status=404, upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.post('/tenders/{}/bids/{}/documents'.format(self.tender_id, self.bid_id), status=404, upload_files=[
('invalid_value', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'body', u'name': u'file'}
])
response = self.app.get('/tenders/some_id/bids/some_id/documents', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.get('/tenders/{}/bids/some_id/documents'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/tenders/some_id/bids/some_id/documents/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.get('/tenders/{}/bids/some_id/documents/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/tenders/{}/bids/{}/documents/some_id'.format(self.tender_id, self.bid_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'document_id'}
])
response = self.app.put('/tenders/some_id/bids/some_id/documents/some_id', status=404,
upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.put('/tenders/{}/bids/some_id/documents/some_id'.format(self.tender_id), status=404, upload_files=[
('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.put('/tenders/{}/bids/{}/documents/some_id'.format(
self.tender_id, self.bid_id), status=404, upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
self.app.authorization = ('Basic', ('invalid', ''))
response = self.app.put('/tenders/{}/bids/{}/documents/some_id'.format(
self.tender_id, self.bid_id), status=404, upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
def test_create_tender_bidder_document(self):
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('name.doc', response.json["data"]["title"])
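# the download key is carried in the query string of the document URL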
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/tenders/{}/bids/{}/documents'.format(self.tender_id, self.bid_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid documents in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}/documents?acc_token={}'.format(self.tender_id, self.bid_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual('name.doc', response.json["data"][0]["title"])
response = self.app.get('/tenders/{}/bids/{}/documents?all=true&acc_token={}'.format(self.tender_id, self.bid_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"][0]["id"])
self.assertEqual('name.doc', response.json["data"][0]["title"])
response = self.app.get('/tenders/{}/bids/{}/documents/{}?download=some_id&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'download'}
])
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}'.format(
self.tender_id, self.bid_id, doc_id, key), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid document in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, key, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 7)
self.assertEqual(response.body, 'content')
response = self.app.get('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid document in current (active.tendering) tender status")
response = self.app.get('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('name.doc', response.json["data"]["title"])
self.set_status('active.awarded')
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't add document because award of bid is not in pending or active state")
def test_put_tender_bidder_document(self):
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id),
status=404,
upload_files=[('invalid_name', 'name.doc', 'content')])
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'body', u'name': u'file'}
])
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), upload_files=[('file', 'name.doc', 'content2')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, key, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, 'content2')
response = self.app.get('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('name.doc', response.json["data"]["title"])
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), 'content3', content_type='application/msword')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.get('/tenders/{}/bids/{}/documents/{}?{}&acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, key, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/msword')
self.assertEqual(response.content_length, 8)
self.assertEqual(response.body, 'content3')
self.set_status('active.awarded')
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, self.bid_id, doc_id), upload_files=[('file', 'name.doc', 'content3')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
def test_patch_tender_bidder_document(self):
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, self.bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {
"documentOf": "lot"
}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'This field is required.'], u'location': u'body', u'name': u'relatedItem'},
])
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {
"documentOf": "lot",
"relatedItem": '0' * 32
}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'relatedItem should be one of lots'], u'location': u'body', u'name': u'relatedItem'}
])
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {"description": "document description"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
response = self.app.get('/tenders/{}/bids/{}/documents/{}?acc_token={}'.format(
self.tender_id, self.bid_id, doc_id, self.bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertEqual('document description', response.json["data"]["description"])
self.set_status('active.awarded')
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(self.tender_id, self.bid_id, doc_id), {"data": {"description": "document description"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
def test_create_tender_bidder_document_nopending(self):
response = self.app.post_json('/tenders/{}/bids'.format(
self.tender_id), {'data': {'tenderers': [test_organization], "value": {"amount": 500}, 'selfEligible': True, 'selfQualified': True}})
bid = response.json['data']
bid_id = bid['id']
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.set_status('active.qualification')
response = self.app.patch_json('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, bid_id, doc_id), {"data": {"description": "document description"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
response = self.app.put('/tenders/{}/bids/{}/documents/{}'.format(
self.tender_id, bid_id, doc_id), 'content3', content_type='application/msword', status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending or active state")
response = self.app.post('/tenders/{}/bids/{}/documents'.format(
self.tender_id, bid_id), upload_files=[('file', 'name.doc', 'content')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't add document because award of bid is not in pending or active state")
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderBidDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderBidFeaturesResourceTest))
suite.addTest(unittest.makeSuite(TenderBidResourceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('simsoexp', '0006_auto_20150721_1432'),
]
operations = [
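# Backfill: existing SchedulingPolicy rows get user id 1 as contributor (such
# a user is assumed to exist); preserve_default=False means this default is
# used only during the migration, so new rows must set contributor explicitly.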
migrations.AddField(
model_name='schedulingpolicy',
name='contributor',
field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import pytest
from flaky import flaky
from pipenv._compat import Path, TemporaryDirectory
from pipenv.utils import temp_environ
from pipenv.vendor import delegator
@pytest.mark.setup
@pytest.mark.basic
@pytest.mark.install
def test_basic_setup(PipenvInstance):
with PipenvInstance() as p:
with PipenvInstance(pipfile=False) as p:
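# note: the inner instance shadows the outer `p`, so the install and the
# assertions below all run against the pipfile-less instance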
c = p.pipenv("install requests")
assert c.return_code == 0
assert "requests" in p.pipfile["packages"]
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.skip_osx
def test_basic_install(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install requests")
assert c.return_code == 0
assert "requests" in p.pipfile["packages"]
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
def test_mirror_install(PipenvInstance):
with temp_environ(), PipenvInstance(chdir=True) as p:
mirror_url = os.environ.pop(
"PIPENV_TEST_INDEX", "https://pypi.python.org/simple"
)
assert "pypi.org" not in mirror_url
# This should sufficiently demonstrate the mirror functionality
# since pypi.org is the default when PIPENV_TEST_INDEX is unset.
c = p.pipenv("install requests --pypi-mirror {0}".format(mirror_url))
assert c.return_code == 0
# Ensure the --pypi-mirror parameter hasn't altered the Pipfile or Pipfile.lock sources
assert len(p.pipfile["source"]) == 1
assert len(p.lockfile["_meta"]["sources"]) == 1
assert "https://pypi.org/simple" == p.pipfile["source"][0]["url"]
assert "https://pypi.org/simple" == p.lockfile["_meta"]["sources"][0]["url"]
assert "requests" in p.pipfile["packages"]
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.needs_internet
def test_bad_mirror_install(PipenvInstance):
with temp_environ(), PipenvInstance(chdir=True) as p:
# This demonstrates that the mirror parameter is being used
os.environ.pop("PIPENV_TEST_INDEX", None)
c = p.pipenv("install requests --pypi-mirror https://pypi.example.org")
assert c.return_code != 0
@pytest.mark.lock
@pytest.mark.complex
@pytest.mark.skip(reason="Does not work unless you can explicitly install into py2")
def test_complex_lock(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install apscheduler")
assert c.return_code == 0
assert "apscheduler" in p.pipfile["packages"]
assert "funcsigs" in p.lockfile[u"default"]
assert "futures" in p.lockfile[u"default"]
@flaky
@pytest.mark.dev
@pytest.mark.run
def test_basic_dev_install(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install requests --dev")
assert c.return_code == 0
assert "requests" in p.pipfile["dev-packages"]
assert "requests" in p.lockfile["develop"]
assert "chardet" in p.lockfile["develop"]
assert "idna" in p.lockfile["develop"]
assert "urllib3" in p.lockfile["develop"]
assert "certifi" in p.lockfile["develop"]
c = p.pipenv("run python -m requests.help")
assert c.return_code == 0
@flaky
@pytest.mark.dev
@pytest.mark.basic
@pytest.mark.install
def test_install_without_dev(PipenvInstance):
"""Ensure that running `pipenv install` doesn't install dev packages"""
with PipenvInstance(chdir=True) as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
six = "*"
[dev-packages]
tablib = "*"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "six" in p.pipfile["packages"]
assert "tablib" in p.pipfile["dev-packages"]
assert "six" in p.lockfile["default"]
assert "tablib" in p.lockfile["develop"]
c = p.pipenv('run python -c "import tablib"')
assert c.return_code != 0
c = p.pipenv('run python -c "import six"')
assert c.return_code == 0
@flaky
@pytest.mark.basic
@pytest.mark.install
def test_install_without_dev_section(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
six = "*"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "six" in p.pipfile["packages"]
assert p.pipfile.get("dev-packages", {}) == {}
assert "six" in p.lockfile["default"]
assert p.lockfile["develop"] == {}
c = p.pipenv('run python -c "import six"')
assert c.return_code == 0
@flaky
@pytest.mark.lock
@pytest.mark.extras
@pytest.mark.install
def test_extras_install(PipenvInstance):
with PipenvInstance(chdir=True) as p:
c = p.pipenv("install requests[socks]")
assert c.return_code == 0
assert "requests" in p.pipfile["packages"]
assert "extras" in p.pipfile["packages"]["requests"]
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "pysocks" in p.lockfile["default"]
@flaky
@pytest.mark.pin
@pytest.mark.basic
@pytest.mark.install
def test_windows_pinned_pipfile(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
requests = "==2.19.1"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "requests" in p.pipfile["packages"]
assert "requests" in p.lockfile["default"]
@flaky
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.resolver
@pytest.mark.backup_resolver
def test_backup_resolver(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
"ibm-db-sa-py3" = "==0.3.1-1"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "ibm-db-sa-py3" in p.lockfile["default"]
@flaky
@pytest.mark.run
@pytest.mark.alt
def test_alternative_version_specifier(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
requests = {version = "*"}
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "requests" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
c = p.pipenv('run python -c "import requests; import idna; import certifi;"')
assert c.return_code == 0
@flaky
@pytest.mark.run
@pytest.mark.alt
def test_outline_table_specifier(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages.requests]
version = "*"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "requests" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "certifi" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
c = p.pipenv('run python -c "import requests; import idna; import certifi;"')
assert c.return_code == 0
@pytest.mark.bad
@pytest.mark.basic
@pytest.mark.install
def test_bad_packages(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install NotAPackage")
assert c.return_code > 0
@pytest.mark.lock
@pytest.mark.extras
@pytest.mark.install
@pytest.mark.requirements
def test_requirements_to_pipfile(PipenvInstance, pypi):
with PipenvInstance(pipfile=False, chdir=True) as p:
# Write a requirements file
with open("requirements.txt", "w") as f:
f.write("-i {}\nrequests[socks]==2.19.1\n".format(pypi.url))
c = p.pipenv("install")
assert c.return_code == 0
print(c.out)
print(c.err)
print(delegator.run("ls -l").out)
# assert stuff in pipfile
assert "requests" in p.pipfile["packages"]
assert "extras" in p.pipfile["packages"]["requests"]
# assert stuff in lockfile
assert "requests" in p.lockfile["default"]
assert "chardet" in p.lockfile["default"]
assert "idna" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
assert "pysocks" in p.lockfile["default"]
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.skip_osx
@pytest.mark.requirements
def test_skip_requirements_when_pipfile(PipenvInstance):
"""Ensure requirements.txt is NOT imported when
1. We do `pipenv install [package]`
2. A Pipfile already exists when we run `pipenv install`.
"""
with PipenvInstance(chdir=True) as p:
with open("requirements.txt", "w") as f:
f.write("requests==2.18.1\n")
c = p.pipenv("install six")
assert c.return_code == 0
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
six = "*"
fake_package = "<0.12"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.ok
assert "fake_package" in p.pipfile["packages"]
assert "fake-package" in p.lockfile["default"]
assert "six" in p.pipfile["packages"]
assert "six" in p.lockfile["default"]
assert "requests" not in p.pipfile["packages"]
assert "requests" not in p.lockfile["default"]
@pytest.mark.cli
@pytest.mark.clean
def test_clean_on_empty_venv(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("clean")
assert c.return_code == 0
@pytest.mark.basic
@pytest.mark.install
def test_install_does_not_extrapolate_environ(PipenvInstance):
"""Ensure environment variables are not expanded in lock file.
"""
with temp_environ(), PipenvInstance(chdir=True) as p:
# os.environ["PYPI_URL"] = pypi.url
os.environ["PYPI_URL"] = p.pypi
with open(p.pipfile_path, "w") as f:
f.write(
"""
[[source]]
url = '${PYPI_URL}/simple'
verify_ssl = true
name = 'mockpi'
"""
)
# Ensure simple install does not extrapolate.
c = p.pipenv("install")
assert c.return_code == 0
assert p.pipfile["source"][0]["url"] == "${PYPI_URL}/simple"
assert p.lockfile["_meta"]["sources"][0]["url"] == "${PYPI_URL}/simple"
# Ensure package install does not extrapolate.
c = p.pipenv("install six")
assert c.return_code == 0
assert p.pipfile["source"][0]["url"] == "${PYPI_URL}/simple"
assert p.lockfile["_meta"]["sources"][0]["url"] == "${PYPI_URL}/simple"
@pytest.mark.basic
@pytest.mark.editable
@pytest.mark.badparameter
@pytest.mark.install
def test_editable_no_args(PipenvInstance):
with PipenvInstance() as p:
c = p.pipenv("install -e")
assert c.return_code != 0
assert "Error: -e option requires an argument" in c.err
@pytest.mark.basic
@pytest.mark.install
@pytest.mark.virtualenv
def test_install_venv_project_directory(PipenvInstance):
"""Test the project functionality during virtualenv creation.
"""
with PipenvInstance(chdir=True) as p:
with temp_environ(), TemporaryDirectory(
prefix="pipenv-", suffix="temp_workon_home"
) as workon_home:
os.environ["WORKON_HOME"] = workon_home.name
if "PIPENV_VENV_IN_PROJECT" in os.environ:
del os.environ["PIPENV_VENV_IN_PROJECT"]
c = p.pipenv("install six")
assert c.return_code == 0
venv_loc = None
for line in c.err.splitlines():
if line.startswith("Virtualenv location:"):
venv_loc = Path(line.split(":", 1)[-1].strip())
assert venv_loc is not None
assert venv_loc.joinpath(".project").exists()
@pytest.mark.cli
@pytest.mark.deploy
@pytest.mark.system
def test_system_and_deploy_work(PipenvInstance):
with PipenvInstance(chdir=True) as p:
c = p.pipenv("install tablib")
assert c.return_code == 0
c = p.pipenv("--rm")
assert c.return_code == 0
c = delegator.run("virtualenv .venv")
assert c.return_code == 0
c = p.pipenv("install --system --deploy")
assert c.return_code == 0
c = p.pipenv("--rm")
assert c.return_code == 0
Path(p.pipfile_path).write_text(
u"""
[packages]
tablib = "*"
""".strip()
)
c = p.pipenv("install --system")
assert c.return_code == 0
@pytest.mark.basic
@pytest.mark.install
def test_install_creates_pipfile(PipenvInstance):
with PipenvInstance(chdir=True) as p:
if os.path.isfile(p.pipfile_path):
os.unlink(p.pipfile_path)
if "PIPENV_PIPFILE" in os.environ:
del os.environ["PIPENV_PIPFILE"]
assert not os.path.isfile(p.pipfile_path)
c = p.pipenv("install")
assert c.return_code == 0
assert os.path.isfile(p.pipfile_path)
@pytest.mark.basic
@pytest.mark.install
def test_install_non_exist_dep(PipenvInstance):
with PipenvInstance(chdir=True) as p:
c = p.pipenv("install dateutil")
assert not c.ok
assert "dateutil" not in p.pipfile["packages"]
@pytest.mark.basic
@pytest.mark.install
def test_install_package_with_dots(PipenvInstance):
with PipenvInstance(chdir=True) as p:
c = p.pipenv("install backports.html")
assert c.ok
assert "backports.html" in p.pipfile["packages"]
@pytest.mark.basic
@pytest.mark.install
def test_rewrite_outline_table(PipenvInstance):
with PipenvInstance(chdir=True) as p:
with open(p.pipfile_path, 'w') as f:
contents = """
[packages]
six = {version = "*"}
[packages.requests]
version = "*"
extras = ["socks"]
""".strip()
f.write(contents)
c = p.pipenv("install flask")
assert c.return_code == 0
with open(p.pipfile_path) as f:
contents = f.read()
assert "[packages.requests]" not in contents
assert 'six = {version = "*"}' in contents
assert 'requests = {version = "*"' in contents
assert 'flask = "*"' in contents
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cake documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import cake
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cake'
copyright = u"2018, Aaron Duke"
author = u"Aaron Duke"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cake.__version__
# The full version, including alpha/beta/rc tags.
release = cake.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cakedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cake.tex',
u'cake Documentation',
u'Aaron Duke', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cake',
u'cake Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cake',
u'cake Documentation',
author,
'cake',
'One line description of project.',
'Miscellaneous'),
]
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The BitPal Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the scantxoutset rpc call."""
from test_framework.test_framework import BitPalTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import shutil
import os
def descriptors(out):
return sorted(u['desc'] for u in out['unspents'])
class ScantxoutsetTest(BitPalTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(110)
addr_P2SH_SEGWIT = self.nodes[0].getnewaddress("", "p2sh-segwit")
pubk1 = self.nodes[0].getaddressinfo(addr_P2SH_SEGWIT)['pubkey']
addr_LEGACY = self.nodes[0].getnewaddress("", "legacy")
pubk2 = self.nodes[0].getaddressinfo(addr_LEGACY)['pubkey']
addr_BECH32 = self.nodes[0].getnewaddress("", "bech32")
pubk3 = self.nodes[0].getaddressinfo(addr_BECH32)['pubkey']
self.nodes[0].sendtoaddress(addr_P2SH_SEGWIT, 0.001)
self.nodes[0].sendtoaddress(addr_LEGACY, 0.002)
self.nodes[0].sendtoaddress(addr_BECH32, 0.004)
#send to child keys of tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK
self.nodes[0].sendtoaddress("mkHV1C6JLheLoUSSZYk7x3FH5tnx9bu7yc", 0.008) # (m/0'/0'/0')
self.nodes[0].sendtoaddress("mipUSRmJAj2KrjSvsPQtnP8ynUon7FhpCR", 0.016) # (m/0'/0'/1')
self.nodes[0].sendtoaddress("n37dAGe6Mq1HGM9t4b6rFEEsDGq7Fcgfqg", 0.032) # (m/0'/0'/1500')
self.nodes[0].sendtoaddress("mqS9Rpg8nNLAzxFExsgFLCnzHBsoQ3PRM6", 0.064) # (m/0'/0'/0)
self.nodes[0].sendtoaddress("mnTg5gVWr3rbhHaKjJv7EEEc76ZqHgSj4S", 0.128) # (m/0'/0'/1)
self.nodes[0].sendtoaddress("mketCd6B9U9Uee1iCsppDJJBHfvi6U6ukC", 0.256) # (m/0'/0'/1500)
self.nodes[0].sendtoaddress("mj8zFzrbBcdaWXowCQ1oPZ4qioBVzLzAp7", 0.512) # (m/1/1/0')
self.nodes[0].sendtoaddress("mfnKpKQEftniaoE1iXuMMePQU3PUpcNisA", 1.024) # (m/1/1/1')
self.nodes[0].sendtoaddress("mou6cB1kaP1nNJM1sryW6YRwnd4shTbXYQ", 2.048) # (m/1/1/1500')
self.nodes[0].sendtoaddress("mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", 4.096) # (m/1/1/0)
self.nodes[0].sendtoaddress("mxp7w7j8S1Aq6L8StS2PqVvtt4HGxXEvdy", 8.192) # (m/1/1/1)
self.nodes[0].sendtoaddress("mpQ8rokAhp1TAtJQR6F6TaUmjAWkAWYYBq", 16.384) # (m/1/1/1500)
self.nodes[0].generate(1)
self.log.info("Stop node, remove wallet, mine again some blocks...")
self.stop_node(0)
shutil.rmtree(os.path.join(self.nodes[0].datadir, self.chain, 'wallets'))
self.start_node(0)
self.nodes[0].generate(110)
scan = self.nodes[0].scantxoutset("start", [])
info = self.nodes[0].gettxoutsetinfo()
assert_equal(scan['success'], True)
assert_equal(scan['height'], info['height'])
assert_equal(scan['txouts'], info['txouts'])
assert_equal(scan['bestblock'], info['bestblock'])
self.restart_node(0, ['-nowallet'])
self.log.info("Test if we have found the non HD unspent outputs.")
assert_equal(self.nodes[0].scantxoutset("start", [ "pkh(" + pubk1 + ")", "pkh(" + pubk2 + ")", "pkh(" + pubk3 + ")"])['total_amount'], Decimal("0.002"))
assert_equal(self.nodes[0].scantxoutset("start", [ "wpkh(" + pubk1 + ")", "wpkh(" + pubk2 + ")", "wpkh(" + pubk3 + ")"])['total_amount'], Decimal("0.004"))
assert_equal(self.nodes[0].scantxoutset("start", [ "sh(wpkh(" + pubk1 + "))", "sh(wpkh(" + pubk2 + "))", "sh(wpkh(" + pubk3 + "))"])['total_amount'], Decimal("0.001"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(" + pubk1 + ")", "combo(" + pubk2 + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007"))
assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "addr(" + addr_BECH32 + ")"])['total_amount'], Decimal("0.007"))
assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007"))
self.log.info("Test range validation.")
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": -1}])
assert_raises_rpc_error(-8, "Range should be greater or equal than 0", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [-1, 10]}])
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]}])
assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [2, 1]}])
assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [0, 1000001]}])
self.log.info("Test extended key derivation.")
# Run various scans, and verify that the sum of the amounts of the matches corresponds to the expected subset.
        # Note that all amounts in the UTXO set are powers of 2 multiplied by 0.001 BCC, so each amount uniquely identifies a subset.
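        # For example, a total_amount of 0.007 can only arise as
        # 0.001 + 0.002 + 0.004, i.e. exactly the three outputs sent to the
        # P2SH-segwit, legacy and bech32 addresses above.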
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/0h)"])['total_amount'], Decimal("0.008"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/1h)"])['total_amount'], Decimal("0.016"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500')"])['total_amount'], Decimal("0.032"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0h/0)"])['total_amount'], Decimal("0.064"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/1)"])['total_amount'], Decimal("0.128"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500)"])['total_amount'], Decimal("0.256"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*h)", "range": 1499}])['total_amount'], Decimal("0.024"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/*h)", "range": 1500}])['total_amount'], Decimal("0.056"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])['total_amount'], Decimal("0.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*)", "range": 1500}])['total_amount'], Decimal("0.448"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0')"])['total_amount'], Decimal("0.512"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1')"])['total_amount'], Decimal("1.024"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500h)"])['total_amount'], Decimal("2.048"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])['total_amount'], Decimal("4.096"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1)"])['total_amount'], Decimal("8.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500)"])['total_amount'], Decimal("16.384"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0)"])['total_amount'], Decimal("4.096"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo([abcdef88/1/2'/3/4h]tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1)"])['total_amount'], Decimal("8.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1500)"])['total_amount'], Decimal("16.384"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1499}])['total_amount'], Decimal("1.536"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1500}])['total_amount'], Decimal("3.584"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": [1500,1500]}])['total_amount'], Decimal("16.384"))
# Test the reported descriptors for a few matches
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])), ["pkh([0c5f9a1e/0'/0'/0]026dbd8b2315f296d36e6b6920b1579ca75569464875c7ebe869b536a7d9503c8c)#dzxw429x", "pkh([0c5f9a1e/0'/0'/1]033e6f25d76c00bedb3a8993c7d5739ee806397f0529b1b31dda31ef890f19a60c)#43rvceed"])
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])), ["pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8"])
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])), ['pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8', 'pkh([0c5f9a1e/1/1/1500]03832901c250025da2aebae2bfb38d5c703a57ab66ad477f9c578bfbcd78abca6f)#vchwd07g', 'pkh([0c5f9a1e/1/1/1]030d820fc9e8211c4169be8530efbc632775d8286167afd178caaf1089b77daba7)#z2t3ypsa'])
# Check that status and abort don't need second arg
assert_equal(self.nodes[0].scantxoutset("status"), None)
assert_equal(self.nodes[0].scantxoutset("abort"), False)
# Check that second arg is needed for start
assert_raises_rpc_error(-1, "scanobjects argument is required for the start action", self.nodes[0].scantxoutset, "start")
if __name__ == '__main__':
ScantxoutsetTest().main()
|
import numpy as np
import astropy.nddata
from astropy.io import fits
from astropy.io.fits import CompImageHDU, HDUList, Header, ImageHDU, PrimaryHDU
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
__all__ = ['parse_input_data', 'parse_input_shape', 'parse_input_weights',
'parse_output_projection']
def parse_input_data(input_data, hdu_in=None):
"""
Parse input data to return a Numpy array and WCS object.
"""
if isinstance(input_data, str):
return parse_input_data(fits.open(input_data), hdu_in=hdu_in)
elif isinstance(input_data, HDUList):
if hdu_in is None:
if len(input_data) > 1:
raise ValueError("More than one HDU is present, please specify "
"HDU to use with ``hdu_in=`` option")
else:
hdu_in = 0
return parse_input_data(input_data[hdu_in])
elif isinstance(input_data, (PrimaryHDU, ImageHDU, CompImageHDU)):
return input_data.data, WCS(input_data.header)
elif isinstance(input_data, tuple) and isinstance(input_data[0], np.ndarray):
if isinstance(input_data[1], Header):
return input_data[0], WCS(input_data[1])
else:
return input_data
elif isinstance(input_data, astropy.nddata.NDDataBase):
return input_data.data, input_data.wcs
else:
raise TypeError("input_data should either be an HDU object or a tuple "
"of (array, WCS) or (array, Header)")
def parse_input_shape(input_shape, hdu_in=None):
"""
Parse input shape information to return an array shape tuple and WCS object.
"""
if isinstance(input_shape, str):
return parse_input_shape(fits.open(input_shape), hdu_in=hdu_in)
elif isinstance(input_shape, HDUList):
if hdu_in is None:
if len(input_shape) > 1:
raise ValueError("More than one HDU is present, please specify "
"HDU to use with ``hdu_in=`` option")
else:
hdu_in = 0
return parse_input_shape(input_shape[hdu_in])
elif isinstance(input_shape, (PrimaryHDU, ImageHDU, CompImageHDU)):
return input_shape.shape, WCS(input_shape.header)
elif isinstance(input_shape, tuple) and isinstance(input_shape[0], np.ndarray):
if isinstance(input_shape[1], Header):
return input_shape[0].shape, WCS(input_shape[1])
else:
return input_shape[0].shape, input_shape[1]
elif isinstance(input_shape, tuple) and isinstance(input_shape[0], tuple):
if isinstance(input_shape[1], Header):
return input_shape[0], WCS(input_shape[1])
else:
return input_shape
elif isinstance(input_shape, astropy.nddata.NDDataBase):
return input_shape.data.shape, input_shape.wcs
else:
raise TypeError("input_shape should either be an HDU object or a tuple "
"of (array-or-shape, WCS) or (array-or-shape, Header)")
def parse_input_weights(input_weights, hdu_weights=None):
"""
Parse input weights to return a Numpy array.
"""
if isinstance(input_weights, str):
return parse_input_data(fits.open(input_weights), hdu_in=hdu_weights)[0]
elif isinstance(input_weights, HDUList):
if hdu_weights is None:
if len(input_weights) > 1:
raise ValueError("More than one HDU is present, please specify "
"HDU to use with ``hdu_weights=`` option")
else:
hdu_weights = 0
return parse_input_data(input_weights[hdu_weights])[0]
elif isinstance(input_weights, (PrimaryHDU, ImageHDU, CompImageHDU)):
return input_weights.data
elif isinstance(input_weights, np.ndarray):
return input_weights
else:
raise TypeError("input_weights should either be an HDU object or a Numpy array")
def parse_output_projection(output_projection, shape_out=None, output_array=None):
if shape_out is None:
if output_array is not None:
shape_out = output_array.shape
elif shape_out is not None and output_array is not None:
if shape_out != output_array.shape:
raise ValueError("shape_out does not match shape of output_array")
if isinstance(output_projection, Header):
wcs_out = WCS(output_projection)
try:
shape_out = [output_projection['NAXIS{}'.format(i + 1)]
for i in range(output_projection['NAXIS'])][::-1]
except KeyError:
if shape_out is None:
raise ValueError("Need to specify shape since output header "
"does not contain complete shape information")
elif isinstance(output_projection, BaseHighLevelWCS):
wcs_out = output_projection
if shape_out is None:
raise ValueError("Need to specify shape_out when specifying "
"output_projection as WCS object")
elif isinstance(output_projection, str):
hdu_list = fits.open(output_projection)
shape_out = hdu_list[0].data.shape
header = hdu_list[0].header
wcs_out = WCS(header)
hdu_list.close()
else:
raise TypeError('output_projection should either be a Header, a WCS '
'object, or a filename')
if len(shape_out) == 0:
raise ValueError("The shape of the output image should not be an "
"empty tuple")
return wcs_out, shape_out
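# A minimal usage sketch (hypothetical data, not part of the module's API):
# each helper accepts several input flavours and normalises them to an
# (array-or-shape, WCS) pair.
if __name__ == '__main__':
    data = np.arange(12, dtype=float).reshape(3, 4)
    header = fits.Header()
    header['NAXIS'] = 2
    header['NAXIS1'] = 4
    header['NAXIS2'] = 3
    array, wcs_in = parse_input_data((data, header))         # (array, Header)
    shape, wcs_in = parse_input_shape((data.shape, header))  # (shape, Header)
    wcs_out, shape_out = parse_output_projection(header)     # Header carries NAXIS info
    print(shape, shape_out)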
|
from dataclasses import dataclass
from dechainy.plugins import Probe
from dechainy.ebpf import EbpfCompiler
@dataclass
class Valid(Probe):
def __post_init__(self):
self.ingress.required = True
self.ingress.cflags.append("-DCUSTOM_VARIABLE=0")
self.egress.required = False
super().__post_init__(path=__file__)
def autopatch(self):
self.ingress.cflags[-1] = "-DCUSTOM_VARIABLE=1"
EbpfCompiler().patch_hook("ingress", self._programs.ingress,
self.ingress.code, self.ingress.cflags)
|
class DefaultColor(object):
"""
This class should have the default colors for every segment.
Please test every new segment with this theme first.
"""
    # RESET is not a real color code. It is used as an indicator
# within the code that any foreground / background color should
# be cleared
RESET = -1
USERNAME_FG = 250
USERNAME_BG = 240
USERNAME_ROOT_FG = 250
USERNAME_ROOT_BG = 124
HOSTNAME_FG = 250
HOSTNAME_BG = 238
HOME_SPECIAL_DISPLAY = True
HOME_BG = 31 # blueish
HOME_FG = 15 # white
PATH_BG = 237 # dark grey
PATH_FG = 250 # light grey
CWD_FG = 254 # nearly-white grey
SEPARATOR_FG = 244
READONLY_BG = 124
READONLY_FG = 254
SSH_BG = 166 # medium orange
SSH_FG = 254
REPO_CLEAN_BG = 148 # a light green color
REPO_CLEAN_FG = 0 # black
REPO_DIRTY_BG = 161 # pink/red
REPO_DIRTY_FG = 15 # white
JOBS_FG = 39
JOBS_BG = 238
CMD_PASSED_BG = 236
CMD_PASSED_FG = 15
CMD_FAILED_BG = 161
CMD_FAILED_FG = 15
SVN_CHANGES_BG = 148
SVN_CHANGES_FG = 22 # dark green
GIT_AHEAD_BG = 240
GIT_AHEAD_FG = 250
GIT_BEHIND_BG = 240
GIT_BEHIND_FG = 250
GIT_STAGED_BG = 22
GIT_STAGED_FG = 15
GIT_NOTSTAGED_BG = 130
GIT_NOTSTAGED_FG = 15
GIT_UNTRACKED_BG = 52
GIT_UNTRACKED_FG = 15
GIT_CONFLICTED_BG = 9
GIT_CONFLICTED_FG = 15
GIT_STASH_BG = 221
GIT_STASH_FG = 0
VIRTUAL_ENV_BG = 35 # a mid-tone green
    VIRTUAL_ENV_FG = 0  # black
BATTERY_NORMAL_BG = 22
BATTERY_NORMAL_FG = 7
BATTERY_LOW_BG = 196
BATTERY_LOW_FG = 7
AWS_PROFILE_FG = 39
AWS_PROFILE_BG = 238
TIME_FG = 250
TIME_BG = 238
CONST_FG = 15
CONST_BG = 0
class Color(DefaultColor):
"""
This subclass is required when the user chooses to use 'default' theme.
Because the segments require a 'Color' class for every theme.
"""
pass
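# A hypothetical custom theme (sketch, not shipped with this module) would
# subclass DefaultColor in the same way and override only the codes it needs:
#
#     class Color(DefaultColor):
#         HOME_BG = 25   # a darker blue
#         HOME_FG = 231  # a brighter white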
|
from rest_framework.permissions import BasePermission
class SuperAdmin(BasePermission):
def has_object_permission(self, request, view, obj):
user = request.user
if not (user and user.is_authenticated):
return False
if user.is_superuser:
return True
return False
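# Usage sketch (the view name is hypothetical): attach the permission to a DRF
# view so only authenticated superusers pass the object-level check.
#
#     from rest_framework.views import APIView
#
#     class AdminOnlyView(APIView):
#         permission_classes = [SuperAdmin]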
|
from __future__ import unicode_literals
import datetime
from chatterbot.input import InputAdapter
from chatterbot.conversation import Statement
class Mailgun(InputAdapter):
"""
Get input from Mailgun.
"""
def __init__(self, **kwargs):
super(Mailgun, self).__init__(**kwargs)
# Use the bot's name for the name of the sender
self.name = kwargs.get('name')
self.from_address = kwargs.get('mailgun_from_address')
self.api_key = kwargs.get('mailgun_api_key')
self.endpoint = kwargs.get('mailgun_api_endpoint')
def get_email_stored_events(self):
import requests
yesterday = datetime.datetime.now() - datetime.timedelta(1)
return requests.get(
'{}/events'.format(self.endpoint),
auth=('api', self.api_key),
params={
'begin': yesterday.isoformat(),
'ascending': 'yes',
'limit': 1
}
)
def get_stored_email_urls(self):
response = self.get_email_stored_events()
data = response.json()
for item in data.get('items', []):
if 'storage' in item:
if 'url' in item['storage']:
yield item['storage']['url']
def get_message(self, url):
import requests
return requests.get(
url,
auth=('api', self.api_key)
)
def process_input(self, statement):
urls = self.get_stored_email_urls()
url = list(urls)[0]
response = self.get_message(url)
message = response.json()
text = message.get('stripped-text')
return Statement(text)
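# Usage sketch (credentials and domain are placeholders): the adapter fetches
# the most recent stored message from the Mailgun events API and wraps its
# stripped text in a Statement.
#
#     adapter = Mailgun(
#         name='example-bot',
#         mailgun_from_address='bot@example.com',
#         mailgun_api_key='key-xxxxxxxx',
#         mailgun_api_endpoint='https://api.mailgun.net/v3/example.com',
#     )
#     statement = adapter.process_input(None)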
|
import matplotlib
import matplotlib.pyplot as plt
x = [20, 23, 29, 27, 30, 34, 35, 37, 40, 43]
y = [1.32, 1.67, 2.17, 2.70, 2.75, 2.87, 3.65, 2.86, 3.61, 4.25]
n = len(x)
assert(n == len(y))
# Means
bar_x = sum(x) / n
bar_y = sum(y) / n
# Sum of squares
sxy = sum([(x[i] - bar_x) * (y[i] - bar_y) for i in range(n)])
sxx = sum([(x[i] - bar_x)**2 for i in range(n)])
syy = sum([(y[i] - bar_y)**2 for i in range(n)])
print("S_xy = {0:5f}, S_xx = {1:5f}, S_yy = {2:5f}".format(sxy ,sxx, syy))
# Point estimates for \beta_0 and \beta_1
b1 = sxy / sxx
b0 = bar_y - b1 * bar_x
print("n = {0}".format(n))
print("\\bar{{x}} = {0:5f}".format(bar_x))
print("\\bar{{y}} = {0:5f}".format(bar_y))
print("Estimated regression line: y = {0:5f} + {1:5f} x".format(b0, b1))
# Plot x and y and save it
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(x, y)
x_values = range(min(x), max(x))
ax.plot(x_values, [b0 + b1 * xi for xi in x_values])
fig.savefig("plot.png")
# error sum of squares
sse = sum([(y[i] - (b0 + b1 * x[i]))**2 for i in range(n)])
# total sum of squares
sst = sum([y[i]**2 for i in range(n)]) - sum(y)**2 / n
sigma_square = sse / (n - 2)
print("SSE: {0:5f}".format(sse))
print("SST: {0:5f}".format(sst))
print("\sigma^2 = {0:5f}".format(sigma_square))
print("\sigma = {0:5f}".format(sigma_square ** 0.5))
print("r^2 = {0:5f}".format(1 - sse / sst))
|
"""Support for Sure PetCare Flaps/Pets sensors."""
from __future__ import annotations
from typing import Any, cast
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_VOLTAGE,
DEVICE_CLASS_BATTERY,
MASS_GRAMS,
PERCENTAGE,
VOLUME_MILLILITERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from surepy.entities import SurepyEntity
from surepy.entities.devices import (
Feeder as SureFeeder,
FeederBowl as SureFeederBowl,
Felaqua as SureFelaqua,
Flap as SureFlap,
SurepyDevice,
)
from surepy.enums import EntityType, LockState
# pylint: disable=relative-beyond-top-level
from . import SurePetcareAPI
from .const import ATTR_VOLTAGE_FULL, ATTR_VOLTAGE_LOW, DOMAIN, SPC, SURE_MANUFACTURER
PARALLEL_UPDATES = 2
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigEntry,
async_add_entities: Any,
discovery_info: Any = None,
) -> None:
"""Set up Sure PetCare sensor platform."""
await async_setup_entry(hass, config, async_add_entities)
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Any
) -> None:
"""Set up config entry Sure PetCare Flaps sensors."""
entities: list[Flap | Felaqua | Feeder | FeederBowl | Battery] = []
spc: SurePetcareAPI = hass.data[DOMAIN][SPC]
for surepy_entity in spc.coordinator.data.values():
if surepy_entity.type in [
EntityType.CAT_FLAP,
EntityType.PET_FLAP,
]:
entities.append(Flap(spc.coordinator, surepy_entity.id, spc))
elif surepy_entity.type == EntityType.FELAQUA:
entities.append(Felaqua(spc.coordinator, surepy_entity.id, spc))
elif surepy_entity.type == EntityType.FEEDER:
for bowl in surepy_entity.bowls.values():
entities.append(
FeederBowl(spc.coordinator, surepy_entity.id, spc, bowl.raw_data())
)
entities.append(Feeder(spc.coordinator, surepy_entity.id, spc))
if surepy_entity.type in [
EntityType.CAT_FLAP,
EntityType.PET_FLAP,
EntityType.FEEDER,
EntityType.FELAQUA,
]:
voltage_batteries_full = cast(
float, config_entry.options.get(ATTR_VOLTAGE_FULL)
)
voltage_batteries_low = cast(
float, config_entry.options.get(ATTR_VOLTAGE_LOW)
)
entities.append(
Battery(
spc.coordinator,
surepy_entity.id,
spc,
voltage_full=voltage_batteries_full,
voltage_low=voltage_batteries_low,
)
)
async_add_entities(entities)
class SurePetcareSensor(CoordinatorEntity, SensorEntity):
"""A binary sensor implementation for Sure Petcare Entities."""
_attr_should_poll = False
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
"""Initialize a Sure Petcare sensor."""
super().__init__(coordinator)
self._id = _id
self._spc: SurePetcareAPI = spc
self._coordinator = coordinator
self._surepy_entity: SurepyEntity = self._coordinator.data[_id]
self._state: dict[str, Any] = self._surepy_entity.raw_data()["status"]
self._attr_available = bool(self._state)
self._attr_unique_id = f"{self._surepy_entity.household_id}-{self._id}"
self._attr_extra_state_attributes = (
{**self._surepy_entity.raw_data()} if self._state else {}
)
self._attr_name: str = (
f"{self._surepy_entity.type.name.replace('_', ' ').title()} "
f"{self._surepy_entity.name.capitalize()}"
)
@property
def device_info(self):
device = {}
try:
model = f"{self._surepy_entity.type.name.replace('_', ' ').title()}"
if serial := self._surepy_entity.raw_data().get("serial_number"):
model = f"{model} ({serial})"
elif mac_address := self._surepy_entity.raw_data().get("mac_address"):
model = f"{model} ({mac_address})"
elif tag_id := self._surepy_entity.raw_data().get("tag_id"):
model = f"{model} ({tag_id})"
device = {
"identifiers": {(DOMAIN, self._id)},
"name": self._surepy_entity.name.capitalize(),
"manufacturer": SURE_MANUFACTURER,
"model": model,
}
if self._state:
versions = self._state.get("version", {})
if dev_fw_version := versions.get("device", {}).get("firmware"):
device["sw_version"] = dev_fw_version
if (lcd_version := versions.get("lcd", {})) and (
rf_version := versions.get("rf", {})
):
device["sw_version"] = (
f"lcd: {lcd_version.get('version', lcd_version)['firmware']} | "
f"fw: {rf_version.get('version', rf_version)['firmware']}"
)
except AttributeError:
pass
return device
class Flap(SurePetcareSensor):
"""Sure Petcare Flap."""
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI) -> None:
super().__init__(coordinator, _id, spc)
self._surepy_entity: SureFlap
self._attr_entity_picture = self._surepy_entity.icon
self._attr_unit_of_measurement = None
if self._state:
self._attr_extra_state_attributes = {
"learn_mode": bool(self._state["learn_mode"]),
**self._surepy_entity.raw_data(),
}
if locking := self._state.get("locking"):
self._attr_state = LockState(locking["mode"]).name.casefold()
@property
def state(self) -> str | None:
"""Return battery level in percent."""
if (
state := cast(SureFlap, self._coordinator.data[self._id])
.raw_data()
.get("status")
):
return LockState(state["locking"]["mode"]).name.casefold()
class Felaqua(SurePetcareSensor):
"""Sure Petcare Felaqua."""
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
super().__init__(coordinator, _id, spc)
self._surepy_entity: SureFelaqua
self._attr_entity_picture = self._surepy_entity.icon
self._attr_unit_of_measurement = VOLUME_MILLILITERS
@property
def state(self) -> float | None:
"""Return the remaining water."""
if felaqua := cast(SureFelaqua, self._coordinator.data[self._id]):
return int(felaqua.water_remaining) if felaqua.water_remaining else None
class FeederBowl(SurePetcareSensor):
"""Sure Petcare Feeder Bowl."""
def __init__(
self,
coordinator,
_id: int,
spc: SurePetcareAPI,
bowl_data: dict[str, int | str],
):
"""Initialize a Bowl sensor."""
super().__init__(coordinator, _id, spc)
self.feeder_id = _id
self.bowl_id = int(bowl_data["index"])
self._id = int(f"{_id}{str(self.bowl_id)}")
self._spc: SurePetcareAPI = spc
self._surepy_feeder_entity: SurepyEntity = self._coordinator.data[_id]
self._surepy_entity: SureFeederBowl = self._coordinator.data[_id].bowls[
self.bowl_id
]
self._state: dict[str, Any] = bowl_data
# https://github.com/PyCQA/pylint/issues/2062
# pylint: disable=no-member
self._attr_name = (
f"{EntityType.FEEDER.name.replace('_', ' ').title()} "
f"{self._surepy_entity.name.capitalize()}"
)
self._attr_icon = "mdi:bowl"
self._attr_state = int(self._surepy_entity.weight)
self._attr_unique_id = (
f"{self._surepy_feeder_entity.household_id}-{self.feeder_id}-{self.bowl_id}"
)
self._attr_unit_of_measurement = MASS_GRAMS
@property
def state(self) -> float | None:
"""Return the remaining water."""
if (feeder := cast(SureFeeder, self._coordinator.data[self.feeder_id])) and (
weight := feeder.bowls[self.bowl_id].weight
):
return int(weight) if weight and weight > 0 else None
class Feeder(SurePetcareSensor):
"""Sure Petcare Feeder."""
def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
super().__init__(coordinator, _id, spc)
self._surepy_entity: SureFeeder
self._attr_entity_picture = self._surepy_entity.icon
self._attr_unit_of_measurement = MASS_GRAMS
@property
def state(self) -> float | None:
"""Return the total remaining food."""
if feeder := cast(SureFeeder, self._coordinator.data[self._id]):
return int(feeder.total_weight) if feeder.total_weight else None
class Battery(SurePetcareSensor):
"""Sure Petcare Flap."""
def __init__(
self,
coordinator,
_id: int,
spc: SurePetcareAPI,
voltage_full: float,
voltage_low: float,
):
super().__init__(coordinator, _id, spc)
self._surepy_entity: SurepyDevice
self._attr_name = f"{self._attr_name} Battery Level"
self.voltage_low = voltage_low
self.voltage_full = voltage_full
self._attr_unit_of_measurement = PERCENTAGE
self._attr_device_class = DEVICE_CLASS_BATTERY
self._attr_unique_id = (
f"{self._surepy_entity.household_id}-{self._surepy_entity.id}-battery"
)
@property
def state(self) -> int | None:
"""Return battery level in percent."""
if battery := cast(SurepyDevice, self._coordinator.data[self._id]):
self._surepy_entity = battery
battery_level = battery.calculate_battery_level(
voltage_full=self.voltage_full, voltage_low=self.voltage_low
)
            # return battery level between 0 and 100
return battery_level
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the additional attrs."""
attrs = {}
if (device := cast(SurepyDevice, self._coordinator.data[self._id])) and (
state := device.raw_data().get("status")
):
self._surepy_entity = device
voltage = float(state["battery"])
attrs = {
"battery_level": device.battery_level,
ATTR_VOLTAGE: f"{voltage:.2f}",
f"{ATTR_VOLTAGE}_per_battery": f"{voltage / 4:.2f}",
}
return attrs
|
#------------------------------------------------------------------------------
# Name: toc_trends_analysis.py
# Purpose: Analyse RESA2 data for trends.
#
# Author: James Sample
#
# Created: Fri Jul 15 11:35:12 2016
# Copyright: (c) James Sample and NIVA
# Licence:
#------------------------------------------------------------------------------
""" Tore has previously written code to perform trend analyses on the data in
RESA2. I haven't been able to find the code, but it appears to shift data
between RESA2, Excel and Access, which seems a bit messy.
In the notebook updated_toc_trends_analysis.ipynb, I tested some code which
refactors all the analysis into Python, interfacing directly with the
database and returning results as dataframes. This seems to have worked
well.
The code below takes the main functions from this notebook and tidies them
up a bit. This file can then be imported into new notebooks, which should
make it easy to re-run trend analyses on different datasets in the future.
"""
def mk_test(x, stn_id, par, alpha=0.05):
""" Adapted from http://pydoc.net/Python/ambhas/0.4.0/ambhas.stats/
by Sat Kumar Tomer.
Perform the MK test for monotonic trends. Uses the "normal
approximation" to determine significance and therefore should
only be used if the number of values is >= 10.
Args:
x: 1D array of data
        stn_id: Station ID, used in the low-count warning message
        par:    Parameter name, used in the low-count warning message
alpha: Significance level
Returns:
var_s: Variance of test statistic
s: M-K test statistic
z: Normalised test statistic
p: p-value of the significance test
trend: Whether to reject the null hypothesis (no trend) at
the specified significance level. One of:
'increasing', 'decreasing' or 'no trend'
"""
import numpy as np
from scipy.stats import norm
n = len(x)
if n < 10:
print (' Data series for %s at site %s has fewer than 10 non-null values. '
'Significance estimates may be unreliable.' % (par, int(stn_id)))
# calculate S
s = 0
for k in range(n-1):
for j in range(k+1,n):
s += np.sign(x[j] - x[k])
# calculate the unique data
unique_x = np.unique(x)
g = len(unique_x)
# calculate the var(s)
if n == g: # there is no tie
var_s = (n*(n-1)*(2*n+5))/18.
else: # there are some ties in data
tp = np.zeros(unique_x.shape)
for i in range(len(unique_x)):
tp[i] = sum(unique_x[i] == x)
# Sat Kumar's code has "+ np.sum", which is incorrect
var_s = (n*(n-1)*(2*n+5) - np.sum(tp*(tp-1)*(2*tp+5)))/18.
if s>0:
z = (s - 1)/np.sqrt(var_s)
elif s == 0:
z = 0
elif s<0:
z = (s + 1)/np.sqrt(var_s)
else:
z = np.nan
# calculate the p_value
p = 2*(1-norm.cdf(abs(z))) # two tail test
h = abs(z) > norm.ppf(1-alpha/2.)
if (z<0) and h:
trend = 'decreasing'
elif (z>0) and h:
trend = 'increasing'
elif np.isnan(z):
trend = np.nan
else:
trend = 'no trend'
return var_s, s, z, p, trend
def wc_stats(raw_df, st_yr=None, end_yr=None, plot=False, fold=None):
""" Calculate key statistics for the TOC trends analysis:
'station_id'
'par_id'
'non_missing'
'median'
'mean'
'std_dev'
'period'
'mk_std_dev'
'mk_stat'
'norm_mk_stat'
'mk_p_val'
'trend'
'sen_slp'
Args:
raw_df: Dataframe with annual data for a single station. Columns must
be: [station_id, year, par1, par2, ... parn]
st_yr: First year to include in analysis. Pass None to start
at the beginning of the series
        end_yr: Last year to include in analysis. Pass None to run to
               the end of the series
plot: Whether to generate a PNG plot of the Sen's slope
regression
fold: Folder in which to save PNGs if plot=True
Returns:
df of key statistics.
"""
import numpy as np, pandas as pd
import seaborn as sn, matplotlib.pyplot as plt, os
from scipy.stats import theilslopes
sn.set_context('poster')
# Checking
df = raw_df.copy()
assert list(df.columns[:2]) == ['STATION_ID', 'YEAR'], 'Columns must be: [STATION_ID, YEAR, par1, par2, ... parn]'
assert len(df['STATION_ID'].unique()) == 1, 'You can only process data for one site at a time'
# Get just the period of interest
if st_yr:
df = df.query('YEAR >= @st_yr')
if end_yr:
df = df.query('YEAR <= @end_yr')
# Only continue if data
if len(df) > 0:
# Get stn_id
stn_id = df['STATION_ID'].iloc[0]
# Tidy up df
df.index = df['YEAR']
df.sort_index(inplace=True)
del df['STATION_ID'], df['YEAR']
# Container for results
data_dict = {'station_id':[],
'par_id':[],
'non_missing':[],
'n_start':[],
'n_end':[],
'median':[],
'mean':[],
'std_dev':[],
'period':[],
'mk_std_dev':[],
'mk_stat':[],
'norm_mk_stat':[],
'mk_p_val':[],
'trend':[],
'sen_slp':[]}
# Loop over pars
for col in df.columns:
# 1. Station ID
data_dict['station_id'].append(stn_id)
# 2. Par ID
data_dict['par_id'].append(col)
# 3. Non-missing
data_dict['non_missing'].append(pd.notnull(df[col]).sum())
# 4. Number of non nulls at start
if st_yr:
# Record the number of non-nulls within 5 years of start year
data_dict['n_start'].append(pd.notnull(df[df.index<(st_yr+5)][col]).sum())
else:
# Record the number of non-nulls in first 5 years of record
data_dict['n_start'].append(pd.notnull(df[col].head(5)).sum())
# 5. Number of non nulls at end
if end_yr:
# Record the number of non-nulls within 5 years of end year
data_dict['n_end'].append(pd.notnull(df[df.index>(end_yr-5)][col]).sum())
else:
# Record the number of non-nulls in last 5 years of record
data_dict['n_end'].append(pd.notnull(df[col].tail(5)).sum())
# 6. Median
data_dict['median'].append(df[col].median())
# 7. Mean
data_dict['mean'].append(df[col].mean())
# 8. Std dev
data_dict['std_dev'].append(df[col].std())
            # 9. Period
            # Use local names so the st_yr/end_yr arguments are not
            # overwritten (later loop iterations still need them)
            per_st_yr = df.index.min()
            per_end_yr = df.index.max()
            per = '%s-%s' % (int(per_st_yr), int(per_end_yr))
            data_dict['period'].append(per)
# 10. M-K test
# Drop missing values
mk_df = df[[col]].dropna(how='any')
# Only run stats if more than 1 valid value
if len(mk_df) > 1:
var_s, s, z, p, trend = mk_test(mk_df[col].values, stn_id, col)
data_dict['mk_std_dev'].append(np.sqrt(var_s))
data_dict['mk_stat'].append(s)
data_dict['norm_mk_stat'].append(z)
data_dict['mk_p_val'].append(p)
data_dict['trend'].append(trend)
# 11. Sen's slope. Returns:
# Median slope, median intercept, 95% CI lower bound,
# 95% CI upper bound
sslp, icpt, lb, ub = theilslopes(mk_df[col].values,
mk_df.index, 0.95)
data_dict['sen_slp'].append(sslp)
# 12. Plot if desired
if plot:
fig = plt.figure()
plt.plot(mk_df.index, mk_df[col].values, 'bo-')
plt.plot(mk_df.index, mk_df.index*sslp + icpt, 'k-')
if col in ('Al', 'TOC'):
plt.ylabel('%s (mg/l)' % col, fontsize=24)
else:
plt.ylabel('%s (ueq/l)' % col, fontsize=24)
plt.title('%s at station %s' % (col, int(stn_id)),
fontsize=32)
plt.tight_layout()
# Save fig
                    out_path = os.path.join(fold,
                                            '%s_%s_%s-%s.png' % (int(stn_id), col,
                                                                 per_st_yr, per_end_yr))
plt.savefig(out_path, dpi=150)
plt.close()
# Otherwise all NaN
else:
for par in ['mk_std_dev', 'mk_stat', 'norm_mk_stat',
'mk_p_val', 'trend', 'sen_slp']:
data_dict[par].append(np.nan)
# Build to df
res_df = pd.DataFrame(data_dict)
res_df = res_df[['station_id', 'par_id', 'period', 'non_missing', 'n_start',
'n_end', 'mean', 'median', 'std_dev', 'mk_stat',
'norm_mk_stat', 'mk_p_val', 'mk_std_dev', 'trend', 'sen_slp']]
return res_df
def read_resa2(proj_list, engine):
""" Reads raw data for the specified projects from RESA2. Extracts only
the parameters required for the trends analysis and calculates
aggregated annual values by taking medians.
Args:
proj_list: List of RESA2 project names for which to extract data
engine: SQLAlchemy 'engine' object already connected to RESA2
Returns:
[stn_df, wc_df, dup_df]. Dataframe of stations; Dataframe of annual
water chemistry values; dataframe of duplicates to check
"""
import pandas as pd
# Get par IDs etc. for pars of interest
par_list = ['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N', 'TOC',
'Al', 'K', 'Na', 'NH4-N', 'pH']
sql = ('SELECT * FROM resa2.parameter_definitions '
'WHERE name in %s' % str(tuple(par_list)))
par_df = pd.read_sql_query(sql, engine)
# Get stations for a specified list of projects
if len(proj_list) == 1:
sql = ("SELECT station_id, station_code "
"FROM resa2.stations "
"WHERE station_id IN (SELECT UNIQUE(station_id) "
"FROM resa2.projects_stations "
"WHERE project_id IN (SELECT project_id "
"FROM resa2.projects "
"WHERE project_name = '%s'))"
% proj_list[0])
else:
sql = ('SELECT station_id, station_code '
'FROM resa2.stations '
'WHERE station_id IN (SELECT UNIQUE(station_id) '
'FROM resa2.projects_stations '
'WHERE project_id IN (SELECT project_id '
'FROM resa2.projects '
'WHERE project_name IN %s))'
% str(tuple(proj_list)))
stn_df = pd.read_sql(sql, engine)
# Get results for ALL pars for these sites
if len(stn_df)==1:
sql = ("SELECT * FROM resa2.water_chemistry_values2 "
"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples "
"WHERE station_id = %s)"
% stn_df['station_id'].iloc[0])
else:
sql = ("SELECT * FROM resa2.water_chemistry_values2 "
"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples "
"WHERE station_id IN %s)"
% str(tuple(stn_df['station_id'].values)))
wc_df = pd.read_sql_query(sql, engine)
# Get all sample dates for sites
if len(stn_df)==1:
sql = ("SELECT water_sample_id, station_id, sample_date, depth1, depth2 "
"FROM resa2.water_samples "
"WHERE station_id = %s "
% stn_df['station_id'].iloc[0])
else:
sql = ("SELECT water_sample_id, station_id, sample_date, depth1, depth2 "
"FROM resa2.water_samples "
"WHERE station_id IN %s "
% str(tuple(stn_df['station_id'].values)))
samp_df = pd.read_sql_query(sql, engine)
# Join in par IDs based on method IDs
sql = ('SELECT * FROM resa2.wc_parameters_methods')
meth_par_df = pd.read_sql_query(sql, engine)
wc_df = pd.merge(wc_df, meth_par_df, how='left',
left_on='method_id', right_on='wc_method_id')
# Get just the parameters of interest
wc_df = wc_df.query('wc_parameter_id in %s'
% str(tuple(par_df['parameter_id'].values)))
# Join in sample dates
wc_df = pd.merge(wc_df, samp_df, how='left',
left_on='sample_id', right_on='water_sample_id')
# Get just the near-surface samples
wc_df = wc_df.query('(depth1 <= 1) and (depth2 <= 1)')
# Join in parameter units
sql = ('SELECT * FROM resa2.parameter_definitions')
all_par_df = pd.read_sql_query(sql, engine)
wc_df = pd.merge(wc_df, all_par_df, how='left',
left_on='wc_parameter_id', right_on='parameter_id')
# Join in station codes
wc_df = pd.merge(wc_df, stn_df, how='left',
left_on='station_id', right_on='station_id')
# Convert units
wc_df['value'] = wc_df['value'] * wc_df['conversion_factor']
# Extract columns of interest
wc_df = wc_df[['station_id', 'sample_date', 'name',
'value', 'entered_date_x']]
# Check for duplicates
dup_df = wc_df[wc_df.duplicated(subset=['station_id',
'sample_date',
'name'],
keep=False)].sort_values(by=['station_id',
'sample_date',
'name'])
if len(dup_df) > 0:
print (' The database contains duplicate values for some station-'
'date-parameter combinations.\n Only the most recent values '
'will be used, but you should check the repeated values are not '
'errors.\n The duplicated entries are returned in a separate '
'dataframe.\n')
# Choose most recent record for each duplicate
wc_df.sort_values(by='entered_date_x', inplace=True, ascending=True)
# Drop duplicates
wc_df.drop_duplicates(subset=['station_id', 'sample_date', 'name'],
keep='last', inplace=True)
# Sort
wc_df.sort_values(by=['station_id', 'sample_date', 'name'],
inplace=True)
# Tidy
del wc_df['entered_date_x']
wc_df.reset_index(inplace=True, drop=True)
# Unstack
wc_df.set_index(['station_id', 'sample_date', 'name'], inplace=True)
wc_df = wc_df.unstack(level='name')
wc_df.columns = wc_df.columns.droplevel()
wc_df.reset_index(inplace=True)
wc_df.columns.name = None
# Extract year from date column
wc_df['year'] = wc_df['sample_date'].map(lambda x: x.year)
del wc_df['sample_date']
# Groupby station_id and year
grpd = wc_df.groupby(['station_id', 'year'])
# Calculate median
wc_df = grpd.agg('median')
return stn_df, wc_df, dup_df
def conv_units_and_correct(wc_df):
""" Take a dataframe of aggregated annual values in the units specified by
RESA2.PARAMETERS and performs unit conversions to ueq/l. Also applies
sea-salt correction where necessary.
Args:
wc_df: Dataframe in original units
Returns:
Dataframe in converted units
"""
import pandas as pd
# Tabulate chemical properties
chem_dict = {'molar_mass':[96, 35, 40, 24, 14, 39, 23, 14],
'valency':[2, 1, 2, 2, 1, 1, 1, 1],
'resa2_ref_ratio':[0.103, 1., 0.037, 0.196,
'N/A', 0.018, 0.859, 'N/A']}
chem_df = pd.DataFrame(chem_dict, index=['SO4', 'Cl', 'Ca', 'Mg',
'NO3-N', 'K', 'Na', 'NH4-N'])
chem_df = chem_df[['molar_mass', 'valency', 'resa2_ref_ratio']]
# Fill NoData for ANC calculation. Assume that NH4 can be ignored if not
# present.
# If have data for NH4, fill data gaps with 0
if 'NH4-N' in wc_df.columns:
wc_df['NH4-N'].fillna(value=0, inplace=True)
else: # Just assume 0
wc_df['NH4-N'] = 0
# 1. Convert to ueq/l
# 1.1. pH to H+
wc_df['EH'] = 1E6 * 10**(-wc_df['pH'])
# 1.2. Other pars
for par in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na', 'NH4-N']:
val = chem_df.at[par, 'valency']
mm = chem_df.at[par, 'molar_mass']
if par == 'NO3-N':
wc_df['ENO3'] = wc_df[par] * val / mm
elif par == 'NH4-N':
wc_df['ENH4'] = wc_df[par] * val / mm
else:
wc_df['E%s' % par] = wc_df[par] * val * 1000. / mm
# 2. Apply sea-salt correction
for par in ['ESO4', 'EMg', 'ECa']:
ref = chem_df.at[par[1:], 'resa2_ref_ratio']
wc_df['%sX' % par] = wc_df[par] - (ref*wc_df['ECl'])
# 3. Calculate combinations
# 3.1. ESO4 + ECl
wc_df['ESO4_ECl'] = wc_df['ESO4'] + wc_df['ECl']
# 3.2. ECa + EMg
wc_df['ECa_EMg'] = wc_df['ECa'] + wc_df['EMg']
# 3.3. ECaX + EMgX
wc_df['ECaX_EMgX'] = wc_df['ECaX'] + wc_df['EMgX']
# 3.4. ANC = (ECa+EMg+EK+ENa+ENH4) - (ECl+ESO4+ENO3)
wc_df['ANC'] = ((wc_df['ECa'] + wc_df['EMg'] + wc_df['EK'] +
wc_df['ENa'] + wc_df['ENH4']) -
(wc_df['ECl'] + wc_df['ESO4'] + wc_df['ENO3']))
# 3.5. ANCX = (ECaX+EMgX+EK+ENa+ENH4) - (ECl+ESO4X+ENO3)
wc_df['ANCX'] = ((wc_df['ECaX'] + wc_df['EMgX'] + wc_df['EK'] +
wc_df['ENa'] + wc_df['ENH4']) -
(wc_df['ECl'] + wc_df['ESO4X'] + wc_df['ENO3']))
# 4. Delete unnecessary columns and tidy
for col in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na', 'NH4-N', 'pH',
'EMg', 'ECa', 'EK', 'ENa', 'ENH4', 'EMgX', 'ECaX']:
del wc_df[col]
wc_df.reset_index(inplace=True)
return wc_df
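# Worked example for the conversion above, using values from chem_df: 1 mg/l
# of SO4 (valency 2, molar mass 96) becomes 1 * 2 * 1000 / 96 = ~20.8 ueq/l,
# and the sea-salt-corrected ESO4X then subtracts 0.103 * ECl.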
def run_trend_analysis(proj_list, engine, st_yr=None, end_yr=None,
plot=False, fold=None):
""" Run the trend analysis for the specified projects and time period.
Args:
proj_list: List of RESA2 project names for which to extract data
engine: SQLAlchemy 'engine' object already connected to RESA2
st_yr: First year to include in analysis. Pass None to start
at the beginning of the series
        end_yr: Last year to include in analysis. Pass None to run to
               the end of the series
plot: Whether to generate a PNG plot of the Sen's slope
regression
fold: Folder in which to save PNGs if plot=True
Returns:
[res_df, dup_df, no_data_df]. Dataframe of statistics; dataframe of
duplicated water chemistry values for investigation; dataframe of
stations with no relevant data in the period of interest
"""
import pandas as pd, os
# Check paths valid
if plot:
assert os.path.isdir(fold), 'The specified folder does not exist.'
# Get raw data from db
print ('Extracting data from RESA2...')
stn_df, wc_df, dup_df = read_resa2(proj_list, engine)
# Identify stations with no relevant records
stns_no_data = (set(stn_df['station_id'].values) -
set(wc_df.index.get_level_values('station_id')))
if len(stns_no_data) > 0:
print (' Some stations have no relevant data in the period '
'specified. Their IDs are returned in a separate dataframe.\n')
no_data_df = pd.DataFrame({'station_id':list(stns_no_data)})
else:
no_data_df = None
print (' Done.')
# Convert units and apply sea-salt correction
print ('\nConverting units and applying sea-salt correction...')
wc_df = conv_units_and_correct(wc_df)
print (' Done.')
# Calculate stats
# Container for output
df_list = []
# Loop over sites
print ('\nCalculating statistics...')
for stn_id in wc_df['station_id'].unique():
# Extract data for this site
df = wc_df.query('station_id == @stn_id')
# Modify col names
names = list(df.columns)
names[:2] = ['STATION_ID', 'YEAR']
df.columns = names
# Heleen wants the annual time series for each site for further analysis
# Write df to output
#out_ann_fold = (r'../../../Thematic_Trends_Report_2019/results/annual_chemistry_series')
#out_ann_path = os.path.join(out_ann_fold, 'stn_%s.csv' % stn_id)
#df_trunc = df.query('(YEAR>=1990) & (YEAR<=2016)') # Truncate to 1990 to 2016
#df_trunc.to_csv(out_ann_path)
# Run analysis
df_list.append(wc_stats(df, st_yr=st_yr, end_yr=end_yr,
plot=plot, fold=fold))
res_df = pd.concat(df_list, axis=0)
# Convert station_id cols to ints
res_df['station_id'] = res_df['station_id'].map(int)
dup_df['station_id'] = dup_df['station_id'].map(int)
if no_data_df is not None:
no_data_df['station_id'] = no_data_df['station_id'].map(int)
print (' Done.')
print ('\nFinished.')
return res_df, dup_df, no_data_df
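if __name__ == '__main__':
    # Small self-test sketch (not part of the original workflow): a noisy but
    # steadily rising synthetic series should be flagged as 'increasing'.
    import numpy as np
    np.random.seed(1)
    series = np.arange(30.) + np.random.normal(0, 0.5, 30)
    var_s, s, z, p, trend = mk_test(series, stn_id=0, par='demo')
    print('S = %s, z = %.2f, p = %.4f, trend = %s' % (s, z, p, trend))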
|
# Generated by Django 3.1.7 on 2021-02-04 14:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("sites", "0003_set_site_domain_and_name"),
]
operations = [
migrations.AlterModelOptions(
name="site",
options={
"ordering": ["domain"],
"verbose_name": "site",
"verbose_name_plural": "sites",
},
),
]
|
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for normalization layers."""
from absl.testing import absltest
import numpy as onp
from trax.layers import base
from trax.layers import normalization
from trax.math import numpy as np
from trax.shapes import ShapeDtype
class NormalizationLayerTest(absltest.TestCase):
def test_batch_norm_shape(self):
input_signature = ShapeDtype((29, 5, 7, 20))
result_shape = base.check_shape_agreement(normalization.BatchNorm(),
input_signature)
self.assertEqual(result_shape, input_signature.shape)
def test_batch_norm(self):
input_shape = (2, 3, 4)
input_dtype = np.float32
input_signature = ShapeDtype(input_shape, input_dtype)
eps = 1e-5
inp1 = np.reshape(np.arange(np.prod(input_shape), dtype=input_dtype),
input_shape)
    m1 = 11.5  # Mean of this input.
    v1 = 47.9167  # Variance of this input.
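    # With BatchNorm's default momentum of 0.001 (an assumption inferred from
    # the assertions below), one forward pass should update the running stats:
    # mean -> 0 + m1 * 0.001, variance -> 0.999 * 1 + v1 * 0.001.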
layer = normalization.BatchNorm(axis=(0, 1, 2))
_, _ = layer.init(input_signature)
state = layer.state
onp.testing.assert_allclose(state[0], 0)
onp.testing.assert_allclose(state[1], 1)
self.assertEqual(state[2], 0)
out = layer(inp1)
state = layer.state
onp.testing.assert_allclose(state[0], m1 * 0.001)
onp.testing.assert_allclose(state[1], 0.999 + v1 * 0.001, rtol=1e-6)
self.assertEqual(state[2], 1)
onp.testing.assert_allclose(out, (inp1 - m1) / np.sqrt(v1 + eps),
rtol=1e-6)
def test_layer_norm_shape(self):
input_signature = ShapeDtype((29, 5, 7, 20))
result_shape = base.check_shape_agreement(
normalization.LayerNorm(), input_signature)
self.assertEqual(result_shape, input_signature.shape)
def test_frn_shape(self):
B, H, W, C = 64, 5, 7, 3 # pylint: disable=invalid-name
input_signature = ShapeDtype((B, H, W, C))
result_shape = base.check_shape_agreement(
normalization.FilterResponseNorm(), input_signature)
self.assertEqual(result_shape, input_signature.shape)
result_shape = base.check_shape_agreement(
normalization.FilterResponseNorm(learn_epsilon=False),
input_signature)
self.assertEqual(result_shape, input_signature.shape)
if __name__ == '__main__':
absltest.main()
|
# Telegram bot v.002 - the bot builds a menu, sends a dog photo and a random joke
import telebot # pyTelegramBotAPI 4.3.1
from telebot import types
import requests
import bs4
import json
bot = telebot.TeleBot('5193117811:AAH0hWHVx0kH08sub52IFj2SAdJi1eugY-k')  # Create the bot instance
# -----------------------------------------------------------------------
# Handler for the /start command
@bot.message_handler(commands=["start"])
def start(message, res=False):
chat_id = message.chat.id
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("👋 Главное меню")
btn2 = types.KeyboardButton("❓ Помощь")
markup.add(btn1, btn2)
bot.send_message(chat_id,
text="Привет, {0.first_name}! Я тестовый бот для курса программирования на языке ПаЙтон".format(
message.from_user), reply_markup=markup)
# -----------------------------------------------------------------------
# Handle text messages from the user
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
chat_id = message.chat.id
ms_text = message.text
if ms_text == "Главное меню" or ms_text == "👋 Главное меню" or ms_text == "Вернуться в главное меню": # ..........
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("Задачи")
btn2 = types.KeyboardButton("Развлечения")
btn3 = types.KeyboardButton("Помощь")
back = types.KeyboardButton("Вернуться в главное меню")
markup.add(btn1, btn2, btn3, back)
bot.send_message(chat_id, text="Вы в главном меню", reply_markup=markup)
elif ms_text == "Развлечения": # ..................................................................................
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("Прислать анекдот c anekdotme.ru")
btn2 = types.KeyboardButton("Прислать анекдот c nekdo.ru")
btn3 = types.KeyboardButton("Прислать собаку")
btn4 = types.KeyboardButton("Прислать случайного пользователя")
back = types.KeyboardButton("Вернуться в главное меню")
markup.add(btn1, btn2, btn3, btn4, back)
bot.send_message(chat_id, text="Развлечения", reply_markup=markup)
elif ms_text == "Прислать анекдот c anekdotme.ru": # .............................................................................
bot.send_message(chat_id, text=get_anekdot('http://anekdotme.ru/random', '.anekdot_text'))
elif ms_text == "Прислать анекдот c nekdo.ru": # .............................................................................
bot.send_message(chat_id, text=get_anekdot('https://nekdo.ru/random', '.text'))
elif ms_text == "Прислать собаку" or ms_text == "/dog": # ....................
contents = requests.get('https://random.dog/woof.json').json()
urlDOG = contents['url']
bot.send_photo(chat_id, photo=urlDOG, caption='Твоя собачка:)')
elif ms_text == "Прислать случайного пользователя" or ms_text == "/user": # ....................
contents = requests.get('https://randomuser.me/api/').json()
img = contents['results'][0]['picture']['large']
name = contents['results'][0]['name']['title'] + ' ' + contents['results'][0]['name']['first'] + ' ' + contents['results'][0]['name']['last']
age = contents['results'][0]['dob']['age']
place = contents['results'][0]['location']['timezone']['description']
place = place.split(',')[0]
info = name + ', ' + str(age) + '\n' + place
bot.send_photo(chat_id, photo=img, caption=info)
elif ms_text == "Задачи": # ..................................................................................
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("1")
btn2 = types.KeyboardButton("2")
btn3 = types.KeyboardButton("3")
btn4 = types.KeyboardButton("4")
btn5 = types.KeyboardButton("5")
btn6 = types.KeyboardButton("6")
btn7 = types.KeyboardButton("7")
btn8 = types.KeyboardButton("8")
btn9 = types.KeyboardButton("9")
btn10 = types.KeyboardButton("10")
back = types.KeyboardButton("Вернуться в главное меню")
markup.add(btn1, btn2, btn3, btn4, btn5, btn6, btn7, btn8, btn9, btn10, back)
bot.send_message(chat_id, text="Задачи с 1-го занятия", reply_markup=markup)
elif ms_text == "1": # .........................................................
name = 'Вероника'
bot.send_message(chat_id, text=name)
elif ms_text == "2": # .........................................................
name = 'Вероника'
age = 20
        reply = 'Привет, меня зовут ' + name + '. Мне ' + str(age) + ' лет.'
        bot.send_message(chat_id, text=reply)
elif ms_text == "3": # .........................................................
name = 'Вероника'
name5 = name * 5
bot.send_message(chat_id, text=name5)
elif ms_text == "4": # .........................................................
bot.send_message(chat_id, text='Ваше имя?')
        def inputName(message):
userName = message.text
bot.send_message(chat_id, text='Сколько Вам лет?')
            def inputAge(message):
userAge = message.text
userMessage = 'Привет, ' + userName + '! Тебе уже ' + userAge + ' лет?! Это так круто!'
bot.send_message(chat_id, text=userMessage)
bot.register_next_step_handler(message, inputAge)
bot.register_next_step_handler(message, inputName)
elif ms_text == "5": # .........................................................
bot.send_message(chat_id, text='Сколько Вам лет?')
        def inputAge(message):
userAge = message.text
userAge = int(userAge)
if userAge < 18:
ageMessage = 'Ты не достиг еще совершеннолетия, возращайся позже'
else:
ageMessage = 'Ты уже достаточно взрослый, присоединяйся к нам!'
bot.send_message(chat_id, text=ageMessage)
bot.register_next_step_handler(message, inputAge)
elif ms_text == "6": # .........................................................
bot.send_message(chat_id, text='Ваше имя?')
        def inputName(message):
userName = message.text
bot.send_message(chat_id, text=userName[1:-1])
bot.send_message(chat_id, text=userName[::-1])
bot.send_message(chat_id, text=userName[-3:])
bot.send_message(chat_id, text=userName[0:5])
bot.register_next_step_handler(message, inputName)
elif ms_text == "7": # .........................................................
bot.send_message(chat_id, text='Ваше имя?')
        def inputName(message):
userName = message.text
nameMessage = 'Кол-во букв в имени: ' + str(len(userName))
bot.send_message(chat_id, text=nameMessage)
bot.send_message(chat_id, text='Сколько Вам лет?')
            def inputAge(message):
userAge = message.text
userAge = int(userAge)
import math
ageNum1 = math.floor(userAge / 10)
ageNum2 = userAge % 10
                digit_sum = ageNum1 + ageNum2
                ageMessage1 = 'Сумма цифр возраста: ' + str(digit_sum)
bot.send_message(chat_id, text=ageMessage1)
if ageNum1 < 1:
comp = ageNum2
else:
comp = ageNum1 * ageNum2
ageMessage2 = 'Произведение цифр возраста: ' + str(comp)
bot.send_message(chat_id, text=ageMessage2)
bot.register_next_step_handler(message, inputAge)
bot.register_next_step_handler(message, inputName)
elif ms_text == "8": # .........................................................
bot.send_message(chat_id, text='Ваше имя?')
        def inputName(message):
userName = message.text
bot.send_message(chat_id, text=userName.upper())
bot.send_message(chat_id, text=userName.lower())
bot.send_message(chat_id, text=userName.capitalize())
bot.register_next_step_handler(message, inputName)
elif ms_text == "9": # .........................................................
bot.send_message(chat_id, text='Ваше имя?')
@bot.message_handler(content_types=['text'])
def inputName(message):
userName = message.text
if " " in userName:
nameMessage = 'Error userName value'
else:
nameMessage = 'Correct userName value'
bot.send_message(chat_id, text=nameMessage)
bot.send_message(chat_id, text='Сколько Вам лет?')
@bot.message_handler(content_types=['text'])
def inputAge(message):
userAge = message.text
userAge = int(userAge)
if (userAge < 0) or (userAge > 150):
ageMessage = 'Error userAge value'
else:
ageMessage = 'Correct userAge value'
bot.send_message(chat_id, text=ageMessage)
bot.register_next_step_handler(message, inputAge)
bot.register_next_step_handler(message, inputName)
elif ms_text == "10": # .........................................................
bot.send_message(chat_id, text='Сколько будет 8+2*3?')
@bot.message_handler(content_types=['text'])
def inputAnswer(message):
userAnswer = message.text
userAnswer = int(userAnswer)
if userAnswer == 14:
userMessage = 'Правильно!'
else:
userMessage = 'Неверно!'
bot.send_message(chat_id, text=userMessage)
bot.register_next_step_handler(message, inputAnswer)
elif ms_text == "Помощь" or ms_text == "/help": # .................................................................
bot.send_message(chat_id, "Автор: Яковлева Вероника")
key1 = types.InlineKeyboardMarkup()
btn1 = types.InlineKeyboardButton(text="Напишите автору", url="https://t.me/chicanica")
key1.add(btn1)
img = open('foto.jpg', 'rb')
bot.send_photo(message.chat.id, img, reply_markup=key1)
else: # ...........................................................................................................
bot.send_message(chat_id, text="You wrote: " + ms_text)
def get_anekdot(link, className):
array_anekdots = []
req_anek = requests.get(link)
soup = bs4.BeautifulSoup(req_anek.text, "html.parser")
result_find = soup.select(className)
for result in result_find:
array_anekdots.append(result.getText().strip())
return array_anekdots[0]
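# Illustrative usage of get_anekdot (the URL and CSS selector below are
# hypothetical placeholders, not taken from this file):
# joke = get_anekdot("https://example.com/jokes", ".joke-text")
# bot.send_message(chat_id, text=joke)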
# -----------------------------------------------------------------------
bot.polling(none_stop=True, interval=0) # Start the bot
|
from .base import *
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': get_value_from_secret('DB_NAME'),
'HOST': get_value_from_secret('DB_HOST'),
'USER': get_value_from_secret('DB_USER'),
'PASSWORD': get_value_from_secret('DB_PASSWORD'),
'PORT': get_value_from_secret('DB_PORT'),
'OPTIONS': {'charset': 'utf8'}
}
}
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from string import ascii_lowercase
from typing import Union, AsyncGenerator, Optional
from pyrogram import raw
from pyrogram import types
from pyrogram.scaffold import Scaffold
class Filters:
ALL = "all"
KICKED = "kicked"
RESTRICTED = "restricted"
BOTS = "bots"
RECENT = "recent"
ADMINISTRATORS = "administrators"
QUERIES = [""] + [str(i) for i in range(10)] + list(ascii_lowercase)
QUERYABLE_FILTERS = (Filters.ALL, Filters.KICKED, Filters.RESTRICTED)
class IterChatMembers(Scaffold):
async def iter_chat_members(
self,
chat_id: Union[int, str],
limit: int = 0,
query: str = "",
filter: str = Filters.RECENT,
last_member_count: int = 0, # to speedup iteration for small chats
) -> Optional[AsyncGenerator["types.ChatMember", None]]:
"""Iterate through the members of a chat sequentially.
This convenience method does the same as repeatedly calling :meth:`~pyrogram.Client.get_chat_members` in a loop,
thus saving you from the hassle of setting up boilerplate code. It is useful for getting the whole members list
of a chat with a single call.
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
limit (``int``, *optional*):
Limits the number of members to be retrieved.
By default, no limit is applied and all members are returned [1]_.
query (``str``, *optional*):
Query string to filter members based on their display names and usernames.
Defaults to "" (empty string) [2]_.
filter (``str``, *optional*):
Filter used to select the kind of members you want to retrieve. Only applicable for supergroups
and channels. It can be any of the following:
*"all"* - all kind of members,
*"kicked"* - kicked (banned) members only,
*"restricted"* - restricted members only,
*"bots"* - bots only,
*"recent"* - recent members only,
*"administrators"* - chat administrators only.
Defaults to *"recent"*.
last_member_count (``int``, *optional*):
Last known member count of the chat; for chats with 200 members or fewer
this skips the per-character query fan-out and speeds up iteration.
.. [1] Server limit: on supergroups, you can get up to 10,000 members for a single query and up to 200 members
on channels.
.. [2] A query string is applicable only for *"all"*, *"kicked"* and *"restricted"* filters only.
Returns:
``Generator``: A generator yielding :obj:`~pyrogram.types.ChatMember` objects.
Example:
.. code-block:: python
# Iterate through all chat members
async for member in app.iter_chat_members("pyrogramchat"):
print(member.user.first_name)
# Iterate through all administrators
async for member in app.iter_chat_members("pyrogramchat", filter="administrators"):
print(member.user.first_name)
# Iterate through all bots
async for member in app.iter_chat_members("pyrogramchat", filter="bots"):
print(member.user.first_name)
"""
current = 0
yielded = set()
if query:
queries = [query]
else:
if last_member_count > 200:
queries = QUERIES
else:
queries = [query]
# queries = [query] if query else QUERIES
total = limit or (1 << 31) - 1
limit = min(200, total)
resolved_chat_id = await self.resolve_peer(chat_id)
if filter not in QUERYABLE_FILTERS:
queries = [""]
for q in queries:
offset = 0
while True:
chat_members = await self.get_chat_members(
chat_id=chat_id,
offset=offset,
limit=limit,
query=q,
filter=filter
)
# print(f"got chat members in : {arrow.utcnow().timestamp()-now}")
if not chat_members:
break
if isinstance(resolved_chat_id, raw.types.InputPeerChat):
total = len(chat_members)
offset += len(chat_members)
for chat_member in chat_members:
user_id = chat_member.user.id
if user_id in yielded:
continue
yield chat_member
yielded.add(chat_member.user.id)
current += 1
if current >= total:
return
|
# generate data: specify number of data points generated
NUM_DATA_POINTS = 5000
# the minimum number of variables required for the equation to work
MIN_VARIABLES = 4
# specify objective function
def func(x, num_var):
result = 0.0
for i in range(num_var):
result += x[i]**2
return result
# specify whether to minimise or maximise function, 0 for min 1 for max
MIN_OR_MAX_FLAG = 0
# set the min and max range for the variables
X_MIN_RANGE = -50.0
X_MAX_RANGE = 50.0
# specify constraints (return 0 if constraint is met, otherwise return absolute distance)
def c0(x, num_var):
# the constraint is: (80 - x[0] - x[1]) <= 0
result = 80 - x[0] - x[1]
if result <= 0.0:
return 0.0
else:
return result
def c1(x, num_var):
# the constraint is: x[2] + 45 <= 0
result = x[2] + 45
if result <= 0.0:
return 0.0
else:
return result
def c2(x, num_var):
# the constraint is: -7 <= x[2] + x[3] <= -5
LOWER_BOUND = -7.0
UPPER_BOUND = -5.0
result = x[2] + x[3]
if (result <= UPPER_BOUND) and (result >= LOWER_BOUND): # inside bounds
return 0.0
else:
if result < LOWER_BOUND:
distance = result - LOWER_BOUND
else:
distance = result - UPPER_BOUND
if distance >= 0: # always return a positive distance
return distance
else:
return (-distance)
# list of constraints: add specified constraints to this list in order for them to be considered
CONSTRAINTS = [
c0,
c1,
c2
]
# calculate the optimal result for the function for the constraint(s) to be met
optimal_point = [40.0, 40.0, -45.0, 40.0]
def calculate_optimal(num_var):
return func(optimal_point, num_var)
# generate data: specify num gen and num pop for the data generator GA
DATAGEN_GEN = 200 #500
DATAGEN_POP = 200
# generate data: specify min and max range for data
DATAGEN_MIN_RANGE = -1.0
DATAGEN_MAX_RANGE = 1.0
# learn representation: specify the number of latent variables and epochs for the vae
# NUM_LATENT = NUM_VARIABLES
NUM_EPOCHS = 200
# optimise: specify num gen and num pop for the optimiser GA
VAEGA_GEN = 50
VAEGA_POP = 20
# optimise: the range for the GA to generate random numbers for the latent variable
VAEGA_MIN_RANGE = -2.0
VAEGA_MAX_RANGE = 2.0
# comparison GA: specify num gen and num pop for the GA
# GA_NUM_INDIVIDUALS = NUM_VARIABLES # the number of individuals for the GA is the number of variables
GA_GEN = 50
GA_POP = 20
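# A minimal, hypothetical sanity check (not part of the original config): every
# constraint should return 0.0 at the known optimal point, and the objective
# value there is exactly what calculate_optimal() reports.
if __name__ == "__main__":
    for constraint in CONSTRAINTS:
        assert constraint(optimal_point, MIN_VARIABLES) == 0.0
    print(calculate_optimal(MIN_VARIABLES))  # 40^2 + 40^2 + (-45)^2 + 40^2 = 6825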
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import cv2
import imgproc
from PIL import Image, ImageDraw
# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py
def get_files(img_dir):
imgs, masks, xmls = list_files(img_dir)
return imgs, masks, xmls
def list_files(in_path):
img_files = []
mask_files = []
gt_files = []
for (dirpath, dirnames, filenames) in os.walk(in_path):
for file in filenames:
filename, ext = os.path.splitext(file)
ext = str.lower(ext)
if ext in ('.jpg', '.jpeg', '.gif', '.png', '.pgm'):
img_files.append(os.path.join(dirpath, file))
elif ext == '.bmp':
mask_files.append(os.path.join(dirpath, file))
elif ext in ('.xml', '.gt', '.txt'):
gt_files.append(os.path.join(dirpath, file))
elif ext == '.zip':
continue
# img_files.sort()
# mask_files.sort()
# gt_files.sort()
return img_files, mask_files, gt_files
def saveResult(img_file, img, boxes, font, dirname='./result/', verticals=None, texts=None):
""" save text detection result one by one
Args:
img_file (str): image file name
img (array): raw image context
boxes (array): array of result file
Shape: [num_detections, 4] for BB output / [num_detections, 8] for QUAD output
Return:
None
"""
img = np.array(img)
img_pil = Image.fromarray(img)
imgdraw = ImageDraw.Draw(img_pil)
# make result file list
filename, file_ext = os.path.splitext(os.path.basename(img_file))
# result directory
res_file = dirname + "res_" + filename + '.txt'
res_img_file = dirname + "res_" + filename + '.jpg'
if not os.path.isdir(dirname):
os.mkdir(dirname)
with open(res_file, 'w') as f:
if texts is not None :
for i, (box, text) in enumerate(zip(boxes, texts)):
poly = np.array(box).astype(np.int32).reshape((-1))
strResult = ','.join([str(p) for p in poly]) +','+text +'\r\n'
# poly = np.array(box).astype(np.int32)
# min_x = np.min(poly[:,0])
# max_x = np.max(poly[:,0])
# min_y = np.min(poly[:,1])
# max_y = np.max(poly[:,1])
# strResult = ','.join([str(min_x), str(min_y), str(max_x), str(max_y)]) + '\r\n'
f.write(strResult)
poly = poly.reshape(-1, 2)
# cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)
# cv2.putText(img, text, tuple(poly[1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale = 0.1, color = (0,0,255), thickness= 1)
imgdraw.polygon(poly.flatten().tolist(), fill = None, outline = (0,0,255))
imgdraw.text(tuple(poly[1]), text,font = font, fill = (0,0,255))
ptColor = (0, 255, 255)
if verticals is not None:
if verticals[i]:
ptColor = (255, 0, 0)
else :
for i, box in enumerate(boxes):
poly = np.array(box).astype(np.int32).reshape((-1))
strResult = ','.join([str(p) for p in poly]) + '\r\n'
f.write(strResult)
poly = poly.reshape(-1, 2)
# cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)
imgdraw.polygon(poly.flatten().tolist(), fill=None, outline=(0, 0, 255))
ptColor = (0, 255, 255)
if verticals is not None:
if verticals[i]:
ptColor = (255, 0, 0)
# Save result image
cv2.imwrite(res_img_file, np.array(img_pil))
def load_txt(file, delimiter = ',') :
## character bboxes are separated per box by blank lines ('\n\n')
coords_ls = []
with open(file, 'r', encoding = 'utf-8-sig') as f :
boxes_list = f.read().split('\n\n')
for boxes in boxes_list :
if boxes.strip() == '' :
continue
char_boxes = boxes.split('\n')
# the char txt files carry no separate labels
charbox_ls = []
for charbox in char_boxes :
if len(charbox) == 0:
continue
coords = charbox.split(delimiter)
coords = [float(c) for c in coords if c != '']
if len(coords) == 0 :
continue
coords = np.array(coords).reshape(-1,2)
charbox_ls.append(coords)
if len(charbox_ls) != 0 :
coords_ls.append(np.array(charbox_ls))
return coords_ls
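# Illustrative usage (the file name is a placeholder): each entry of the
# returned list is an np.ndarray stacking the character boxes of one group,
# so a group of N four-point quads comes back with shape (N, 4, 2).
# coords = load_txt('example_char_gt.txt')
# print(len(coords), coords[0].shape)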
|
"""empty message
Revision ID: 5b2f27493d7e
Revises: 1d17bfa8fe08
Create Date: 2018-06-14 14:54:29.224338
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5b2f27493d7e'
down_revision = '1d17bfa8fe08'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('movie_plan',
sa.Column('id', sa.String(length=128), nullable=False),
sa.Column('mp_movie', sa.Integer(), nullable=True),
sa.Column('mp_hall', sa.Integer(), nullable=True),
sa.Column('mp_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['mp_hall'], ['hall.id'], ),
sa.ForeignKeyConstraint(['mp_movie'], ['movies.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('movie_plan')
# ### end Alembic commands ###
|
import chainer
from chainer.backends import cuda
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import reshape
from chainer.functions.array import split_axis
from chainer import link
from chainer.links.connection import linear
from chainer import variable
class StatefulPeepholeLSTM(link.Chain):
"""Fully-connected LSTM layer with peephole connections.
This is a fully-connected LSTM layer with peephole connections as a chain.
Unlike the :class:`~chainer.links.LSTM` link, this chain holds ``peep_i``,
``peep_f`` and ``peep_o`` as child links besides ``upward`` and
``lateral``.
Given a input vector :math:`x`, Peephole returns the next hidden vector
:math:`h'` defined as
.. math::
a &=& \\tanh(upward x + lateral h), \\\\
i &=& \\sigma(upward x + lateral h + peep_i c), \\\\
f &=& \\sigma(upward x + lateral h + peep_f c), \\\\
c' &=& a \\odot i + f \\odot c, \\\\
o &=& \\sigma(upward x + lateral h + peep_o c'), \\\\
h' &=& o \\tanh(c'),
where :math:`\\sigma` is the sigmoid function, :math:`\\odot` is the
element-wise product, :math:`c` is the current cell state, :math:`c'`
is the next cell state and :math:`h` is the current hidden vector.
Args:
in_size(int): Dimension of the input vector :math:`x`.
out_size(int): Dimension of the hidden vector :math:`h`.
Attributes:
upward (~chainer.links.Linear): Linear layer of upward connections.
lateral (~chainer.links.Linear): Linear layer of lateral connections.
peep_i (~chainer.links.Linear): Linear layer of peephole connections
to the input gate.
peep_f (~chainer.links.Linear): Linear layer of peephole connections
to the forget gate.
peep_o (~chainer.links.Linear): Linear layer of peephole connections
to the output gate.
c (~chainer.Variable): Cell states of LSTM units.
h (~chainer.Variable): Output at the current time step.
"""
def __init__(self, in_size, out_size):
super(StatefulPeepholeLSTM, self).__init__()
self.state_size = out_size
self.reset_state()
with self.init_scope():
self.upward = linear.Linear(in_size, 4 * out_size)
self.lateral = linear.Linear(out_size, 4 * out_size, nobias=True)
self.peep_i = linear.Linear(out_size, out_size, nobias=True)
self.peep_f = linear.Linear(out_size, out_size, nobias=True)
self.peep_o = linear.Linear(out_size, out_size, nobias=True)
def _to_device(self, device, skip_between_cupy_devices=False):
# Overrides Link._to_device
# TODO(niboshi): Avoid forcing concrete links to override _to_device
device = chainer.get_device(device)
super(StatefulPeepholeLSTM, self)._to_device(
device, skip_between_cupy_devices=skip_between_cupy_devices)
if self.c is not None:
if not (skip_between_cupy_devices
and device.xp is cuda.cupy
and isinstance(self.c, cuda.ndarray)):
self.c.to_device(device)
if self.h is not None:
if not (skip_between_cupy_devices
and device.xp is cuda.cupy
and isinstance(self.h, cuda.ndarray)):
self.h.to_device(device)
return self
def reset_state(self):
"""Resets the internal states.
It sets ``None`` to the :attr:`c` and :attr:`h` attributes.
"""
self.c = self.h = None
def forward(self, x):
"""Updates the internal state and returns the LSTM outputs.
Args:
x (~chainer.Variable): A new batch from the input sequence.
Returns:
~chainer.Variable: Outputs of updated LSTM units.
"""
lstm_in = self.upward(x)
if self.h is not None:
lstm_in += self.lateral(self.h)
if self.c is None:
xp = self.xp
with chainer.using_device(self.device):
self.c = variable.Variable(
xp.zeros((len(x), self.state_size), dtype=x.dtype))
lstm_in = reshape.reshape(
lstm_in, (len(lstm_in), lstm_in.shape[1] // 4, 4))
a, i, f, o = split_axis.split_axis(lstm_in, 4, 2)
a = reshape.reshape(a, a.shape[:2])
i = reshape.reshape(i, i.shape[:2])
f = reshape.reshape(f, f.shape[:2])
o = reshape.reshape(o, o.shape[:2])
peep_in_i = self.peep_i(self.c)
peep_in_f = self.peep_f(self.c)
a = tanh.tanh(a)
i = sigmoid.sigmoid(i + peep_in_i)
f = sigmoid.sigmoid(f + peep_in_f)
self.c = a * i + f * self.c
peep_in_o = self.peep_o(self.c)
o = sigmoid.sigmoid(o + peep_in_o)
self.h = o * tanh.tanh(self.c)
return self.h
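# A minimal usage sketch (the sizes and number of time steps are illustrative
# assumptions, not part of the original module): run a small batch through the
# LSTM one time step at a time.
if __name__ == '__main__':
    import numpy as np
    lstm = StatefulPeepholeLSTM(in_size=10, out_size=20)
    lstm.reset_state()
    for _ in range(5):  # five consecutive time steps
        x = np.zeros((4, 10), dtype=np.float32)  # batch of four input vectors
        h = lstm(x)  # updates the cell state and returns the hidden state
    print(h.shape)  # (4, 20)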
|
# coding: utf-8
"""
Xenia Python Client Library
Python Client Library to interact with the Xenia API. # noqa: E501
The version of the OpenAPI document: v2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from xenia_python_client_library.configuration import Configuration
class OrganizationUserUpdate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'admin': 'bool'
}
attribute_map = {
'admin': 'admin'
}
def __init__(self, admin=False, local_vars_configuration=None): # noqa: E501
"""OrganizationUserUpdate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._admin = None
self.discriminator = None
if admin is not None:
self.admin = admin
@property
def admin(self):
"""Gets the admin of this OrganizationUserUpdate. # noqa: E501
:return: The admin of this OrganizationUserUpdate. # noqa: E501
:rtype: bool
"""
return self._admin
@admin.setter
def admin(self, admin):
"""Sets the admin of this OrganizationUserUpdate.
:param admin: The admin of this OrganizationUserUpdate. # noqa: E501
:type: bool
"""
self._admin = admin
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OrganizationUserUpdate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OrganizationUserUpdate):
return True
return self.to_dict() != other.to_dict()
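# Illustrative round trip (not part of the generated file):
# u = OrganizationUserUpdate(admin=True)
# assert u.to_dict() == {'admin': True}
# print(u)  # __repr__ pretty-prints the dict above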
|
# -*- coding: utf-8 -*-
"""
test_generate_files
-------------------
Tests formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
TestGenerateFiles.test_generate_files
TestGenerateFiles.test_generate_files_with_trailing_newline
TestGenerateFiles.test_generate_files_binaries
TestGenerateFiles.test_generate_files_absolute_path
TestGenerateFiles.test_generate_files_output_dir
TestGenerateFiles.test_generate_files_permissions
Use the global clean_system fixture and run additional teardown code to remove
some special folders.
For a better understanding - order of fixture calls:
clean_system setup code
remove_additional_folders setup code
remove_additional_folders teardown code
clean_system teardown code
"""
from __future__ import unicode_literals
import os
import io
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
from cookiecutter import utils
@pytest.mark.parametrize('invalid_dirname', ['', '{foo}', '{{foo', 'bar}}'])
def test_ensure_dir_is_templated_raises(invalid_dirname):
with pytest.raises(exceptions.NonTemplatedInputDirException):
generate.ensure_dir_is_templated(invalid_dirname)
@pytest.fixture(scope='function')
def remove_additional_folders(request):
"""
Remove some special folders which are created by the tests.
"""
def fin_remove_additional_folders():
if os.path.exists('inputpizzä'):
utils.rmtree('inputpizzä')
if os.path.exists('inputgreen'):
utils.rmtree('inputgreen')
if os.path.exists('inputbinary_files'):
utils.rmtree('inputbinary_files')
if os.path.exists('tests/custom_output_dir'):
utils.rmtree('tests/custom_output_dir')
if os.path.exists('inputpermissions'):
utils.rmtree('inputpermissions')
request.addfinalizer(fin_remove_additional_folders)
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_nontemplated_exception():
with pytest.raises(exceptions.NonTemplatedInputDirException):
generate.generate_files(
context={
'cookiecutter': {'food': 'pizza'}
},
repo_dir='tests/test-generate-files-nontemplated'
)
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files():
generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir='tests/test-generate-files'
)
simple_file = 'inputpizzä/simple.txt'
assert os.path.isfile(simple_file)
simple_text = io.open(simple_file, 'rt', encoding='utf-8').read()
assert simple_text == u'I eat pizzä'
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_with_trailing_newline():
generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir='tests/test-generate-files'
)
newline_file = 'inputpizzä/simple-with-newline.txt'
assert os.path.isfile(newline_file)
with io.open(newline_file, 'r', encoding='utf-8') as f:
simple_text = f.read()
assert simple_text == u'I eat pizzä\n'
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_binaries():
generate.generate_files(
context={
'cookiecutter': {'binary_test': 'binary_files'}
},
repo_dir='tests/test-generate-binaries'
)
assert os.path.isfile('inputbinary_files/logo.png')
assert os.path.isfile('inputbinary_files/.DS_Store')
assert os.path.isfile('inputbinary_files/readme.txt')
assert os.path.isfile('inputbinary_files/some_font.otf')
assert os.path.isfile('inputbinary_files/binary_files/logo.png')
assert os.path.isfile('inputbinary_files/binary_files/.DS_Store')
assert os.path.isfile('inputbinary_files/binary_files/readme.txt')
assert os.path.isfile('inputbinary_files/binary_files/some_font.otf')
assert os.path.isfile(
'inputbinary_files/binary_files/binary_files/logo.png'
)
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_absolute_path():
generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir=os.path.abspath('tests/test-generate-files')
)
assert os.path.isfile('inputpizzä/simple.txt')
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_output_dir():
os.mkdir('tests/custom_output_dir')
generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir=os.path.abspath('tests/test-generate-files'),
output_dir='tests/custom_output_dir'
)
assert os.path.isfile('tests/custom_output_dir/inputpizzä/simple.txt')
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_return_rendered_project_dir():
os.mkdir('tests/custom_output_dir')
project_dir = generate.generate_files(
context={
'cookiecutter': {'food': 'pizzä'}
},
repo_dir=os.path.abspath('tests/test-generate-files'),
output_dir='tests/custom_output_dir'
)
assert project_dir == os.path.abspath(
'tests/custom_output_dir/inputpizzä/'
)
@pytest.mark.usefixtures('clean_system', 'remove_additional_folders')
def test_generate_files_permissions():
"""
simple.txt and script.sh should retain their respective 0o644 and
0o755 permissions
"""
generate.generate_files(
context={
'cookiecutter': {'permissions': 'permissions'}
},
repo_dir='tests/test-generate-files-permissions'
)
assert os.path.isfile('inputpermissions/simple.txt')
# simple.txt should still be 0o644
tests_simple_file = os.path.join(
'tests',
'test-generate-files-permissions',
'input{{cookiecutter.permissions}}',
'simple.txt'
)
tests_simple_file_mode = os.stat(tests_simple_file).st_mode & 0o777
input_simple_file = os.path.join(
'inputpermissions',
'simple.txt'
)
input_simple_file_mode = os.stat(input_simple_file).st_mode & 0o777
assert tests_simple_file_mode == input_simple_file_mode
assert os.path.isfile('inputpermissions/script.sh')
# script.sh should still be 0o755
tests_script_file = os.path.join(
'tests',
'test-generate-files-permissions',
'input{{cookiecutter.permissions}}',
'script.sh'
)
tests_script_file_mode = os.stat(tests_script_file).st_mode & 0o777
input_script_file = os.path.join(
'inputpermissions',
'script.sh'
)
input_script_file_mode = os.stat(input_script_file).st_mode & 0o777
assert tests_script_file_mode == input_script_file_mode
@pytest.fixture
def undefined_context():
return {
'cookiecutter': {
'project_slug': 'testproject',
'github_username': 'hackebrot'
}
}
def test_raise_undefined_variable_file_name(tmpdir, undefined_context):
output_dir = tmpdir.mkdir('output')
with pytest.raises(exceptions.UndefinedVariableInTemplate) as err:
generate.generate_files(
repo_dir='tests/undefined-variable/file-name/',
output_dir=str(output_dir),
context=undefined_context
)
error = err.value
assert "Unable to create file '{{cookiecutter.foobar}}'" == error.message
assert error.context == undefined_context
assert not output_dir.join('testproject').exists()
def test_raise_undefined_variable_file_content(tmpdir, undefined_context):
output_dir = tmpdir.mkdir('output')
with pytest.raises(exceptions.UndefinedVariableInTemplate) as err:
generate.generate_files(
repo_dir='tests/undefined-variable/file-content/',
output_dir=str(output_dir),
context=undefined_context
)
error = err.value
assert "Unable to create file 'README.rst'" == error.message
assert error.context == undefined_context
assert not output_dir.join('testproject').exists()
def test_raise_undefined_variable_dir_name(tmpdir, undefined_context):
output_dir = tmpdir.mkdir('output')
with pytest.raises(exceptions.UndefinedVariableInTemplate) as err:
generate.generate_files(
repo_dir='tests/undefined-variable/dir-name/',
output_dir=str(output_dir),
context=undefined_context
)
error = err.value
directory = os.path.join('testproject', '{{cookiecutter.foobar}}')
msg = "Unable to create directory '{}'".format(directory)
assert msg == error.message
assert error.context == undefined_context
assert not output_dir.join('testproject').exists()
def test_raise_undefined_variable_project_dir(tmpdir):
output_dir = tmpdir.mkdir('output')
with pytest.raises(exceptions.UndefinedVariableInTemplate) as err:
generate.generate_files(
repo_dir='tests/undefined-variable/dir-name/',
output_dir=str(output_dir),
context={}
)
error = err.value
msg = "Unable to create project directory '{{cookiecutter.project_slug}}'"
assert msg == error.message
assert error.context == {}
assert not output_dir.join('testproject').exists()
|
import inspect
import typing
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import default_code_filter # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
def get_qualified_name(func):
return func.__qualname__
if _IS_MONKEYTYPE_INSTALLED:
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
"""A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
class JitTypeTraceStore(CallTraceStore):
def __init__(self):
super().__init__()
# A dictionary keeping all collected CallTrace
# key is fully qualified name of called function
# value is list of all CallTrace
self.trace_records: Dict[str, list] = defaultdict(list)
def add(self, traces: Iterable[CallTrace]):
for t in traces:
qualified_name = get_qualified_name(t.func)
self.trace_records[qualified_name].append(t)
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
# Analyze the types for the given module
# and create a dictionary of all the types
# for arguments.
records = self.trace_records[qualified_name]
all_args = defaultdict(set)
for record in records:
for arg, arg_type in record.arg_types.items():
all_args[arg].add(arg_type)
return all_args
def consolidate_types(self, qualified_name: str) -> Dict:
all_args = self.analyze(qualified_name)
# If more than one type was recorded for an argument,
# consolidate the entry to the type `Any`.
for arg, types in all_args.items():
_all_type = " "
for _type in types:
# If the type is a type imported from typing
# like Tuple, List, Dict then replace "typing."
# with a null string.
if inspect.getmodule(_type) == typing:
_type_to_string = str(_type)
_all_type += _type_to_string.replace('typing.', '') + ','
elif _type is torch.nn.parameter.Parameter:
# Check if the type is torch.nn.parameter.Parameter,
# use the entire qualified name `torch.nn.parameter.Parameter`
# for type
_all_type += 'torch.nn.parameter.Parameter' + ','
else:
_all_type += _type.__name__ + ','
_all_type = _all_type.lstrip(" ") # Remove any leading spaces
if len(types) > 1:
all_args[arg] = {'Any'}
else:
all_args[arg] = {_all_type[:-1]}
return all_args
def get_args_types(self, qualified_name: str) -> Dict:
return self.consolidate_types(qualified_name)
class JitTypeTraceConfig(monkeytype.config.Config):
def __init__(self, s: JitTypeTraceStore):
super().__init__()
self.s = s
def trace_logger(self) -> JitTypeTraceStoreLogger:
"""
Returns a JitCallTraceStoreLogger that logs to the configured
trace store.
"""
return JitTypeTraceStoreLogger(self.trace_store())
def trace_store(self) -> CallTraceStore:
return self.s
def code_filter(self) -> Optional[CodeFilter]:
return default_code_filter
else:
# When MonkeyType is not installed, we provide dummy class definitions
# for the below classes.
class JitTypeTraceStoreLogger: # type: ignore[no-redef]
def __init__(self):
pass
class JitTypeTraceStore: # type: ignore[no-redef]
def __init__(self):
self.trace_records = None
class JitTypeTraceConfig: # type: ignore[no-redef]
def __init__(self):
pass
monkeytype_trace = None # noqa: F811
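# Illustrative wiring (assumes MonkeyType is installed; `my_fn` is a
# hypothetical function to trace, not part of this module):
# store = JitTypeTraceStore()
# config = JitTypeTraceConfig(store)
# with monkeytype_trace(config):
#     my_fn(1, 2)
# print(store.get_args_types(get_qualified_name(my_fn)))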
|
from yacs.config import CfgNode as CN
import yaml
_C = CN()
_C.DATA = CN()
_C.DATA.DATASET = 'Cifar10'
_C.DATA.BATCH_SIZE = 128
_C.MODEL = CN()
_C.MODEL.NUM_CLASSES = 1000
_C.MODEL.TRANS = CN()
_C.MODEL.TRANS.EMBED_DIM = 96
_C.MODEL.TRANS.DEPTHS = [2, 2, 6, 2]
_C.MODEL.TRANS.QKV_BIAS = False
def _update_config_from_file(config, cfg_file):
config.defrost()
config.merge_from_file(cfg_file)
#config.freeze()
def update_config(config, args):
if args.cfg:
_update_config_from_file(config, args.cfg)
if args.dataset:
config.DATA.DATASET = args.dataset
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
return config
def get_config(cfg_file=None):
config = _C.clone()
if cfg_file:
_update_config_from_file(config, cfg_file)
return config
def main():
cfg = get_config()
print(cfg)
print('-----')
print(cfg.MODEL.NUM_CLASSES)
print('-----')
print(cfg.MODEL.TRANS.QKV_BIAS)
if __name__ == "__main__":
main()
|
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Base TestCase for proposal api."""
import ddt
from ggrc.models import all_models
from integration.ggrc import TestCase, generator
from integration.ggrc.models import factories
from integration.ggrc.api_helper import Api
from integration.ggrc.review import build_reviewer_acl
@ddt.ddt
class TestReviewApi(TestCase):
"""Base TestCase class proposal api tests."""
def setUp(self):
super(TestReviewApi, self).setUp()
self.api = Api()
self.api.client.get("/login")
self.generator = generator.ObjectGenerator()
def test_simple_get(self):
"""Test simple get"""
with factories.single_commit():
program = factories.ProgramFactory()
review = factories.ReviewFactory(
email_message="test email message",
notification_type="email",
reviewable=program,
status=all_models.Review.STATES.UNREVIEWED,
)
resp = self.api.get(all_models.Review, review.id)
self.assert200(resp)
self.assertIn("review", resp.json)
resp_review = resp.json["review"]
self.assertEqual(all_models.Review.STATES.UNREVIEWED,
resp_review["status"])
self.assertEqual(all_models.Review.NotificationTypes.EMAIL_TYPE,
resp_review["notification_type"])
self.assertEqual("test email message",
resp_review["email_message"])
def test_collection_get(self):
"""Test simple collection get"""
with factories.single_commit():
review1 = factories.ReviewFactory(
status=all_models.Review.STATES.UNREVIEWED
)
review2 = factories.ReviewFactory(
status=all_models.Review.STATES.REVIEWED
)
resp = self.api.get_collection(all_models.Review,
[review1.id, review2.id])
self.assert200(resp)
self.assertIn("reviews_collection", resp.json)
self.assertIn("reviews", resp.json["reviews_collection"])
self.assertEquals(2, len(resp.json["reviews_collection"]["reviews"]))
def test_create_review(self):
"""Create review via API, check that single relationship is created"""
program = factories.ProgramFactory()
program_id = program.id
resp = self.api.post(
all_models.Review,
{
"review": {
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"notification_type": "email",
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl()
},
},
)
self.assertEqual(201, resp.status_code)
review_id = resp.json["review"]["id"]
review = all_models.Review.query.get(review_id)
self.assertEqual(all_models.Review.STATES.UNREVIEWED, review.status)
self.assertEqual(program.type, review.reviewable_type)
self.assertEqual(program_id, review.reviewable_id)
control_review_rel_count = all_models.Relationship.query.filter(
all_models.Relationship.source_id == review.id,
all_models.Relationship.source_type == review.type,
all_models.Relationship.destination_id == program_id,
all_models.Relationship.destination_type == program.type,
).union(
all_models.Relationship.query.filter(
all_models.Relationship.destination_id == review.id,
all_models.Relationship.destination_type == review.type,
all_models.Relationship.source_id == program_id,
all_models.Relationship.source_type == program.type,
)
).count()
self.assertEqual(1, control_review_rel_count)
def test_delete_review(self):
"""Test delete review via API"""
with factories.single_commit():
program = factories.ProgramFactory()
program_id = program.id
review = factories.ReviewFactory(reviewable=program)
review_id = review.id
resp = self.api.delete(review)
self.assert200(resp)
review = all_models.Review.query.get(review_id)
program = all_models.Program.query.get(program_id)
self.assertIsNone(review)
self.assertEquals(0, len(program.related_objects(_types=["Review"])))
def test_last_reviewed(self):
"""last_reviewed_by, last_reviewed_by should be set if reviewed"""
program = factories.ProgramFactory()
resp, review = self.generator.generate_object(
all_models.Review,
{
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl(),
"notification_type": all_models.Review.NotificationTypes.EMAIL_TYPE
},
)
review_id = review.id
resp = self.api.put(
review,
{
"status": all_models.Review.STATES.REVIEWED,
},
)
self.assert200(resp)
self.assertIsNotNone(resp.json["review"]["last_reviewed_by"])
self.assertIsNotNone(resp.json["review"]["last_reviewed_at"])
review = all_models.Review.query.get(review_id)
self.assertIsNotNone(review.last_reviewed_by)
self.assertIsNotNone(review.last_reviewed_at)
def test_reviewable_revisions(self):
"""Check that proper revisions are created"""
program = factories.ProgramFactory()
resp, review = self.generator.generate_object(
all_models.Review,
{
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl(),
"notification_type": all_models.Review.NotificationTypes.EMAIL_TYPE
},
)
program_id = program.id
reviewable = review.reviewable
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(2, len(program_revisions))
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[0].content["review_status"])
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[1].content["review_status"])
resp = self.api.put(
review,
{
"status": all_models.Review.STATES.REVIEWED,
},
)
self.assert200(resp)
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(3, len(program_revisions))
self.assertEquals(all_models.Review.STATES.REVIEWED,
program_revisions[2].content["review_status"])
resp = self.api.put(
reviewable,
{
"description": "some new description"
}
)
self.assert200(resp)
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(4, len(program_revisions))
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[3].content["review_status"])
|
import os
from flask import Flask
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# a simple page that says hello
@app.route('/hello')
def hello():
return 'Hello, World!'
from . import db
db.init_app(app)
from . import auth
app.register_blueprint(auth.bp)
from . import blog
app.register_blueprint(blog.bp)
app.add_url_rule('/', endpoint='index')
return app
|
"""
BT510 commands (serial or BLE).
This is a subset of the jtester commands used for verification.
"""
import time
import json
import random
import string
import logging
from jsonrpcclient.requests import Request
class jtester:
def __init__(self, fname="config.json"):
""" JSON tester that is independent of the transport """
print("jtester init")
self.protocol = None
self.inter_message_delay = 0.01
self.reset_delay = 10
self.reset_after_write_delay = 2
self.get_queue_timeout = 2.0
self.ok = 0
self.fail = 0
self._LoadConfig(fname)
self.logger = logging.getLogger('jtester')
def _LoadConfig(self, fname: str) -> None:
with open(fname, 'r') as f:
c = json.load(f)
if "inter_message_delay" in c:
self.inter_message_delay = c["inter_message_delay"]
if "reset_delay" in c:
self.reset_delay = c["reset_delay"]
def _send_json(self, text):
if self.protocol is not None:
self.logger.debug(text)
self.protocol.send_json(text, self.inter_message_delay)
else:
self.logger.warning("Transport not available")
def _get_json(self):
if self.protocol is not None:
result = self.protocol.get_json(self.get_queue_timeout)
self.logger.debug(json.dumps(result))
return result
else:
return None
def set_protocol(self, protocol) -> None:
self.protocol = protocol
def IncrementOkCount(self) -> None:
self.ok += 1
def IncrementFailCount(self) -> None:
self.fail += 1
self.logger.error("Test Fail")
def ExpectOk(self) -> None:
response = self._get_json()
if response is not None:
if "result" in response:
if response["result"] == "ok":
self.IncrementOkCount()
return
self.IncrementFailCount()
def ExpectError(self) -> None:
response = self._get_json()
if response is not None:
if "error" in response:
self.IncrementOkCount()
return
self.IncrementFailCount()
def ExpectValue(self, name, value) -> None:
response = self._get_json()
if response is not None:
if "result" in response:
if response["result"] == "ok":
if value is None:
if name in response:
self.IncrementOkCount()
return
elif isinstance(value, str):
if response[name] == value.strip('\"'):
self.IncrementOkCount()
return
else:
if response[name] == value:
self.IncrementOkCount()
return
self.IncrementFailCount()
def ExpectValues(self, **pairs) -> None:
responseFound = False
error = 0
response = self._get_json()
if response is not None:
if "result" in response:
if response["result"] == "ok":
responseFound = True
for (name, value) in pairs.items():
if value is None:
if name not in response:
error += 1
elif isinstance(value, str):
if response[name] != value.strip('\"'):
error += 1
elif response[name] != value:
error += 1
if not responseFound or error:
self.IncrementFailCount()
else:
self.IncrementOkCount()
def ExpectRange(self, name, imin, imax) -> None:
response = self._get_json()
if response is not None:
if "result" in response:
x = response["result"]
if isinstance(x, int):
if x >= imin and x <= imax:
self.IncrementOkCount()
return
self.IncrementFailCount()
def ExpectInt(self) -> int:
response = self._get_json()
if response is not None:
if "result" in response:
value = response["result"]
if isinstance(value, int):
self.IncrementOkCount()
return value
self.IncrementFailCount()
return -1
def ExpectStr(self) -> str:
response = self._get_json()
if response is not None:
if "result" in response:
value = response["result"]
if isinstance(value, str):
self.IncrementOkCount()
return value
self.IncrementFailCount()
return ""
def ExpectLog(self) -> list:
response = self._get_json()
if response is not None:
if "result" in response:
value = response["result"]
if isinstance(value, list):
self.IncrementOkCount()
return value
self.IncrementFailCount()
return [0, ""]
def SendFactoryReset(self) -> None:
time.sleep(self.reset_after_write_delay)
self._send_json(str(Request("factoryReset")))
self.ExpectOk()
time.sleep(self.reset_delay)
def SendReboot(self) -> None:
time.sleep(self.reset_after_write_delay)
self._send_json(str(Request("reboot")))
self.ExpectOk()
time.sleep(self.reset_delay)
def SendEnterBootloader(self) -> None:
time.sleep(self.reset_after_write_delay)
self._send_json(str(Request("reboot", 1)))
self.ExpectOk()
time.sleep(self.reset_delay)
def EpochTest(self, epoch: int) -> None:
"""Test epoch commands"""
delay = 3
self._send_json(str(Request(f"setEpoch", epoch)))
self.ExpectOk()
time.sleep(delay)
self._send_json(str(Request("getEpoch")))
self.ExpectRange("epoch", epoch + delay - 1, epoch + delay + 1)
def LedTest(self) -> None:
self._send_json(str(Request("ledTest", 1000)))
self.ExpectOk()
def Dump(self) -> None:
""" Test dump command without any parameters """
self._send_json(str(Request("dump")))
response = self._get_json()
if response is not None and response.get("result") == "ok":
self.IncrementOkCount()
else:
self.IncrementFailCount()
def Unlock(self) -> None:
kwargs = {"lock": 0}
self._send_json(str(Request("set", **kwargs)))
self.ExpectOk()
def Lock(self) -> None:
kwargs = {"lock": 1}
self._send_json(str(Request("set", **kwargs)))
self.ExpectOk()
def GetAttribute(self, name: str):
"""Get an attribute by its name - Doesn't affect test ok count"""
self._send_json(str(Request("get", name)))
response = self._get_json()
result = None
if response is not None:
if "result" in response:
if response["result"] == "ok":
if name in response:
value = response[name]
if isinstance(value, str):
result = value.strip('\"')
else:
result = value
self.logger.info(f'"{name}": {result}')
return result
def SetAttributes(self, **kwargs) -> None:
self._send_json(str(Request("set", **kwargs)))
self.ExpectOk()
def SetEpoch(self, epoch: int) -> None:
self._send_json(str(Request("setEpoch", epoch)))
self.ExpectOk()
def PrepareLog(self) -> int:
self._send_json(str(Request("prepareLog", 0))) # fifo mode
return self.ExpectInt()
def ReadLog(self, count: int) -> list:
self._send_json(str(Request("readLog", count)))
return self.ExpectLog()
def AckLog(self, count: int) -> int:
self._send_json(str(Request("ackLog", count)))
result = self.ExpectInt()
return result
def LogResults(self):
self.logger.info(f"Pass: {self.ok} Fail: {self.fail}")
if __name__ == "__main__":
pass
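# Illustrative flow (the transport object is hypothetical; it must provide
# the send_json(text, delay) and get_json(timeout) methods used above):
# jt = jtester("config.json")
# jt.set_protocol(my_serial_transport)
# jt.LedTest()
# jt.EpochTest(1600000000)
# jt.LogResults()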
|
import datetime
import tensorflow as tf
import random
import wandb
from tensorflow_examples.models.pix2pix import pix2pix
from augmentation.dataflows.utils import create_paired_direct_dataflow, \
create_paired_parallel_dataflow_via_numpy
from augmentation.methods.cyclegan.models import mnist_unet_generator, mnist_discriminator, unet_generator
from augmentation.utilities.optim import build_lr_scheduler
from augmentation.utilities.visualize import gallery
# Other places to look for training GANs
# https://github.com/eriklindernoren/Keras-GAN
def gradient_penalty(f, real, fake, mode, scale=10.0):
# https://github.com/LynnHo/CycleGAN-Tensorflow-2/blob/master/tf2gan/loss.py
def _gradient_penalty(f, real, fake=None):
def _interpolate(a, b=None):
if b is None: # interpolation in DRAGAN
beta = tf.random.uniform(shape=tf.shape(a), minval=0., maxval=1.)
b = a + 0.5 * tf.math.reduce_std(a) * beta
shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
alpha = tf.random.uniform(shape=shape, minval=0., maxval=1.)
inter = a + alpha * (b - a)
inter.set_shape(a.shape)
return inter
x = _interpolate(real, fake)
with tf.GradientTape() as t:
t.watch(x)
pred = tf.reduce_mean(tf.reshape(f(x), [tf.shape(real)[0], -1]), axis=1)
grad = t.gradient(pred, x)
norm = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=1)
gp = tf.reduce_mean((norm - 1.) ** 2)
return gp
if mode == 'none':
gp = tf.constant(0, dtype=real.dtype)
elif mode == 'dragan':
gp = _gradient_penalty(f, real)
elif mode == 'wgan-gp':
gp = _gradient_penalty(f, real, fake)
else:
raise NotImplementedError
return gp * scale
class ReplayBuffer(object):
"""
Adapted from https://github.com/tensorflow/models/blob/master/research/pcl_rl/replay_buffer.py
"""
def __init__(self, max_size):
self.max_size = max_size
self.cur_size = 0
self.buffer = {}
self.oldest_idx = 0
self.init_length = 0
def __len__(self):
return self.cur_size
def add(self, images):
idx = 0
while self.cur_size < self.max_size and idx < len(images):
self.buffer[self.cur_size] = images[idx]
self.cur_size += 1
idx += 1
if idx < len(images):
remove_idxs = self.remove_n(len(images) - idx)
for remove_idx in remove_idxs:
self.buffer[remove_idx] = images[idx]
idx += 1
assert len(self.buffer) == self.cur_size
def remove_n(self, n):
return random.sample(range(self.init_length, self.cur_size), n)
def get_batch(self, n):
idxs = random.sample(range(self.cur_size), n)
return [self.buffer[idx] for idx in idxs]
def get_tf_batch(self, n):
idxs = random.sample(range(self.cur_size), n)
return tf.convert_to_tensor([self.buffer[idx] for idx in idxs])
def wgan_loss(targets, predictions):
return tf.reduce_mean((-2 * targets + 1.) * predictions)
def build_gan_loss_fn(loss_name):
if loss_name == 'bce':
return tf.keras.losses.BinaryCrossentropy(from_logits=True)
elif loss_name == 'lsgan':
return tf.keras.losses.MeanSquaredError()
elif loss_name == 'wgan':
return wgan_loss
else:
raise NotImplementedError
def discriminator_loss(real, generated, loss_fn):
# Classification loss for the discriminator, maximize log-prob of the real example
real_loss = loss_fn(tf.ones_like(real), real)
generated_loss = loss_fn(tf.zeros_like(generated), generated)
total_disc_loss = real_loss + generated_loss
return total_disc_loss * 0.5
def generator_loss(generated, loss_fn):
# The discriminator's probability (generated) for realness is maximized
return loss_fn(tf.ones_like(generated), generated)
def cycle_loss(real_image, cycled_image, scale):
# Cycle-consistency using an L1 loss
return scale * tf.reduce_mean(tf.abs(real_image - cycled_image))
def identity_loss(real_image, same_image, scale):
# Map the image to itself and compute the L1 loss
return scale * 0.5 * tf.reduce_mean(tf.abs(real_image - same_image))
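# Sketch of how these pieces typically combine into the full generator
# objective of a CycleGAN step (standard formulation; the tensor names and
# the scale of 10.0 below are illustrative, not taken from this file):
# loss_fn = build_gan_loss_fn('lsgan')
# total_cycle = cycle_loss(real_x, cycled_x, 10.0) + cycle_loss(real_y, cycled_y, 10.0)
# gen_g_loss = generator_loss(disc_fake_y, loss_fn) + total_cycle + identity_loss(real_y, same_y, 10.0)
# gen_f_loss = generator_loss(disc_fake_x, loss_fn) + total_cycle + identity_loss(real_x, same_x, 10.0)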
def build_cyclegan_models(n_channels, norm_type):
assert norm_type in ['instancenorm', 'batchnorm']
generator_g = pix2pix.unet_generator(n_channels, norm_type=norm_type)
generator_f = pix2pix.unet_generator(n_channels, norm_type=norm_type)
discriminator_x = pix2pix.discriminator(norm_type=norm_type, target=False)
discriminator_y = pix2pix.discriminator(norm_type=norm_type, target=False)
return generator_g, generator_f, discriminator_x, discriminator_y
def build_mnist_cyclegan_models(norm_type):
assert norm_type in ['instancenorm', 'batchnorm']
generator_g = mnist_unet_generator(norm_type=norm_type)
generator_f = mnist_unet_generator(norm_type=norm_type)
discriminator_x = mnist_discriminator(norm_type=norm_type, target=False)
discriminator_y = mnist_discriminator(norm_type=norm_type, target=False)
return generator_g, generator_f, discriminator_x, discriminator_y
def get_models_from_input_shape(input_shape, norm_type, output_init=0.02, residual_output=False):
if input_shape == (28, 28, 1):
# MNIST-like data
return mnist_unet_generator(norm_type=norm_type), \
mnist_discriminator(norm_type=norm_type, target=False)
elif input_shape == (256, 256, 3):
# TODO: just use our unet_generator fn
if residual_output is True or output_init != 0.02:
raise NotImplementedError
return pix2pix.unet_generator(output_channels=3, norm_type=norm_type), \
pix2pix.discriminator(norm_type=norm_type, target=False)
else:
return unet_generator(output_channels=3, input_shape=input_shape, norm_type=norm_type,
output_init=output_init, residual_output=residual_output), \
pix2pix.discriminator(norm_type=norm_type, target=False)
def build_models(source_input_shape, target_input_shape, norm_type, output_init=0.02, residual_output=False):
assert norm_type in ['instancenorm', 'batchnorm']
generator_s_to_t, discriminator_s = get_models_from_input_shape(source_input_shape, norm_type, output_init, residual_output)
generator_t_to_s, discriminator_t = get_models_from_input_shape(target_input_shape, norm_type, output_init, residual_output)
return generator_s_to_t, generator_t_to_s, discriminator_s, discriminator_t
def build_optimizers(lr_gen=2e-4, lr_disc=2e-4,
beta_1_gen=0.5, beta_1_disc=0.5,
lr_scheduler='constant', lr_decay_steps=None):
generator_g_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_gen)
generator_f_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_gen)
discriminator_x_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_disc)
discriminator_y_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_disc)
return generator_g_optimizer, generator_f_optimizer, discriminator_x_optimizer, discriminator_y_optimizer
def create_cyclegan_data_generator(source_dataset, target_dataset, batch_size, augmentations,
dataflow, cache_dir):
if dataflow == 'disk_cached':
cache_dir = cache_dir + datetime.datetime.now().strftime('%d_%m_%y__%H_%M_%S')
# Shuffle hangs sometimes (e.g. for horse2zebra)
return create_paired_direct_dataflow(source_dataset, target_dataset, batch_size,
augmentations, x_only=True,
cache_dir1=cache_dir + '1',
cache_dir2=cache_dir + '2',
shuffle=True)
elif dataflow == 'in_memory':
return create_paired_parallel_dataflow_via_numpy(source_dataset, target_dataset,
batch_size, augmentations, x_only=True)
else:
raise NotImplementedError
def generate_and_log_one_image_batch(data_generator,
generator_g,
generator_f,
step):
# Grab a batch from the dataset
for real_x, real_y in data_generator:
# Convert to tensors
real_x, real_y = tf.convert_to_tensor(real_x), tf.convert_to_tensor(real_y)
# Compute the fake examples
fake_y = generator_g(real_x, training=True)
fake_x = generator_f(real_y, training=True)
# Cycle the fake examples
cycled_x = generator_f(fake_y, training=True)
cycled_y = generator_g(fake_x, training=True)
# Compute the identity examples
same_x = generator_f(real_x, training=True)
same_y = generator_g(real_y, training=True)
# Log everything to Weights and Biases
wandb.log({'test/real_x': wandb.Image(gallery(real_x.numpy() * 0.5 + 0.5)),
'test/fake_x': wandb.Image(gallery(fake_x.numpy() * 0.5 + 0.5)),
'test/cycled_x': wandb.Image(gallery(cycled_x.numpy() * 0.5 + 0.5)),
'test/same_x': wandb.Image(gallery(same_x.numpy() * 0.5 + 0.5)),
'test/real_y': wandb.Image(gallery(real_y.numpy() * 0.5 + 0.5)),
'test/fake_y': wandb.Image(gallery(fake_y.numpy() * 0.5 + 0.5)),
'test/cycled_y': wandb.Image(gallery(cycled_y.numpy() * 0.5 + 0.5)),
'test/same_y': wandb.Image(gallery(same_y.numpy() * 0.5 + 0.5))}, step=step)
# Break after a single batch: only one batch is logged per step, since repeated wandb.log calls at the same step do not behave well
break
if __name__ == '__main__':
buffer = ReplayBuffer(1)
buffer.add([1])
buffer.add([2])
buffer.add([3])
print(buffer.get_batch(1))
print(buffer.get_batch(1))
print(buffer.get_batch(1))
buffer.add([4])
print(buffer.get_batch(1))
print(buffer.buffer)
buffer = ReplayBuffer(1)
buffer.add(tf.convert_to_tensor([1]))
buffer.add(tf.convert_to_tensor([2]))
buffer.add(tf.convert_to_tensor([3]))
print(tf.convert_to_tensor(buffer.get_batch(1)))
print(buffer.get_batch(1))
print(buffer.get_batch(1))
buffer.add(tf.convert_to_tensor([4]))
print(buffer.get_batch(1))
print(buffer.buffer)
|
import pickle
import cv2 as cv
if __name__ == "__main__":
with open('fer2013.pkl', 'rb') as file:
data = pickle.load(file)
train = data['train']
train = train[:10]
for i, sample in enumerate(train):
filename = sample['image_path']
img = cv.imread(filename)
new_name = 'images/{}.jpg'.format(i)
cv.imwrite(new_name, img)
label = sample['label']
print(label)
|
from .splitter import *
|
import sys
from django.core.management.commands.migrate import Command as MigrateCommand
from django.db import transaction
from tuiuiu.tuiuiutenant.utils import get_public_schema_name
def run_migrations(args, options, executor_codename, schema_name, allow_atomic=True):
from django.core.management import color
from django.core.management.base import OutputWrapper
from django.db import connection
style = color.color_style()
def style_func(msg):
return '[%s:%s] %s' % (
style.NOTICE(executor_codename),
style.NOTICE(schema_name),
msg
)
stdout = OutputWrapper(sys.stdout)
stdout.style_func = style_func
stderr = OutputWrapper(sys.stderr)
stderr.style_func = style_func
if int(options.get('verbosity', 1)) >= 1:
stdout.write(style.NOTICE("=== Running migrate for schema %s" % schema_name))
connection.set_schema(schema_name)
MigrateCommand(stdout=stdout, stderr=stderr).execute(*args, **options)
try:
transaction.commit()
connection.close()
connection.connection = None
except transaction.TransactionManagementError:
if not allow_atomic:
raise
# We are in atomic transaction, don't close connections
pass
connection.set_schema_to_public()
class MigrationExecutor(object):
codename = None
def __init__(self, args, options):
self.args = args
self.options = options
def run_migrations(self, tenants):
public_schema_name = get_public_schema_name()
if public_schema_name in tenants:
run_migrations(self.args, self.options, self.codename, public_schema_name)
            tenants.remove(public_schema_name)
self.run_tenant_migrations(tenants)
    def run_tenant_migrations(self, tenants):
raise NotImplementedError
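# A minimal concrete executor sketch (an assumed shape for illustration, not
# part of the original module): runs tenant migrations one schema at a time.
class SequentialExecutor(MigrationExecutor):
    codename = 'sequential'

    def run_tenant_migrations(self, tenants):
        for schema_name in tenants:
            run_migrations(self.args, self.options, self.codename, schema_name)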
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Distribution Learners Package """
from .qgan import (DiscriminativeNetwork,
GenerativeNetwork,
NumPyDiscriminator,
PyTorchDiscriminator,
QuantumGenerator,
QGAN)
__all__ = [
'DiscriminativeNetwork',
'GenerativeNetwork',
'NumPyDiscriminator',
'PyTorchDiscriminator',
'QuantumGenerator',
'QGAN',
]
|
"""web_tools URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from image_converter.views import upload_and_convert_image_file
__author__ = 'dominic'
urlpatterns = [
url(r'', upload_and_convert_image_file, name='upload_and_convert_image_file'),
]
|
from .main import UIS
|
from typing import Union
import math
AbstractText = Union[int, bytes]
def byte_length(i: int) -> int:
"""Returns the minimal amount of bytes needed to represent unsigned integer `i`."""
# we need to add 1 to correct the fact that a byte can only go up to 255, instead of 256:
# i.e math.log(0x100, 0x100) = 1 but needs 2 bytes
return math.ceil(math.log(i + 1, 0x100))
def bit_length(i: int) -> int:
"""Returns the minimal amount of bits needed to represent unsigned integer `i`."""
return math.ceil(math.log(i + 1, 2))
def int_to_bytes(i: int, length: int = -1) -> bytes:
    """Converts an integer to an MSB-first byte sequence, by default using the least number of bytes possible"""
return i.to_bytes(byte_length(i) if length == -1 else length, "big")
def bytes_to_int(b: bytes) -> int:
"""Converts MSB-first byte sequence to an integer"""
return int.from_bytes(b, "big")
|
from .standard import Urllib2Transport
from .curl import PycurlTransport
import os
def get_transport(transport_type=None, os_module=os):
transport_type = __get_transport_type(transport_type, os_module)
if transport_type == 'urllib':
transport = Urllib2Transport()
else:
transport = PycurlTransport()
return transport
def __get_transport_type(transport_type, os_module):
if not transport_type:
use_curl = os_module.getenv('LWR_CURL_TRANSPORT', "0")
# If LWR_CURL_TRANSPORT is unset or set to 0, use default,
# else use curl.
if use_curl.isdigit() and not int(use_curl):
transport_type = 'urllib'
else:
transport_type = 'curl'
return transport_type
# TODO: Provide urllib implementation if these unavailable,
# also explore a requests+poster option.
from .curl import get_file
from .curl import post_file
__all__ = ['get_transport', 'get_file', 'post_file']
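# Illustrative behaviour of the LWR_CURL_TRANSPORT switch (a sketch using a
# stand-in for the os module; not part of the original API):
if __name__ == "__main__":
    class _FakeOS(object):
        def __init__(self, value):
            self._value = value

        def getenv(self, name, default):
            return self._value if name == 'LWR_CURL_TRANSPORT' else default

    assert __get_transport_type(None, _FakeOS("0")) == 'urllib'
    assert __get_transport_type(None, _FakeOS("1")) == 'curl'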
|
# Generated by Django 3.0.4 on 2020-04-11 03:48
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
replaces = [('contests', '0001_initial'), ('contests', '0002_auto_20200410_1324'), ('contests', '0003_auto_20200410_1511')]
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contests',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=50)),
('start', models.DateTimeField(null=True)),
('end', models.DateTimeField(null=True)),
],
options={
'db_table': 'contests',
},
),
migrations.CreateModel(
name='Tasks',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(max_length=50)),
('sub_title', models.CharField(max_length=50)),
('text', models.CharField(max_length=2048)),
],
options={
'db_table': 'tasks',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Teams',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('ip_address', models.CharField(max_length=15)),
('vs_liveshare_link', models.CharField(max_length=2048)),
('github_branch_name', models.CharField(max_length=50)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contests.Contests')),
],
options={
'db_table': 'teams',
'ordering': ['id'],
},
),
migrations.AddField(
model_name='contests',
name='tasks',
field=models.ManyToManyField(to='contests.Tasks'),
),
]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Tag(Model):
"""Represents a Tag.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Gets the Tag ID
:vartype id: str
:param name: Gets or sets the name of the tag
:type name: str
:param description: Gets or sets the description of the tag
:type description: str
:ivar image_count: Gets the number of images with this tag
:vartype image_count: int
"""
_validation = {
'id': {'readonly': True},
'image_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str'},
'name': {'key': 'Name', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'image_count': {'key': 'ImageCount', 'type': 'int'},
}
def __init__(self, **kwargs):
super(Tag, self).__init__(**kwargs)
self.id = None
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.image_count = None
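# Illustrative construction (a sketch; server-populated fields stay None until
# a service response is deserialized):
#
#   tag = Tag(name='defect', description='images showing a defect')
#   tag.id           # None, read-only, populated by the server
#   tag.image_count  # None, read-only, populated by the server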
|
'''
Developed by Abhijith Boppe - linkedin.com/in/abhijith-boppe/
'''
import socket
import ssl
import time
data_maxLength = 65535
fields_maxLength = 1024
sock = None
device_id = ''
device_key = ''
time_stamps = []
def connectionSet(host, port, id_, key, Encrypt=1, cert_path=None):
    global sock, device_id, device_key, time_stamps
    device_id = id_
    device_key = key
    time_stamps = []
    sock = socket.create_connection((host, port))
    if Encrypt == 1:
        # use the TLS context that is created here (the original created it and
        # discarded it), verifying the server against the provided certificate
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        context.load_verify_locations(cert_path)
        sock = context.wrap_socket(sock, server_hostname=host)
    sock.settimeout(1)
def chkTime(server_time, device_time):
    """
    Check that the server time is close to the device time and that
    no time stamp is reused (guards against replay attacks).
    """
    global time_stamps
    time_drop_max = 3  # packets with a time difference of more than 3 seconds are not accepted
    device_time = float(device_time)
    server_time = float(server_time)
    if server_time in time_stamps:
        raise Exception(f"ERROR: Replay attack observed. Time stamps:{time_stamps}, Replayed time: {server_time}")
    else:
        if len(time_stamps) < 100:  # fewer than 100 requests in the accepted window
            time_diff = abs(device_time - server_time)
            if len(time_stamps) > 1:  # drop old time stamps (to reduce memory usage)
                if abs(time_stamps[-1] - server_time) > time_drop_max:
                    time_stamps = []
            if time_diff > time_drop_max:
                return 0
            else:
                time_stamps.append(server_time)
                return 1
        else:
            raise Exception(
                "ERROR: DOS attack, more than 100 requests from server in 30sec")
def recvData():
time_now = f'{time.time():.4f}'
try:
# 65535 max data (including headers)
data = sock.recv(data_maxLength)
    except socket.timeout:
        data = b''
except Exception as _:
raise Exception("socket closed/refused by server")
data = data.decode()
if not data:
return ''
else:
        data = data.split('|#|')  # split data at the delimiter
while '' in data:
data.remove('')
if data[0]: # clear the remaining queue/buffer and read only first element/data
data = data[0]
# split headers and data
fields, data = data.split("\r\n\r\n", 1)
            fields = fields.strip() if len(fields) < fields_maxLength else ''
            data = data.strip() if len(data) < (data_maxLength - 3000) else ''
headers = {}
for field in fields.split('\r\n'):
# split each line by http field name and value
key, value = field.split(':')
headers[key] = value
if len(headers) > 10:
break
if len(headers) != 5 or len(data) < 5:
raise Exception("ERROR: Header length issue ")
else:
if(headers['IOT'] == '1.1'):
time_chk = chkTime(headers['TIME'], time_now)
if(time_chk):
return data
else:
raise Exception(
f"ERROR: Incorrect time stamp. server time {headers['TIME']} client time {time_now}")
else:
raise Exception(
f"ERROR: Incorrect IOT version detected {headers['IOT']}")
def _headers():
time_now = f'{time.time():.4f}'
headers = '''IOT:1.1
DATE:12/12/2019
TIME:{time_now}
DEVICE:{device_id}
KEY:{device_key}
'''.format(time_now=time_now, device_id= device_id, device_key=device_key)
return headers
def sendData(data):
    if 5 < len(data) < 60000:
        try:
            headers = _headers()
            data = headers.replace('\n', '\r\n') + data.replace('|#|', '') + '|#|'
            sock.send(data.encode())
        except socket.timeout:
            raise Exception("Socket time out")
        except Exception:
            raise Exception("Socket closed by server")
# ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None)
|
import click
from arrow.cli import pass_context
from arrow.decorators import custom_exception, dict_output
@click.command('findAllValues')
@pass_context
@custom_exception
@dict_output
def cli(ctx):
"""TODO: Undocumented
Output:
???
"""
return ctx.gi.cannedvalues.findAllValues()
|
from unittest import TestCase
from pyhocon import ConfigTree
from mist.models import Deployment
class DeploymentTest(TestCase):
def test_create_deployment(self):
Deployment('test', 'Artifact', ConfigTree(), '0.0.1')
def test_get_name(self):
d = Deployment('test', 'Artifact', ConfigTree({
'file-path': 'test-name.py'
}), '0.0.1')
self.assertEqual(d.get_name(), 'test_0.0.1.py')
def test_with_user_name(self):
d = Deployment('test', 'Function', ConfigTree({
'context': 'foo',
'path': 'test-name.jar'
}), '0.0.1')
d.with_user('test_name')
self.assertEqual(d.name, 'test_name_test')
self.assertEqual(d.data['path'], 'test_name_test-name.jar')
self.assertEqual(d.data['context'], 'test_name_foo')
|
#!/usr/bin/python
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import Adafruit_DHT
import time
# Parse command line parameters.
sensor_args = { '11': Adafruit_DHT.DHT11,
'22': Adafruit_DHT.DHT22,
'2302': Adafruit_DHT.AM2302 }
if len(sys.argv) == 3 and sys.argv[1] in sensor_args:
sensor = sensor_args[sys.argv[1]]
pin = sys.argv[2]
else:
print('Usage: sudo ./Adafruit_DHT.py [11|22|2302] <GPIO pin number>')
print('Example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO pin #4')
sys.exit(1)
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# Un-comment the line below to convert the temperature to Fahrenheit.
# temperature = temperature * 9/5.0 + 32
# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
while True:
time.sleep(1)
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
print('Temp={0:0.1f}* Humidity={1:0.1f}%'.format(temperature, humidity))
else:
print('Failed to get reading. Try again!')
sys.exit(1)
|
'''The thought follows a simple rule:
If the running sum of a subarray is positive, it can make the next value bigger, so we keep extending it until it turns negative.
If the running sum is negative, it is of no use to the next element, so we drop it and start fresh.
It is a game of sums, not of individual elements.'''
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
def Solution(nums):
for i in range(1, len(nums)):
if nums[i - 1] > 0:
nums[i] += nums[i - 1]
print(nums)
return max(nums)
print(Solution(nums))
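# A non-mutating variant of the same idea (an illustrative sketch, not part of
# the original solution):
def max_subarray(nums):
    best = current = nums[0]
    for x in nums[1:]:
        current = max(x, current + x)  # extend the running sum or start fresh at x
        best = max(best, current)
    return best

assert max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6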
|
"""
Copyright (C) 2010 David Fong and Michael Saunders
LSMR uses an iterative method.
07 Jun 2010: Documentation updated
03 Jun 2010: First release version in Python
David Chin-lung Fong clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsmr']
from numpy import zeros, infty
from numpy.linalg import norm
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
from .lsqr import _sym_ortho
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
maxiter=None, show=False):
"""Iterative solver for least-squares problems.
lsmr solves the system of linear equations ``Ax = b``. If the system
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
A is a rectangular matrix of dimension m-by-n, where all cases are
    allowed: m = n, m > n, or m < n. b is a vector of length m.
The matrix A may be dense or sparse (usually sparse).
.. versionadded:: 0.11.0
Parameters
----------
A : {matrix, sparse matrix, ndarray, LinearOperator}
Matrix A in the linear system.
b : (m,) ndarray
Vector b in the linear system.
damp : float
Damping factor for regularized least-squares. `lsmr` solves
the regularized least-squares problem::
min ||(b) - ( A )x||
||(0) (damp*I) ||_2
where damp is a scalar. If damp is None or 0, the system
is solved without regularization.
atol, btol : float
Stopping tolerances. `lsmr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, lsmr terminates when ``norm(A^{T} r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),
the final ``norm(r)`` should be accurate to about 6
digits. (The final x will usually have fewer correct digits,
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of A and B respectively. For example, if the entries
of `A` have 7 correct digits, set atol = 1e-7. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
`conlim`. For compatible systems ``Ax = b``, conlim could be
as large as 1.0e+12 (say). For least-squares problems,
`conlim` should be less than 1.0e+8. If `conlim` is None, the
default value is 1e+8. Maximum precision can be obtained by
setting ``atol = btol = conlim = 0``, but the number of
iterations may then be excessive.
maxiter : int
`lsmr` terminates if the number of iterations reaches
`maxiter`. The default is ``maxiter = min(m, n)``. For
ill-conditioned systems, a larger value of `maxiter` may be
needed.
show : bool
Print iterations logs if ``show=True``.
Returns
-------
x : ndarray of float
        Least-squares solution returned.
istop : int
istop gives the reason for stopping::
istop = 0 means x=0 is a solution.
= 1 means x is an approximate solution to A*x = B,
according to atol and btol.
= 2 means x approximately solves the least-squares problem
according to atol.
= 3 means COND(A) seems to be greater than CONLIM.
= 4 is the same as 1 with atol = btol = eps (machine
precision)
= 5 is the same as 2 with atol = eps.
= 6 is the same as 3 with CONLIM = 1/eps.
= 7 means ITN reached maxiter before the other stopping
conditions were satisfied.
itn : int
Number of iterations used.
normr : float
``norm(b-Ax)``
normar : float
``norm(A^T (b - Ax))``
norma : float
``norm(A)``
conda : float
Condition number of A.
normx : float
``norm(x)``
References
----------
.. [1] D. C.-L. Fong and M. A. Saunders,
"LSMR: An iterative algorithm for sparse least-squares problems",
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
http://arxiv.org/abs/1006.0758
.. [2] LSMR Software, http://www.stanford.edu/~clfong/lsmr.html
"""
A = aslinearoperator(A)
b = b.squeeze()
    msg = ('The exact solution is x = 0 ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
hdg1 = ' itn x(1) norm r norm A''r'
hdg2 = ' compatible LS norm A cond A'
pfreq = 20 # print frequency (for repeating the heading)
pcount = 0 # print counter
m, n = A.shape
# stores the num of singular values
minDim = min([m, n])
if maxiter is None:
maxiter = minDim
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print('The matrix A has %8g rows and %8g cols' % (m, n))
print('damp = %20.14e\n' % (damp))
print('atol = %8.2e conlim = %8.2e\n' % (atol, conlim))
print('btol = %8.2e maxiter = %8g\n' % (btol, maxiter))
u = b
beta = norm(u)
v = zeros(n)
alpha = 0
if beta > 0:
u = (1 / beta) * u
v = A.rmatvec(u)
alpha = norm(v)
if alpha > 0:
v = (1 / alpha) * v
# Initialize variables for 1st iteration.
itn = 0
zetabar = alpha * beta
alphabar = alpha
rho = 1
rhobar = 1
cbar = 1
sbar = 0
h = v.copy()
hbar = zeros(n)
x = zeros(n)
# Initialize variables for estimation of ||r||.
betadd = beta
betad = 0
rhodold = 1
tautildeold = 0
thetatilde = 0
zeta = 0
d = 0
# Initialize variables for estimation of ||A|| and cond(A)
normA2 = alpha * alpha
maxrbar = 0
minrbar = 1e+100
normA = sqrt(normA2)
condA = 1
normx = 0
# Items for use in stopping rules.
normb = beta
istop = 0
ctol = 0
if conlim > 0:
ctol = 1 / conlim
normr = beta
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
normar = alpha * beta
if normar == 0:
if show:
print(msg[0])
return x, istop, itn, normr, normar, normA, condA, normx
if show:
print(' ')
print(hdg1, hdg2)
test1 = 1
test2 = alpha / beta
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (normr, normar)
str3 = ' %8.1e %8.1e' % (test1, test2)
print(''.join([str1, str2, str3]))
# Main iteration loop.
while itn < maxiter:
itn = itn + 1
# Perform the next step of the bidiagonalization to obtain the
# next beta, u, alpha, v. These satisfy the relations
# beta*u = a*v - alpha*u,
# alpha*v = A'*u - beta*v.
u = A.matvec(v) - alpha * u
beta = norm(u)
if beta > 0:
u = (1 / beta) * u
v = A.rmatvec(u) - beta * v
alpha = norm(v)
if alpha > 0:
v = (1 / alpha) * v
# At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.
# Construct rotation Qhat_{k,2k+1}.
chat, shat, alphahat = _sym_ortho(alphabar, damp)
# Use a plane rotation (Q_i) to turn B_i to R_i
rhoold = rho
c, s, rho = _sym_ortho(alphahat, beta)
thetanew = s*alpha
alphabar = c*alpha
# Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
rhobarold = rhobar
zetaold = zeta
thetabar = sbar * rho
rhotemp = cbar * rho
cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
zeta = cbar * zetabar
zetabar = - sbar * zetabar
# Update h, h_hat, x.
hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar
x = x + (zeta / (rho * rhobar)) * hbar
h = v - (thetanew / rho) * h
# Estimate of ||r||.
# Apply rotation Qhat_{k,2k+1}.
betaacute = chat * betadd
betacheck = -shat * betadd
# Apply rotation Q_{k,k+1}.
betahat = c * betaacute
betadd = -s * betaacute
# Apply rotation Qtilde_{k-1}.
# betad = betad_{k-1} here.
thetatildeold = thetatilde
ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
        thetatilde = stildeold * rhobar
rhodold = ctildeold * rhobar
betad = - stildeold * betad + ctildeold * betahat
# betad = betad_k here.
# rhodold = rhod_k here.
tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
taud = (zeta - thetatilde * tautildeold) / rhodold
d = d + betacheck * betacheck
normr = sqrt(d + (betad - taud)**2 + betadd * betadd)
# Estimate ||A||.
normA2 = normA2 + beta * beta
normA = sqrt(normA2)
normA2 = normA2 + alpha * alpha
# Estimate cond(A).
maxrbar = max(maxrbar, rhobarold)
if itn > 1:
            minrbar = min(minrbar, rhobarold)
condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)
# Test for convergence.
# Compute norms for convergence testing.
normar = abs(zetabar)
normx = norm(x)
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = normr / normb
if (normA * normr) != 0:
test2 = normar / (normA * normr)
else:
test2 = infty
test3 = 1 / condA
t1 = test1 / (1 + normA * normx / normb)
rtol = btol + atol * normA * normx / normb
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
        # The effect is equivalent to the normal tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= maxiter:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
if show:
if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
(itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
(test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
(istop != 0):
if pcount >= pfreq:
pcount = 0
print(' ')
print(hdg1, hdg2)
pcount = pcount + 1
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (normr, normar)
str3 = ' %8.1e %8.1e' % (test1, test2)
str4 = ' %8.1e %8.1e' % (normA, condA)
print(''.join([str1, str2, str3, str4]))
if istop > 0:
break
# Print the stopping condition.
if show:
print(' ')
print('LSMR finished')
print(msg[istop])
print('istop =%8g normr =%8.1e' % (istop, normr))
print(' normA =%8.1e normAr =%8.1e' % (normA, normar))
print('itn =%8g condA =%8.1e' % (itn, condA))
print(' normx =%8.1e' % (normx))
print(str1, str2)
print(str3, str4)
return x, istop, itn, normr, normar, normA, condA, normx
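# Minimal usage sketch (values illustrative; assumes numpy is available):
#
#   >>> from numpy import array
#   >>> A = array([[1., 0.], [1., 1.], [0., 1.]])
#   >>> b = array([1., 2., 3.])
#   >>> x, istop = lsmr(A, b)[:2]   # least-squares solution of min ||b - Ax||_2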
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2018 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import os
import random
import re
import subprocess
import string
import sys
import types
from lib.core.datatype import AttribDict
from lib.core.enums import DBMS
from lib.core.enums import DBMS_DIRECTORY_NAME
from lib.core.enums import OS
# sqlmap version (<major>.<minor>.<month>.<monthly commit>)
VERSION = "1.2.11.12"
TYPE = "dev" if VERSION.count('.') > 2 and VERSION.split('.')[-1] != '0' else "stable"
TYPE_COLORS = {"dev": 33, "stable": 90, "pip": 34}
VERSION_STRING = "sqlmap/%s#%s" % ('.'.join(VERSION.split('.')[:-1]) if VERSION.count('.') > 2 and VERSION.split('.')[-1] == '0' else VERSION, TYPE)
DESCRIPTION = "automatic SQL injection and database takeover tool"
SITE = "http://sqlmap.org"
DEV_EMAIL_ADDRESS = "dev@sqlmap.org"
ISSUES_PAGE = "https://github.com/sqlmapproject/sqlmap/issues/new"
GIT_REPOSITORY = "https://github.com/sqlmapproject/sqlmap.git"
GIT_PAGE = "https://github.com/sqlmapproject/sqlmap"
ZIPBALL_PAGE = "https://github.com/sqlmapproject/sqlmap/zipball/master"
# colorful banner
BANNER = """\033[01;33m\
___
__H__
___ ___[.]_____ ___ ___ \033[01;37m{\033[01;%dm%s\033[01;37m}\033[01;33m
|_ -| . [.] | .'| . |
|___|_ [.]_|_|_|__,| _|
|_|V |_| \033[0m\033[4;37m%s\033[0m\n
""" % (TYPE_COLORS.get(TYPE, 31), VERSION_STRING.split('/')[-1], SITE)
# Minimum distance of ratio from kb.matchRatio to result in True
DIFF_TOLERANCE = 0.05
CONSTANT_RATIO = 0.9
# Ratio used in heuristic check for WAF/IPS protected targets
IDS_WAF_CHECK_RATIO = 0.5
# Timeout used in heuristic check for WAF/IPS protected targets
IDS_WAF_CHECK_TIMEOUT = 10
# Lower and upper values for match ratio in case of stable page
LOWER_RATIO_BOUND = 0.02
UPPER_RATIO_BOUND = 0.98
# Markers for special cases when parameter values contain html encoded characters
PARAMETER_AMP_MARKER = "__AMP__"
PARAMETER_SEMICOLON_MARKER = "__SEMICOLON__"
BOUNDARY_BACKSLASH_MARKER = "__BACKSLASH__"
PARTIAL_VALUE_MARKER = "__PARTIAL_VALUE__"
PARTIAL_HEX_VALUE_MARKER = "__PARTIAL_HEX_VALUE__"
URI_QUESTION_MARKER = "__QUESTION_MARK__"
ASTERISK_MARKER = "__ASTERISK_MARK__"
REPLACEMENT_MARKER = "__REPLACEMENT_MARK__"
BOUNDED_INJECTION_MARKER = "__BOUNDED_INJECTION_MARK__"
SAFE_VARIABLE_MARKER = "__SAFE__"
RANDOM_INTEGER_MARKER = "[RANDINT]"
RANDOM_STRING_MARKER = "[RANDSTR]"
SLEEP_TIME_MARKER = "[SLEEPTIME]"
INFERENCE_MARKER = "[INFERENCE]"
SINGLE_QUOTE_MARKER = "[SINGLE_QUOTE]"
PAYLOAD_DELIMITER = "__PAYLOAD_DELIMITER__"
CHAR_INFERENCE_MARK = "%c"
PRINTABLE_CHAR_REGEX = r"[^\x00-\x1f\x7f-\xff]"
# Regular expression used for extraction of table names (useful for (e.g.) MsAccess)
SELECT_FROM_TABLE_REGEX = r"\bSELECT\b.+?\bFROM\s+(?P<result>([\w.]|`[^`<>]+`)+)"
# Regular expression used for recognition of textual content-type
TEXT_CONTENT_TYPE_REGEX = r"(?i)(text|form|message|xml|javascript|ecmascript|json)"
# Regular expression used for recognition of generic permission messages
PERMISSION_DENIED_REGEX = r"(?P<result>(command|permission|access)\s*(was|is)?\s*denied)"
# Regular expression used in recognition of generic protection mechanisms
GENERIC_PROTECTION_REGEX = r"(?i)\b(rejected|blocked|protection|incident|denied|detected|dangerous|firewall)\b"
# Regular expression used for recognition of generic maximum connection messages
MAX_CONNECTIONS_REGEX = r"\bmax.+?\bconnection"
# Maximum consecutive connection errors before asking the user whether to continue
MAX_CONSECUTIVE_CONNECTION_ERRORS = 15
# Timeout before the pre-connection candidate is disposed of (because of high probability that the web server will reset it)
PRECONNECT_CANDIDATE_TIMEOUT = 10
# Servers known to cause issue with pre-connection mechanism (because of lack of multi-threaded support)
PRECONNECT_INCOMPATIBLE_SERVERS = ("SimpleHTTP",)
# Maximum sleep time in "Murphy" (testing) mode
MAX_MURPHY_SLEEP_TIME = 3
# Regular expression used for extracting results from Google search
GOOGLE_REGEX = r"webcache\.googleusercontent\.com/search\?q=cache:[^:]+:([^+]+)\+&cd=|url\?\w+=((?![^>]+webcache\.googleusercontent\.com)http[^>]+)&(sa=U|rct=j)"
# Regular expression used for extracting results from DuckDuckGo search
DUCKDUCKGO_REGEX = r'"u":"([^"]+)'
# Regular expression used for extracting results from Bing search
BING_REGEX = r'<h2><a href="([^"]+)" h='
# Dummy user agent for search (if default one returns different results)
DUMMY_SEARCH_USER_AGENT = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0"
# Regular expression used for extracting content from "textual" tags
TEXT_TAG_REGEX = r"(?si)<(abbr|acronym|b|blockquote|br|center|cite|code|dt|em|font|h\d|i|li|p|pre|q|strong|sub|sup|td|th|title|tt|u)(?!\w).*?>(?P<result>[^<]+)"
# Regular expression used for recognition of IP addresses
IP_ADDRESS_REGEX = r"\b(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\b"
# Regular expression used for recognition of generic "your ip has been blocked" messages
BLOCKED_IP_REGEX = r"(?i)(\A|\b)ip\b.*\b(banned|blocked|block list|firewall)"
# Dumping characters used in GROUP_CONCAT MySQL technique
CONCAT_ROW_DELIMITER = ','
CONCAT_VALUE_DELIMITER = '|'
# Coefficient used for a time-based query delay checking (must be >= 7)
TIME_STDEV_COEFF = 7
# Minimum response time that can be even considered as delayed (not a complete requirement)
MIN_VALID_DELAYED_RESPONSE = 0.5
# Standard deviation after which a warning message should be displayed about connection lags
WARN_TIME_STDEV = 0.5
# Minimum length of usable union injected response (quick defense against substr fields)
UNION_MIN_RESPONSE_CHARS = 10
# Coefficient used for a union-based number of columns checking (must be >= 7)
UNION_STDEV_COEFF = 7
# Length of queue for candidates for time delay adjustment
TIME_DELAY_CANDIDATES = 3
# Default value for HTTP Accept header
HTTP_ACCEPT_HEADER_VALUE = "*/*"
# Default value for HTTP Accept-Encoding header
HTTP_ACCEPT_ENCODING_HEADER_VALUE = "gzip,deflate"
# Default timeout for running commands over backdoor
BACKDOOR_RUN_CMD_TIMEOUT = 5
# Number of seconds to wait for thread finalization at program end
THREAD_FINALIZATION_TIMEOUT = 1
# Maximum number of techniques used in inject.py/getValue() per one value
MAX_TECHNIQUES_PER_VALUE = 2
# In case of a missing piece of a partial UNION dump, the buffered array must be flushed after reaching a certain size
MAX_BUFFERED_PARTIAL_UNION_LENGTH = 1024
# Suffix used for naming meta databases in DBMS(es) without explicit database name
METADB_SUFFIX = "_masterdb"
# Number of times to retry the pushValue during the exceptions (e.g. KeyboardInterrupt)
PUSH_VALUE_EXCEPTION_RETRY_COUNT = 3
# Minimum time response set needed for time-comparison based on standard deviation
MIN_TIME_RESPONSES = 30
# Minimum comparison ratio set needed for searching valid union column number based on standard deviation
MIN_UNION_RESPONSES = 5
# After this number of blanks at the end, inference should stop (just in case)
INFERENCE_BLANK_BREAK = 10
# Use this replacement character for cases when inference is not able to retrieve the proper character value
INFERENCE_UNKNOWN_CHAR = '?'
# Character used for operation "greater" in inference
INFERENCE_GREATER_CHAR = ">"
# Character used for operation "greater or equal" in inference
INFERENCE_GREATER_EQUALS_CHAR = ">="
# Character used for operation "equals" in inference
INFERENCE_EQUALS_CHAR = "="
# Character used for operation "not-equals" in inference
INFERENCE_NOT_EQUALS_CHAR = "!="
# String used for representation of unknown DBMS
UNKNOWN_DBMS = "Unknown"
# String used for representation of unknown DBMS version
UNKNOWN_DBMS_VERSION = "Unknown"
# Dynamicity boundary length used in dynamicity removal engine
DYNAMICITY_BOUNDARY_LENGTH = 20
# Dummy user prefix used in dictionary attack
DUMMY_USER_PREFIX = "__dummy__"
# Reference: http://en.wikipedia.org/wiki/ISO/IEC_8859-1
DEFAULT_PAGE_ENCODING = "iso-8859-1"
try:
unicode(DEFAULT_PAGE_ENCODING, DEFAULT_PAGE_ENCODING)
except LookupError:
DEFAULT_PAGE_ENCODING = "utf8"
# URL used in dummy runs
DUMMY_URL = "http://foo/bar?id=1"
# System variables
IS_WIN = subprocess.mswindows
# The name of the operating system dependent module imported. The following names have currently been registered: 'posix', 'nt', 'mac', 'os2', 'ce', 'java', 'riscos'
PLATFORM = os.name
PYVERSION = sys.version.split()[0]
# DBMS system databases
MSSQL_SYSTEM_DBS = ("Northwind", "master", "model", "msdb", "pubs", "tempdb")
MYSQL_SYSTEM_DBS = ("information_schema", "mysql", "performance_schema")
PGSQL_SYSTEM_DBS = ("information_schema", "pg_catalog", "pg_toast", "pgagent")
ORACLE_SYSTEM_DBS = ('ANONYMOUS', 'APEX_030200', 'APEX_PUBLIC_USER', 'APPQOSSYS', 'BI', 'CTXSYS', 'DBSNMP', 'DIP', 'EXFSYS', 'FLOWS_%', 'FLOWS_FILES', 'HR', 'IX', 'LBACSYS', 'MDDATA', 'MDSYS', 'MGMT_VIEW', 'OC', 'OE', 'OLAPSYS', 'ORACLE_OCM', 'ORDDATA', 'ORDPLUGINS', 'ORDSYS', 'OUTLN', 'OWBSYS', 'PM', 'SCOTT', 'SH', 'SI_INFORMTN_SCHEMA', 'SPATIAL_CSW_ADMIN_USR', 'SPATIAL_WFS_ADMIN_USR', 'SYS', 'SYSMAN', 'SYSTEM', 'WKPROXY', 'WKSYS', 'WK_TEST', 'WMSYS', 'XDB', 'XS$NULL')
SQLITE_SYSTEM_DBS = ("sqlite_master", "sqlite_temp_master")
ACCESS_SYSTEM_DBS = ("MSysAccessObjects", "MSysACEs", "MSysObjects", "MSysQueries", "MSysRelationships", "MSysAccessStorage", "MSysAccessXML", "MSysModules", "MSysModules2")
FIREBIRD_SYSTEM_DBS = ("RDB$BACKUP_HISTORY", "RDB$CHARACTER_SETS", "RDB$CHECK_CONSTRAINTS", "RDB$COLLATIONS", "RDB$DATABASE", "RDB$DEPENDENCIES", "RDB$EXCEPTIONS", "RDB$FIELDS", "RDB$FIELD_DIMENSIONS", "RDB$FILES", "RDB$FILTERS", "RDB$FORMATS", "RDB$FUNCTIONS", "RDB$FUNCTION_ARGUMENTS", "RDB$GENERATORS", "RDB$INDEX_SEGMENTS", "RDB$INDICES", "RDB$LOG_FILES", "RDB$PAGES", "RDB$PROCEDURES", "RDB$PROCEDURE_PARAMETERS", "RDB$REF_CONSTRAINTS", "RDB$RELATIONS", "RDB$RELATION_CONSTRAINTS", "RDB$RELATION_FIELDS", "RDB$ROLES", "RDB$SECURITY_CLASSES", "RDB$TRANSACTIONS", "RDB$TRIGGERS", "RDB$TRIGGER_MESSAGES", "RDB$TYPES", "RDB$USER_PRIVILEGES", "RDB$VIEW_RELATIONS")
MAXDB_SYSTEM_DBS = ("SYSINFO", "DOMAIN")
SYBASE_SYSTEM_DBS = ("master", "model", "sybsystemdb", "sybsystemprocs")
DB2_SYSTEM_DBS = ("NULLID", "SQLJ", "SYSCAT", "SYSFUN", "SYSIBM", "SYSIBMADM", "SYSIBMINTERNAL", "SYSIBMTS", "SYSPROC", "SYSPUBLIC", "SYSSTAT", "SYSTOOLS")
HSQLDB_SYSTEM_DBS = ("INFORMATION_SCHEMA", "SYSTEM_LOB")
H2_SYSTEM_DBS = ("INFORMATION_SCHEMA")
INFORMIX_SYSTEM_DBS = ("sysmaster", "sysutils", "sysuser", "sysadmin")
MSSQL_ALIASES = ("microsoft sql server", "mssqlserver", "mssql", "ms")
MYSQL_ALIASES = ("mysql", "my", "mariadb", "maria")
PGSQL_ALIASES = ("postgresql", "postgres", "pgsql", "psql", "pg")
ORACLE_ALIASES = ("oracle", "orcl", "ora", "or")
SQLITE_ALIASES = ("sqlite", "sqlite3")
ACCESS_ALIASES = ("msaccess", "access", "jet", "microsoft access")
FIREBIRD_ALIASES = ("firebird", "mozilla firebird", "interbase", "ibase", "fb")
MAXDB_ALIASES = ("maxdb", "sap maxdb", "sap db")
SYBASE_ALIASES = ("sybase", "sybase sql server")
DB2_ALIASES = ("db2", "ibm db2", "ibmdb2")
HSQLDB_ALIASES = ("hsql", "hsqldb", "hs", "hypersql")
H2_ALIASES = ("h2",)
INFORMIX_ALIASES = ("informix", "ibm informix", "ibminformix")
DBMS_DIRECTORY_DICT = dict((getattr(DBMS, _), getattr(DBMS_DIRECTORY_NAME, _)) for _ in dir(DBMS) if not _.startswith("_"))
SUPPORTED_DBMS = MSSQL_ALIASES + MYSQL_ALIASES + PGSQL_ALIASES + ORACLE_ALIASES + SQLITE_ALIASES + ACCESS_ALIASES + FIREBIRD_ALIASES + MAXDB_ALIASES + SYBASE_ALIASES + DB2_ALIASES + HSQLDB_ALIASES + H2_ALIASES + INFORMIX_ALIASES
SUPPORTED_OS = ("linux", "windows")
DBMS_ALIASES = ((DBMS.MSSQL, MSSQL_ALIASES), (DBMS.MYSQL, MYSQL_ALIASES), (DBMS.PGSQL, PGSQL_ALIASES), (DBMS.ORACLE, ORACLE_ALIASES), (DBMS.SQLITE, SQLITE_ALIASES), (DBMS.ACCESS, ACCESS_ALIASES), (DBMS.FIREBIRD, FIREBIRD_ALIASES), (DBMS.MAXDB, MAXDB_ALIASES), (DBMS.SYBASE, SYBASE_ALIASES), (DBMS.DB2, DB2_ALIASES), (DBMS.HSQLDB, HSQLDB_ALIASES), (DBMS.H2, H2_ALIASES), (DBMS.INFORMIX, INFORMIX_ALIASES))
USER_AGENT_ALIASES = ("ua", "useragent", "user-agent")
REFERER_ALIASES = ("ref", "referer", "referrer")
HOST_ALIASES = ("host",)
H2_DEFAULT_SCHEMA = HSQLDB_DEFAULT_SCHEMA = "PUBLIC"
# Names that can't be used to name files on Windows OS
WINDOWS_RESERVED_NAMES = ("CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9")
# Items displayed in basic help (-h) output
BASIC_HELP_ITEMS = (
"url",
"googleDork",
"data",
"cookie",
"randomAgent",
"proxy",
"testParameter",
"dbms",
"level",
"risk",
"tech",
"getAll",
"getBanner",
"getCurrentUser",
"getCurrentDb",
"getPasswordHashes",
"getTables",
"getColumns",
"getSchema",
"dumpTable",
"dumpAll",
"db",
"tbl",
"col",
"osShell",
"osPwn",
"batch",
"checkTor",
"flushSession",
"tor",
"sqlmapShell",
"wizard",
)
# Tags used for value replacements inside shell scripts
SHELL_WRITABLE_DIR_TAG = "%WRITABLE_DIR%"
SHELL_RUNCMD_EXE_TAG = "%RUNCMD_EXE%"
# String representation for NULL value
NULL = "NULL"
# String representation for blank ('') value
BLANK = "<blank>"
# String representation for current database
CURRENT_DB = "CD"
# Name of SQLite file used for storing session data
SESSION_SQLITE_FILE = "session.sqlite"
# Regular expressions used for finding file paths in error messages
FILE_PATH_REGEXES = (r"<b>(?P<result>[^<>]+?)</b> on line \d+", r"in (?P<result>[^<>'\"]+?)['\"]? on line \d+", r"(?:[>(\[\s])(?P<result>[A-Za-z]:[\\/][\w. \\/-]*)", r"(?:[>(\[\s])(?P<result>/\w[/\w.~-]+)", r"href=['\"]file://(?P<result>/[^'\"]+)")
# Regular expressions used for parsing error messages (--parse-errors)
ERROR_PARSING_REGEXES = (
r"\[Microsoft\]\[ODBC SQL Server Driver\]\[SQL Server\](?P<result>[^<]+)",
r"<b>[^<]*(fatal|error|warning|exception)[^<]*</b>:?\s*(?P<result>.+?)<br\s*/?\s*>",
r"(?m)^\s*(fatal|error|warning|exception):?\s*(?P<result>[^\n]+?)$",
r"(?P<result>[^\n>]*SQL Syntax[^\n<]+)",
r"<li>Error Type:<br>(?P<result>.+?)</li>",
r"CDbCommand (?P<result>[^<>\n]*SQL[^<>\n]+)",
r"error '[0-9a-f]{8}'((<[^>]+>)|\s)+(?P<result>[^<>]+)",
r"\[[^\n\]]+(ODBC|JDBC)[^\n\]]+\](\[[^\]]+\])?(?P<result>[^\n]+(in query expression|\(SQL| at /[^ ]+pdo)[^\n<]+)"
)
# Regular expression used for parsing charset info from meta html headers
META_CHARSET_REGEX = r'(?si)<head>.*<meta[^>]+charset="?(?P<result>[^"> ]+).*</head>'
# Regular expression used for parsing refresh info from meta html headers
META_REFRESH_REGEX = r'(?si)<head>(?!.*?<noscript.*?</head).*?<meta http-equiv="?refresh"?[^>]+content="?[^">]+url=["\']?(?P<result>[^\'">]+).*</head>'
# Regular expression used for parsing empty fields in tested form data
EMPTY_FORM_FIELDS_REGEX = r'(&|\A)(?P<result>[^=]+=(&|\Z))'
# Reference: http://www.cs.ru.nl/bachelorscripties/2010/Martin_Devillers___0437999___Analyzing_password_strength.pdf
COMMON_PASSWORD_SUFFIXES = ("1", "123", "2", "12", "3", "13", "7", "11", "5", "22", "23", "01", "4", "07", "21", "14", "10", "06", "08", "8", "15", "69", "16", "6", "18")
# Reference: http://www.the-interweb.com/serendipity/index.php?/archives/94-A-brief-analysis-of-40,000-leaked-MySpace-passwords.html
COMMON_PASSWORD_SUFFIXES += ("!", ".", "*", "!!", "?", ";", "..", "!!!", ", ", "@")
# Splitter used between requests in WebScarab log files
WEBSCARAB_SPLITTER = "### Conversation"
# Splitter used between requests in BURP log files
BURP_REQUEST_REGEX = r"={10,}\s+[^=]+={10,}\s(.+?)\s={10,}"
# Regex used for parsing XML Burp saved history items
BURP_XML_HISTORY_REGEX = r'<port>(\d+)</port>.+?<request base64="true"><!\[CDATA\[([^]]+)'
# Encoding used for Unicode data
UNICODE_ENCODING = "utf8"
# Reference: http://www.w3.org/Protocols/HTTP/Object_Headers.html#uri
URI_HTTP_HEADER = "URI"
# Uri format which could be injectable (e.g. www.site.com/id82)
URI_INJECTABLE_REGEX = r"//[^/]*/([^\.*?]+)\Z"
# Regex used for masking sensitive data
SENSITIVE_DATA_REGEX = r"(\s|=)(?P<result>[^\s=]*%s[^\s]*)\s"
# Options to explicitly mask in anonymous (unhandled exception) reports (along with anything carrying the <hostname> inside)
SENSITIVE_OPTIONS = ("hostname", "answers", "data", "dnsDomain", "googleDork", "authCred", "proxyCred", "tbl", "db", "col", "user", "cookie", "proxy", "fileRead", "fileWrite", "fileDest", "testParameter")
# Maximum number of threads (avoiding connection issues and/or DoS)
MAX_NUMBER_OF_THREADS = 10
# Minimum range between minimum and maximum of statistical set
MIN_STATISTICAL_RANGE = 0.01
# Minimum value for comparison ratio
MIN_RATIO = 0.0
# Maximum value for comparison ratio
MAX_RATIO = 1.0
# Minimum length of sentence for automatic choosing of --string (in case of high matching ratio)
CANDIDATE_SENTENCE_MIN_LENGTH = 10
# Character used for marking injectable position inside provided data
CUSTOM_INJECTION_MARK_CHAR = '*'
# Other way to declare injection position
INJECT_HERE_REGEX = r"(?i)%INJECT[_ ]?HERE%"
# Minimum chunk length used for retrieving data over error based payloads
MIN_ERROR_CHUNK_LENGTH = 8
# Maximum chunk length used for retrieving data over error based payloads
MAX_ERROR_CHUNK_LENGTH = 1024
# Do not escape the injected statement if it contains any of the following SQL keywords
EXCLUDE_UNESCAPE = ("WAITFOR DELAY ", " INTO DUMPFILE ", " INTO OUTFILE ", "CREATE ", "BULK ", "EXEC ", "RECONFIGURE ", "DECLARE ", "'%s'" % CHAR_INFERENCE_MARK)
# Mark used for replacement of reflected values
REFLECTED_VALUE_MARKER = "__REFLECTED_VALUE__"
# Regular expression used for replacing border non-alphanum characters
REFLECTED_BORDER_REGEX = r"[^A-Za-z]+"
# Regular expression used for replacing non-alphanum characters
REFLECTED_REPLACEMENT_REGEX = r"[^\n]{1,100}"
# Maximum time (in seconds) spent per reflective value(s) replacement
REFLECTED_REPLACEMENT_TIMEOUT = 3
# Maximum number of alpha-numerical parts in reflected regex (for speed purposes)
REFLECTED_MAX_REGEX_PARTS = 10
# Chars which can be used as a failsafe values in case of too long URL encoding value
URLENCODE_FAILSAFE_CHARS = "()|,"
# Maximum length of URL encoded value after which the failsafe procedure kicks in
URLENCODE_CHAR_LIMIT = 2000
# Default schema for Microsoft SQL Server DBMS
DEFAULT_MSSQL_SCHEMA = "dbo"
# Display hash attack info every mod number of items
HASH_MOD_ITEM_DISPLAY = 11
# Display marker for (cracked) empty password
HASH_EMPTY_PASSWORD_MARKER = "<empty>"
# Maximum integer value
MAX_INT = sys.maxint
# Replacement for unsafe characters in dump table filenames
UNSAFE_DUMP_FILEPATH_REPLACEMENT = '_'
# Options that need to be restored in multiple targets run mode
RESTORE_MERGED_OPTIONS = ("col", "db", "dnsDomain", "privEsc", "tbl", "regexp", "string", "textOnly", "threads", "timeSec", "tmpPath", "uChar", "user")
# Parameters to be ignored in detection phase (upper case)
IGNORE_PARAMETERS = ("__VIEWSTATE", "__VIEWSTATEENCRYPTED", "__VIEWSTATEGENERATOR", "__EVENTARGUMENT", "__EVENTTARGET", "__EVENTVALIDATION", "ASPSESSIONID", "ASP.NET_SESSIONID", "JSESSIONID", "CFID", "CFTOKEN")
# Regular expression used for recognition of ASP.NET control parameters
ASP_NET_CONTROL_REGEX = r"(?i)\Actl\d+\$"
# Prefix for Google analytics cookie names
GOOGLE_ANALYTICS_COOKIE_PREFIX = "__UTM"
# Prefix for configuration overriding environment variables
SQLMAP_ENVIRONMENT_PREFIX = "SQLMAP_"
# Turn off resume console info to avoid potential slowdowns
TURN_OFF_RESUME_INFO_LIMIT = 20
# Strftime format for results file used in multiple target mode
RESULTS_FILE_FORMAT = "results-%m%d%Y_%I%M%p.csv"
# Official web page with the list of Python supported codecs
CODECS_LIST_PAGE = "http://docs.python.org/library/codecs.html#standard-encodings"
# Simple regular expression used to distinguish scalar from multiple-row commands (not sole condition)
SQL_SCALAR_REGEX = r"\A(SELECT(?!\s+DISTINCT\(?))?\s*\w*\("
# Option/switch values to ignore during configuration save
IGNORE_SAVE_OPTIONS = ("saveConfig",)
# IP address of the localhost
LOCALHOST = "127.0.0.1"
# Default SOCKS ports used by Tor
DEFAULT_TOR_SOCKS_PORTS = (9050, 9150)
# Default HTTP ports used by Tor
DEFAULT_TOR_HTTP_PORTS = (8123, 8118)
# Percentage below which comparison engine could have problems
LOW_TEXT_PERCENT = 20
# These MySQL keywords can't go (alone) into versioned comment form (/*!...*/)
# Reference: http://dev.mysql.com/doc/refman/5.1/en/function-resolution.html
IGNORE_SPACE_AFFECTED_KEYWORDS = ("CAST", "COUNT", "EXTRACT", "GROUP_CONCAT", "MAX", "MID", "MIN", "SESSION_USER", "SUBSTR", "SUBSTRING", "SUM", "SYSTEM_USER", "TRIM")
# Keywords expected to be in UPPERCASE in getValue()
GET_VALUE_UPPERCASE_KEYWORDS = ("SELECT", "FROM", "WHERE", "DISTINCT", "COUNT")
LEGAL_DISCLAIMER = "Usage of sqlmap for attacking targets without prior mutual consent is illegal. It is the end user's responsibility to obey all applicable local, state and federal laws. Developers assume no liability and are not responsible for any misuse or damage caused by this program"
# After this number of misses reflective removal mechanism is turned off (for speed up reasons)
REFLECTIVE_MISS_THRESHOLD = 20
# Regular expression used for extracting HTML title
HTML_TITLE_REGEX = r"<title>(?P<result>[^<]+)</title>"
# Table used for Base64 conversion in WordPress hash cracking routine
ITOA64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
PICKLE_REDUCE_WHITELIST = (types.BooleanType, types.DictType, types.FloatType, types.IntType, types.ListType, types.LongType, types.NoneType, types.StringType, types.TupleType, types.UnicodeType, types.XRangeType, type(AttribDict()), type(set()))
# Chars used to quickly distinguish if the user provided tainted parameter values
DUMMY_SQL_INJECTION_CHARS = ";()'"
# Simple check against dummy users
DUMMY_USER_INJECTION = r"(?i)[^\w](AND|OR)\s+[^\s]+[=><]|\bUNION\b.+\bSELECT\b|\bSELECT\b.+\bFROM\b|\b(CONCAT|information_schema|SLEEP|DELAY|FLOOR\(RAND)\b"
# Extensions skipped by crawler
CRAWL_EXCLUDE_EXTENSIONS = ("3ds", "3g2", "3gp", "7z", "DS_Store", "a", "aac", "adp", "ai", "aif", "aiff", "apk", "ar", "asf", "au", "avi", "bak", "bin", "bk", "bmp", "btif", "bz2", "cab", "caf", "cgm", "cmx", "cpio", "cr2", "dat", "deb", "djvu", "dll", "dmg", "dmp", "dng", "doc", "docx", "dot", "dotx", "dra", "dsk", "dts", "dtshd", "dvb", "dwg", "dxf", "ear", "ecelp4800", "ecelp7470", "ecelp9600", "egg", "eol", "eot", "epub", "exe", "f4v", "fbs", "fh", "fla", "flac", "fli", "flv", "fpx", "fst", "fvt", "g3", "gif", "gz", "h261", "h263", "h264", "ico", "ief", "image", "img", "ipa", "iso", "jar", "jpeg", "jpg", "jpgv", "jpm", "jxr", "ktx", "lvp", "lz", "lzma", "lzo", "m3u", "m4a", "m4v", "mar", "mdi", "mid", "mj2", "mka", "mkv", "mmr", "mng", "mov", "movie", "mp3", "mp4", "mp4a", "mpeg", "mpg", "mpga", "mxu", "nef", "npx", "o", "oga", "ogg", "ogv", "otf", "pbm", "pcx", "pdf", "pea", "pgm", "pic", "png", "pnm", "ppm", "pps", "ppt", "pptx", "ps", "psd", "pya", "pyc", "pyo", "pyv", "qt", "rar", "ras", "raw", "rgb", "rip", "rlc", "rz", "s3m", "s7z", "scm", "scpt", "sgi", "shar", "sil", "smv", "so", "sub", "swf", "tar", "tbz2", "tga", "tgz", "tif", "tiff", "tlz", "ts", "ttf", "uvh", "uvi", "uvm", "uvp", "uvs", "uvu", "viv", "vob", "war", "wav", "wax", "wbmp", "wdp", "weba", "webm", "webp", "whl", "wm", "wma", "wmv", "wmx", "woff", "woff2", "wvx", "xbm", "xif", "xls", "xlsx", "xlt", "xm", "xpi", "xpm", "xwd", "xz", "z", "zip", "zipx")
# Patterns often seen in HTTP headers containing custom injection marking character '*'
PROBLEMATIC_CUSTOM_INJECTION_PATTERNS = r"(;q=[^;']+)|(\*/\*)"
# Template used for common table existence check
BRUTE_TABLE_EXISTS_TEMPLATE = "EXISTS(SELECT %d FROM %s)"
# Template used for common column existence check
BRUTE_COLUMN_EXISTS_TEMPLATE = "EXISTS(SELECT %s FROM %s)"
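# e.g. BRUTE_TABLE_EXISTS_TEMPLATE % (1, "users") -> "EXISTS(SELECT 1 FROM users)"
# and BRUTE_COLUMN_EXISTS_TEMPLATE % ("name", "users") -> "EXISTS(SELECT name FROM users)"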
# Payload used for checking of existence of IDS/IPS/WAF (dummier the better)
IDS_WAF_CHECK_PAYLOAD = "AND 1=1 UNION ALL SELECT 1,NULL,'<script>alert(\"XSS\")</script>',table_name FROM information_schema.tables WHERE 2>1--/**/; EXEC xp_cmdshell('cat ../../../etc/passwd')#"
# Data inside shellcodeexec to be filled with random string
SHELLCODEEXEC_RANDOM_STRING_MARKER = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# Generic address for checking the Internet connection while using switch --check-internet
CHECK_INTERNET_ADDRESS = "https://ipinfo.io/"
# Value to look for in response to CHECK_INTERNET_ADDRESS
CHECK_INTERNET_VALUE = "IP Address Details"
# Vectors used for provoking specific WAF/IPS behavior(s)
WAF_ATTACK_VECTORS = (
"", # NIL
"search=<script>alert(1)</script>",
"file=../../../../etc/passwd",
"q=<invalid>foobar",
"id=1 %s" % IDS_WAF_CHECK_PAYLOAD
)
# Used for status representation in dictionary attack phase
ROTATING_CHARS = ('\\', '|', '|', '/', '-')
# Approximate chunk length (in bytes) used by BigArray objects (only last chunk and cached one are held in memory)
BIGARRAY_CHUNK_SIZE = 1024 * 1024
# Compress level used for storing BigArray chunks to disk (0-9)
BIGARRAY_COMPRESS_LEVEL = 9
# Maximum number of socket pre-connects
SOCKET_PRE_CONNECT_QUEUE_SIZE = 3
# Only console display last n table rows
TRIM_STDOUT_DUMP_SIZE = 256
# Reference: http://stackoverflow.com/a/3168436
# Reference: https://support.microsoft.com/en-us/kb/899149
DUMP_FILE_BUFFER_SIZE = 1024
# Parse response headers only the first couple of times
PARSE_HEADERS_LIMIT = 3
# Step used in ORDER BY technique used for finding the right number of columns in UNION query injections
ORDER_BY_STEP = 10
# Maximum number of times for revalidation of a character in inference (as required)
MAX_REVALIDATION_STEPS = 5
# Characters that can be used to split parameter values in provided command line (e.g. in --tamper)
PARAMETER_SPLITTING_REGEX = r"[,|;]"
# Regular expression describing possible union char value (e.g. used in --union-char)
UNION_CHAR_REGEX = r"\A\w+\Z"
# Attribute used for storing original parameter value in special cases (e.g. POST)
UNENCODED_ORIGINAL_VALUE = "original"
# Common column names containing usernames (used for hash cracking in some cases)
COMMON_USER_COLUMNS = ("login", "user", "username", "user_name", "user_login", "benutzername", "benutzer", "utilisateur", "usager", "consommateur", "utente", "utilizzatore", "usufrutuario", "korisnik", "usuario", "consumidor", "client", "cuser")
# Default delimiter in GET/POST values
DEFAULT_GET_POST_DELIMITER = '&'
# Default delimiter in cookie values
DEFAULT_COOKIE_DELIMITER = ';'
# Unix timestamp used for forcing cookie expiration when provided with --load-cookies
FORCE_COOKIE_EXPIRATION_TIME = "9999999999"
# Github OAuth token used for creating an automatic Issue for unhandled exceptions
GITHUB_REPORT_OAUTH_TOKEN = "NTYzYjhmZWJjYzc0Njg2ODJhNzhmNDg1YzM0YzlkYjk3N2JiMzE3Nw=="
# Skip unforced HashDB flush requests below the threshold number of cached items
HASHDB_FLUSH_THRESHOLD = 32
# Number of retries for unsuccessful HashDB flush attempts
HASHDB_FLUSH_RETRIES = 3
# Number of retries for unsuccessful HashDB retrieve attempts
HASHDB_RETRIEVE_RETRIES = 3
# Number of retries for unsuccessful HashDB end transaction attempts
HASHDB_END_TRANSACTION_RETRIES = 3
# Unique milestone value used for forced deprecation of old HashDB values (e.g. when changing hash/pickle mechanism)
HASHDB_MILESTONE_VALUE = "BZzRotigLX" # python -c 'import random, string; print "".join(random.sample(string.ascii_letters, 10))'
# Warn user of possible delay due to large page dump in full UNION query injections
LARGE_OUTPUT_THRESHOLD = 1024 ** 2
# On huge tables there is a considerable slowdown if every row retrieval requires ORDER BY (most noticeable in table dumping using ERROR injections)
SLOW_ORDER_COUNT_THRESHOLD = 10000
# Give up on hash recognition if nothing was found in first given number of rows
HASH_RECOGNITION_QUIT_THRESHOLD = 10000
# Maximum number of redirections to any single URL - this is needed because of the state that cookies introduce
MAX_SINGLE_URL_REDIRECTIONS = 4
# Maximum total number of redirections (regardless of URL) - before assuming we're in a loop
MAX_TOTAL_REDIRECTIONS = 10
# Reference: http://www.tcpipguide.com/free/t_DNSLabelsNamesandSyntaxRules.htm
MAX_DNS_LABEL = 63
# Alphabet used for prefix and suffix strings of name resolution requests in DNS technique (excluding hexadecimal chars for not mixing with inner content)
DNS_BOUNDARIES_ALPHABET = re.sub(r"[a-fA-F]", "", string.ascii_letters)
# Alphabet used for heuristic checks
HEURISTIC_CHECK_ALPHABET = ('"', '\'', ')', '(', ',', '.')
# Minor artistic touch
BANNER = re.sub(r"\[.\]", lambda _: "[\033[01;41m%s\033[01;49m]" % random.sample(HEURISTIC_CHECK_ALPHABET, 1)[0], BANNER)
# String used for dummy non-SQLi (e.g. XSS) heuristic checks of a tested parameter value
DUMMY_NON_SQLI_CHECK_APPENDIX = "<'\">"
# Regular expression used for recognition of file inclusion errors
FI_ERROR_REGEX = r"(?i)[^\n]{0,100}(no such file|failed (to )?open)[^\n]{0,100}"
# Length of prefix and suffix used in non-SQLI heuristic checks
NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH = 6
# Connection chunk size (processing large responses in chunks to avoid MemoryError crashes - e.g. large table dump in full UNION injections)
MAX_CONNECTION_CHUNK_SIZE = 10 * 1024 * 1024
# Maximum response total page size (trimmed if larger)
MAX_CONNECTION_TOTAL_SIZE = 100 * 1024 * 1024
# For preventing MemoryError exceptions (caused when using large sequences in difflib.SequenceMatcher)
MAX_DIFFLIB_SEQUENCE_LENGTH = 10 * 1024 * 1024
# Maximum (multi-threaded) length of entry in bisection algorithm
MAX_BISECTION_LENGTH = 50 * 1024 * 1024
# Mark used for trimming unnecessary content in large chunks
LARGE_CHUNK_TRIM_MARKER = "__TRIMMED_CONTENT__"
# Generic SQL comment formation
GENERIC_SQL_COMMENT = "-- [RANDSTR]"
# Threshold value for turning back on time auto-adjustment mechanism
VALID_TIME_CHARS_RUN_THRESHOLD = 100
# Check for empty columns only if table is sufficiently large
CHECK_ZERO_COLUMNS_THRESHOLD = 10
# Boldify all logger messages containing these "patterns"
BOLD_PATTERNS = ("' injectable", "provided empty", "leftover chars", "might be injectable", "' is vulnerable", "is not injectable", "does not seem to be", "test failed", "test passed", "live test final result", "test shows that", "the back-end DBMS is", "created Github", "blocked by the target server", "protection is involved", "CAPTCHA", "specific response", "NULL connection is supported")
# Generic www root directory names
GENERIC_DOC_ROOT_DIRECTORY_NAMES = ("htdocs", "httpdocs", "public", "wwwroot", "www")
# Maximum length of a help part containing switch/option name(s)
MAX_HELP_OPTION_LENGTH = 18
# Maximum number of connection retries (to prevent problems with recursion)
MAX_CONNECT_RETRIES = 100
# Strings for detecting formatting errors
FORMAT_EXCEPTION_STRINGS = ("Type mismatch", "Error converting", "Conversion failed", "String or binary data would be truncated", "Failed to convert", "unable to interpret text value", "Input string was not in a correct format", "System.FormatException", "java.lang.NumberFormatException", "ValueError: invalid literal", "TypeMismatchException", "CF_SQL_INTEGER", " for CFSQLTYPE ", "cfqueryparam cfsqltype", "InvalidParamTypeException", "Invalid parameter type", "is not of type numeric", "<cfif Not IsNumeric(", "invalid input syntax for integer", "invalid input syntax for type", "invalid number", "character to number conversion error", "unable to interpret text value", "String was not recognized as a valid", "Convert.ToInt", "cannot be converted to a ", "InvalidDataException")
# Regular expression used for extracting ASP.NET view state values
VIEWSTATE_REGEX = r'(?i)(?P<name>__VIEWSTATE[^"]*)[^>]+value="(?P<result>[^"]+)'
# Regular expression used for extracting ASP.NET event validation values
EVENTVALIDATION_REGEX = r'(?i)(?P<name>__EVENTVALIDATION[^"]*)[^>]+value="(?P<result>[^"]+)'
# Number of rows to generate inside the full union test for limited output (mustn't be too large to prevent payload length problems)
LIMITED_ROWS_TEST_NUMBER = 15
# Default adapter to use for bottle server
RESTAPI_DEFAULT_ADAPTER = "wsgiref"
# Default REST-JSON API server listen address
RESTAPI_DEFAULT_ADDRESS = "127.0.0.1"
# Default REST-JSON API server listen port
RESTAPI_DEFAULT_PORT = 8775
# Format used for representing invalid unicode characters
INVALID_UNICODE_CHAR_FORMAT = r"\x%02x"
# Regular expression for XML POST data
XML_RECOGNITION_REGEX = r"(?s)\A\s*<[^>]+>(.+>)?\s*\Z"
# Regular expression used for detecting JSON POST data
JSON_RECOGNITION_REGEX = r'(?s)\A(\s*\[)*\s*\{.*"[^"]+"\s*:\s*("[^"]*"|\d+|true|false|null).*\}\s*(\]\s*)*\Z'
# Regular expression used for detecting JSON-like POST data
JSON_LIKE_RECOGNITION_REGEX = r"(?s)\A(\s*\[)*\s*\{.*'[^']+'\s*:\s*('[^']+'|\d+).*\}\s*(\]\s*)*\Z"
# Regular expression used for detecting multipart POST data
MULTIPART_RECOGNITION_REGEX = r"(?i)Content-Disposition:[^;]+;\s*name="
# Regular expression used for detecting Array-like POST data
ARRAY_LIKE_RECOGNITION_REGEX = r"(\A|%s)(\w+)\[\]=.+%s\2\[\]=" % (DEFAULT_GET_POST_DELIMITER, DEFAULT_GET_POST_DELIMITER)
# Default POST data content-type
DEFAULT_CONTENT_TYPE = "application/x-www-form-urlencoded; charset=utf-8"
# Raw text POST data content-type
PLAIN_TEXT_CONTENT_TYPE = "text/plain; charset=utf-8"
# Length used while checking for existence of Suhosin-patch (like) protection mechanism
SUHOSIN_MAX_VALUE_LENGTH = 512
# Minimum size of a (binary) entry before it can be considered for dumping to disk
MIN_BINARY_DISK_DUMP_SIZE = 100
# Filenames of payloads xml files (in order of loading)
PAYLOAD_XML_FILES = ("boolean_blind.xml", "error_based.xml", "inline_query.xml", "stacked_queries.xml", "time_blind.xml", "union_query.xml")
# Regular expression used for extracting form tags
FORM_SEARCH_REGEX = r"(?si)<form(?!.+<form).+?</form>"
# Maximum number of lines to save in history file
MAX_HISTORY_LENGTH = 1000
# Minimum field entry length needed for encoded content (hex, base64,...) check
MIN_ENCODED_LEN_CHECK = 5
# Timeout in seconds in which Metasploit remote session has to be initialized
METASPLOIT_SESSION_TIMEOUT = 120
# Reference: http://www.postgresql.org/docs/9.0/static/catalog-pg-largeobject.html
LOBLKSIZE = 2048
# Suffix used to mark variables having keyword names
EVALCODE_KEYWORD_SUFFIX = "_KEYWORD"
# Reference: http://www.cookiecentral.com/faq/#3.5
NETSCAPE_FORMAT_HEADER_COOKIES = "# Netscape HTTP Cookie File."
# Infixes used for automatic recognition of parameters carrying anti-CSRF tokens
CSRF_TOKEN_PARAMETER_INFIXES = ("csrf", "xsrf", "token")
# Prefixes used in brute force search for web server document root
BRUTE_DOC_ROOT_PREFIXES = {
OS.LINUX: ("/var/www", "/usr/local/apache", "/usr/local/apache2", "/usr/local/www/apache22", "/usr/local/www/apache24", "/usr/local/httpd", "/var/www/nginx-default", "/srv/www", "/var/www/%TARGET%", "/var/www/vhosts/%TARGET%", "/var/www/virtual/%TARGET%", "/var/www/clients/vhosts/%TARGET%", "/var/www/clients/virtual/%TARGET%"),
OS.WINDOWS: ("/xampp", "/Program Files/xampp", "/wamp", "/Program Files/wampp", "/apache", "/Program Files/Apache Group/Apache", "/Program Files/Apache Group/Apache2", "/Program Files/Apache Group/Apache2.2", "/Program Files/Apache Group/Apache2.4", "/Inetpub/wwwroot", "/Inetpub/wwwroot/%TARGET%", "/Inetpub/vhosts/%TARGET%")
}
# Suffixes used in brute force search for web server document root
BRUTE_DOC_ROOT_SUFFIXES = ("", "html", "htdocs", "httpdocs", "php", "public", "src", "site", "build", "web", "www", "data", "sites/all", "www/build")
# String used for marking target name inside used brute force web server document root
BRUTE_DOC_ROOT_TARGET_MARK = "%TARGET%"
# Character used as a boundary in kb.chars (preferably less frequent letter)
KB_CHARS_BOUNDARY_CHAR = 'q'
# Letters of lower frequency used in kb.chars
KB_CHARS_LOW_FREQUENCY_ALPHABET = "zqxjkvbp"
# CSS style used in HTML dump format
HTML_DUMP_CSS_STYLE = """<style>
table{
margin:10;
background-color:#FFFFFF;
font-family:verdana;
font-size:12px;
align:center;
}
thead{
font-weight:bold;
background-color:#4F81BD;
color:#FFFFFF;
}
tr:nth-child(even) {
background-color: #D3DFEE
}
td{
font-size:12px;
}
th{
font-size:12px;
}
</style>"""
|
from fabric.operations import prompt
from loader import get_wrapper
from pios import run_local
import difflib, color
def print_diff(str1, str2):
    diff = difflib.ndiff(str1.splitlines(True),
                         str2.splitlines(True))
    for line in diff:
        if line.startswith('+'):
            with color.green():
                print(line[2:])
        elif line.startswith('-'):
            with color.red():
                print(line[2:])
def select_card():
os = get_wrapper()
prompt("\nplease remove your sd card and press enter")
with color.for_run():
str1 = run_local(os.list_devices())
prompt("\nplease insert your sd card and press enter")
with color.for_run():
str2 = run_local(os.list_devices())
    print('+/-')
print_diff(str1, str2)
return prompt("\nwhich device do you want to use: ")
def select_image(msg="the image file you want to use: "):
with color.for_run():
run_local("ls ./images")
return prompt('\n'+msg)
def backup(card_device=None, file_name=None):
"""store a bootable image from a sd device to ./images (run with sudo), run cd ./images && watch -n 1 'ls -al' to see changes """
os = get_wrapper()
if not card_device : card_device = select_card()
if not file_name: file_name = select_image("giva a name for the image to store under ./images/[?]: ")
image_path = "./images/%s" % file_name
with color.yellow():
run_local(os.unmount(card_device))
with color.green():
run_local(os.dd(card_device, image_path))
def restore(card_device=None, file_name=None):
"""write a bootable image from ./images/ to sd device"""
os = get_wrapper()
if not card_device: card_device = select_card()
if not file_name: file_name = select_image("the image file you want to write to %s: " % card_device)
image_path = "./images/%s" % file_name
with color.yellow():
run_local(os.unmount(card_device))
run_local(os.mk_fat_32(card_device))
with color.green():
run_local(os.dd(image_path, card_device))
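# --- Hedged usage note (not part of the original fabfile) ---
# These functions are written as Fabric 1.x tasks; a typical shell invocation
# (assumed, not taken from the original file) would look like:
#
#   fab backup                              # interactive: prompts for device/name
#   fab backup:/dev/sdb,raspbian.img        # positional task arguments
#   fab restore:/dev/sdb,raspbian.img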
|
# File: rigidbody.py
import abc
import torch
from mechamodlearn import nn, utils
from mechamodlearn.models import CholeskyMMNet, PotentialNet, GeneralizedForceNet
class AbstractRigidBody(abc.ABC):
@property
@abc.abstractmethod
def thetamask(self):
"""Returns theta mask for configuration q.
These should use utils.diffangles to compute differences
"""
@abc.abstractmethod
def mass_matrix(self, q):
"""Return mass matrix for configuration q"""
@abc.abstractmethod
def potential(self, q):
"""Return potential for configuration q"""
@abc.abstractmethod
def generalized_force(self, q, v, u):
"""Return generalized force for configuration q, velocity v, external torque u"""
def kinetic_energy(self, q, v):
mass_matrix = self.mass_matrix(q)
# TODO(jkg): Check if this works correctly for batched
kenergy = 0.5 * (v.unsqueeze(1) @ (mass_matrix @ v.unsqueeze(2))).squeeze(2)
return kenergy
def lagrangian(self, q, v):
""" Returns the Lagrangian of a mechanical system
"""
kenergy = self.kinetic_energy(q, v)
pot = self.potential(q)
lag = kenergy - pot
return lag
def hamiltonian(self, q, v):
""" Returns the Hamiltonian of a mechanical system
"""
kenergy = self.kinetic_energy(q, v)
pot = self.potential(q)
ham = kenergy + pot
return ham
def corriolisforce(self, q, v, mass_matrix=None):
""" Computes the corriolis matrix times v
"""
with torch.enable_grad():
if mass_matrix is None:
mass_matrix = self.mass_matrix(q)
Mv = mass_matrix @ v.unsqueeze(2)
KE = 0.5 * v.unsqueeze(1) @ Mv
Cv_KE = torch.autograd.grad(KE.sum(), q, retain_graph=True, create_graph=True)[0]
gMv = torch.stack([
torch.autograd.grad(Mv[:, i].sum(), q, retain_graph=True, create_graph=True)[0]
for i in range(q.size(1))
], dim=1)
Cv = gMv @ v.unsqueeze(2) - Cv_KE.unsqueeze(2)
return Cv
def corriolis(self, q, v, mass_matrix=None):
""" Computes the corriolis matrix
"""
with torch.enable_grad():
if mass_matrix is None:
mass_matrix = self.mass_matrix(q)
qdim = q.size(1)
B = mass_matrix.size(0)
mass_matrix = mass_matrix.reshape(-1, qdim, qdim)
# TODO vectorize
rows = []
for i in range(qdim):
cols = []
for j in range(qdim):
qgrad = torch.autograd.grad(
torch.sum(mass_matrix[:, i, j]), q, retain_graph=True, create_graph=True)[0]
cols.append(qgrad)
rows.append(torch.stack(cols, dim=1))
dMijk = torch.stack(rows, dim=1)
corriolis = 0.5 * ((dMijk + dMijk.transpose(2, 3) - dMijk.transpose(1, 3)
) @ v.reshape(B, 1, qdim, 1)).squeeze(3)
return corriolis
def gradpotential(self, q):
""" Returns the conservative forces acting on the system
"""
with torch.enable_grad():
pot = self.potential(q)
gvec = torch.autograd.grad(torch.sum(pot), q, retain_graph=True, create_graph=True)[0]
return gvec
def solve_euler_lagrange(self, q, v, u=None):
""" Computes `qddot` (generalized acceleration) by solving
the Euler-Lagrange equation (Eq 7 in the paper)
\qddot = M^-1 (F - Cv - G)
"""
with torch.enable_grad():
with utils.temp_require_grad((q, v)):
M = self.mass_matrix(q)
Cv = self.corriolisforce(q, v, M)
G = self.gradpotential(q)
F = torch.zeros_like(Cv)
if u is not None:
F = self.generalized_force(q, v, u)
        # Solve M \qddot = F - Cv - G; torch.gesv was removed in newer PyTorch
        # releases, and torch.linalg.solve performs the equivalent batched solve
        qddot = torch.linalg.solve(M, F - Cv - G.unsqueeze(2)).squeeze(2)
return qddot
class LearnedRigidBody(AbstractRigidBody, torch.nn.Module):
    def __init__(self, qdim: int, udim: int, thetamask: torch.Tensor, mass_matrix=None,
potential=None, generalized_force=None, hidden_sizes=None):
"""
Arguments:
        - `qdim`: [int] dimension of the configuration q
        - `udim`: [int] dimension of the control input u
- `thetamask`: [torch.Tensor (1, qdim)] 1 if angle, 0 otherwise
- `mass_matrix`: [torch.nn.Module]
- `potential`: [torch.nn.Module]
- `generalized_force`: [torch.nn.Module]
- hidden_sizes: [list]
"""
self._qdim = qdim
self._udim = udim
self._thetamask = thetamask
super().__init__()
if mass_matrix is None:
mass_matrix = CholeskyMMNet(qdim, hidden_sizes=hidden_sizes)
self._mass_matrix = mass_matrix
if potential is None:
potential = PotentialNet(qdim, hidden_sizes=hidden_sizes)
self._potential = potential
if generalized_force is None:
generalized_force = GeneralizedForceNet(qdim, udim, hidden_sizes)
self._generalized_force = generalized_force
def mass_matrix(self, q):
return self._mass_matrix(q)
def potential(self, q):
return self._potential(q)
def generalized_force(self, q, v, u):
return self._generalized_force(q, v, u)
@property
def thetamask(self):
return self._thetamask
def forward(self, q, v, u=None):
return self.solve_euler_lagrange(q, v, u)
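# --- Hedged usage sketch (not part of the original module) ---
# Illustrates querying a LearnedRigidBody for generalized accelerations; the
# dimensions, batch size, and hidden sizes below are illustrative assumptions.
if __name__ == "__main__":
    qdim, udim, batch = 2, 1, 4
    thetamask = torch.tensor([[1.0, 0.0]])  # first coordinate is an angle
    body = LearnedRigidBody(qdim, udim, thetamask, hidden_sizes=[32, 32])
    q = torch.randn(batch, qdim)            # configurations
    v = torch.randn(batch, qdim)            # velocities
    u = torch.randn(batch, udim)            # control inputs
    qddot = body(q, v, u)                   # solves M qddot = F - Cv - G
    print(qddot.shape)                      # expected: (batch, qdim)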
|
"""
Validation utilities.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import eta.core.utils as etau
import fiftyone.core.media as fom
import fiftyone.core.utils as fou
foc = fou.lazy_import("fiftyone.core.collections")
fov = fou.lazy_import("fiftyone.core.video")
def validate_image_sample(sample):
"""Validates that the sample's media is an image.
Args:
sample: a :class:`fiftyone.core.sample.Sample`
Raises:
        ValueError: if the sample's media is not an image
"""
if sample.media_type != fom.IMAGE:
raise ValueError(
"Expected media type '%s' but found '%s' for filepath '%s'"
% (fom.IMAGE, sample.media_type, sample.filepath)
)
if isinstance(sample, fov.FrameView):
_validate_image(sample.filepath)
def validate_video_sample(sample):
"""Validates that the sample's media is a video.
Args:
sample: a :class:`fiftyone.core.sample.Sample`
Raises:
        ValueError: if the sample's media is not a video
"""
if sample.media_type != fom.VIDEO:
raise ValueError(
"Expected media type '%s' but found '%s' for filepath '%s'"
% (fom.VIDEO, sample.media_type, sample.filepath)
)
def validate_collection(sample_collection):
"""Validates that the provided samples are a
:class:`fiftyone.core.collections.SampleCollection`.
Args:
sample_collection: a sample collection
Raises:
ValueError: if ``samples`` is not a
:class:`fiftyone.core.collections.SampleCollection`
"""
if not isinstance(sample_collection, foc.SampleCollection):
raise ValueError(
"Expected samples to be a %s; found %s"
% (foc.SampleCollection, sample_collection.__class__)
)
def validate_image_collection(sample_collection):
"""Validates that the provided samples are an image
:class:`fiftyone.core.collections.SampleCollection`.
Args:
sample_collection: a sample collection
Raises:
ValueError: if ``samples`` is not an image
:class:`fiftyone.core.collections.SampleCollection`
"""
validate_collection(sample_collection)
if sample_collection.media_type != fom.IMAGE:
raise ValueError(
"Expected collection to have media type %s; found %s"
% (fom.IMAGE, sample_collection.media_type)
)
if sample_collection._dataset._is_frames:
try:
filepath = sample_collection[:1].values("filepath")[0]
        except Exception:
            return  # empty collection
_validate_image(filepath)
def validate_video_collection(sample_collection):
"""Validates that the provided samples are a video
:class:`fiftyone.core.collections.SampleCollection`.
Args:
sample_collection: a sample collection
Raises:
ValueError: if ``samples`` is not a video
:class:`fiftyone.core.collections.SampleCollection`
"""
validate_collection(sample_collection)
if sample_collection.media_type != fom.VIDEO:
raise ValueError(
"Expected collection to have media type %s; found %s"
% (fom.VIDEO, sample_collection.media_type)
)
def validate_collection_label_fields(
sample_collection, field_names, allowed_label_types, same_type=False
):
"""Validates that the :class:`fiftyone.core.collections.SampleCollection`
has fields with the specified :class:`fiftyone.core.labels.Label` types.
Args:
sample_collection: a
:class:`fiftyone.core.collections.SampleCollection`
field_names: a field name or iterable of field names
allowed_label_types: a :class:`fiftyone.core.labels.Label` type or
iterable of allowed :class:`fiftyone.core.labels.Label` types
same_type (False): whether to enforce that all fields have same type.
This condition is enforced separately for sample- and frame-level
fields
Raises:
        ValueError: if the required conditions are not met
"""
validate_collection(sample_collection)
if etau.is_str(field_names):
field_names = [field_names]
if not etau.is_container(allowed_label_types):
allowed_label_types = [allowed_label_types]
if sample_collection.media_type == fom.VIDEO:
sample_fields, frame_fields = fou.split_frame_fields(field_names)
else:
sample_fields = field_names
frame_fields = []
if frame_fields:
_validate_fields(
sample_collection,
frame_fields,
allowed_label_types,
same_type,
frames=True,
)
if sample_fields:
_validate_fields(
sample_collection,
sample_fields,
allowed_label_types,
same_type,
)
def _validate_image(filepath):
actual_media_type = fom.get_media_type(filepath)
if actual_media_type != fom.IMAGE:
raise ValueError(
"The requested operation requires samples whose filepaths are "
"images, but we found a sample whose filepath '%s' has media type "
"'%s'.\n\nIf you are working with a frames view that was created "
"via `to_frames(..., sample_frames=False)`, then re-create the "
"view without `sample_frames=False` so that the necessary images "
"will be available." % (filepath, actual_media_type)
)
def _validate_fields(
sample_collection,
field_names,
allowed_label_types,
same_type,
frames=False,
):
if frames:
schema = sample_collection.get_frame_field_schema()
else:
schema = sample_collection.get_field_schema()
label_types = {}
for field_name in field_names:
if field_name not in schema:
ftype = "frame field" if frames else "sample field"
raise ValueError(
"%s has no %s '%s'"
% (sample_collection.__class__.__name__, ftype, field_name)
)
field = schema[field_name]
try:
label_type = field.document_type
        except Exception:
label_type = field
if label_type not in allowed_label_types:
ftype = "Frame field" if frames else "Sample field"
raise ValueError(
"%s '%s' is not a %s instance; found %s"
% (ftype, field_name, allowed_label_types, label_type)
)
label_types[field_name] = label_type
if same_type and len(set(label_types.values())) > 1:
ftype = "Frame fields" if frames else "Sample fields"
raise ValueError(
"%s %s must have the same type; found %s"
% (ftype, field_names, label_types)
)
def get_field(sample, field_name, allowed_types=None, allow_none=True):
"""Gets the given sample field and optionally validates its type and value.
Args:
sample: a :class:`fiftyone.core.sample.Sample`
field_name: the name of the field to get
allowed_types (None): an optional iterable of
:class:`fiftyone.core.labels.Label` types to enforce that the field
value has
allow_none (True): whether to allow the field to be None
Returns:
the field value
Raises:
        ValueError: if the field does not exist or does not meet the specified
criteria
"""
try:
value = sample[field_name]
except KeyError:
raise ValueError(
"Sample '%s' has no field '%s'" % (sample.id, field_name)
)
if not allow_none and value is None:
raise ValueError(
"Sample '%s' field '%s' is None" % (sample.id, field_name)
)
if allowed_types is not None:
field_type = type(value)
if field_type not in allowed_types:
raise ValueError(
"Sample '%s' field '%s' is not a %s instance; found %s"
% (sample.id, field_name, allowed_types, field_type)
)
return value
def get_fields(
sample, field_names, allowed_types=None, same_type=False, allow_none=True
):
"""Gets the given sample fields and optionally validates their types and
values.
Args:
sample: a :class:`fiftyone.core.sample.Sample`
field_names: an iterable of field names to get
allowed_types (None): an optional iterable of
:class:`fiftyone.core.labels.Label` types to enforce that the
field values have
same_type (False): whether to enforce that all fields have same type
allow_none (True): whether to allow the fields to be None
Returns:
a tuple of field values
Raises:
        ValueError: if a field does not exist or does not meet the specified
criteria
"""
label_types = {}
values = []
for field_name in field_names:
value = get_field(
sample,
field_name,
allowed_types=allowed_types,
allow_none=allow_none,
)
if same_type:
label_types[field_name] = type(value)
values.append(value)
if same_type and len(set(label_types.values())) > 1:
raise ValueError(
"Sample '%s' fields %s must have the same type; found %s"
% (sample.id, field_names, label_types)
)
return tuple(values)
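# --- Hedged usage sketch (not part of the original module) ---
# Shows how these validators could guard a label-processing operation.
# The dataset name and field names below are hypothetical.
#
#   import fiftyone as fo
#   import fiftyone.core.labels as fol
#
#   dataset = fo.load_dataset("my-dataset")          # hypothetical dataset
#   validate_image_collection(dataset)
#   validate_collection_label_fields(dataset, "ground_truth", fol.Detections)
#
#   sample = dataset.first()
#   gt = get_field(sample, "ground_truth", allowed_types=[fol.Detections])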
|
"""Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. All of the interfaces
provided by this module can be used without fear of race conditions
except for 'mktemp'. 'mktemp' is subject to race conditions and
should not be used; it is provided for backward compatibility only.
The default path names are returned as str. If you supply bytes as
input, all return values will be in bytes. Ex:
>>> tempfile.mkstemp()
(4, '/tmp/tmptpu9nin8')
>>> tempfile.mkdtemp(suffix=b'')
b'/tmp/tmppbi8f0hy'
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir",
"gettempprefixb", "gettempdirb",
]
# Imports.
import functools as _functools
import warnings as _warnings
import io as _io
import os as _os
try:
import shutil as _shutil
_rmtree = _shutil.rmtree
except ImportError:
import sys as _sys
import stat as _stat
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if _os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(_os.path.islink, path, _sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = _os.listdir(path)
except OSError:
onerror(_os.listdir, path, _sys.exc_info())
for name in names:
fullname = _os.path.join(path, name)
try:
mode = _os.lstat(fullname).st_mode
except OSError:
mode = 0
if _stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
_os.unlink(fullname)
except OSError:
onerror(_os.unlink, fullname, _sys.exc_info())
try:
_os.rmdir(path)
except OSError:
onerror(_os.rmdir, path, _sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = _os.listdir(topfd)
except OSError as err:
err.filename = path
onerror(_os.listdir, path, _sys.exc_info())
for name in names:
fullname = _os.path.join(path, name)
try:
orig_st = _os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except OSError:
mode = 0
if _stat.S_ISDIR(mode):
try:
dirfd = _os.open(name, _os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(_os.open, fullname, _sys.exc_info())
else:
try:
if _os.path.samestat(orig_st, _os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
_os.rmdir(name, dir_fd=topfd)
except OSError:
onerror(_os.rmdir, fullname, _sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(_os.path.islink, fullname, _sys.exc_info())
finally:
_os.close(dirfd)
else:
try:
_os.unlink(name, dir_fd=topfd)
except OSError:
onerror(_os.unlink, fullname, _sys.exc_info())
_use_fd_functions = ({_os.open, _os.stat, _os.unlink, _os.rmdir} <=
_os.supports_dir_fd and
_os.listdir in _os.supports_fd and
_os.stat in _os.supports_follow_symlinks)
def _rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = _os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = _os.lstat(path)
except Exception:
onerror(_os.lstat, path, _sys.exc_info())
return
try:
fd = _os.open(path, _os.O_RDONLY)
except Exception:
onerror(_os.lstat, path, _sys.exc_info())
return
try:
if _os.path.samestat(orig_st, _os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
_os.rmdir(path)
except OSError:
onerror(_os.rmdir, path, _sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(_os.path.islink, path, _sys.exc_info())
finally:
_os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
import errno as _errno
from random import Random as _Random
import weakref as _weakref
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
# This variable _was_ unused for legacy reasons, see issue 10354.
# But as of 3.5 we actually use it at runtime so changing it would
# have a possibly desirable side effect... But we do not want to support
# that as an API. It is undocumented on purpose. Do not depend on this.
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
fd = _os.open(fn, _os.O_RDONLY)
_os.close(fd)
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
def _infer_return_type(*args):
"""Look at the type of all args and divine their implied return type."""
return_type = None
for arg in args:
if arg is None:
continue
if isinstance(arg, bytes):
if return_type is str:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = bytes
else:
if return_type is bytes:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = str
if return_type is None:
return str # tempfile APIs return a str by default.
return return_type
def _sanitize_params(prefix, suffix, dir):
"""Common parameter processing for most APIs in this module."""
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
if output_type is str:
prefix = template
else:
prefix = _os.fsencode(template)
if dir is None:
if output_type is str:
dir = gettempdir()
else:
dir = gettempdirb()
return prefix, suffix, dir, output_type
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is eight characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in range(8)]
return ''.join(letters)
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'),
_os.path.expandvars(r'%SYSTEMROOT%\Temp'),
r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.abspath(dir)
# Try only a few names per directory.
for seq in range(100):
name = next(namer)
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, _bin_openflags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except FileExistsError:
pass
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if (_os.name == 'nt' and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
break # no point trying more names in this directory
except OSError:
break # no point trying more names in this directory
raise FileNotFoundError(_errno.ENOENT,
"No usable temporary directory found in %s" %
dirlist)
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags, output_type):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0o600)
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if (_os.name == 'nt' and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
else:
raise
return (fd, _os.path.abspath(file))
raise FileExistsError(_errno.EEXIST,
"No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
"""The default prefix for temporary directories."""
return template
def gettempprefixb():
"""The default prefix for temporary directories as bytes."""
return _os.fsencode(gettempprefix())
tempdir = None
def gettempdir():
"""Accessor for tempfile.tempdir."""
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def gettempdirb():
"""A bytes version of tempfile.gettempdir()."""
return _os.fsencode(gettempdir())
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is not None, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is not None, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is not None, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
If any of 'suffix', 'prefix' and 'dir' are not None, they must be the
same type. If they are bytes, the returned name will be bytes; str
otherwise.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags, output_type)
def mkdtemp(suffix=None, prefix=None, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if (_os.name == 'nt' and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
else:
raise
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are similar to mkstemp, except that the 'text' argument is
not accepted, and suffix=None, prefix=None and bytes file names are not
supported.
THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may
refer to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary filename found")
class _TemporaryFileCloser:
"""A separate object allowing proper closing of a temporary file's
underlying file object, without adding a __del__ method to the
temporary file."""
file = None # Set here since __del__ checks it
close_called = False
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
# NT provides delete-on-close as a primitive, so we don't need
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt':
# Cache the unlinker so we don't get spurious errors at
# shutdown when the module-level "os" is None'd out. Note
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
# __del__ is called.
def close(self, unlink=_os.unlink):
if not self.close_called and self.file is not None:
self.close_called = True
try:
self.file.close()
finally:
if self.delete:
unlink(self.name)
# Need to ensure the file is deleted on __del__
def __del__(self):
self.close()
else:
def close(self):
if not self.close_called:
self.close_called = True
self.file.close()
class _TemporaryFileWrapper:
"""Temporary file wrapper
This class provides a wrapper around files opened for
temporary use. In particular, it seeks to automatically
remove the file when it is no longer needed.
"""
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
self._closer = _TemporaryFileCloser(file, name, delete)
def __getattr__(self, name):
# Attribute lookups are delegated to the underlying file
# and cached for non-numeric results
# (i.e. methods are cached, closed and friends are not)
file = self.__dict__['file']
a = getattr(file, name)
if hasattr(a, '__call__'):
func = a
@_functools.wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
# Avoid closing the file as long as the wrapper is alive,
# see issue #18879.
func_wrapper._closer = self._closer
a = func_wrapper
if not isinstance(a, int):
setattr(self, name, a)
return a
# The underlying __enter__ method returns the wrong object
# (self.file) so override it to return the wrapper
def __enter__(self):
self.file.__enter__()
return self
# Need to trap __exit__ as well to ensure the file gets
# deleted when used in a with statement
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
def close(self):
"""
Close the temporary file, possibly deleting it.
"""
self._closer.close()
# iter() doesn't use __getattr__ to find the __iter__ method
def __iter__(self):
# Don't return iter(self.file), but yield from it to avoid closing
# file as long as it's being used as iterator (see issue #23700). We
# can't use 'yield from' here because iter(file) returns the file
# object itself, which has a close method, and thus the file would get
# closed when the generator is finalized, due to PEP380 semantics.
for line in self.file:
yield line
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None, delete=True):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
'delete' -- whether the file is deleted on close (default True).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface; the name of the file
is accessible as its 'name' attribute. The file will be automatically
deleted when it is closed unless the 'delete' argument is set to False.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
# Setting O_TEMPORARY in the flags causes the OS to delete
# the file when it is closed. This is only supported by Windows.
if _os.name == 'nt' and delete:
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
file = _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
return _TemporaryFileWrapper(file, name, delete)
except BaseException:
_os.unlink(name)
_os.close(fd)
raise
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
# while it is open.
TemporaryFile = NamedTemporaryFile
else:
# Is the O_TMPFILE flag available and does it work?
# The flag is set to False if os.open(dir, os.O_TMPFILE) raises an
# IsADirectoryError exception
_O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE')
def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
global _O_TMPFILE_WORKS
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
if _O_TMPFILE_WORKS:
try:
flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT
fd = _os.open(dir, flags2, 0o600)
except IsADirectoryError:
# Linux kernel older than 3.11 ignores the O_TMPFILE flag:
# O_TMPFILE is read as O_DIRECTORY. Trying to open a directory
# with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a
# directory cannot be open to write. Set flag to False to not
# try again.
_O_TMPFILE_WORKS = False
except OSError:
# The filesystem of the directory does not support O_TMPFILE.
# For example, OSError(95, 'Operation not supported').
#
# On Linux kernel older than 3.11, trying to open a regular
# file (or a symbolic link to a regular file) with O_TMPFILE
# fails with NotADirectoryError, because O_TMPFILE is read as
# O_DIRECTORY.
pass
else:
try:
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
# Fallback to _mkstemp_inner().
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
_os.unlink(name)
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from BytesIO
or StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', buffering=-1,
encoding=None, newline=None,
suffix=None, prefix=None, dir=None):
if 'b' in mode:
self._file = _io.BytesIO()
else:
# Setting newline="\n" avoids newline translation;
# this is important because otherwise on Windows we'd
# get double newline translation upon rollover().
self._file = _io.StringIO(newline="\n")
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
'suffix': suffix, 'prefix': prefix,
'encoding': encoding, 'newline': newline,
'dir': dir}
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# BytesIO/StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
@property
def encoding(self):
try:
return self._file.encoding
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['encoding']
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs['mode']
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
@property
def newlines(self):
try:
return self._file.newlines
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['newline']
def read(self, *args):
return self._file.read(*args)
def readline(self, *args):
return self._file.readline(*args)
def readlines(self, *args):
return self._file.readlines(*args)
def seek(self, *args):
self._file.seek(*args)
@property
def softspace(self):
return self._file.softspace
def tell(self):
return self._file.tell()
def truncate(self, size=None):
if size is None:
self._file.truncate()
else:
if size > self._max_size:
self.rollover()
self._file.truncate(size)
def write(self, s):
file = self._file
rv = file.write(s)
self._check(file)
return rv
def writelines(self, iterable):
file = self._file
rv = file.writelines(iterable)
self._check(file)
return rv
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix=None, prefix=None, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._finalizer = _weakref.finalize(
self, self._cleanup, self.name,
warn_message="Implicitly cleaning up {!r}".format(self))
@classmethod
def _cleanup(cls, name, warn_message):
_rmtree(name)
_warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
_rmtree(self.name)
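# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the high- and low-level interfaces defined above.
if __name__ == "__main__":
    with TemporaryDirectory() as tmpdir:
        print("created", tmpdir)        # directory removed when the block exits
    with NamedTemporaryFile(suffix=".txt") as f:
        f.write(b"scratch data")        # file deleted on close by default
        print("wrote", f.name)
    fd, path = mkstemp()                # low-level: caller must clean up
    _os.close(fd)
    _os.unlink(path)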
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.contrib import admin
from .models import IndivRecord, BesteIndivRecords
class IndivRecordAdmin(admin.ModelAdmin):
""" Admin configuratie voor Records.IndivRecord klasse """
search_fields = ('naam', 'plaats', 'score', 'volg_nr')
    # filter options
list_filter = ('discipline', 'soort_record', 'geslacht', 'leeftijdscategorie', 'materiaalklasse',
'is_european_record', 'is_world_record')
autocomplete_fields = ('sporter',)
class BesteIndivRecordsAdmin(admin.ModelAdmin):
    # filter options
list_filter = ('discipline', 'soort_record', 'geslacht', 'leeftijdscategorie', 'materiaalklasse')
admin.site.register(IndivRecord, IndivRecordAdmin)
admin.site.register(BesteIndivRecords, BesteIndivRecordsAdmin)
# end of file
|
import re
class Key(object):
"Key used in keycards and locks"
pass
class KeyCard(object):
"Keycard used to open a lock"
def __init__(self, first_key, second_key):
"""
Constructs a KeyCard with the given keys
Args:
first_key: in the keycard to be created
second_key: in the keycard to be created
Raises:
ValueError if any of the keys are not of type Key
"""
if not isinstance(first_key, Key):
raise ValueError("First key is not of Key type")
if not isinstance(second_key, Key):
raise ValueError("Second key is not of Key type")
self._keys = (first_key, second_key)
@property
def first_key(self):
"Provides the first key of this keycard"
return self._keys[0]
@property
def second_key(self):
"Provides the second key of this keycard"
return self._keys[1]
class Lock(object):
"Lock on a room door"
def __init__(self, first_key, second_key):
"""
Constructs a Lock with the given keys
Args:
first_key: in the lock to be created
second_key: in the lock to be created
Raises:
ValueError if any of the keys are not of type Key
"""
if not isinstance(first_key, Key):
raise ValueError("First key is not of Key type")
if not isinstance(second_key, Key):
raise ValueError("Second key is not of Key type")
self._keys = (first_key, second_key)
def can_be_unlocked(self, keycard):
"""
Checks if this lock can be unlocked with the given keycard
Return:
True if the lock can be unlocked; False otherwise
Raises:
ValueError if keycard is not of KeyCard Type
"""
if not isinstance(keycard, KeyCard):
raise ValueError("keycard is not of KeyCard type")
return self._keys[0] == keycard.first_key and \
self._keys[1] == keycard.second_key
class Room(object):
"Room in a hotel"
def __init__(self, room_number, lock):
"""
Constructs a Room with given number and lock
Args:
            room_number: of this room. This has to be greater than 0.
lock: of this room.
Raises:
ValueError if the room number is less than 1 or
                lock is not of type Lock
"""
if type(room_number) != int:
raise ValueError("room_number is not of integer type")
if room_number < 1:
raise ValueError("room_number is less than 1")
if not isinstance(lock, Lock):
raise ValueError("lock is not of Lock type")
self._number = room_number
self._lock = lock
@property
def last_key(self):
return self._last_key
@last_key.setter
def last_key(self, key):
self._last_key = key
@property
def keys(self):
k = self.last_key
self.last_key = Key()
return (k, self.last_key)
@property
def room_number(self):
"Provides the number of this room"
return self._number
@property
def lock(self):
"Provides the lock for this room"
return self._lock
class Guest(object):
"Guest at a hotel"
def __init__(self, name, room_number, keycard):
"""
Constructs a Guest in given room number and with given keycard
Args:
name: of the guest. This should be at least 2 characters long and
                be composed of letters from the English alphabet.
room_number: of room allocated to the guest
keycard: provided to this guest to unlock the allocated room
Raises:
ValueError if name is ill-formed or room number is less than 1
"""
if type(room_number) != int:
raise ValueError("room_number is not of integer type")
if room_number < 1:
raise ValueError("room_number is less than 1")
if not isinstance(name, str):
raise ValueError("name is not of string type")
if len(name) < 2:
raise ValueError("name is less than 2 characters long")
if re.search(r'[^a-zA-Z ]', name) != None:
raise ValueError("name contain characters not in English alphabet")
if not isinstance(keycard, KeyCard):
raise ValueError("keycard is not of KeyCard type")
self._guest_name = name
self._room_number = room_number
self._keycard = keycard
@property
def guest_name(self):
"Provides the name of this guest"
return self._guest_name
@property
def keycard(self):
"Provides the keycard of this guest"
return self._keycard
@property
def room_number(self):
"Provides the number of the room occupied by this guest"
return self._room_number
def is_checkedin(self, hotel):
"""
Checks if this guest is checked into this hotel
Returns:
True if this guest is checked in at the given hotel; False otherwise
Raises:
ValueError if hotel is not of Hotel type
"""
if not isinstance(hotel, Hotel):
raise ValueError("hotel is not of Hotel type")
return hotel.is_checkedin(self._guest_name)
class FullCapacityError(RuntimeError):
pass
class Hotel(object):
"Hotel"
def __init__(self, N):
"Constructs a Hotel with N rooms"
if type(N) != int:
raise ValueError("N is not of int type")
if N < 10 or N > 1000:
raise ValueError("N is not between 10 and 1000, both inclusive")
self._name2guest = {}
self._name2room = {}
self._capacity = N
self._empty_rooms = []
for i in range(1, N + 1):
k = Key()
r = Room(i, Lock(k, k))
r.last_key = k
self._empty_rooms.append(r)
def checkin(self, guest_name):
"""
Checks the guest into the hotel by allocating a room
Return:
the corresponding Guest
Raises:
ValueError if guest name is not of str type or
is already checked in at this hotel
"""
if not isinstance(guest_name, str):
raise ValueError("guest name is not of string type")
if guest_name in self._name2guest:
raise ValueError(
"guest named {0} is already checked in".format(guest_name))
if len(self._name2guest) >= self._capacity:
raise FullCapacityError()
room = self._empty_rooms.pop()
last_key, new_key = room.keys
guest = Guest(guest_name, room.room_number, KeyCard(last_key, new_key))
self._name2guest[guest_name] = guest
self._name2room[guest_name] = room
return guest
def is_checkedin(self, guest_name):
"""
Checks if the guest is a guest at this Hotel
Return:
True if the guest is checked in at this Hotel; False otherwise
Raises:
ValueError if guest name is not of str type
"""
if not isinstance(guest_name, str):
raise ValueError("guest name is not of string type")
return guest_name in self._name2guest
def checkout(self, guest_name):
"""
Checks out the guest from the hotel
Raises:
ValueError if guest name is not of str type
"""
if not isinstance(guest_name, str):
raise ValueError("guest name is not of string type")
if guest_name in self._name2guest:
del self._name2guest[guest_name]
room = self._name2room.pop(guest_name)
self._empty_rooms.append(room)
def room_of(self, guest_name):
"""
Provides the room for the guest
Return:
the corresponding Room
Raises:
            ValueError if the guest name is not of str type or
                the guest is not checked in at this hotel
"""
if not isinstance(guest_name, str):
raise ValueError("guest name is not of string type")
if guest_name not in self._name2room:
raise ValueError(
"guest {0} is not checked in at this hotel".format(guest_name))
return self._name2room[guest_name]
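# --- Hedged usage sketch (not part of the original module) ---
# Walks through the check-in/check-out API defined above; the guest name is
# illustrative.
if __name__ == "__main__":
    hotel = Hotel(10)
    guest = hotel.checkin("Alice")
    room = hotel.room_of("Alice")
    print(guest.room_number == room.room_number)   # True
    print(guest.is_checkedin(hotel))               # True
    hotel.checkout("Alice")
    print(hotel.is_checkedin("Alice"))             # False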
|
""" Make comparing version strings super simple.
If you want code using your package to be able to verify its version as easily as::
# user_code.py
import my_package
assert my_package.__version__ >= "1.1"
Just customize your package as follows::
# my_package/__init__.py
import verstr
__version__ = verstr.verstr("1.2.4")
"""
import collections
import packaging.version
__all__ = ['verstr']
def verstr(str_version, mode="str"):
""" returns a comparable version object.
verstr(str_version)
verstr(str_version, mode)
Parameters
----------
str_version: str
A string that follows PEP 440, the standard version scheme for Python packages
mode: str
A string to select the type of the returned value.
Returns
-------
VersionCompareMixin
The comparable version object.
Its type depends on the `mode` argument:
'str' -> VersionString
'userstr' -> VersionUserString
'interface' -> VersionInterface
"""
modes = dict(
str=VersionString,
userstr=VersionUserString,
interface=VersionInterface
)
try:
cls = modes[mode]
except KeyError:
raise ValueError(
f"'mode' argument must be in {list(modes)}, "
f"get {mode!r} instead."
)
return cls(str_version)
def to_version(str_version):
return packaging.version.Version(str(str_version))
class VersionCompareMixin:
def _comp_op(str_op):
def op(self, other):
return getattr(to_version(self), str_op)(to_version(other))
op.__name__ = str_op
return op
__eq__ = _comp_op("__eq__")
__lt__ = _comp_op("__lt__")
__le__ = _comp_op("__le__")
__gt__ = _comp_op("__gt__")
__ge__ = _comp_op("__ge__")
del _comp_op
class VersionString(VersionCompareMixin, str):
def __new__(cls, object):
return super().__new__(cls, str(to_version(object)))
class VersionUserString(VersionCompareMixin, collections.UserString):
def __init__(self, data):
self.data = data
@property
def data(self):
return self.__dict__['data']
@data.setter
def data(self, data):
self.__dict__['data'] = str(to_version(data))
class VersionInterface(VersionCompareMixin):
def __init__(self, version):
self._version = to_version(version)
def __repr__(self):
return str(self._version)
try:
from . import _version
__version__ = verstr(_version.version)
except ImportError:
__version__ = None
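# --- Hedged usage sketch (not part of the original module) ---
# PEP 440 comparisons behave numerically, not lexicographically.
if __name__ == "__main__":
    v = verstr("1.2.4")
    print(v >= "1.2")          # True
    print(v < "1.10")          # True: 1.2.4 precedes 1.10 under PEP 440
    print(isinstance(v, str))  # True: the default mode returns a str subclass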
|
# -*- coding: utf-8 -*-
"""
The BaseCase class is the main gateway for using The SeleniumBase Framework.
It inherits Python's unittest.TestCase class, and runs with Pytest or Nose.
All tests using BaseCase automatically launch WebDriver browsers for tests.
Usage:
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_anything(self):
# Write your code here. Example:
self.open("https://github.com/")
self.update_text("input.header-search-input", "SeleniumBase\n")
self.click('a[href="/seleniumbase/SeleniumBase"]')
self.assert_element("div.repository-content")
....
SeleniumBase methods expand and improve on existing WebDriver commands.
Improvements include making WebDriver more robust, reliable, and flexible.
Page elements are given enough time to load before WebDriver acts on them.
Code becomes greatly simplified and easier to maintain.
"""
import codecs
import json
import logging
import math
import os
import re
import sys
import time
import urllib3
import unittest
import uuid
from selenium.common.exceptions import (StaleElementReferenceException,
MoveTargetOutOfBoundsException,
WebDriverException)
from selenium.common import exceptions as selenium_exceptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.remote_connection import LOGGER
from selenium.webdriver.support.ui import Select
from seleniumbase import config as sb_config
from seleniumbase.common import decorators
from seleniumbase.config import settings
from seleniumbase.core.testcase_manager import TestcaseDataPayload
from seleniumbase.core.testcase_manager import TestcaseManager
from seleniumbase.core import download_helper
from seleniumbase.core import log_helper
from seleniumbase.core import settings_parser
from seleniumbase.core import tour_helper
from seleniumbase.core import visual_helper
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import js_utils
from seleniumbase.fixtures import page_actions
from seleniumbase.fixtures import page_utils
from seleniumbase.fixtures import shared_utils
from seleniumbase.fixtures import xpath_to_css
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
urllib3.disable_warnings()
LOGGER.setLevel(logging.WARNING)
ECI_Exception = selenium_exceptions.ElementClickInterceptedException
ENI_Exception = selenium_exceptions.ElementNotInteractableException
class BaseCase(unittest.TestCase):
'''
A base test case that wraps methods for enhanced usage.
You can also add your own methods here.
'''
def __init__(self, *args, **kwargs):
super(BaseCase, self).__init__(*args, **kwargs)
self.driver = None
self.environment = None
self.env = None # Add a shortened version of self.environment
self.__last_url_of_delayed_assert = "data:,"
self.__last_page_load_url = "data:,"
self.__last_page_screenshot = None
self.__last_page_screenshot_png = None
self.__last_page_url = None
self.__last_page_source = None
self.__added_pytest_html_extra = None
self.__delayed_assert_count = 0
self.__delayed_assert_failures = []
self.__device_width = None
self.__device_height = None
self.__device_pixel_ratio = None
# Requires self._* instead of self.__* for external class use
self._html_report_extra = [] # (Used by pytest_plugin.py)
self._default_driver = None
self._drivers_list = []
self._tour_steps = {}
def open(self, url):
""" Navigates the current browser window to the specified page. """
self.__last_page_load_url = None
if url.startswith("://"):
# Convert URLs such as "://google.com" into "https://google.com"
url = "https" + url
self.driver.get(url)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active()
def open_url(self, url):
""" Same as open() - Original saved for backwards compatibility. """
self.open(url)
def get(self, url):
""" Same as open() - WebDriver uses this method name. """
self.open(url)
def visit(self, url):
""" Same as open() - Some JS frameworks use this method name. """
self.open(url)
def click(self, selector, by=By.CSS_SELECTOR, timeout=None, delay=0):
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if page_utils.is_link_text_selector(selector) or by == By.LINK_TEXT:
if not self.is_link_text_visible(selector):
# Handle a special case of links hidden in dropdowns
self.click_link_text(selector, timeout=timeout)
return
if page_utils.is_partial_link_text_selector(selector) or (
by == By.PARTIAL_LINK_TEXT):
if not self.is_partial_link_text_visible(selector):
# Handle a special case of partial links hidden in dropdowns
self.click_partial_link_text(selector, timeout=timeout)
return
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element, selector, by)
pre_action_url = self.driver.current_url
if delay and delay > 0:
time.sleep(delay)
try:
if self.browser == 'ie' and by == By.LINK_TEXT:
# An issue with clicking Link Text on IE means using jquery
self.__jquery_click(selector, by=by)
elif self.browser == "safari":
if by == By.LINK_TEXT:
self.__jquery_click(selector, by=by)
else:
self.__js_click(selector, by=by)
else:
# Normal click
element.click()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout)
if self.browser == "safari":
if by == By.LINK_TEXT:
self.__jquery_click(selector, by=by)
else:
self.__js_click(selector, by=by)
else:
element.click()
except (WebDriverException, MoveTargetOutOfBoundsException):
self.wait_for_ready_state_complete()
try:
self.__js_click(selector, by=by)
except Exception:
try:
self.__jquery_click(selector, by=by)
except Exception:
# One more attempt to click on the element
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout)
element.click()
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def slow_click(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Similar to click(), but pauses for a brief moment before clicking.
When used in combination with setting the user-agent, you can often
bypass bot-detection by tricking websites into thinking that you're
not a bot. (Useful on websites that block web automation tools.)
To set the user-agent, use: ``--agent=AGENT``.
Here's an example message from GitHub's bot-blocker:
``You have triggered an abuse detection mechanism...`` """
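        # Usage sketch (selector and agent string are illustrative):
        #     self.slow_click('a#login')  # Pauses briefly, then clicks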
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if not self.demo_mode:
self.click(selector, by=by, timeout=timeout, delay=1.05)
else:
# Demo Mode already includes a small delay
self.click(selector, by=by, timeout=timeout, delay=0.25)
def double_click(self, selector, by=By.CSS_SELECTOR, timeout=None):
from selenium.webdriver.common.action_chains import ActionChains
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element, selector, by)
pre_action_url = self.driver.current_url
try:
actions = ActionChains(self.driver)
actions.move_to_element(element)
actions.double_click(element)
actions.perform()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=timeout)
actions = ActionChains(self.driver)
actions.move_to_element(element)
actions.double_click(element)
actions.perform()
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def click_chain(self, selectors_list, by=By.CSS_SELECTOR,
timeout=None, spacing=0):
""" This method clicks on a list of elements in succession.
'spacing' is the amount of time to wait between clicks. (sec) """
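        # Usage sketch (selectors are illustrative):
        #     self.click_chain(["#step1", "#step2", "#step3"], spacing=0.5)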
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
for selector in selectors_list:
self.click(selector, by=by, timeout=timeout)
if spacing > 0:
time.sleep(spacing)
def type(self, selector, text, by=By.CSS_SELECTOR,
timeout=None, retry=False):
""" The short version of update_text(), which clears existing text
and adds new text into the text field.
We want to keep the other version for backward compatibility. """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def input(self, selector, text, by=By.CSS_SELECTOR,
timeout=None, retry=False):
""" Same as update_text(). """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
self.update_text(selector, text, by=by, timeout=timeout, retry=retry)
def update_text(self, selector, new_value, by=By.CSS_SELECTOR,
timeout=None, retry=False):
""" This method updates an element's text field with new text.
Has multiple parts:
* Waits for the element to be visible.
* Waits for the element to be interactive.
* Clears the text field.
* Types in the new text.
        * Hits Enter/Submit (if the text ends in "\\n").
@Params
selector - the selector of the text field
new_value - the new value to type into the text field
by - the type of selector to search by (Default: CSS Selector)
timeout - how long to wait for the selector to be visible
retry - if True, use JS if the Selenium text update fails
"""
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element, selector, by)
try:
element.clear()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.06)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
try:
element.clear()
except Exception:
pass # Clearing the text field first isn't critical
except Exception:
pass # Clearing the text field first isn't critical
self.__demo_mode_pause_if_active(tiny=True)
pre_action_url = self.driver.current_url
        if isinstance(new_value, (int, float)):
new_value = str(new_value)
try:
if not new_value.endswith('\n'):
element.send_keys(new_value)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
else:
new_value = new_value[:-1]
element.send_keys(new_value)
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.06)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
element.clear()
if not new_value.endswith('\n'):
element.send_keys(new_value)
else:
new_value = new_value[:-1]
element.send_keys(new_value)
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
except Exception:
exc_message = self.__get_improved_exception_message()
raise Exception(exc_message)
if (retry and element.get_attribute('value') != new_value and (
not new_value.endswith('\n'))):
logging.debug('update_text() is falling back to JavaScript!')
self.set_value(selector, new_value, by=by)
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def add_text(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
""" The more-reliable version of driver.send_keys()
Similar to update_text(), but won't clear the text field first. """
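        # Usage sketch (selector and text are illustrative):
        #     self.add_text("input#comments", " ...and one more thing.")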
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element, selector, by)
pre_action_url = self.driver.current_url
try:
if not text.endswith('\n'):
element.send_keys(text)
else:
text = text[:-1]
element.send_keys(text)
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.06)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
if not text.endswith('\n'):
element.send_keys(text)
else:
text = text[:-1]
element.send_keys(text)
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
except Exception:
exc_message = self.__get_improved_exception_message()
raise Exception(exc_message)
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def send_keys(self, selector, text, by=By.CSS_SELECTOR, timeout=None):
""" Same as add_text() """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
self.add_text(selector, text, by=by, timeout=timeout)
def submit(self, selector, by=By.CSS_SELECTOR):
""" Alternative to self.driver.find_element_by_*(SELECTOR).submit() """
if page_utils.is_xpath_selector(selector):
by = By.XPATH
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
element.submit()
self.__demo_mode_pause_if_active()
def refresh_page(self):
self.__last_page_load_url = None
self.driver.refresh()
self.wait_for_ready_state_complete()
def refresh(self):
""" The shorter version of self.refresh_page() """
self.refresh_page()
def get_current_url(self):
current_url = self.driver.current_url
if "%" in current_url and sys.version_info[0] >= 3:
try:
from urllib.parse import unquote
current_url = unquote(current_url, errors='strict')
except Exception:
pass
return current_url
def get_page_source(self):
self.wait_for_ready_state_complete()
return self.driver.page_source
def get_page_title(self):
self.wait_for_ready_state_complete()
self.wait_for_element_present("title", timeout=settings.SMALL_TIMEOUT)
time.sleep(0.03)
return self.driver.title
def get_title(self):
""" The shorter version of self.get_page_title() """
return self.get_page_title()
def go_back(self):
self.__last_page_load_url = None
self.driver.back()
if self.browser == "safari":
self.driver.refresh()
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active()
def go_forward(self):
self.__last_page_load_url = None
self.driver.forward()
if self.browser == "safari":
self.driver.refresh()
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active()
def is_element_present(self, selector, by=By.CSS_SELECTOR):
selector, by = self.__recalculate_selector(selector, by)
return page_actions.is_element_present(self.driver, selector, by)
def is_element_visible(self, selector, by=By.CSS_SELECTOR):
selector, by = self.__recalculate_selector(selector, by)
return page_actions.is_element_visible(self.driver, selector, by)
def is_text_visible(self, text, selector="html", by=By.CSS_SELECTOR):
self.wait_for_ready_state_complete()
time.sleep(0.01)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.is_text_visible(self.driver, text, selector, by)
def is_link_text_visible(self, link_text):
self.wait_for_ready_state_complete()
time.sleep(0.01)
return page_actions.is_element_visible(self.driver, link_text,
by=By.LINK_TEXT)
def is_partial_link_text_visible(self, partial_link_text):
self.wait_for_ready_state_complete()
time.sleep(0.01)
return page_actions.is_element_visible(self.driver, partial_link_text,
by=By.PARTIAL_LINK_TEXT)
def is_link_text_present(self, link_text):
""" Returns True if the link text appears in the HTML of the page.
The element doesn't need to be visible,
such as elements hidden inside a dropdown selection. """
soup = self.get_beautiful_soup()
html_links = soup.find_all('a')
for html_link in html_links:
if html_link.text.strip() == link_text.strip():
return True
return False
def is_partial_link_text_present(self, link_text):
""" Returns True if the partial link appears in the HTML of the page.
The element doesn't need to be visible,
such as elements hidden inside a dropdown selection. """
soup = self.get_beautiful_soup()
html_links = soup.find_all('a')
for html_link in html_links:
if link_text.strip() in html_link.text.strip():
return True
return False
def get_link_attribute(self, link_text, attribute, hard_fail=True):
""" Finds a link by link text and then returns the attribute's value.
If the link text or attribute cannot be found, an exception will
get raised if hard_fail is True (otherwise None is returned). """
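        # Usage sketch (link text and attribute are illustrative):
        #     href = self.get_link_attribute("Sign In", "href")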
soup = self.get_beautiful_soup()
html_links = soup.find_all('a')
for html_link in html_links:
if html_link.text.strip() == link_text.strip():
if html_link.has_attr(attribute):
attribute_value = html_link.get(attribute)
return attribute_value
if hard_fail:
raise Exception(
'Unable to find attribute {%s} from link text {%s}!'
% (attribute, link_text))
else:
return None
if hard_fail:
raise Exception("Link text {%s} was not found!" % link_text)
else:
return None
def get_link_text_attribute(self, link_text, attribute, hard_fail=True):
""" Same as self.get_link_attribute()
Finds a link by link text and then returns the attribute's value.
If the link text or attribute cannot be found, an exception will
get raised if hard_fail is True (otherwise None is returned). """
return self.get_link_attribute(link_text, attribute, hard_fail)
def get_partial_link_text_attribute(self, link_text, attribute,
hard_fail=True):
""" Finds a link by partial link text and then returns the attribute's
value. If the partial link text or attribute cannot be found, an
exception will get raised if hard_fail is True (otherwise None
is returned). """
soup = self.get_beautiful_soup()
html_links = soup.find_all('a')
for html_link in html_links:
if link_text.strip() in html_link.text.strip():
if html_link.has_attr(attribute):
attribute_value = html_link.get(attribute)
return attribute_value
if hard_fail:
raise Exception(
'Unable to find attribute {%s} from '
'partial link text {%s}!'
% (attribute, link_text))
else:
return None
if hard_fail:
raise Exception(
"Partial Link text {%s} was not found!" % link_text)
else:
return None
def click_link_text(self, link_text, timeout=None):
""" This method clicks link text on a page """
# If using phantomjs, might need to extract and open the link directly
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if self.browser == 'phantomjs':
if self.is_link_text_visible(link_text):
element = self.wait_for_link_text_visible(
link_text, timeout=timeout)
element.click()
return
self.open(self.__get_href_from_link_text(link_text))
return
if self.browser == "safari":
self.__jquery_click(link_text, by=By.LINK_TEXT)
return
if not self.is_link_text_present(link_text):
self.wait_for_link_text_present(link_text, timeout=timeout)
pre_action_url = self.get_current_url()
try:
element = self.wait_for_link_text_visible(
link_text, timeout=0.2)
self.__demo_mode_highlight_if_active(link_text, by=By.LINK_TEXT)
try:
element.click()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_link_text_visible(
link_text, timeout=timeout)
element.click()
except Exception:
found_css = False
text_id = self.get_link_attribute(link_text, "id", False)
            if text_id:
                link_css = '[id="%s"]' % text_id
found_css = True
if not found_css:
href = self.__get_href_from_link_text(link_text, False)
if href:
if href.startswith('/') or page_utils.is_valid_url(href):
link_css = '[href="%s"]' % href
found_css = True
if not found_css:
ngclick = self.get_link_attribute(link_text, "ng-click", False)
if ngclick:
link_css = '[ng-click="%s"]' % ngclick
found_css = True
if not found_css:
onclick = self.get_link_attribute(link_text, "onclick", False)
if onclick:
link_css = '[onclick="%s"]' % onclick
found_css = True
success = False
if found_css:
if self.is_element_visible(link_css):
self.click(link_css)
success = True
else:
# The link text might be hidden under a dropdown menu
success = self.__click_dropdown_link_text(
link_text, link_css)
if not success:
element = self.wait_for_link_text_visible(
link_text, timeout=settings.MINI_TIMEOUT)
element.click()
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def click_link(self, link_text, timeout=None):
""" Same as self.click_link_text() """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.click_link_text(link_text, timeout=timeout)
def click_partial_link_text(self, partial_link_text, timeout=None):
""" This method clicks the partial link text on a page. """
# If using phantomjs, might need to extract and open the link directly
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if self.browser == 'phantomjs':
if self.is_partial_link_text_visible(partial_link_text):
element = self.wait_for_partial_link_text(partial_link_text)
element.click()
return
            soup = self.get_beautiful_soup()
            html_links = soup.find_all('a')
            for html_link in html_links:
                if partial_link_text in html_link.text:
                    # In bs4, tag attributes form a dict, so use .get()
                    href = html_link.get('href')
                    if href:
                        if href.startswith('//'):
                            link = "http:" + href
                        elif href.startswith('/'):
                            url = self.driver.current_url
                            domain_url = self.get_domain_url(url)
                            link = domain_url + href
                        else:
                            link = href
                        self.open(link)
                        return
                    raise Exception(
                        'Could not parse link from partial link_text '
                        '{%s}' % partial_link_text)
            raise Exception(
                "Partial link text {%s} was not found!" % partial_link_text)
if not self.is_partial_link_text_present(partial_link_text):
self.wait_for_partial_link_text_present(
partial_link_text, timeout=timeout)
pre_action_url = self.get_current_url()
try:
element = self.wait_for_partial_link_text(
partial_link_text, timeout=0.2)
            self.__demo_mode_highlight_if_active(
                partial_link_text, by=By.PARTIAL_LINK_TEXT)
try:
element.click()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_partial_link_text(
partial_link_text, timeout=timeout)
element.click()
except Exception:
found_css = False
text_id = self.get_partial_link_text_attribute(
partial_link_text, "id", False)
            if text_id:
                link_css = '[id="%s"]' % text_id
found_css = True
if not found_css:
href = self.__get_href_from_partial_link_text(
partial_link_text, False)
if href:
if href.startswith('/') or page_utils.is_valid_url(href):
link_css = '[href="%s"]' % href
found_css = True
if not found_css:
ngclick = self.get_partial_link_text_attribute(
partial_link_text, "ng-click", False)
if ngclick:
link_css = '[ng-click="%s"]' % ngclick
found_css = True
if not found_css:
onclick = self.get_partial_link_text_attribute(
partial_link_text, "onclick", False)
if onclick:
link_css = '[onclick="%s"]' % onclick
found_css = True
success = False
if found_css:
if self.is_element_visible(link_css):
self.click(link_css)
success = True
else:
# The link text might be hidden under a dropdown menu
success = self.__click_dropdown_partial_link_text(
partial_link_text, link_css)
if not success:
                element = self.wait_for_partial_link_text(
                    partial_link_text, timeout=settings.MINI_TIMEOUT)
element.click()
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def get_text(self, selector, by=By.CSS_SELECTOR, timeout=None):
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
self.wait_for_ready_state_complete()
time.sleep(0.01)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout)
try:
element_text = element.text
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.06)
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout)
element_text = element.text
return element_text
def get_attribute(self, selector, attribute, by=By.CSS_SELECTOR,
timeout=None, hard_fail=True):
""" This method uses JavaScript to get the value of an attribute. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.01)
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout)
try:
attribute_value = element.get_attribute(attribute)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.06)
element = page_actions.wait_for_element_present(
self.driver, selector, by, timeout)
attribute_value = element.get_attribute(attribute)
if attribute_value is not None:
return attribute_value
else:
if hard_fail:
raise Exception("Element {%s} has no attribute {%s}!" % (
selector, attribute))
else:
return None
def set_attribute(self, selector, attribute, value, by=By.CSS_SELECTOR,
timeout=None):
""" This method uses JavaScript to set/update an attribute.
Only the first matching selector from querySelector() is used. """
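        # Usage sketch (selector, attribute, and value are illustrative):
        #     self.set_attribute("#banner", "style", "display:none")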
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_visible(selector, by=by):
try:
self.scroll_to(selector, by=by, timeout=timeout)
except Exception:
pass
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
value = re.escape(value)
value = self.__escape_quotes_if_needed(value)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
script = ("""document.querySelector('%s').setAttribute('%s','%s');"""
% (css_selector, attribute, value))
self.execute_script(script)
def set_attributes(self, selector, attribute, value, by=By.CSS_SELECTOR):
""" This method uses JavaScript to set/update a common attribute.
All matching selectors from querySelectorAll() are used.
Example => (Make all links on a website redirect to Google):
self.set_attributes("a", "href", "https://google.com") """
selector, by = self.__recalculate_selector(selector, by)
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
value = re.escape(value)
value = self.__escape_quotes_if_needed(value)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
script = ("""var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
$elements[index].setAttribute('%s','%s');}"""
% (css_selector, attribute, value))
try:
self.execute_script(script)
except Exception:
pass
def set_attribute_all(self, selector, attribute, value,
by=By.CSS_SELECTOR):
""" Same as set_attributes(), but using querySelectorAll naming scheme.
This method uses JavaScript to set/update a common attribute.
All matching selectors from querySelectorAll() are used.
Example => (Make all links on a website redirect to Google):
self.set_attribute_all("a", "href", "https://google.com") """
self.set_attributes(selector, attribute, value, by=by)
def remove_attribute(self, selector, attribute, by=By.CSS_SELECTOR,
timeout=None):
""" This method uses JavaScript to remove an attribute.
Only the first matching selector from querySelector() is used. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_visible(selector, by=by):
try:
self.scroll_to(selector, by=by, timeout=timeout)
except Exception:
pass
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
script = ("""document.querySelector('%s').removeAttribute('%s');"""
% (css_selector, attribute))
self.execute_script(script)
def remove_attributes(self, selector, attribute, by=By.CSS_SELECTOR):
""" This method uses JavaScript to remove a common attribute.
All matching selectors from querySelectorAll() are used. """
selector, by = self.__recalculate_selector(selector, by)
attribute = re.escape(attribute)
attribute = self.__escape_quotes_if_needed(attribute)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
script = ("""var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
$elements[index].removeAttribute('%s');}"""
% (css_selector, attribute))
try:
self.execute_script(script)
except Exception:
pass
def get_property_value(self, selector, property, by=By.CSS_SELECTOR,
timeout=None):
""" Returns the property value of a page element's computed style.
Example:
opacity = self.get_property_value("html body a", "opacity")
self.assertTrue(float(opacity) > 0, "Element not visible!") """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
page_actions.wait_for_element_present(
self.driver, selector, by, timeout)
try:
selector = self.convert_to_css_selector(selector, by=by)
except Exception:
# Don't run action if can't convert to CSS_Selector for JavaScript
raise Exception(
"Exception: Could not convert {%s}(by=%s) to CSS_SELECTOR!" % (
selector, by))
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
script = ("""var $elm = document.querySelector('%s');
$val = window.getComputedStyle($elm).getPropertyValue('%s');
return $val;"""
% (selector, property))
value = self.execute_script(script)
if value is not None:
return value
else:
return "" # Return an empty string if the property doesn't exist
def get_image_url(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Extracts the URL from an image element on the page. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.get_attribute(selector,
attribute='src', by=by, timeout=timeout)
def find_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
""" Returns a list of matching WebElements.
Elements could be either hidden or visible on the page.
If "limit" is set and > 0, will only return that many elements. """
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.05)
elements = self.driver.find_elements(by=by, value=selector)
if limit and limit > 0 and len(elements) > limit:
elements = elements[:limit]
return elements
def find_visible_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
""" Returns a list of matching WebElements that are visible.
If "limit" is set and > 0, will only return that many elements. """
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_ready_state_complete()
time.sleep(0.05)
v_elems = page_actions.find_visible_elements(self.driver, selector, by)
if limit and limit > 0 and len(v_elems) > limit:
v_elems = v_elems[:limit]
return v_elems
def click_visible_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
""" Finds all matching page elements and clicks visible ones in order.
If a click reloads or opens a new page, the clicking will stop.
If no matching elements appear, an Exception will be raised.
If "limit" is set and > 0, will only click that many elements.
Also clicks elements that become visible from previous clicks.
Works best for actions such as clicking all checkboxes on a page.
Example: self.click_visible_elements('input[type="checkbox"]') """
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
elements = self.find_elements(selector, by=by)
if self.browser == "safari":
if not limit:
limit = 0
num_elements = len(elements)
if num_elements == 0:
raise Exception(
"No matching elements found for selector {%s}!" % selector)
elif num_elements < limit or limit == 0:
limit = num_elements
selector, by = self.__recalculate_selector(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
last_css_chunk = css_selector.split(' ')[-1]
if ":" in last_css_chunk:
self.__js_click_all(css_selector)
self.wait_for_ready_state_complete()
return
else:
for i in range(1, limit+1):
new_selector = css_selector + ":nth-of-type(%s)" % str(i)
if self.is_element_visible(new_selector):
self.__js_click(new_selector)
self.wait_for_ready_state_complete()
return
click_count = 0
for element in elements:
if limit and limit > 0 and click_count >= limit:
return
try:
if element.is_displayed():
self.__scroll_to_element(element)
element.click()
click_count += 1
self.wait_for_ready_state_complete()
except ECI_Exception:
continue # ElementClickInterceptedException (Overlay likely)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.03)
try:
if element.is_displayed():
self.__scroll_to_element(element)
element.click()
click_count += 1
self.wait_for_ready_state_complete()
except (StaleElementReferenceException, ENI_Exception):
return # Probably on new page / Elements are all stale
def click_nth_visible_element(self, selector, number, by=By.CSS_SELECTOR):
""" Finds all matching page elements and clicks the nth visible one.
Example: self.click_nth_visible_element('[type="checkbox"]', 5)
(Clicks the 5th visible checkbox on the page.) """
elements = self.find_visible_elements(selector, by=by)
if len(elements) < number:
raise Exception("Not enough matching {%s} elements of type {%s} to"
" click number %s!" % (selector, by, number))
number = number - 1
if number < 0:
number = 0
element = elements[number]
self.wait_for_ready_state_complete()
try:
self.__scroll_to_element(element)
element.click()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
self.__scroll_to_element(element)
element.click()
def click_if_visible(self, selector, by=By.CSS_SELECTOR):
""" If the page selector exists and is visible, clicks on the element.
This method only clicks on the first matching element found.
(Use click_visible_elements() to click all matching elements.) """
self.wait_for_ready_state_complete()
if self.is_element_visible(selector, by=by):
self.click(selector, by=by)
def is_checked(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Determines if a checkbox or a radio button element is checked.
Returns True if the element is checked.
Returns False if the element is not checked.
If the element is not present on the page, raises an exception.
If the element is not a checkbox or radio, raises an exception. """
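        # Usage sketch (selector is illustrative; see also
        # check_if_unchecked(), which wraps this same check):
        #     if not self.is_checked("#terms"):
        #         self.click("#terms")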
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
kind = self.get_attribute(selector, "type", by=by, timeout=timeout)
if kind != "checkbox" and kind != "radio":
raise Exception("Expecting a checkbox or a radio button element!")
is_checked = self.get_attribute(
selector, "checked", by=by, timeout=timeout, hard_fail=False)
if is_checked:
return True
else: # (NoneType)
return False
def is_selected(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Same as is_checked() """
return self.is_checked(selector, by=by, timeout=timeout)
def check_if_unchecked(self, selector, by=By.CSS_SELECTOR):
""" If a checkbox or radio button is not checked, will check it. """
selector, by = self.__recalculate_selector(selector, by)
if not self.is_checked(selector, by=by):
if self.is_element_visible(selector, by=by):
self.click(selector, by=by)
else:
selector = self.convert_to_css_selector(selector, by=by)
self.js_click(selector, by=By.CSS_SELECTOR)
def select_if_unselected(self, selector, by=By.CSS_SELECTOR):
""" Same as check_if_unchecked() """
self.check_if_unchecked(selector, by=by)
def uncheck_if_checked(self, selector, by=By.CSS_SELECTOR):
""" If a checkbox is checked, will uncheck it. """
selector, by = self.__recalculate_selector(selector, by)
if self.is_checked(selector, by=by):
if self.is_element_visible(selector, by=by):
self.click(selector, by=by)
else:
selector = self.convert_to_css_selector(selector, by=by)
self.js_click(selector, by=By.CSS_SELECTOR)
def unselect_if_selected(self, selector, by=By.CSS_SELECTOR):
""" Same as uncheck_if_checked() """
self.uncheck_if_checked(selector, by=by)
def is_element_in_an_iframe(self, selector, by=By.CSS_SELECTOR):
""" Returns True if the selector's element is located in an iframe.
Otherwise returns False. """
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_present(selector, by=by):
return False
soup = self.get_beautiful_soup()
iframe_list = soup.select('iframe')
for iframe in iframe_list:
iframe_identifier = None
if iframe.has_attr('name') and len(iframe['name']) > 0:
iframe_identifier = iframe['name']
elif iframe.has_attr('id') and len(iframe['id']) > 0:
iframe_identifier = iframe['id']
elif iframe.has_attr('class') and len(iframe['class']) > 0:
iframe_class = " ".join(iframe["class"])
iframe_identifier = '[class="%s"]' % iframe_class
else:
continue
self.switch_to_frame(iframe_identifier)
if self.is_element_present(selector, by=by):
self.switch_to_default_content()
return True
self.switch_to_default_content()
return False
def switch_to_frame_of_element(self, selector, by=By.CSS_SELECTOR):
""" Set driver control to the iframe containing element (assuming the
element is in a single-nested iframe) and returns the iframe name.
If element is not in an iframe, returns None, and nothing happens.
May not work if multiple iframes are nested within each other. """
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_present(selector, by=by):
return None
soup = self.get_beautiful_soup()
iframe_list = soup.select('iframe')
for iframe in iframe_list:
iframe_identifier = None
if iframe.has_attr('name') and len(iframe['name']) > 0:
iframe_identifier = iframe['name']
elif iframe.has_attr('id') and len(iframe['id']) > 0:
iframe_identifier = iframe['id']
elif iframe.has_attr('class') and len(iframe['class']) > 0:
iframe_class = " ".join(iframe["class"])
iframe_identifier = '[class="%s"]' % iframe_class
else:
continue
try:
self.switch_to_frame(iframe_identifier, timeout=1)
if self.is_element_present(selector, by=by):
return iframe_identifier
except Exception:
pass
self.switch_to_default_content()
try:
self.switch_to_frame(selector, timeout=1)
return selector
except Exception:
if self.is_element_present(selector, by=by):
return ""
raise Exception("Could not switch to iframe containing "
"element {%s}!" % selector)
def hover_on_element(self, selector, by=By.CSS_SELECTOR):
selector, by = self.__recalculate_selector(selector, by)
if page_utils.is_xpath_selector(selector):
selector = self.convert_to_css_selector(selector, By.XPATH)
by = By.CSS_SELECTOR
self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
self.__demo_mode_highlight_if_active(selector, by)
self.scroll_to(selector, by=by)
time.sleep(0.05) # Settle down from scrolling before hovering
return page_actions.hover_on_element(self.driver, selector)
def hover_and_click(self, hover_selector, click_selector,
hover_by=By.CSS_SELECTOR, click_by=By.CSS_SELECTOR,
timeout=None):
""" When you want to hover over an element or dropdown menu,
and then click an element that appears after that. """
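        # Usage sketch (selectors are illustrative):
        #     self.hover_and_click("#nav-menu", "#nav-menu .dropdown-item")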
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
hover_selector, hover_by = self.__recalculate_selector(
hover_selector, hover_by)
hover_selector = self.convert_to_css_selector(
hover_selector, hover_by)
hover_by = By.CSS_SELECTOR
click_selector, click_by = self.__recalculate_selector(
click_selector, click_by)
dropdown_element = self.wait_for_element_visible(
hover_selector, by=hover_by, timeout=timeout)
self.__demo_mode_highlight_if_active(hover_selector, hover_by)
self.scroll_to(hover_selector, by=hover_by)
pre_action_url = self.driver.current_url
outdated_driver = False
element = None
try:
if self.browser == "safari":
# Use the workaround for hover-clicking on Safari
raise Exception("This Exception will be caught.")
page_actions.hover_element(self.driver, dropdown_element)
except Exception:
outdated_driver = True
element = self.wait_for_element_present(
click_selector, click_by, timeout)
if click_by == By.LINK_TEXT:
self.open(self.__get_href_from_link_text(click_selector))
elif click_by == By.PARTIAL_LINK_TEXT:
self.open(self.__get_href_from_partial_link_text(
click_selector))
else:
self.js_click(click_selector, click_by)
if not outdated_driver:
element = page_actions.hover_and_click(
self.driver, hover_selector, click_selector,
hover_by, click_by, timeout)
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
return element
def hover_and_double_click(self, hover_selector, click_selector,
hover_by=By.CSS_SELECTOR,
click_by=By.CSS_SELECTOR,
timeout=None):
""" When you want to hover over an element or dropdown menu,
and then double-click an element that appears after that. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
hover_selector, hover_by = self.__recalculate_selector(
hover_selector, hover_by)
hover_selector = self.convert_to_css_selector(
hover_selector, hover_by)
click_selector, click_by = self.__recalculate_selector(
click_selector, click_by)
dropdown_element = self.wait_for_element_visible(
hover_selector, by=hover_by, timeout=timeout)
self.__demo_mode_highlight_if_active(hover_selector, hover_by)
self.scroll_to(hover_selector, by=hover_by)
pre_action_url = self.driver.current_url
outdated_driver = False
element = None
try:
page_actions.hover_element(self.driver, dropdown_element)
except Exception:
outdated_driver = True
element = self.wait_for_element_present(
click_selector, click_by, timeout)
if click_by == By.LINK_TEXT:
self.open(self.__get_href_from_link_text(click_selector))
elif click_by == By.PARTIAL_LINK_TEXT:
self.open(self.__get_href_from_partial_link_text(
click_selector))
else:
self.js_click(click_selector, click_by)
if not outdated_driver:
element = page_actions.hover_element_and_double_click(
self.driver, dropdown_element, click_selector,
click_by=By.CSS_SELECTOR, timeout=timeout)
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
return element
def __select_option(self, dropdown_selector, option,
dropdown_by=By.CSS_SELECTOR, option_by="text",
timeout=None):
""" Selects an HTML <select> option by specification.
Option specifications are by "text", "index", or "value".
Defaults to "text" if option_by is unspecified or unknown. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(dropdown_selector):
dropdown_by = By.XPATH
self.wait_for_ready_state_complete()
element = self.wait_for_element_present(
dropdown_selector, by=dropdown_by, timeout=timeout)
if self.is_element_visible(dropdown_selector, by=dropdown_by):
self.__demo_mode_highlight_if_active(
dropdown_selector, dropdown_by)
pre_action_url = self.driver.current_url
try:
if option_by == "index":
Select(element).select_by_index(option)
elif option_by == "value":
Select(element).select_by_value(option)
else:
Select(element).select_by_visible_text(option)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_present(
dropdown_selector, by=dropdown_by, timeout=timeout)
if option_by == "index":
Select(element).select_by_index(option)
elif option_by == "value":
Select(element).select_by_value(option)
else:
Select(element).select_by_visible_text(option)
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def select_option_by_text(self, dropdown_selector, option,
dropdown_by=By.CSS_SELECTOR,
timeout=None):
""" Selects an HTML <select> option by option text.
@Params
dropdown_selector - the <select> selector
option - the text of the option """
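        # Usage sketch (selector and option text are illustrative):
        #     self.select_option_by_text("select#country", "Canada")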
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__select_option(dropdown_selector, option,
dropdown_by=dropdown_by, option_by="text",
timeout=timeout)
def select_option_by_index(self, dropdown_selector, option,
dropdown_by=By.CSS_SELECTOR,
timeout=None):
""" Selects an HTML <select> option by option index.
@Params
dropdown_selector - the <select> selector
option - the index number of the option """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__select_option(dropdown_selector, option,
dropdown_by=dropdown_by, option_by="index",
timeout=timeout)
def select_option_by_value(self, dropdown_selector, option,
dropdown_by=By.CSS_SELECTOR,
timeout=None):
""" Selects an HTML <select> option by option value.
@Params
dropdown_selector - the <select> selector
option - the value property of the option """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__select_option(dropdown_selector, option,
dropdown_by=dropdown_by, option_by="value",
timeout=timeout)
def load_html_string(self, html_string, new_page=True):
""" Loads an HTML string into the web browser.
If new_page==True, the page will switch to: "data:text/html,"
If new_page==False, will load HTML into the current page. """
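        # Usage sketch (the HTML string is illustrative):
        #     self.load_html_string("<h1>Hello</h1><p>World</p>")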
soup = self.get_beautiful_soup(html_string)
        scripts = soup.find_all("script")
for script in scripts:
html_string = html_string.replace(str(script), "")
soup = self.get_beautiful_soup(html_string)
found_head = False
found_body = False
html_head = None
html_body = None
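        # Heuristic: "<head></head>" alone is 13 characters, so the
        # len(...) > 12 checks below require at least a complete tag.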
if soup.head and len(str(soup.head)) > 12:
found_head = True
html_head = str(soup.head)
html_head = re.escape(html_head)
html_head = self.__escape_quotes_if_needed(html_head)
html_head = html_head.replace('\\ ', ' ')
if soup.body and len(str(soup.body)) > 12:
found_body = True
html_body = str(soup.body)
html_body = re.escape(html_body)
html_body = self.__escape_quotes_if_needed(html_body)
html_body = html_body.replace('\\ ', ' ')
html_string = re.escape(html_string)
html_string = self.__escape_quotes_if_needed(html_string)
html_string = html_string.replace('\\ ', ' ')
if new_page:
self.open("data:text/html,")
inner_head = '''document.getElementsByTagName("head")[0].innerHTML'''
inner_body = '''document.getElementsByTagName("body")[0].innerHTML'''
if not found_body:
self.execute_script(
'''%s = \"%s\"''' % (inner_body, html_string))
elif found_body and not found_head:
self.execute_script(
'''%s = \"%s\"''' % (inner_body, html_body))
elif found_body and found_head:
self.execute_script(
'''%s = \"%s\"''' % (inner_head, html_head))
self.execute_script(
'''%s = \"%s\"''' % (inner_body, html_body))
else:
raise Exception("Logic Error!")
        for script in scripts:
            js_code = script.string
            if not js_code:
                continue  # Skip scripts without inline code (e.g. src-only)
            js_code_lines = js_code.split('\n')
            new_lines = []
            for line in js_code_lines:
                line = line.strip()
                new_lines.append(line)
            js_code = '\n'.join(new_lines)
            js_utils.add_js_code(self.driver, js_code)
def load_html_file(self, html_file, new_page=True):
""" Loads a local html file into the browser from a relative file path.
If new_page==True, the page will switch to: "data:text/html,"
If new_page==False, will load HTML into the current page.
Local images and other local src content WILL BE IGNORED. """
if len(html_file) < 6 or not html_file.endswith(".html"):
raise Exception('Expecting a ".html" file!')
abs_path = os.path.abspath('.')
file_path = abs_path + "/%s" % html_file
        with open(file_path, 'r') as f:
            html_string = f.read().strip()
        self.load_html_string(html_string, new_page)
def open_html_file(self, html_file):
""" Opens a local html file into the browser from a relative file path.
The URL displayed in the web browser will start with "file://". """
if len(html_file) < 6 or not html_file.endswith(".html"):
raise Exception('Expecting a ".html" file!')
abs_path = os.path.abspath('.')
file_path = abs_path + "/%s" % html_file
self.open("file://" + file_path)
def execute_script(self, script):
return self.driver.execute_script(script)
def execute_async_script(self, script, timeout=None):
if not timeout:
timeout = settings.EXTREME_TIMEOUT
return js_utils.execute_async_script(self.driver, script, timeout)
def safe_execute_script(self, script):
""" When executing a script that contains a jQuery command,
it's important that the jQuery library has been loaded first.
This method will load jQuery if it wasn't already loaded. """
try:
self.execute_script(script)
except Exception:
# The likely reason this fails is because: "jQuery is not defined"
self.activate_jquery() # It's a good thing we can define it here
self.execute_script(script)
def set_window_rect(self, x, y, width, height):
self.driver.set_window_rect(x, y, width, height)
self.__demo_mode_pause_if_active()
def set_window_size(self, width, height):
self.driver.set_window_size(width, height)
self.__demo_mode_pause_if_active()
def maximize_window(self):
self.driver.maximize_window()
self.__demo_mode_pause_if_active()
def switch_to_frame(self, frame, timeout=None):
"""
Wait for an iframe to appear, and switch to it. This should be
usable as a drop-in replacement for driver.switch_to.frame().
@Params
frame - the frame element, name, id, index, or selector
        timeout - the time to wait for the frame in seconds
"""
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
page_actions.switch_to_frame(self.driver, frame, timeout)
def switch_to_default_content(self):
""" Brings driver control outside the current iframe.
(If driver control is inside an iframe, the driver control
will be set to one level above the current frame. If the driver
control is not currenly in an iframe, nothing will happen.) """
self.driver.switch_to.default_content()
def open_new_window(self, switch_to=True):
""" Opens a new browser tab/window and switches to it by default. """
self.driver.execute_script("window.open('');")
time.sleep(0.01)
if switch_to:
self.switch_to_window(len(self.driver.window_handles) - 1)
def switch_to_window(self, window, timeout=None):
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
page_actions.switch_to_window(self.driver, window, timeout)
def switch_to_default_window(self):
self.switch_to_window(0)
def get_new_driver(self, browser=None, headless=None,
servername=None, port=None, proxy=None, agent=None,
switch_to=True, cap_file=None, cap_string=None,
disable_csp=None, enable_sync=None, use_auto_ext=None,
no_sandbox=None, disable_gpu=None,
incognito=None, guest_mode=None, devtools=None,
user_data_dir=None, extension_zip=None,
extension_dir=None, is_mobile=False,
d_width=None, d_height=None, d_p_r=None):
""" This method spins up an extra browser for tests that require
more than one. The first browser is already provided by tests
that import base_case.BaseCase from seleniumbase. If parameters
aren't specified, the method uses the same as the default driver.
@Params
browser - the browser to use. (Ex: "chrome", "firefox")
headless - the option to run webdriver in headless mode
servername - if using a Selenium Grid, set the host address here
port - if using a Selenium Grid, set the host port here
proxy - if using a proxy server, specify the "host:port" combo here
switch_to - the option to switch to the new driver (default = True)
cap_file - the file containing desired capabilities for the browser
cap_string - the string with desired capabilities for the browser
disable_csp - an option to disable Chrome's Content Security Policy
enable_sync - the option to enable the Chrome Sync feature (Chrome)
use_auto_ext - the option to enable Chrome's Automation Extension
no_sandbox - the option to enable the "No-Sandbox" feature (Chrome)
disable_gpu - the option to enable Chrome's "Disable GPU" feature
incognito - the option to enable Chrome's Incognito mode (Chrome)
        guest_mode - the option to enable Chrome's Guest mode (Chrome)
devtools - the option to open Chrome's DevTools on start (Chrome)
user_data_dir - Chrome's User Data Directory to use (Chrome-only)
extension_zip - A Chrome Extension ZIP file to use (Chrome-only)
extension_dir - A Chrome Extension folder to use (Chrome-only)
is_mobile - the option to use the mobile emulator (Chrome-only)
d_width - the device width of the mobile emulator (Chrome-only)
d_height - the device height of the mobile emulator (Chrome-only)
d_p_r - the device pixel ratio of the mobile emulator (Chrome-only)
"""
if self.browser == "remote" and self.servername == "localhost":
raise Exception('Cannot use "remote" browser driver on localhost!'
' Did you mean to connect to a remote Grid server'
' such as BrowserStack or Sauce Labs? In that'
' case, you must specify the "server" and "port"'
' parameters on the command line! '
'Example: '
'--server=user:key@hub.browserstack.com --port=80')
browserstack_ref = (
'https://browserstack.com/automate/capabilities')
sauce_labs_ref = (
'https://wiki.saucelabs.com/display/DOCS/Platform+Configurator#/')
if self.browser == "remote" and not (self.cap_file or self.cap_string):
raise Exception('Need to specify a desired capabilities file when '
'using "--browser=remote". Add "--cap_file=FILE". '
'File should be in the Python format used by: '
'%s OR '
'%s '
'See SeleniumBase/examples/sample_cap_file_BS.py '
'and SeleniumBase/examples/sample_cap_file_SL.py'
% (browserstack_ref, sauce_labs_ref))
if browser is None:
browser = self.browser
browser_name = browser
if headless is None:
headless = self.headless
if servername is None:
servername = self.servername
if port is None:
port = self.port
use_grid = False
if servername != "localhost":
# Use Selenium Grid (Use "127.0.0.1" for localhost Grid)
use_grid = True
proxy_string = proxy
if proxy_string is None:
proxy_string = self.proxy_string
user_agent = agent
if user_agent is None:
user_agent = self.user_agent
if disable_csp is None:
disable_csp = self.disable_csp
if enable_sync is None:
enable_sync = self.enable_sync
if use_auto_ext is None:
use_auto_ext = self.use_auto_ext
if no_sandbox is None:
no_sandbox = self.no_sandbox
if disable_gpu is None:
disable_gpu = self.disable_gpu
if incognito is None:
incognito = self.incognito
if guest_mode is None:
guest_mode = self.guest_mode
if devtools is None:
devtools = self.devtools
if user_data_dir is None:
user_data_dir = self.user_data_dir
if extension_zip is None:
extension_zip = self.extension_zip
if extension_dir is None:
extension_dir = self.extension_dir
# Due to https://stackoverflow.com/questions/23055651/ , skip extension
# if self.demo_mode or self.masterqa_mode:
# disable_csp = True
test_id = self.__get_test_id()
if cap_file is None:
cap_file = self.cap_file
if cap_string is None:
cap_string = self.cap_string
if is_mobile is None:
is_mobile = False
if d_width is None:
d_width = self.__device_width
if d_height is None:
d_height = self.__device_height
if d_p_r is None:
d_p_r = self.__device_pixel_ratio
valid_browsers = constants.ValidBrowsers.valid_browsers
if browser_name not in valid_browsers:
raise Exception("Browser: {%s} is not a valid browser option. "
"Valid options = {%s}" % (browser, valid_browsers))
# Launch a web browser
from seleniumbase.core import browser_launcher
new_driver = browser_launcher.get_driver(browser_name=browser_name,
headless=headless,
use_grid=use_grid,
servername=servername,
port=port,
proxy_string=proxy_string,
user_agent=user_agent,
cap_file=cap_file,
cap_string=cap_string,
disable_csp=disable_csp,
enable_sync=enable_sync,
use_auto_ext=use_auto_ext,
no_sandbox=no_sandbox,
disable_gpu=disable_gpu,
incognito=incognito,
guest_mode=guest_mode,
devtools=devtools,
user_data_dir=user_data_dir,
extension_zip=extension_zip,
extension_dir=extension_dir,
test_id=test_id,
mobile_emulator=is_mobile,
device_width=d_width,
device_height=d_height,
device_pixel_ratio=d_p_r)
self._drivers_list.append(new_driver)
if switch_to:
self.driver = new_driver
if self.headless:
# Make sure the invisible browser window is big enough
width = settings.HEADLESS_START_WIDTH
height = settings.HEADLESS_START_HEIGHT
try:
self.driver.set_window_size(width, height)
self.wait_for_ready_state_complete()
except Exception:
# This shouldn't fail, but in case it does,
# get safely through setUp() so that
# WebDrivers can get closed during tearDown().
pass
else:
if self.browser == 'chrome' or self.browser == 'edge':
width = settings.CHROME_START_WIDTH
height = settings.CHROME_START_HEIGHT
try:
if self.maximize_option:
self.driver.maximize_window()
else:
self.driver.set_window_size(width, height)
self.wait_for_ready_state_complete()
except Exception:
pass # Keep existing browser resolution
elif self.browser == 'firefox':
pass # No changes
elif self.browser == 'safari':
if self.maximize_option:
try:
self.driver.maximize_window()
self.wait_for_ready_state_complete()
except Exception:
pass # Keep existing browser resolution
else:
try:
self.driver.set_window_rect(10, 30, 945, 630)
except Exception:
pass
if self.start_page and len(self.start_page) >= 4:
if page_utils.is_valid_url(self.start_page):
self.open(self.start_page)
else:
new_start_page = "http://" + self.start_page
if page_utils.is_valid_url(new_start_page):
self.open(new_start_page)
return new_driver
def switch_to_driver(self, driver):
""" Sets self.driver to the specified driver.
You may need this if using self.get_new_driver() in your code. """
self.driver = driver
def switch_to_default_driver(self):
""" Sets self.driver to the default/original driver. """
self.driver = self._default_driver
def save_screenshot(self, name, folder=None):
""" The screenshot will be in PNG format. """
return page_actions.save_screenshot(self.driver, name, folder)
def save_page_source(self, name, folder=None):
""" Saves the page HTML to the current directory (or given subfolder).
If the folder specified doesn't exist, it will get created.
@Params
name - The file name to save the current page's HTML to.
folder - The folder to save the file to. (Default = current folder)
"""
return page_actions.save_page_source(self.driver, name, folder)
def save_cookies(self, name="cookies.txt"):
""" Saves the page cookies to the "saved_cookies" folder. """
cookies = self.driver.get_cookies()
json_cookies = json.dumps(cookies)
if name.endswith('/'):
raise Exception("Invalid filename for Cookies!")
if '/' in name:
name = name.split('/')[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath('.')
file_path = abs_path + "/%s" % folder
if not os.path.exists(file_path):
os.makedirs(file_path)
cookies_file_path = "%s/%s" % (file_path, name)
cookies_file = codecs.open(cookies_file_path, "w+")
cookies_file.writelines(json_cookies)
cookies_file.close()
def load_cookies(self, name="cookies.txt"):
""" Loads the page cookies from the "saved_cookies" folder. """
if name.endswith('/'):
raise Exception("Invalid filename for Cookies!")
if '/' in name:
name = name.split('/')[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath('.')
file_path = abs_path + "/%s" % folder
cookies_file_path = "%s/%s" % (file_path, name)
f = open(cookies_file_path, 'r')
json_cookies = f.read().strip()
f.close()
cookies = json.loads(json_cookies)
for cookie in cookies:
if 'expiry' in cookie:
del cookie['expiry']
self.driver.add_cookie(cookie)
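# Usage sketch (illustrative only): persisting a login session across
# page loads with the two methods above. The URL is hypothetical, and
# the browser must already be on the target domain before cookies are
# loaded back in:
#
#     self.open("https://example.com/login")  # hypothetical URL
#     # ... perform the login steps ...
#     self.save_cookies("session.txt")
#     self.open("https://example.com")
#     self.load_cookies("session.txt")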
def delete_all_cookies(self):
""" Deletes all cookies in the web browser.
Does NOT delete the saved cookies file. """
self.driver.delete_all_cookies()
def delete_saved_cookies(self, name="cookies.txt"):
""" Deletes the cookies file from the "saved_cookies" folder.
Does NOT delete the cookies from the web browser. """
if name.endswith('/'):
raise Exception("Invalid filename for Cookies!")
if '/' in name:
name = name.split('/')[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath('.')
file_path = abs_path + "/%s" % folder
cookies_file_path = "%s/%s" % (file_path, name)
if os.path.exists(cookies_file_path):
if cookies_file_path.endswith('.txt'):
os.remove(cookies_file_path)
def wait_for_ready_state_complete(self, timeout=None):
try:
# If there's an alert, skip
self.driver.switch_to.alert
return
except Exception:
# If there's no alert, continue
pass
if not timeout:
timeout = settings.EXTREME_TIMEOUT
if self.timeout_multiplier and timeout == settings.EXTREME_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
is_ready = js_utils.wait_for_ready_state_complete(self.driver, timeout)
self.wait_for_angularjs(timeout=settings.MINI_TIMEOUT)
if self.js_checking_on:
self.assert_no_js_errors()
if self.ad_block_on:
# If the ad_block feature is enabled, then block ads for new URLs
current_url = self.get_current_url()
if not current_url == self.__last_page_load_url:
time.sleep(0.02)
self.ad_block()
time.sleep(0.01)
if self.is_element_present("iframe"):
time.sleep(0.07) # iframe ads take slightly longer to load
self.ad_block() # Do ad_block on slower-loading iframes
self.__last_page_load_url = current_url
return is_ready
def wait_for_angularjs(self, timeout=None, **kwargs):
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
js_utils.wait_for_angularjs(self.driver, timeout, **kwargs)
def sleep(self, seconds):
if not sb_config.time_limit:
time.sleep(seconds)
else:
start_ms = time.time() * 1000.0
stop_ms = start_ms + (seconds * 1000.0)
for x in range(int(seconds * 5)):
shared_utils.check_if_time_limit_exceeded()
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.2)
def activate_jquery(self):
""" If "jQuery is not defined", use this method to activate it for use.
This happens because jQuery is not always defined on web sites. """
js_utils.activate_jquery(self.driver)
self.wait_for_ready_state_complete()
def __are_quotes_escaped(self, string):
return js_utils.are_quotes_escaped(string)
def __escape_quotes_if_needed(self, string):
return js_utils.escape_quotes_if_needed(string)
def bring_to_front(self, selector, by=By.CSS_SELECTOR):
""" Updates the Z-index of a page element to bring it into view.
Useful when getting a WebDriverException, such as the one below:
{ Element is not clickable at point (#, #).
Other element would receive the click: ... } """
if page_utils.is_xpath_selector(selector):
by = By.XPATH
self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
try:
selector = self.convert_to_css_selector(selector, by=by)
except Exception:
# Don't run action if can't convert to CSS_Selector for JavaScript
return
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
script = ("""document.querySelector('%s').style.zIndex = '999999';"""
% selector)
self.execute_script(script)
def highlight_click(self, selector, by=By.CSS_SELECTOR,
loops=3, scroll=True):
if not self.demo_mode:
self.highlight(selector, by=by, loops=loops, scroll=scroll)
self.click(selector, by=by)
def highlight_update_text(self, selector, new_value, by=By.CSS_SELECTOR,
loops=3, scroll=True):
if not self.demo_mode:
self.highlight(selector, by=by, loops=loops, scroll=scroll)
self.update_text(selector, new_value, by=by)
def highlight(self, selector, by=By.CSS_SELECTOR,
loops=None, scroll=True):
""" This method uses fancy JavaScript to highlight an element.
Used during demo_mode.
@Params
selector - the selector of the element to find
by - the type of selector to search by (Default: CSS)
loops - # of times to repeat the highlight animation
(Default: 4. Each loop lasts for about 0.18s)
scroll - the option to scroll to the element first (Default: True)
"""
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
if not loops:
loops = settings.HIGHLIGHTS
if scroll:
try:
self.__slow_scroll_to_element(element)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
self.__slow_scroll_to_element(element)
try:
selector = self.convert_to_css_selector(selector, by=by)
except Exception:
# Don't highlight if can't convert to CSS_SELECTOR
return
if self.highlights:
loops = self.highlights
if self.browser == 'ie':
loops = 1 # Override previous setting because IE is slow
loops = int(loops)
o_bs = '' # original_box_shadow
try:
style = element.get_attribute('style')
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_visible(
selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT)
style = element.get_attribute('style')
if style:
if 'box-shadow: ' in style:
box_start = style.find('box-shadow: ')
box_end = style.find(';', box_start) + 1
original_box_shadow = style[box_start:box_end]
o_bs = original_box_shadow
if ":contains" not in selector and ":first" not in selector:
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
self.__highlight_with_js(selector, loops, o_bs)
else:
selector = self.__make_css_match_first_element_only(selector)
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
try:
self.__highlight_with_jquery(selector, loops, o_bs)
except Exception:
pass # jQuery probably couldn't load. Skip highlighting.
time.sleep(0.065)
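# Usage sketch (illustrative only): highlight() pairs naturally with the
# highlight_click() / highlight_update_text() wrappers above. The selector
# is hypothetical:
#
#     self.highlight("#submit-button", loops=6)  # draw attention to it
#     self.highlight_click("#submit-button")     # highlight, then click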
def __highlight_with_js(self, selector, loops, o_bs):
js_utils.highlight_with_js(self.driver, selector, loops, o_bs)
def __highlight_with_jquery(self, selector, loops, o_bs):
js_utils.highlight_with_jquery(self.driver, selector, loops, o_bs)
def press_up_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
""" Simulates pressing the UP Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens. """
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_UP)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_UP)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def press_down_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
""" Simulates pressing the DOWN Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens. """
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_DOWN)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_DOWN)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def press_left_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
""" Simulates pressing the LEFT Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens. """
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_LEFT)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_LEFT)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
def press_right_arrow(self, selector="html", times=1, by=By.CSS_SELECTOR):
""" Simulates pressing the RIGHT Arrow on the keyboard.
By default, "html" will be used as the CSS Selector target.
You can specify how many times in-a-row the action happens. """
if times < 1:
return
element = self.wait_for_element_present(selector)
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element, selector, by)
for i in range(int(times)):
try:
element.send_keys(Keys.ARROW_RIGHT)
except Exception:
self.wait_for_ready_state_complete()
element = self.wait_for_element_visible(selector)
element.send_keys(Keys.ARROW_RIGHT)
time.sleep(0.01)
if self.slow_mode:
time.sleep(0.1)
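# Usage sketch (illustrative only): stepping through a hypothetical
# dropdown by sending repeated arrow-key presses to it:
#
#     self.press_down_arrow("select#country", times=3)  # hypothetical selector
#     self.press_up_arrow()                             # defaults to "html"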
def scroll_to(self, selector, by=By.CSS_SELECTOR, timeout=None):
''' Fast scroll to destination '''
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if self.demo_mode or self.slow_mode:
self.slow_scroll_to(selector, by=by, timeout=timeout)
return
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
try:
self.__scroll_to_element(element, selector, by)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
self.__scroll_to_element(element, selector, by)
def slow_scroll_to(self, selector, by=By.CSS_SELECTOR, timeout=None):
''' Slow motion scroll to destination '''
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
try:
self.__slow_scroll_to_element(element)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
self.__slow_scroll_to_element(element)
def scroll_to_top(self):
""" Scroll to the top of the page. """
scroll_script = "window.scrollTo(0, 0);"
try:
self.execute_script(scroll_script)
time.sleep(0.012)
return True
except Exception:
return False
def scroll_to_bottom(self):
""" Scroll to the bottom of the page. """
scroll_script = "window.scrollTo(0, 10000);"
try:
self.execute_script(scroll_script)
time.sleep(0.012)
return True
except Exception:
return False
def click_xpath(self, xpath):
# Technically self.click() will automatically detect an xpath selector,
# so self.click_xpath() is just a longer name for the same action.
self.click(xpath, by=By.XPATH)
def js_click(self, selector, by=By.CSS_SELECTOR, all_matches=False):
""" Clicks an element using pure JS. Does not use jQuery.
If "all_matches" is False, only the first match is clicked. """
selector, by = self.__recalculate_selector(selector, by)
if by == By.LINK_TEXT:
message = (
"Pure JavaScript doesn't support clicking by Link Text. "
"You may want to use self.jquery_click() instead, which "
"allows this with :contains(), assuming jQuery isn't blocked. "
"For now, self.js_click() will use a regular WebDriver click.")
logging.debug(message)
self.click(selector, by=by)
return
element = self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
if self.is_element_visible(selector, by=by):
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element, selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
if not all_matches:
self.__js_click(selector, by=by) # The real "magic" happens
else:
self.__js_click_all(selector, by=by) # The real "magic" happens
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active()
def js_click_all(self, selector, by=By.CSS_SELECTOR):
""" Clicks all matching elements using pure JS. (No jQuery) """
self.js_click(selector, by=By.CSS_SELECTOR, all_matches=True)
def jquery_click(self, selector, by=By.CSS_SELECTOR):
""" Clicks an element using jQuery. Different from using pure JS. """
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
if self.is_element_visible(selector, by=by):
self.__demo_mode_highlight_if_active(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
click_script = """jQuery('%s')[0].click()""" % selector
self.safe_execute_script(click_script)
self.__demo_mode_pause_if_active()
def jquery_click_all(self, selector, by=By.CSS_SELECTOR):
""" Clicks all matching elements using jQuery. """
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
if self.is_element_visible(selector, by=by):
self.__demo_mode_highlight_if_active(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
click_script = """jQuery('%s').click()""" % selector
self.safe_execute_script(click_script)
self.__demo_mode_pause_if_active()
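# Usage sketch (illustrative only): the three click flavors side by side.
# Selectors are hypothetical; js_click() works without jQuery, while
# jquery_click() requires jQuery to be loadable on the page (see
# activate_jquery() above):
#
#     self.click("#ok-button")          # normal WebDriver click
#     self.js_click("#ok-button")       # pure-JS click (can bypass overlays)
#     self.jquery_click("#ok-button")   # jQuery-based click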
def hide_element(self, selector, by=By.CSS_SELECTOR):
""" Hide the first element on the page that matches the selector. """
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
hide_script = """jQuery('%s').hide()""" % selector
self.safe_execute_script(hide_script)
def hide_elements(self, selector, by=By.CSS_SELECTOR):
""" Hide all elements on the page that match the selector. """
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
hide_script = """jQuery('%s').hide()""" % selector
self.safe_execute_script(hide_script)
def show_element(self, selector, by=By.CSS_SELECTOR):
""" Show the first element on the page that matches the selector. """
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
show_script = """jQuery('%s').show(0)""" % selector
self.safe_execute_script(show_script)
def show_elements(self, selector, by=By.CSS_SELECTOR):
""" Show all elements on the page that match the selector. """
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
show_script = """jQuery('%s').show(0)""" % selector
self.safe_execute_script(show_script)
def remove_element(self, selector, by=By.CSS_SELECTOR):
""" Remove the first element on the page that matches the selector. """
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
remove_script = """jQuery('%s').remove()""" % selector
self.safe_execute_script(remove_script)
def remove_elements(self, selector, by=By.CSS_SELECTOR):
""" Remove all elements on the page that match the selector. """
selector, by = self.__recalculate_selector(selector, by)
selector = self.convert_to_css_selector(selector, by=by)
remove_script = """jQuery('%s').remove()""" % selector
self.safe_execute_script(remove_script)
def ad_block(self):
""" Block ads that appear on the current web page. """
self.wait_for_ready_state_complete()
from seleniumbase.config import ad_block_list
for css_selector in ad_block_list.AD_BLOCK_LIST:
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
script = ("""var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
$elements[index].remove();}"""
% css_selector)
try:
self.execute_script(script)
except Exception:
pass # Don't fail test if ad_blocking fails
def block_ads(self):
""" Same as ad_block() """
self.ad_block()
def get_domain_url(self, url):
return page_utils.get_domain_url(url)
def get_beautiful_soup(self, source=None):
""" BeautifulSoup is a toolkit for dissecting an HTML document
and extracting what you need. It's great for screen-scraping! """
from bs4 import BeautifulSoup
if not source:
self.wait_for_ready_state_complete()
source = self.get_page_source()
soup = BeautifulSoup(source, "html.parser")
return soup
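# Usage sketch (illustrative only): extracting data from the returned soup
# with the standard BeautifulSoup API (the tag/attribute choice here is
# just an example):
#
#     soup = self.get_beautiful_soup()
#     for anchor in soup.find_all("a"):
#         print(anchor.get("href"))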
def get_unique_links(self):
""" Get all unique links in the html of the page source.
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src". """
page_url = self.get_current_url()
soup = self.get_beautiful_soup(self.get_page_source())
links = page_utils._get_unique_links(page_url, soup)
return links
def get_link_status_code(self, link, allow_redirects=False, timeout=5):
""" Get the status code of a link.
If the timeout is exceeded, will return a 404.
For a list of available status codes, see:
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes """
status_code = page_utils._get_link_status_code(
link, allow_redirects=allow_redirects, timeout=timeout)
return status_code
def assert_link_status_code_is_not_404(self, link):
status_code = str(self.get_link_status_code(link))
bad_link_str = 'Error: "%s" returned a 404!' % link
self.assertNotEqual(status_code, "404", bad_link_str)
def assert_no_404_errors(self, multithreaded=True):
""" Assert no 404 errors from page links obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src". """
all_links = self.get_unique_links()
links = []
for link in all_links:
if "javascript:" not in link and "mailto:" not in link:
links.append(link)
if multithreaded:
from multiprocessing.dummy import Pool as ThreadPool
pool = ThreadPool(10)
pool.map(self.assert_link_status_code_is_not_404, links)
pool.close()
pool.join()
else:
for link in links:
self.assert_link_status_code_is_not_404(link)
if self.demo_mode:
messenger_post = ("ASSERT NO 404 ERRORS")
self.__highlight_with_assert_success(messenger_post, "html")
def print_unique_links_with_status_codes(self):
""" Finds all unique links in the html of the page source
and then prints out those links with their status codes.
Format: ["link" -> "status_code"] (per line)
Page links include those obtained from:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src". """
page_url = self.get_current_url()
soup = self.get_beautiful_soup(self.get_page_source())
page_utils._print_unique_links_with_status_codes(page_url, soup)
def __fix_unicode_conversion(self, text):
""" Fixing Chinese characters when converting from PDF to HTML. """
if sys.version_info[0] < 3:
# Update encoding for Python 2 users
reload(sys) # noqa
sys.setdefaultencoding('utf8')
text = text.replace(u'\u2f8f', u'\u884c')
text = text.replace(u'\u2f45', u'\u65b9')
text = text.replace(u'\u2f08', u'\u4eba')
text = text.replace(u'\u2f70', u'\u793a')
return text
def get_pdf_text(self, pdf, page=None, maxpages=None,
password=None, codec='utf-8', wrap=False, nav=False,
override=False):
""" Gets text from a PDF file.
PDF can be either a URL or a file path on the local file system.
@Params
pdf - The URL or file path of the PDF file.
page - The page number (or a list of page numbers) of the PDF.
If a page number is provided, looks only at that page.
(1 is the first page, 2 is the second page, etc.)
If no page number is provided, returns all PDF text.
maxpages - Instead of providing a page number, you can provide
the number of pages to use from the beginning.
password - If the PDF is password-protected, enter it here.
codec - The compression format for character encoding.
(The default codec used by this method is 'utf-8'.)
wrap - Replaces ' \n' with ' ' so that individual sentences
from a PDF don't get broken up into separate lines when
getting converted into text format.
nav - If PDF is a URL, navigates to the URL in the browser first.
(Not needed because the PDF will be downloaded anyway.)
override - If the PDF file to be downloaded already exists in the
downloaded_files/ folder, that PDF will be used
instead of downloading it again. """
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
from pdfminer.high_level import extract_text
if not password:
password = ''
if not maxpages:
maxpages = 0
if not pdf.lower().endswith('.pdf'):
raise Exception("%s is not a PDF file! (Expecting a .pdf)" % pdf)
file_path = None
if page_utils.is_valid_url(pdf):
if nav:
if self.get_current_url() != pdf:
self.open(pdf)
file_name = pdf.split('/')[-1]
file_path = self.get_downloads_folder() + '/' + file_name
if not os.path.exists(file_path):
self.download_file(pdf)
elif override:
self.download_file(pdf)
else:
if not os.path.exists(pdf):
raise Exception("%s is not a valid URL or file path!" % pdf)
file_path = os.path.abspath(pdf)
page_search = None # (Pages are delimited by '\x0c')
if type(page) is list:
pages = page
page_search = []
for page in pages:
page_search.append(page - 1)
elif type(page) is int:
page = page - 1
if page < 0:
page = 0
page_search = [page]
else:
page_search = None
pdf_text = extract_text(
file_path, password=password, page_numbers=page_search,
maxpages=maxpages, caching=False, codec=codec)
pdf_text = self.__fix_unicode_conversion(pdf_text)
if wrap:
pdf_text = pdf_text.replace(' \n', ' ')
return pdf_text
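# Usage sketch (illustrative only): pulling text from page 2 of a PDF and
# checking it manually (URL hypothetical; assert_pdf_text() below wraps
# the same flow as an assertion):
#
#     text = self.get_pdf_text(
#         "https://example.com/file.pdf", page=2, wrap=True)
#     assert "Total" in text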
def assert_pdf_text(self, pdf, text, page=None, maxpages=None,
password=None, codec='utf-8', wrap=True, nav=False,
override=False):
""" Asserts text in a PDF file.
PDF can be either a URL or a file path on the local file system.
@Params
pdf - The URL or file path of the PDF file.
text - The expected text to verify in the PDF.
page - The page number of the PDF to use (optional).
If a page number is provided, looks only at that page.
(1 is the first page, 2 is the second page, etc.)
If no page number is provided, looks at all the pages.
maxpages - Instead of providing a page number, you can provide
the number of pages to use from the beginning.
password - If the PDF is password-protected, enter it here.
codec - The compression format for character encoding.
(The default codec used by this method is 'utf-8'.)
wrap - Replaces ' \n' with ' ' so that individual sentences
from a PDF don't get broken up into separate lines when
getting converted into text format.
nav - If PDF is a URL, navigates to the URL in the browser first.
(Not needed because the PDF will be downloaded anyway.)
override - If the PDF file to be downloaded already exists in the
downloaded_files/ folder, that PDF will be used
instead of downloading it again. """
text = self.__fix_unicode_conversion(text)
if not codec:
codec = 'utf-8'
pdf_text = self.get_pdf_text(
pdf, page=page, maxpages=maxpages, password=password, codec=codec,
wrap=wrap, nav=nav, override=override)
if type(page) is int:
if text not in pdf_text:
raise Exception("PDF [%s] is missing expected text [%s] on "
"page [%s]!" % (pdf, text, page))
else:
if text not in pdf_text:
raise Exception("PDF [%s] is missing expected text [%s]!"
"" % (pdf, text))
return True
def create_folder(self, folder):
""" Creates a folder of the given name if it doesn't already exist. """
if folder.endswith("/"):
folder = folder[:-1]
if len(folder) < 1:
raise Exception("Minimum folder name length = 1.")
if not os.path.exists(folder):
try:
os.makedirs(folder)
except Exception:
pass
def choose_file(self, selector, file_path, by=By.CSS_SELECTOR,
timeout=None):
""" This method is used to choose a file to upload to a website.
It works by populating a file-chooser "input" field of type="file".
A relative file_path will get converted into an absolute file_path.
Example usage:
self.choose_file('input[type="file"]', "my_dir/my_file.txt")
"""
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
abs_path = os.path.abspath(file_path)
self.add_text(selector, abs_path, by=by, timeout=timeout)
def save_element_as_image_file(self, selector, file_name, folder=None):
""" Take a screenshot of an element and save it as an image file.
If no folder is specified, will save it to the current folder. """
element = self.wait_for_element_visible(selector)
element_png = element.screenshot_as_png
if len(file_name.split('.')[0]) < 1:
raise Exception("Error: file_name length must be > 0.")
if not file_name.endswith(".png"):
file_name = file_name + ".png"
image_file_path = None
if folder:
if folder.endswith("/"):
folder = folder[:-1]
if len(folder) > 0:
self.create_folder(folder)
image_file_path = "%s/%s" % (folder, file_name)
if not image_file_path:
image_file_path = file_name
with open(image_file_path, "wb") as file:
file.write(element_png)
def download_file(self, file_url, destination_folder=None):
""" Downloads the file from the url to the destination folder.
If no destination folder is specified, the default one is used.
(The default downloads folder = "./downloaded_files") """
if not destination_folder:
destination_folder = constants.Files.DOWNLOADS_FOLDER
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
page_utils._download_file_to(file_url, destination_folder)
def save_file_as(self, file_url, new_file_name, destination_folder=None):
""" Similar to self.download_file(), except that you get to rename the
file being downloaded to whatever you want. """
if not destination_folder:
destination_folder = constants.Files.DOWNLOADS_FOLDER
page_utils._download_file_to(
file_url, destination_folder, new_file_name)
def save_data_as(self, data, file_name, destination_folder=None):
""" Saves the data specified to a file of the name specified.
If no destination folder is specified, the default one is used.
(The default downloads folder = "./downloaded_files") """
if not destination_folder:
destination_folder = constants.Files.DOWNLOADS_FOLDER
page_utils._save_data_as(data, destination_folder, file_name)
def get_downloads_folder(self):
""" Returns the OS path of the Downloads Folder.
(Works with Chrome and Firefox only, for now.) """
return download_helper.get_downloads_folder()
def get_path_of_downloaded_file(self, file):
""" Returns the OS path of the downloaded file. """
return os.path.join(self.get_downloads_folder(), file)
def is_downloaded_file_present(self, file):
""" Checks if the file exists in the Downloads Folder. """
return os.path.exists(self.get_path_of_downloaded_file(file))
def assert_downloaded_file(self, file, timeout=None):
""" Asserts that the file exists in the Downloads Folder. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout)):
shared_utils.check_if_time_limit_exceeded()
try:
self.assertTrue(
os.path.exists(self.get_path_of_downloaded_file(file)),
"File [%s] was not found in the downloads folder [%s]!"
"" % (file, self.get_downloads_folder()))
if self.demo_mode:
messenger_post = ("ASSERT DOWNLOADED FILE: [%s]" % file)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration)
return
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(1)
self.assertTrue(
os.path.exists(self.get_path_of_downloaded_file(file)),
"File [%s] was not found in the downloads folder [%s] "
"after %s seconds! (Or the download didn't complete!)"
"" % (file, self.get_downloads_folder(), timeout))
if self.demo_mode:
messenger_post = ("ASSERT DOWNLOADED FILE: [%s]" % file)
js_utils.post_messenger_success_message(
self.driver, messenger_post, self.message_duration)
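# Usage sketch (illustrative only): download a file and verify that it
# landed in the downloads folder (URL hypothetical):
#
#     self.download_file("https://example.com/report.csv")
#     self.assert_downloaded_file("report.csv")
#     path = self.get_path_of_downloaded_file("report.csv")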
def assert_true(self, expr, msg=None):
""" Asserts that the expression is True.
Will raise an exception if the statement if False. """
self.assertTrue(expr, msg=msg)
def assert_false(self, expr, msg=None):
""" Asserts that the expression is False.
Will raise an exception if the statement if True. """
self.assertFalse(expr, msg=msg)
def assert_equal(self, first, second, msg=None):
""" Asserts that the two values are equal.
Will raise an exception if the values are not equal. """
self.assertEqual(first, second, msg=msg)
def assert_not_equal(self, first, second, msg=None):
""" Asserts that the two values are not equal.
Will raise an exception if the values are equal. """
self.assertNotEqual(first, second, msg=msg)
def assert_raises(self, *args, **kwargs):
""" Asserts that the following block of code raises an exception.
Will raise an exception if the block of code has no exception.
Usage Example =>
# Verify that the expected exception is raised.
with self.assert_raises(Exception):
raise Exception("Expected Exception!")
"""
self.assertRaises(*args, **kwargs)
def assert_title(self, title):
""" Asserts that the web page title matches the expected title. """
expected = title
actual = self.get_page_title()
self.assertEqual(expected, actual, "Expected page title [%s] "
"does not match the actual page title [%s]!"
"" % (expected, actual))
if self.demo_mode:
messenger_post = ("ASSERT TITLE = {%s}" % title)
self.__highlight_with_assert_success(messenger_post, "html")
def assert_no_js_errors(self):
""" Asserts that there are no JavaScript "SEVERE"-level page errors.
Works ONLY for Chrome (non-headless) and Chrome-based browsers.
Does NOT work on Firefox, Edge, IE, and some other browsers:
* See https://github.com/SeleniumHQ/selenium/issues/1161
Based on the following Stack Overflow solution:
* https://stackoverflow.com/a/41150512/7058266 """
time.sleep(0.1) # May take a moment for errors to appear after loads.
try:
browser_logs = self.driver.get_log('browser')
except (ValueError, WebDriverException):
# If unable to get browser logs, skip the assert and return.
return
messenger_library = "//cdnjs.cloudflare.com/ajax/libs/messenger"
errors = []
for entry in browser_logs:
if entry['level'] == 'SEVERE':
if messenger_library not in entry['message']:
# Add errors if not caused by SeleniumBase dependencies
errors.append(entry)
if len(errors) > 0:
current_url = self.get_current_url()
raise Exception(
"JavaScript errors found on %s => %s" % (current_url, errors))
if self.demo_mode:
if (self.browser == 'chrome' or self.browser == 'edge'):
messenger_post = ("ASSERT NO JS ERRORS")
self.__highlight_with_assert_success(messenger_post, "html")
def __activate_html_inspector(self):
self.wait_for_ready_state_complete()
time.sleep(0.05)
js_utils.activate_html_inspector(self.driver)
def inspect_html(self):
""" Inspects the Page HTML with HTML-Inspector.
(https://github.com/philipwalton/html-inspector)
(https://cdnjs.com/libraries/html-inspector)
Prints the results and also returns them. """
self.__activate_html_inspector()
script = ("""HTMLInspector.inspect();""")
self.execute_script(script)
time.sleep(0.1)
browser_logs = []
try:
browser_logs = self.driver.get_log('browser')
except (ValueError, WebDriverException):
# If unable to get browser logs, skip the assert and return.
return("(Unable to Inspect HTML! -> Only works on Chrome!)")
messenger_library = "//cdnjs.cloudflare.com/ajax/libs/messenger"
url = self.get_current_url()
header = '\n* HTML Inspection Results: %s' % url
results = [header]
row_count = 0
for entry in browser_logs:
message = entry['message']
if "0:6053 " in message:
message = message.split("0:6053")[1]
message = message.replace("\\u003C", "<")
if message.startswith(' "') and message.count('"') == 2:
message = message.split('"')[1]
message = "X - " + message
if messenger_library not in message:
if message not in results:
results.append(message)
row_count += 1
if row_count > 0:
results.append('* (See the Console output for details!)')
else:
results.append('* (No issues detected!)')
results = '\n'.join(results)
print(results)
return results
def get_google_auth_password(self, totp_key=None):
""" Returns a time-based one-time password based on the
Google Authenticator password algorithm. Works with Authy.
If "totp_key" is not specified, defaults to using the one
provided in seleniumbase/config/settings.py
Google Auth passwords expire and change at 30-second intervals.
If the fetched password expires in the next 1.5 seconds, waits
for a new one before returning it (may take up to 1.5 seconds).
See https://pyotp.readthedocs.io/en/latest/ for details. """
import pyotp
if not totp_key:
totp_key = settings.TOTP_KEY
epoch_interval = time.time() / 30.0
cycle_lifespan = float(epoch_interval) - int(epoch_interval)
if float(cycle_lifespan) > 0.95:
# Password expires in the next 1.5 seconds. Wait for a new one.
for i in range(30):
time.sleep(0.05)
epoch_interval = time.time() / 30.0
cycle_lifespan = float(epoch_interval) - int(epoch_interval)
if not float(cycle_lifespan) > 0.95:
# The new password cycle has begun
break
totp = pyotp.TOTP(totp_key)
return str(totp.now())
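# Usage sketch (illustrative only): completing a 2FA login form with a
# freshly generated TOTP code (the selectors and key are hypothetical):
#
#     code = self.get_google_auth_password(totp_key="BASE32SECRET")
#     self.update_text("input#otp", code)
#     self.click("button#verify")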
def convert_xpath_to_css(self, xpath):
return xpath_to_css.convert_xpath_to_css(xpath)
def convert_to_css_selector(self, selector, by):
""" This method converts a selector to a CSS_SELECTOR.
jQuery commands require a CSS_SELECTOR for finding elements.
This method should only be used for jQuery/JavaScript actions.
Pure JavaScript doesn't support using a:contains("LINK_TEXT"). """
if by == By.CSS_SELECTOR:
return selector
elif by == By.ID:
return '#%s' % selector
elif by == By.CLASS_NAME:
return '.%s' % selector
elif by == By.NAME:
return '[name="%s"]' % selector
elif by == By.TAG_NAME:
return selector
elif by == By.XPATH:
return self.convert_xpath_to_css(selector)
elif by == By.LINK_TEXT:
return 'a:contains("%s")' % selector
elif by == By.PARTIAL_LINK_TEXT:
return 'a:contains("%s")' % selector
else:
raise Exception(
"Exception: Could not convert {%s}(by=%s) to CSS_SELECTOR!" % (
selector, by))
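# Usage sketch (illustrative only): each supported locator strategy maps
# to a CSS form usable by the jQuery/JS helpers in this class:
#
#     self.convert_to_css_selector("main", By.ID)          # -> "#main"
#     self.convert_to_css_selector("note", By.CLASS_NAME)  # -> ".note"
#     self.convert_to_css_selector("q", By.NAME)           # -> '[name="q"]'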
def set_value(self, selector, new_value, by=By.CSS_SELECTOR, timeout=None):
""" This method uses JavaScript to update a text field. """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
original_selector = selector
css_selector = self.convert_to_css_selector(selector, by=by)
self.__demo_mode_highlight_if_active(original_selector, by)
if not self.demo_mode:
self.scroll_to(original_selector, by=by, timeout=timeout)
value = re.escape(new_value)
value = self.__escape_quotes_if_needed(value)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
script = ("""document.querySelector('%s').value='%s';"""
% (css_selector, value))
self.execute_script(script)
if new_value.endswith('\n'):
element = self.wait_for_element_present(
original_selector, by=by, timeout=timeout)
element.send_keys(Keys.RETURN)
if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
self.wait_for_ready_state_complete()
self.__demo_mode_pause_if_active()
def js_update_text(self, selector, new_value, by=By.CSS_SELECTOR,
timeout=None):
""" Same as self.set_value() """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.set_value(
selector, new_value, by=by, timeout=timeout)
def jquery_update_text(self, selector, new_value, by=By.CSS_SELECTOR,
timeout=None):
""" This method uses jQuery to update a text field.
If the new_value string ends with the newline character,
WebDriver will finish the call, which simulates pressing
{Enter/Return} after the text is entered. """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
element = self.wait_for_element_visible(
selector, by=by, timeout=timeout)
self.__demo_mode_highlight_if_active(selector, by)
self.scroll_to(selector, by=by)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
selector = self.__escape_quotes_if_needed(selector)
new_value = re.escape(new_value)
new_value = self.__escape_quotes_if_needed(new_value)
update_text_script = """jQuery('%s').val('%s')""" % (
selector, new_value)
self.safe_execute_script(update_text_script)
if new_value.endswith('\n'):
element.send_keys('\n')
self.__demo_mode_pause_if_active()
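# Usage sketch (illustrative only): filling a search box via JS, then
# submitting by ending the value with a newline (selector hypothetical):
#
#     self.set_value('input[name="q"]', "seleniumbase")
#     self.jquery_update_text('input[name="q"]', "seleniumbase\n")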
def set_time_limit(self, time_limit):
if time_limit:
try:
sb_config.time_limit = float(time_limit)
except Exception:
sb_config.time_limit = None
else:
sb_config.time_limit = None
if sb_config.time_limit and sb_config.time_limit > 0:
sb_config.time_limit_ms = int(sb_config.time_limit * 1000.0)
self.time_limit = sb_config.time_limit
else:
self.time_limit = None
sb_config.time_limit = None
sb_config.time_limit_ms = None
def skip(self, reason=""):
""" Mark the test as Skipped. """
self.skipTest(reason)
############
def add_css_link(self, css_link):
js_utils.add_css_link(self.driver, css_link)
def add_js_link(self, js_link):
js_utils.add_js_link(self.driver, js_link)
def add_css_style(self, css_style):
js_utils.add_css_style(self.driver, css_style)
def add_js_code_from_link(self, js_link):
js_utils.add_js_code_from_link(self.driver, js_link)
def add_js_code(self, js_code):
js_utils.add_js_code(self.driver, js_code)
def add_meta_tag(self, http_equiv=None, content=None):
js_utils.add_meta_tag(
self.driver, http_equiv=http_equiv, content=content)
############
def create_tour(self, name=None, theme=None):
""" Creates a tour for a website. By default, the Shepherd JavaScript
Library is used with the Shepherd "Light" / "Arrows" theme.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
theme - Sets the default theme for the tour.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("arrows" is used if None is selected.)
Alternatively, you may use a different JavaScript Library
as the theme. Those include "IntroJS", "Bootstrap", and
"Hopscotch".
"""
if not name:
name = "default"
if theme:
if theme.lower() == "bootstrap":
self.create_bootstrap_tour(name)
return
elif theme.lower() == "hopscotch":
self.create_hopscotch_tour(name)
return
elif theme.lower() == "intro":
self.create_introjs_tour(name)
return
elif theme.lower() == "introjs":
self.create_introjs_tour(name)
return
elif theme.lower() == "shepherd":
self.create_shepherd_tour(name, theme="light")
return
else:
self.create_shepherd_tour(name, theme)
else:
self.create_shepherd_tour(name, theme="light")
def create_shepherd_tour(self, name=None, theme=None):
""" Creates a Shepherd JS website tour.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
theme - Sets the default theme for the tour.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("light" is used if None is selected.)
"""
shepherd_theme = "shepherd-theme-arrows"
if theme:
if theme.lower() == "default":
shepherd_theme = "shepherd-theme-default"
elif theme.lower() == "dark":
shepherd_theme = "shepherd-theme-dark"
elif theme.lower() == "light":
shepherd_theme = "shepherd-theme-arrows"
elif theme.lower() == "arrows":
shepherd_theme = "shepherd-theme-arrows"
elif theme.lower() == "square":
shepherd_theme = "shepherd-theme-square"
elif theme.lower() == "square-dark":
shepherd_theme = "shepherd-theme-square-dark"
if not name:
name = "default"
new_tour = (
"""
// Shepherd Tour
var tour = new Shepherd.Tour({
defaults: {
classes: '%s',
scrollTo: true
}
});
var allButtons = {
skip: {
text: "Skip",
action: tour.cancel,
classes: 'shepherd-button-secondary tour-button-left'
},
back: {
text: "Back",
action: tour.back,
classes: 'shepherd-button-secondary'
},
next: {
text: "Next",
action: tour.next,
classes: 'shepherd-button-primary tour-button-right'
},
};
var firstStepButtons = [allButtons.skip, allButtons.next];
var midTourButtons = [allButtons.back, allButtons.next];
""" % shepherd_theme)
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour)
def create_bootstrap_tour(self, name=None):
""" Creates a Bootstrap tour for a website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
"""
if not name:
name = "default"
new_tour = (
"""
// Bootstrap Tour
var tour = new Tour({
});
tour.addSteps([
""")
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour)
def create_hopscotch_tour(self, name=None):
""" Creates an Hopscotch tour for a website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
"""
if not name:
name = "default"
new_tour = (
"""
// Hopscotch Tour
var tour = {
id: "hopscotch_tour",
steps: [
""")
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour)
def create_introjs_tour(self, name=None):
""" Creates an IntroJS tour for a website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
"""
if not name:
name = "default"
new_tour = (
"""
// IntroJS Tour
function startIntro(){
var intro = introJs();
intro.setOptions({
steps: [
""")
self._tour_steps[name] = []
self._tour_steps[name].append(new_tour)
def add_tour_step(self, message, selector=None, name=None,
title=None, theme=None, alignment=None, duration=None):
""" Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
theme - (NON-Bootstrap Tours ONLY) The styling of the tour step.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("arrows" is used if None is selected.)
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
duration - (Bootstrap Tours ONLY) The amount of time, in seconds,
before automatically advancing to the next tour step.
"""
if not selector:
selector = "html"
if page_utils.is_xpath_selector(selector):
selector = self.convert_to_css_selector(selector, By.XPATH)
selector = self.__escape_quotes_if_needed(selector)
if not name:
name = "default"
if name not in self._tour_steps:
# By default, will create an IntroJS tour if no tours exist
self.create_tour(name=name, theme="introjs")
if not title:
title = ""
title = self.__escape_quotes_if_needed(title)
if message:
message = self.__escape_quotes_if_needed(message)
else:
message = ""
if not alignment or (
alignment not in ["top", "bottom", "left", "right"]):
if "Hopscotch" not in self._tour_steps[name][0]:
alignment = "top"
else:
alignment = "bottom"
if "Bootstrap" in self._tour_steps[name][0]:
self.__add_bootstrap_tour_step(
message, selector=selector, name=name, title=title,
alignment=alignment, duration=duration)
elif "Hopscotch" in self._tour_steps[name][0]:
self.__add_hopscotch_tour_step(
message, selector=selector, name=name, title=title,
alignment=alignment)
elif "IntroJS" in self._tour_steps[name][0]:
self.__add_introjs_tour_step(
message, selector=selector, name=name, title=title,
alignment=alignment)
else:
self.__add_shepherd_tour_step(
message, selector=selector, name=name, title=title,
theme=theme, alignment=alignment)
def __add_shepherd_tour_step(self, message, selector=None, name=None,
title=None, theme=None, alignment=None):
""" Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
theme - (NON-Bootstrap Tours ONLY) The styling of the tour step.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("arrows" is used if None is selected.)
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
"""
if theme == "default":
shepherd_theme = "shepherd-theme-default"
elif theme == "dark":
shepherd_theme = "shepherd-theme-dark"
elif theme == "light":
shepherd_theme = "shepherd-theme-arrows"
elif theme == "arrows":
shepherd_theme = "shepherd-theme-arrows"
elif theme == "square":
shepherd_theme = "shepherd-theme-square"
elif theme == "square-dark":
shepherd_theme = "shepherd-theme-square-dark"
else:
shepherd_base_theme = re.search(
r"[\S\s]+classes: '([\S\s]+)',[\S\s]+",
self._tour_steps[name][0]).group(1)
shepherd_theme = shepherd_base_theme
shepherd_classes = shepherd_theme
if selector == "html":
shepherd_classes += " shepherd-orphan"
buttons = "firstStepButtons"
if len(self._tour_steps[name]) > 1:
buttons = "midTourButtons"
step = ("""
tour.addStep('%s', {
title: '%s',
classes: '%s',
text: '%s',
attachTo: {element: '%s', on: '%s'},
buttons: %s,
advanceOn: '.docs-link click'
});""" % (
name, title, shepherd_classes, message, selector, alignment,
buttons))
self._tour_steps[name].append(step)
def __add_bootstrap_tour_step(self, message, selector=None, name=None,
title=None, alignment=None, duration=None):
""" Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
duration - (Bootstrap Tours ONLY) The amount of time, in seconds,
before automatically advancing to the next tour step.
"""
if selector != "html":
selector = self.__make_css_match_first_element_only(selector)
element_row = "element: '%s'," % selector
else:
element_row = ""
if not duration:
duration = "0"
else:
duration = str(float(duration) * 1000.0)
step = ("""{
%s
title: '%s',
content: '%s',
orphan: true,
placement: 'auto %s',
smartPlacement: true,
duration: %s,
},""" % (element_row, title, message, alignment, duration))
self._tour_steps[name].append(step)
def __add_hopscotch_tour_step(self, message, selector=None, name=None,
title=None, alignment=None):
""" Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
alignment - Choose from "top", "bottom", "left", and "right".
("bottom" is the default alignment).
"""
arrow_offset_row = None
if not selector or selector == "html":
selector = "head"
alignment = "bottom"
arrow_offset_row = "arrowOffset: '200',"
else:
arrow_offset_row = ""
step = ("""{
target: '%s',
title: '%s',
content: '%s',
%s
showPrevButton: 'true',
scrollDuration: '550',
placement: '%s'},
""" % (selector, title, message, arrow_offset_row, alignment))
self._tour_steps[name].append(step)
def __add_introjs_tour_step(self, message, selector=None, name=None,
title=None, alignment=None):
""" Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
"""
if selector != "html":
element_row = "element: '%s'," % selector
else:
element_row = ""
if title:
message = "<center><b>" + title + "</b></center><hr>" + message
message = '<font size=\"3\" color=\"#33475B\">' + message + '</font>'
step = ("""{%s
intro: '%s',
position: '%s'},
""" % (element_row, message, alignment))
self._tour_steps[name].append(step)
def play_tour(self, name=None, interval=0):
""" Plays a tour on the current website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
interval - The delay time between autoplaying tour steps.
If set to 0 (default), the tour is under full manual control.
"""
if self.headless:
return # Tours should not run in headless mode.
if not name:
name = "default"
if name not in self._tour_steps:
raise Exception("Tour {%s} does not exist!" % name)
if "Bootstrap" in self._tour_steps[name][0]:
tour_helper.play_bootstrap_tour(
self.driver, self._tour_steps, self.browser,
self.message_duration, name=name, interval=interval)
elif "Hopscotch" in self._tour_steps[name][0]:
tour_helper.play_hopscotch_tour(
self.driver, self._tour_steps, self.browser,
self.message_duration, name=name, interval=interval)
elif "IntroJS" in self._tour_steps[name][0]:
tour_helper.play_introjs_tour(
self.driver, self._tour_steps, self.browser,
self.message_duration, name=name, interval=interval)
else:
# "Shepherd"
tour_helper.play_shepherd_tour(
self.driver, self._tour_steps,
self.message_duration, name=name, interval=interval)
def export_tour(self, name=None, filename="my_tour.js", url=None):
""" Exports a tour as a JS file.
You can call self.export_tour() anywhere where you would
normally use self.play_tour() to play a tour.
It will include necessary resources as well, such as jQuery.
You'll be able to copy the tour directly into the Console of
any web browser to play the tour outside of SeleniumBase runs.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
filename - The name of the JavaScript file that you wish to
save the tour to.
url - The page URL where the tour begins. (Default: current URL.) """
if not url:
url = self.get_current_url()
tour_helper.export_tour(
self._tour_steps, name=name, filename=filename, url=url)
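# Usage sketch (illustrative only): building, playing, and exporting a
# two-step tour (selectors and messages are hypothetical):
#
#     self.create_tour(theme="dark")
#     self.add_tour_step("Welcome!", title="Start")
#     self.add_tour_step(
#         "Type a query here.", 'input[name="q"]', alignment="bottom")
#     self.play_tour()
#     self.export_tour(filename="search_tour.js")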
def activate_jquery_confirm(self):
""" See https://craftpip.github.io/jquery-confirm/ for usage. """
js_utils.activate_jquery_confirm(self.driver)
self.wait_for_ready_state_complete()
def activate_messenger(self):
js_utils.activate_messenger(self.driver)
self.wait_for_ready_state_complete()
def set_messenger_theme(self, theme="default", location="default",
max_messages="default"):
""" Sets a theme for posting messages.
Themes: ["flat", "future", "block", "air", "ice"]
Locations: ["top_left", "top_center", "top_right",
"bottom_left", "bottom_center", "bottom_right"]
max_messages is the limit of concurrent messages to display. """
if not theme:
theme = "default" # "future"
if not location:
location = "default" # "bottom_right"
if not max_messages:
max_messages = "default" # "8"
js_utils.set_messenger_theme(
self.driver, theme=theme,
location=location, max_messages=max_messages)
def post_message(self, message, duration=None, pause=True, style="info"):
""" Post a message on the screen with Messenger.
Arguments:
message: The message to display.
duration: The time until the message vanishes. (Default: 2.55s)
pause: If True, the program waits until the message completes.
style: "info", "success", or "error".
You can also post messages by using =>
self.execute_script('Messenger().post("My Message")')
"""
if not duration:
if not self.message_duration:
duration = settings.DEFAULT_MESSAGE_DURATION
else:
duration = self.message_duration
js_utils.post_message(
self.driver, message, duration, style=style)
if pause:
duration = float(duration) + 0.15
time.sleep(float(duration))
def post_success_message(self, message, duration=None, pause=True):
""" Post a success message on the screen with Messenger.
Arguments:
message: The success message to display.
duration: The time until the message vanishes. (Default: 2.55s)
pause: If True, the program waits until the message completes.
"""
if not duration:
if not self.message_duration:
duration = settings.DEFAULT_MESSAGE_DURATION
else:
duration = self.message_duration
js_utils.post_message(
self.driver, message, duration, style="success")
if pause:
duration = float(duration) + 0.15
time.sleep(float(duration))
def post_error_message(self, message, duration=None, pause=True):
""" Post an error message on the screen with Messenger.
Arguments:
message: The error message to display.
duration: The time until the message vanishes. (Default: 2.55s)
pause: If True, the program waits until the message completes.
"""
if not duration:
if not self.message_duration:
duration = settings.DEFAULT_MESSAGE_DURATION
else:
duration = self.message_duration
js_utils.post_message(
self.driver, message, duration, style="error")
if pause:
duration = float(duration) + 0.15
time.sleep(float(duration))
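# Usage sketch (illustrative only): theming and posting on-screen
# messages with the Messenger helpers above:
#
#     self.set_messenger_theme(theme="flat", location="top_center")
#     self.post_message("Checkpoint reached", duration=3)
#     self.post_error_message("Something looks wrong", duration=3)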
############
def generate_referral(self, start_page, destination_page):
""" This method opens the start_page, creates a referral link there,
and clicks on that link, which goes to the destination_page.
(This generates real traffic for testing analytics software.) """
if not page_utils.is_valid_url(destination_page):
raise Exception(
"Exception: destination_page {%s} is not a valid URL!"
% destination_page)
if start_page:
if not page_utils.is_valid_url(start_page):
raise Exception(
"Exception: start_page {%s} is not a valid URL! "
"(Use an empty string or None to start from current page.)"
% start_page)
self.open(start_page)
time.sleep(0.08)
self.wait_for_ready_state_complete()
referral_link = ('''<body>'''
'''<a class='analytics referral test' href='%s' '''
'''style='font-family: Arial,sans-serif; '''
'''font-size: 30px; color: #18a2cd'>'''
'''Magic Link Button</a></body>''' % destination_page)
self.execute_script(
'''document.body.outerHTML = \"%s\"''' % referral_link)
self.click(
"a.analytics.referral.test", timeout=2) # Clicks generated button
time.sleep(0.15)
try:
self.click("html")
time.sleep(0.08)
except Exception:
pass
def generate_traffic(self, start_page, destination_page, loops=1):
""" Similar to generate_referral(), but can do multiple loops. """
for loop in range(loops):
self.generate_referral(start_page, destination_page)
time.sleep(0.05)
def generate_referral_chain(self, pages):
""" Use this method to chain the action of creating button links on
one website page that will take you to the next page.
(When you want to create a referral to a website for traffic
generation without increasing the bounce rate, you'll want to visit
at least one additional page on that site with a button click.) """
if not type(pages) is tuple and not type(pages) is list:
raise Exception(
"Exception: Expecting a list of website pages for chaining!")
if len(pages) < 2:
raise Exception(
"Exception: At least two website pages required for chaining!")
for page in pages:
# Find out if any of the web pages are invalid before continuing
if not page_utils.is_valid_url(page):
raise Exception(
"Exception: Website page {%s} is not a valid URL!" % page)
for page in pages:
self.generate_referral(None, page)
def generate_traffic_chain(self, pages, loops=1):
""" Similar to generate_referral_chain(), but for multiple loops. """
for loop in range(loops):
self.generate_referral_chain(pages)
time.sleep(0.05)
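# A usage sketch for the traffic-generation helpers above (hypothetical
# URLs; every page must be a valid, fully-qualified URL):
#     self.generate_referral("https://site-a.test", "https://site-b.test")
#     self.generate_traffic_chain(
#         ["https://site-b.test/", "https://site-b.test/about"], loops=2)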
############
def wait_for_element_present(self, selector, by=By.CSS_SELECTOR,
timeout=None):
""" Waits for an element to appear in the HTML of a page.
The element does not need be visible (it may be hidden). """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_element_present(
self.driver, selector, by, timeout)
def wait_for_element_visible(self, selector, by=By.CSS_SELECTOR,
timeout=None):
""" Waits for an element to appear in the HTML of a page.
The element must be visible (it cannot be hidden). """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_element_visible(
self.driver, selector, by, timeout)
def wait_for_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" The shorter version of wait_for_element_visible() """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_visible(selector, by=by, timeout=timeout)
def get_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Same as wait_for_element_present() - returns the element.
The element does not need be visible (it may be hidden). """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_present(selector, by=by, timeout=timeout)
def assert_element_present(self, selector, by=By.CSS_SELECTOR,
timeout=None):
""" Similar to wait_for_element_present(), but returns nothing.
Waits for an element to appear in the HTML of a page.
The element does not need be visible (it may be hidden).
Returns True if successful. Default timeout = SMALL_TIMEOUT. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_element_present(selector, by=by, timeout=timeout)
return True
def find_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Same as wait_for_element_visible() - returns the element """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_visible(selector, by=by, timeout=timeout)
def assert_element(self, selector, by=By.CSS_SELECTOR, timeout=None):
""" Similar to wait_for_element_visible(), but returns nothing.
As above, will raise an exception if nothing can be found.
Returns True if successful. Default timeout = SMALL_TIMEOUT. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_element_visible(selector, by=by, timeout=timeout)
if self.demo_mode:
selector, by = self.__recalculate_selector(selector, by)
messenger_post = "ASSERT %s: %s" % (by, selector)
self.__highlight_with_assert_success(messenger_post, selector, by)
return True
def assert_element_visible(self, selector, by=By.CSS_SELECTOR,
timeout=None):
""" Same as self.assert_element()
As above, will raise an exception if nothing can be found. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.assert_element(selector, by=by, timeout=timeout)
return True
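# A usage sketch for the element waits/asserts above (hypothetical
# selectors; assert_* methods raise on failure and return True,
# while find_element()/get_element() return the element itself):
#     self.assert_element("div#content")            # Must be visible
#     self.assert_element_present("input[hidden]")  # May be hidden
#     element = self.find_element("a.nav-link")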
############
def wait_for_text_visible(self, text, selector="html", by=By.CSS_SELECTOR,
timeout=None):
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_text_visible(
self.driver, text, selector, by, timeout)
def wait_for_exact_text_visible(self, text, selector="html",
by=By.CSS_SELECTOR,
timeout=None):
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_exact_text_visible(
self.driver, text, selector, by, timeout)
def wait_for_text(self, text, selector="html", by=By.CSS_SELECTOR,
timeout=None):
""" The shorter version of wait_for_text_visible() """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_text_visible(
text, selector, by=by, timeout=timeout)
def find_text(self, text, selector="html", by=By.CSS_SELECTOR,
timeout=None):
""" Same as wait_for_text_visible() - returns the element """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_text_visible(
text, selector, by=by, timeout=timeout)
def assert_text_visible(self, text, selector="html", by=By.CSS_SELECTOR,
timeout=None):
""" Same as assert_text() """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.assert_text(text, selector, by=by, timeout=timeout)
def assert_text(self, text, selector="html", by=By.CSS_SELECTOR,
timeout=None):
""" Similar to wait_for_text_visible()
Raises an exception if the element or the text is not found.
Returns True if successful. Default timeout = SMALL_TIMEOUT. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_text_visible(text, selector, by=by, timeout=timeout)
if self.demo_mode:
selector, by = self.__recalculate_selector(selector, by)
messenger_post = ("ASSERT TEXT {%s} in %s: %s"
% (text, by, selector))
self.__highlight_with_assert_success(messenger_post, selector, by)
return True
def assert_exact_text(self, text, selector="html", by=By.CSS_SELECTOR,
timeout=None):
""" Similar to assert_text(), but the text must be exact, rather than
exist as a subset of the full text.
(Extra whitespace at the beginning or the end doesn't count.)
Raises an exception if the element or the text is not found.
Returns True if successful. Default timeout = SMALL_TIMEOUT. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_exact_text_visible(
text, selector, by=by, timeout=timeout)
if self.demo_mode:
selector, by = self.__recalculate_selector(selector, by)
messenger_post = ("ASSERT EXACT TEXT {%s} in %s: %s"
% (text, by, selector))
self.__highlight_with_assert_success(messenger_post, selector, by)
return True
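# A usage sketch for the text waits/asserts above (hypothetical text and
# selectors; assert_text() matches substrings, assert_exact_text() does not):
#     self.assert_text("Welcome", "h1#greeting")
#     self.assert_exact_text("Welcome back, admin!", "h1#greeting")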
############
def wait_for_link_text_present(self, link_text, timeout=None):
if not timeout:
timeout = settings.SMALL_TIMEOUT
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 5)):
shared_utils.check_if_time_limit_exceeded()
try:
if not self.is_link_text_present(link_text):
raise Exception(
"Link text {%s} was not found!" % link_text)
return
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.2)
raise Exception(
"Link text {%s} was not present after %s seconds!" % (
link_text, timeout))
def wait_for_partial_link_text_present(self, link_text, timeout=None):
if not timeout:
timeout = settings.SMALL_TIMEOUT
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 5)):
shared_utils.check_if_time_limit_exceeded()
try:
if not self.is_partial_link_text_present(link_text):
raise Exception(
"Partial Link text {%s} was not found!" % link_text)
return
except Exception:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.2)
raise Exception(
"Partial Link text {%s} was not present after %s seconds!" % (
link_text, timeout))
def wait_for_link_text_visible(self, link_text, timeout=None):
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_visible(
link_text, by=By.LINK_TEXT, timeout=timeout)
def wait_for_link_text(self, link_text, timeout=None):
""" The shorter version of wait_for_link_text_visible() """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_link_text_visible(link_text, timeout=timeout)
def find_link_text(self, link_text, timeout=None):
""" Same as wait_for_link_text_visible() - returns the element """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_link_text_visible(link_text, timeout=timeout)
def assert_link_text(self, link_text, timeout=None):
""" Similar to wait_for_link_text_visible(), but returns nothing.
As above, will raise an exception if nothing can be found.
Returns True if successful. Default timeout = SMALL_TIMEOUT. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_link_text_visible(link_text, timeout=timeout)
if self.demo_mode:
messenger_post = ("ASSERT LINK TEXT {%s}." % link_text)
self.__highlight_with_assert_success(
messenger_post, link_text, by=By.LINK_TEXT)
return True
def wait_for_partial_link_text(self, partial_link_text, timeout=None):
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_element_visible(
partial_link_text, by=By.PARTIAL_LINK_TEXT, timeout=timeout)
def find_partial_link_text(self, partial_link_text, timeout=None):
""" Same as wait_for_partial_link_text() - returns the element """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return self.wait_for_partial_link_text(
partial_link_text, timeout=timeout)
def assert_partial_link_text(self, partial_link_text, timeout=None):
""" Similar to wait_for_partial_link_text(), but returns nothing.
As above, will raise an exception if nothing can be found.
Returns True if successful. Default timeout = SMALL_TIMEOUT. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_partial_link_text(partial_link_text, timeout=timeout)
if self.demo_mode:
messenger_post = (
"ASSERT PARTIAL LINK TEXT {%s}." % partial_link_text)
self.__highlight_with_assert_success(
messenger_post, partial_link_text, by=By.PARTIAL_LINK_TEXT)
return True
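# A usage sketch for the link-text helpers above (hypothetical link text):
#     self.assert_link_text("Sign In")       # Full link text must match
#     self.assert_partial_link_text("Sign")  # A substring is enough
#     link = self.find_link_text("Sign In")  # Returns the element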
############
def wait_for_element_absent(self, selector, by=By.CSS_SELECTOR,
timeout=None):
""" Waits for an element to no longer appear in the HTML of a page.
A hidden element still counts as appearing in the page HTML.
If an element with "hidden" status is acceptable,
use wait_for_element_not_visible() instead. """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
return page_actions.wait_for_element_absent(
self.driver, selector, by, timeout)
def assert_element_absent(self, selector, by=By.CSS_SELECTOR,
timeout=None):
""" Similar to wait_for_element_absent() - returns nothing.
As above, will raise an exception if the element stays present.
Returns True if successful. Default timeout = SMALL_TIMEOUT. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_element_absent(selector, by=by, timeout=timeout)
return True
############
def wait_for_element_not_visible(self, selector, by=By.CSS_SELECTOR,
timeout=None):
""" Waits for an element to no longer be visible on a page.
The element can be non-existant in the HTML or hidden on the page
to qualify as not visible. """
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_element_not_visible(
self.driver, selector, by, timeout)
def assert_element_not_visible(self, selector, by=By.CSS_SELECTOR,
timeout=None):
""" Similar to wait_for_element_not_visible() - returns nothing.
As above, will raise an exception if the element stays visible.
Returns True if successful. Default timeout = SMALL_TIMEOUT. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_element_not_visible(selector, by=by, timeout=timeout)
return True
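# A usage sketch for the absence checks above (hypothetical selectors;
# "absent" means removed from the HTML, while "not visible" also accepts
# elements that are merely hidden):
#     self.assert_element_absent("div.loading-spinner")
#     self.assert_element_not_visible("div.modal")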
############
def wait_for_text_not_visible(self, text, selector="html",
by=By.CSS_SELECTOR,
timeout=None):
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
selector, by = self.__recalculate_selector(selector, by)
return page_actions.wait_for_text_not_visible(
self.driver, text, selector, by, timeout)
def assert_text_not_visible(self, text, selector="html",
by=By.CSS_SELECTOR,
timeout=None):
""" Similar to wait_for_text_not_visible()
Raises an exception if the element or the text is not found.
Returns True if successful. Default timeout = SMALL_TIMEOUT. """
if not timeout:
timeout = settings.SMALL_TIMEOUT
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.wait_for_text_not_visible(text, selector, by=by, timeout=timeout)
############
def wait_for_and_accept_alert(self, timeout=None):
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_accept_alert(self.driver, timeout)
def wait_for_and_dismiss_alert(self, timeout=None):
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_dismiss_alert(self.driver, timeout)
def wait_for_and_switch_to_alert(self, timeout=None):
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
return page_actions.wait_for_and_switch_to_alert(self.driver, timeout)
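# A usage sketch for the alert helpers above (hypothetical flow):
#     self.click("button#delete")       # Triggers a JS confirm() dialog
#     self.wait_for_and_accept_alert()  # Or: wait_for_and_dismiss_alert()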
############
def __assert_eq(self, *args, **kwargs):
""" Minified assert_equal() using only the list diff. """
minified_exception = None
try:
self.assertEqual(*args, **kwargs)
except Exception as e:
str_e = str(e)
minified_exception = "\nAssertionError:\n"
lines = str_e.split('\n')
countdown = 3
countdown_on = False
for line in lines:
if countdown_on:
minified_exception += line + '\n'
countdown = countdown - 1
if countdown == 0:
countdown_on = False
elif line.startswith('F'):
countdown_on = True
countdown = 3
minified_exception += line + '\n'
elif line.startswith('+') or line.startswith('-'):
minified_exception += line + '\n'
elif line.startswith('?'):
minified_exception += line + '\n'
elif line.strip().startswith('*'):
minified_exception += line + '\n'
if minified_exception:
raise Exception(minified_exception)
def check_window(self, name="default", level=0, baseline=False):
""" *** Automated Visual Testing with SeleniumBase ***
The first time a test calls self.check_window() with a unique "name"
parameter, it will set a visual baseline, meaning that it
creates a folder, saves the URL to a file, saves the current window
screenshot to a file, and creates the following three files
with the listed data saved:
tags_level1.txt -> HTML tags from the window
tags_level2.txt -> HTML tags + attributes from the window
tags_level3.txt -> HTML tags + attributes/values from the window
Baseline folders are named based on the test name and the name
parameter passed to self.check_window(). The same test can store
multiple baseline folders.
If the baseline is being set/reset, the "level" doesn't matter.
After the first run of self.check_window(), it will compare the
HTML tags of the latest window to the one from the initial run.
Here's how the level system works:
* level=0 ->
DRY RUN ONLY - Will perform a comparison to the baseline, and
print out any differences that are found, but
won't fail the test even if differences exist.
* level=1 ->
HTML tags are compared to tags_level1.txt
* level=2 ->
HTML tags are compared to tags_level1.txt and
HTML tags/attributes are compared to tags_level2.txt
* level=3 ->
HTML tags are compared to tags_level1.txt and
HTML tags + attributes are compared to tags_level2.txt and
HTML tags + attributes/values are compared to tags_level3.txt
As shown, Level-3 is the most strict, Level-1 is the least strict.
If the comparisons from the latest window to the existing baseline
don't match, the current test will fail, except for Level-0 tests.
You can reset the visual baseline on the command line by using:
--visual_baseline
As long as "--visual_baseline" is used on the command line while
running tests, the self.check_window() method cannot fail because
it will rebuild the visual baseline rather than comparing the html
tags of the latest run to the existing baseline. If there are any
expected layout changes to a website that you're testing, you'll
need to reset the baseline to prevent unnecessary failures.
self.check_window() will fail with "Page Domain Mismatch Failure"
if the page domain doesn't match the domain of the baseline.
If you want to use self.check_window() to compare a web page to
a later version of itself from within the same test run, you can
add the parameter "baseline=True" to the first time you call
self.check_window() in a test to use that as the baseline. This
only makes sense if you're calling self.check_window() more than
once with the same name parameter in the same test.
Automated Visual Testing with self.check_window() is not very
effective for websites that have dynamic content that changes
the layout and structure of web pages. For those, you're much
better off using regular SeleniumBase functional testing.
Example usage:
self.check_window(name="testing", level=0)
self.check_window(name="xkcd_home", level=1)
self.check_window(name="github_page", level=2)
self.check_window(name="wikipedia_page", level=3)
"""
if level == "0":
level = 0
if level == "1":
level = 1
if level == "2":
level = 2
if level == "3":
level = 3
if level != 0 and level != 1 and level != 2 and level != 3:
raise Exception('Parameter "level" must be set to 0, 1, 2, or 3!')
if self.demo_mode:
raise Exception(
"WARNING: Using Demo Mode will break layout tests "
"that use the check_window() method due to custom "
"HTML edits being made on the page!\n"
"Please rerun without using Demo Mode!")
module = self.__class__.__module__
if '.' in module and len(module.split('.')[-1]) > 1:
module = module.split('.')[-1]
test_id = "%s.%s" % (module, self._testMethodName)
if not name or len(name) < 1:
name = "default"
name = str(name)
visual_helper.visual_baseline_folder_setup()
baseline_dir = constants.VisualBaseline.STORAGE_FOLDER
visual_baseline_path = baseline_dir + "/" + test_id + "/" + name
page_url_file = visual_baseline_path + "/page_url.txt"
screenshot_file = visual_baseline_path + "/screenshot.png"
level_1_file = visual_baseline_path + "/tags_level_1.txt"
level_2_file = visual_baseline_path + "/tags_level_2.txt"
level_3_file = visual_baseline_path + "/tags_level_3.txt"
set_baseline = False
if baseline or self.visual_baseline:
set_baseline = True
if not os.path.exists(visual_baseline_path):
set_baseline = True
try:
os.makedirs(visual_baseline_path)
except Exception:
pass # Only reachable during multi-threaded test runs
if not os.path.exists(page_url_file):
set_baseline = True
if not os.path.exists(screenshot_file):
set_baseline = True
if not os.path.exists(level_1_file):
set_baseline = True
if not os.path.exists(level_2_file):
set_baseline = True
if not os.path.exists(level_3_file):
set_baseline = True
page_url = self.get_current_url()
soup = self.get_beautiful_soup()
html_tags = soup.body.find_all()
level_1 = [[tag.name] for tag in html_tags]
level_1 = json.loads(json.dumps(level_1)) # Tuples become lists
level_2 = [[tag.name, sorted(tag.attrs.keys())] for tag in html_tags]
level_2 = json.loads(json.dumps(level_2)) # Tuples become lists
level_3 = [[tag.name, sorted(tag.attrs.items())] for tag in html_tags]
level_3 = json.loads(json.dumps(level_3)) # Tuples become lists
if set_baseline:
self.save_screenshot("screenshot.png", visual_baseline_path)
out_file = codecs.open(page_url_file, "w+")
out_file.writelines(page_url)
out_file.close()
out_file = codecs.open(level_1_file, "w+")
out_file.writelines(json.dumps(level_1))
out_file.close()
out_file = codecs.open(level_2_file, "w+")
out_file.writelines(json.dumps(level_2))
out_file.close()
out_file = codecs.open(level_3_file, "w+")
out_file.writelines(json.dumps(level_3))
out_file.close()
if not set_baseline:
f = open(page_url_file, 'r')
page_url_data = f.read().strip()
f.close()
f = open(level_1_file, 'r')
level_1_data = json.loads(f.read())
f.close()
f = open(level_2_file, 'r')
level_2_data = json.loads(f.read())
f.close()
f = open(level_3_file, 'r')
level_3_data = json.loads(f.read())
f.close()
domain_fail = (
"\nPage Domain Mismatch Failure: "
"Current Page Domain doesn't match the Page Domain of the "
"Baseline! Can't compare two completely different sites! "
"Run with --visual_baseline to reset the baseline!")
level_1_failure = (
"\n*\n*** Exception: <Level 1> Visual Diff Failure:\n"
"* HTML tags don't match the baseline!")
level_2_failure = (
"\n*\n*** Exception: <Level 2> Visual Diff Failure:\n"
"* HTML tag attribute names don't match the baseline!")
level_3_failure = (
"\n*\n*** Exception: <Level 3> Visual Diff Failure:\n"
"* HTML tag attribute values don't match the baseline!")
page_domain = self.get_domain_url(page_url)
page_data_domain = self.get_domain_url(page_url_data)
unittest.TestCase.maxDiff = 1000
if level != 0:
    self.assertEqual(page_data_domain, page_domain, domain_fail)
# Compare cumulatively: each level also runs the lower-level checks,
# matching the level system described in the docstring above
if level >= 1:
    self.__assert_eq(level_1_data, level_1, level_1_failure)
unittest.TestCase.maxDiff = None
if level >= 2:
    self.__assert_eq(level_2_data, level_2, level_2_failure)
if level == 3:
    self.__assert_eq(level_3_data, level_3, level_3_failure)
if level == 0:
try:
unittest.TestCase.maxDiff = 1000
self.assertEqual(
page_domain, page_data_domain, domain_fail)
unittest.TestCase.maxDiff = None
self.__assert_eq(level_3_data, level_3, level_3_failure)
except Exception as e:
print(e) # Level-0 Dry Run (Only print the differences)
############
def __get_new_timeout(self, timeout):
""" When using --timeout_multiplier=#.# """
try:
timeout_multiplier = float(self.timeout_multiplier)
if timeout_multiplier <= 0.5:
timeout_multiplier = 0.5
timeout = int(math.ceil(timeout_multiplier * timeout))
return timeout
except Exception:
# Wrong data type for timeout_multiplier (expecting int or float)
return timeout
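# A worked example of the timeout math above: with --timeout_multiplier=2.5
# and a 6-second timeout, the new timeout is ceil(2.5 * 6) = 15 seconds.
# Multipliers at or below 0.5 are clamped to 0.5, so that same 6-second
# timeout can never drop below ceil(0.5 * 6) = 3 seconds.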
############
def __get_exception_message(self):
""" This method extracts the message from an exception if there
was an exception that occurred during the test, assuming
that the exception was in a try/except block and not thrown. """
exception_info = sys.exc_info()[1]
if hasattr(exception_info, 'msg'):
exc_message = exception_info.msg
elif hasattr(exception_info, 'message'):
exc_message = exception_info.message
else:
exc_message = sys.exc_info()
return exc_message
def __get_improved_exception_message(self):
"""
If Chromedriver is out-of-date, make it clear!
Given the high popularity of the following StackOverflow article:
https://stackoverflow.com/questions/49162667/unknown-error-
call-function-result-missing-value-for-selenium-send-keys-even
... the original error message was not helpful. Tell people directly.
(Only expected when using driver.send_keys() with an old Chromedriver.)
"""
exc_message = self.__get_exception_message()
maybe_using_old_chromedriver = False
if "unknown error: call function result missing" in exc_message:
maybe_using_old_chromedriver = True
if self.browser == 'chrome' and maybe_using_old_chromedriver:
update = ("Your version of ChromeDriver may be out-of-date! "
"Please go to "
"https://sites.google.com/a/chromium.org/chromedriver/ "
"and download the latest version to your system PATH! "
"Or use: ``seleniumbase install chromedriver`` . "
"Original Exception Message: %s" % exc_message)
exc_message = update
return exc_message
def __add_delayed_assert_failure(self):
""" Add a delayed_assert failure into a list for future processing. """
current_url = self.driver.current_url
message = self.__get_exception_message()
self.__delayed_assert_failures.append(
"CHECK #%s: (%s)\n %s" % (
self.__delayed_assert_count, current_url, message))
def delayed_assert_element(self, selector, by=By.CSS_SELECTOR,
timeout=None):
""" A non-terminating assertion for an element on a page.
Failures will be saved until the process_delayed_asserts()
method is called from inside a test, likely at the end of it. """
if not timeout:
timeout = settings.MINI_TIMEOUT
if self.timeout_multiplier and timeout == settings.MINI_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__delayed_assert_count += 1
try:
url = self.get_current_url()
if url == self.__last_url_of_delayed_assert:
timeout = 1
else:
self.__last_url_of_delayed_assert = url
except Exception:
pass
try:
self.wait_for_element_visible(selector, by=by, timeout=timeout)
return True
except Exception:
self.__add_delayed_assert_failure()
return False
def delayed_assert_text(self, text, selector="html", by=By.CSS_SELECTOR,
timeout=None):
""" A non-terminating assertion for text from an element on a page.
Failures will be saved until the process_delayed_asserts()
method is called from inside a test, likely at the end of it. """
if not timeout:
timeout = settings.MINI_TIMEOUT
if self.timeout_multiplier and timeout == settings.MINI_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.__delayed_assert_count += 1
try:
url = self.get_current_url()
if url == self.__last_url_of_delayed_assert:
timeout = 1
else:
self.__last_url_of_delayed_assert = url
except Exception:
pass
try:
self.wait_for_text_visible(text, selector, by=by, timeout=timeout)
return True
except Exception:
self.__add_delayed_assert_failure()
return False
def process_delayed_asserts(self, print_only=False):
""" To be used with any test that uses delayed_asserts, which are
non-terminating verifications that only raise exceptions
after this method is called.
This is useful for pages with multiple elements to be checked when
you want to find as many bugs as possible in a single test run
before having all the exceptions get raised simultaneously.
Might be more useful if this method is called after processing all
the delayed asserts on a single html page so that the failure
screenshot matches the location of the delayed asserts.
If "print_only" is set to True, the exception won't get raised. """
if self.__delayed_assert_failures:
exception_output = ''
exception_output += "\n*** DELAYED ASSERTION FAILURES FOR: "
exception_output += "%s\n" % self.id()
all_failing_checks = self.__delayed_assert_failures
self.__delayed_assert_failures = []
for tb in all_failing_checks:
exception_output += "%s\n" % tb
if print_only:
print(exception_output)
else:
raise Exception(exception_output)
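# A usage sketch for the delayed-assert pattern above (hypothetical
# selectors; failures accumulate and are raised together at the end):
#     self.delayed_assert_element("header#logo")
#     self.delayed_assert_text("Checkout", "button#buy")
#     self.process_delayed_asserts()  # Raises now if any check failed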
############
def __js_click(self, selector, by=By.CSS_SELECTOR):
""" Clicks an element using pure JS. Does not use jQuery. """
selector, by = self.__recalculate_selector(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
script = ("""var simulateClick = function (elem) {
var evt = new MouseEvent('click', {
bubbles: true,
cancelable: true,
view: window
});
var canceled = !elem.dispatchEvent(evt);
};
var someLink = document.querySelector('%s');
simulateClick(someLink);"""
% css_selector)
self.execute_script(script)
def __js_click_all(self, selector, by=By.CSS_SELECTOR):
""" Clicks all matching elements using pure JS. (No jQuery) """
selector, by = self.__recalculate_selector(selector, by)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
script = ("""var simulateClick = function (elem) {
var evt = new MouseEvent('click', {
bubbles: true,
cancelable: true,
view: window
});
var canceled = !elem.dispatchEvent(evt);
};
var $elements = document.querySelectorAll('%s');
var index = 0, length = $elements.length;
for(; index < length; index++){
simulateClick($elements[index]);}"""
% css_selector)
self.execute_script(script)
def __jquery_click(self, selector, by=By.CSS_SELECTOR):
""" Clicks an element using jQuery. Different from using pure JS. """
selector, by = self.__recalculate_selector(selector, by)
self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
selector = self.convert_to_css_selector(selector, by=by)
selector = self.__make_css_match_first_element_only(selector)
click_script = """jQuery('%s')[0].click()""" % selector
self.safe_execute_script(click_script)
def __get_href_from_link_text(self, link_text, hard_fail=True):
href = self.get_link_attribute(link_text, "href", hard_fail)
if not href:
return None
if href.startswith('//'):
link = "http:" + href
elif href.startswith('/'):
url = self.driver.current_url
domain_url = self.get_domain_url(url)
link = domain_url + href
else:
link = href
return link
def __click_dropdown_link_text(self, link_text, link_css):
""" When a link may be hidden under a dropdown menu, use this. """
soup = self.get_beautiful_soup()
drop_down_list = []
for item in soup.select('li[class]'):
drop_down_list.append(item)
csstype = link_css.split('[')[1].split('=')[0]
for item in drop_down_list:
item_text_list = item.text.split('\n')
if link_text in item_text_list and csstype in item.decode():
dropdown_css = ""
try:
for css_class in item['class']:
dropdown_css += '.'
dropdown_css += css_class
except Exception:
continue
dropdown_css = item.name + dropdown_css
matching_dropdowns = self.find_visible_elements(dropdown_css)
for dropdown in matching_dropdowns:
# The same class names might be used for multiple dropdowns
if dropdown.is_displayed():
try:
try:
page_actions.hover_element(
self.driver, dropdown)
except Exception:
# If hovering fails, driver is likely outdated
# Time to go directly to the hidden link text
self.open(self.__get_href_from_link_text(
link_text))
return True
page_actions.hover_element_and_click(
self.driver, dropdown, link_text,
click_by=By.LINK_TEXT, timeout=0.12)
return True
except Exception:
pass
return False
def __get_href_from_partial_link_text(self, link_text, hard_fail=True):
href = self.get_partial_link_text_attribute(
link_text, "href", hard_fail)
if not href:
return None
if href.startswith('//'):
link = "http:" + href
elif href.startswith('/'):
url = self.driver.current_url
domain_url = self.get_domain_url(url)
link = domain_url + href
else:
link = href
return link
def __click_dropdown_partial_link_text(self, link_text, link_css):
""" When a partial link may be hidden under a dropdown, use this. """
soup = self.get_beautiful_soup()
drop_down_list = []
for item in soup.select('li[class]'):
drop_down_list.append(item)
csstype = link_css.split('[')[1].split('=')[0]
for item in drop_down_list:
item_text_list = item.text.split('\n')
if link_text in item_text_list and csstype in item.decode():
dropdown_css = ""
try:
for css_class in item['class']:
dropdown_css += '.'
dropdown_css += css_class
except Exception:
continue
dropdown_css = item.name + dropdown_css
matching_dropdowns = self.find_visible_elements(dropdown_css)
for dropdown in matching_dropdowns:
# The same class names might be used for multiple dropdowns
if dropdown.is_displayed():
try:
try:
page_actions.hover_element(
self.driver, dropdown)
except Exception:
# If hovering fails, driver is likely outdated
# Time to go directly to the hidden link text
self.open(
self.__get_href_from_partial_link_text(
link_text))
return True
page_actions.hover_element_and_click(
    self.driver, dropdown, link_text,
    click_by=By.PARTIAL_LINK_TEXT, timeout=0.12)
return True
except Exception:
pass
return False
def __recalculate_selector(self, selector, by):
# Try to determine the type of selector automatically
if page_utils.is_xpath_selector(selector):
by = By.XPATH
if page_utils.is_link_text_selector(selector):
selector = page_utils.get_link_text_from_selector(selector)
by = By.LINK_TEXT
if page_utils.is_partial_link_text_selector(selector):
selector = page_utils.get_partial_link_text_from_selector(selector)
by = By.PARTIAL_LINK_TEXT
if page_utils.is_name_selector(selector):
name = page_utils.get_name_from_selector(selector)
selector = '[name="%s"]' % name
by = By.CSS_SELECTOR
return (selector, by)
def __make_css_match_first_element_only(self, selector):
# Only get the first match
return page_utils.make_css_match_first_element_only(selector)
def __demo_mode_pause_if_active(self, tiny=False):
if self.demo_mode:
wait_time = settings.DEFAULT_DEMO_MODE_TIMEOUT
if self.demo_sleep:
wait_time = float(self.demo_sleep)
if not tiny:
time.sleep(wait_time)
else:
time.sleep(wait_time / 3.4)
elif self.slow_mode:
self.__slow_mode_pause_if_active()
def __slow_mode_pause_if_active(self):
if self.slow_mode:
wait_time = settings.DEFAULT_DEMO_MODE_TIMEOUT
if self.demo_sleep:
wait_time = float(self.demo_sleep)
time.sleep(wait_time)
def __demo_mode_scroll_if_active(self, selector, by):
if self.demo_mode:
self.slow_scroll_to(selector, by=by)
def __demo_mode_highlight_if_active(self, selector, by):
if self.demo_mode:
# Includes self.slow_scroll_to(selector, by=by) by default
self.highlight(selector, by=by)
elif self.slow_mode:
# Just do the slow scroll part of the highlight() method
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
try:
self.__slow_scroll_to_element(element)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
self.__slow_scroll_to_element(element)
def __scroll_to_element(self, element, selector=None, by=By.CSS_SELECTOR):
success = js_utils.scroll_to_element(self.driver, element)
if not success and selector:
self.wait_for_ready_state_complete()
element = page_actions.wait_for_element_visible(
self.driver, selector, by, timeout=settings.SMALL_TIMEOUT)
self.__demo_mode_pause_if_active(tiny=True)
def __slow_scroll_to_element(self, element):
js_utils.slow_scroll_to_element(self.driver, element, self.browser)
def __highlight_with_assert_success(
self, message, selector, by=By.CSS_SELECTOR):
selector, by = self.__recalculate_selector(selector, by)
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
try:
selector = self.convert_to_css_selector(selector, by=by)
except Exception:
# Don't highlight if can't convert to CSS_SELECTOR
return
try:
self.__slow_scroll_to_element(element)
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_visible(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
self.__slow_scroll_to_element(element)
o_bs = '' # original_box_shadow
try:
style = element.get_attribute('style')
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_element_visible(
selector, by=By.CSS_SELECTOR, timeout=settings.SMALL_TIMEOUT)
style = element.get_attribute('style')
if style:
if 'box-shadow: ' in style:
box_start = style.find('box-shadow: ')
box_end = style.find(';', box_start) + 1
original_box_shadow = style[box_start:box_end]
o_bs = original_box_shadow
if ":contains" not in selector and ":first" not in selector:
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
self.__highlight_with_js_2(message, selector, o_bs)
else:
selector = self.__make_css_match_first_element_only(selector)
selector = re.escape(selector)
selector = self.__escape_quotes_if_needed(selector)
try:
self.__highlight_with_jquery_2(message, selector, o_bs)
except Exception:
pass # JQuery probably couldn't load. Skip highlighting.
time.sleep(0.065)
def __highlight_with_js_2(self, message, selector, o_bs):
js_utils.highlight_with_js_2(
self.driver, message, selector, o_bs, self.message_duration)
def __highlight_with_jquery_2(self, message, selector, o_bs):
js_utils.highlight_with_jquery_2(
self.driver, message, selector, o_bs, self.message_duration)
############
# Deprecated Methods (Replace these if they're still in your code!)
@decorators.deprecated(
"scroll_click() is deprecated. Use self.click() - It scrolls for you!")
def scroll_click(self, selector, by=By.CSS_SELECTOR):
# DEPRECATED - self.click() now scrolls to the element before clicking.
# self.scroll_to(selector, by=by) # Redundant
self.click(selector, by=by)
@decorators.deprecated(
"update_text_value() is deprecated. Use self.update_text() instead!")
def update_text_value(self, selector, new_value, by=By.CSS_SELECTOR,
timeout=None, retry=False):
# DEPRECATED - self.update_text() should be used instead.
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if page_utils.is_xpath_selector(selector):
by = By.XPATH
self.update_text(
selector, new_value, by=by, timeout=timeout, retry=retry)
@decorators.deprecated(
"jquery_update_text_value() is deprecated. Use jquery_update_text()")
def jquery_update_text_value(self, selector, new_value, by=By.CSS_SELECTOR,
timeout=None):
# DEPRECATED - self.jquery_update_text() should be used instead.
if not timeout:
timeout = settings.LARGE_TIMEOUT
if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
self.jquery_update_text(selector, new_value, by=by, timeout=timeout)
@decorators.deprecated(
"jq_format() is deprecated. Use re.escape() instead!")
def jq_format(self, code):
# DEPRECATED - re.escape() already performs the intended action!
return js_utils._jq_format(code)
############
def setUp(self, masterqa_mode=False):
"""
Be careful if a subclass of BaseCase overrides setUp()
You'll need to add the following line to the subclass setUp() method:
super(SubClassOfBaseCase, self).setUp()
"""
self.masterqa_mode = masterqa_mode
self.is_pytest = None
try:
# This raises an exception if the test is not coming from pytest
self.is_pytest = sb_config.is_pytest
except Exception:
# Not using pytest (probably nosetests)
self.is_pytest = False
if self.is_pytest:
# pytest-specific code
test_id = self.__get_test_id()
self.browser = sb_config.browser
self.data = sb_config.data
self.var1 = sb_config.var1
self.var2 = sb_config.var2
self.var3 = sb_config.var3
self.slow_mode = sb_config.slow_mode
self.demo_mode = sb_config.demo_mode
self.demo_sleep = sb_config.demo_sleep
self.highlights = sb_config.highlights
self.time_limit = sb_config.time_limit
self.environment = sb_config.environment
self.env = self.environment # Add a shortened version
self.with_selenium = sb_config.with_selenium # Should be True
self.headless = sb_config.headless
self.headless_active = False
self.headed = sb_config.headed
self.start_page = sb_config.start_page
self.log_path = sb_config.log_path
self.with_testing_base = sb_config.with_testing_base
self.with_basic_test_info = sb_config.with_basic_test_info
self.with_screen_shots = sb_config.with_screen_shots
self.with_page_source = sb_config.with_page_source
self.with_db_reporting = sb_config.with_db_reporting
self.with_s3_logging = sb_config.with_s3_logging
self.servername = sb_config.servername
self.port = sb_config.port
self.proxy_string = sb_config.proxy_string
self.user_agent = sb_config.user_agent
self.mobile_emulator = sb_config.mobile_emulator
self.device_metrics = sb_config.device_metrics
self.cap_file = sb_config.cap_file
self.cap_string = sb_config.cap_string
self.settings_file = sb_config.settings_file
self.database_env = sb_config.database_env
self.message_duration = sb_config.message_duration
self.js_checking_on = sb_config.js_checking_on
self.ad_block_on = sb_config.ad_block_on
self.verify_delay = sb_config.verify_delay
self.disable_csp = sb_config.disable_csp
self.enable_sync = sb_config.enable_sync
self.use_auto_ext = sb_config.use_auto_ext
self.no_sandbox = sb_config.no_sandbox
self.disable_gpu = sb_config.disable_gpu
self.incognito = sb_config.incognito
self.guest_mode = sb_config.guest_mode
self.devtools = sb_config.devtools
self.user_data_dir = sb_config.user_data_dir
self.extension_zip = sb_config.extension_zip
self.extension_dir = sb_config.extension_dir
self.maximize_option = sb_config.maximize_option
self._reuse_session = sb_config.reuse_session
self.save_screenshot_after_test = sb_config.save_screenshot
self.visual_baseline = sb_config.visual_baseline
self.timeout_multiplier = sb_config.timeout_multiplier
self.pytest_html_report = sb_config.pytest_html_report
self.report_on = False
if self.pytest_html_report:
self.report_on = True
self.use_grid = False
if self.servername != "localhost":
# Use Selenium Grid (Use --server="127.0.0.1" for a local Grid)
self.use_grid = True
if self.with_db_reporting:
from seleniumbase.core.application_manager import (
ApplicationManager)
from seleniumbase.core.testcase_manager import (
ExecutionQueryPayload)
import getpass
self.execution_guid = str(uuid.uuid4())
self.testcase_guid = None
self.execution_start_time = 0
self.case_start_time = 0
self.application = None
self.testcase_manager = None
self.error_handled = False
self.testcase_manager = TestcaseManager(self.database_env)
#
exec_payload = ExecutionQueryPayload()
exec_payload.execution_start_time = int(time.time() * 1000)
self.execution_start_time = exec_payload.execution_start_time
exec_payload.guid = self.execution_guid
exec_payload.username = getpass.getuser()
self.testcase_manager.insert_execution_data(exec_payload)
#
data_payload = TestcaseDataPayload()
self.testcase_guid = str(uuid.uuid4())
data_payload.guid = self.testcase_guid
data_payload.execution_guid = self.execution_guid
if self.with_selenium:
data_payload.browser = self.browser
else:
data_payload.browser = "N/A"
data_payload.test_address = test_id
application = ApplicationManager.generate_application_string(
self._testMethodName)
data_payload.env = application.split('.')[0]
data_payload.start_time = application.split('.')[1]
data_payload.state = constants.State.NOTRUN
self.testcase_manager.insert_testcase_data(data_payload)
self.case_start_time = int(time.time() * 1000)
if self.headless:
width = settings.HEADLESS_START_WIDTH
height = settings.HEADLESS_START_HEIGHT
try:
# from pyvirtualdisplay import Display # Skip for own lib
from seleniumbase.virtual_display.display import Display
self.display = Display(visible=0, size=(width, height))
self.display.start()
self.headless_active = True
except Exception:
# pyvirtualdisplay might not be necessary anymore because
# Chrome and Firefox now have built-in headless displays
pass
else:
# (Nosetests / Not Pytest)
pass # Setup performed in plugins
# Verify that SeleniumBase is installed successfully
if not hasattr(self, "browser"):
raise Exception("""SeleniumBase plugins DID NOT load!\n\n"""
"""*** Please REINSTALL SeleniumBase using: >\n"""
""" >>> "pip install -r requirements.txt"\n"""
""" >>> "python setup.py install" """)
# Configure the test time limit (if used)
self.set_time_limit(self.time_limit)
# Set the start time for the test (in ms)
sb_config.start_time_ms = int(time.time() * 1000.0)
# Parse the settings file
if self.settings_file:
settings_parser.set_settings(self.settings_file)
# Mobile Emulator device metrics: CSS Width, CSS Height, & Pixel-Ratio
# Initialize defaults first so get_new_driver() can always pass them
self.__device_width = None
self.__device_height = None
self.__device_pixel_ratio = None
if self.device_metrics:
metrics_string = self.device_metrics
metrics_string = metrics_string.replace(' ', '')
metrics_list = metrics_string.split(',')
exception_string = (
'Invalid input for Mobile Emulator device metrics!\n'
'Expecting a comma-separated string with three\n'
'integer values for Width, Height, and Pixel-Ratio.\n'
'Example: --metrics="411,731,3" ')
if len(metrics_list) != 3:
raise Exception(exception_string)
try:
self.__device_width = int(metrics_list[0])
self.__device_height = int(metrics_list[1])
self.__device_pixel_ratio = int(metrics_list[2])
self.mobile_emulator = True
except Exception:
raise Exception(exception_string)
if self.mobile_emulator:
if not self.user_agent:
# Use the Pixel 3 user agent by default if not specified
self.user_agent = (
"Mozilla/5.0 (Linux; Android 9; Pixel 3 XL) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/76.0.3809.132 Mobile Safari/537.36")
has_url = False
if self._reuse_session:
if not hasattr(sb_config, 'shared_driver'):
sb_config.shared_driver = None
if sb_config.shared_driver:
try:
self._default_driver = sb_config.shared_driver
self.driver = sb_config.shared_driver
self._drivers_list = [sb_config.shared_driver]
url = self.get_current_url()
if len(url) > 3:
has_url = True
except Exception:
pass
if self._reuse_session and sb_config.shared_driver and has_url:
if self.start_page and len(self.start_page) >= 4:
if page_utils.is_valid_url(self.start_page):
self.open(self.start_page)
else:
new_start_page = "http://" + self.start_page
if page_utils.is_valid_url(new_start_page):
self.open(new_start_page)
else:
if self.get_current_url() != "data:,":
self.open("data:,")
else:
# Launch WebDriver for both Pytest and Nosetests
self.driver = self.get_new_driver(browser=self.browser,
headless=self.headless,
servername=self.servername,
port=self.port,
proxy=self.proxy_string,
agent=self.user_agent,
switch_to=True,
cap_file=self.cap_file,
cap_string=self.cap_string,
disable_csp=self.disable_csp,
enable_sync=self.enable_sync,
use_auto_ext=self.use_auto_ext,
no_sandbox=self.no_sandbox,
disable_gpu=self.disable_gpu,
incognito=self.incognito,
guest_mode=self.guest_mode,
devtools=self.devtools,
user_data_dir=self.user_data_dir,
extension_zip=self.extension_zip,
extension_dir=self.extension_dir,
is_mobile=self.mobile_emulator,
d_width=self.__device_width,
d_height=self.__device_height,
d_p_r=self.__device_pixel_ratio)
self._default_driver = self.driver
if self._reuse_session:
sb_config.shared_driver = self.driver
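# A sketch of the subclass pattern described in the setUp() docstring
# (hypothetical class name and method bodies):
#     class MyTestClass(BaseCase):
#         def setUp(self):
#             super(MyTestClass, self).setUp()
#             ...  # Custom setup steps go here
#         def tearDown(self):
#             self.save_teardown_screenshot()
#             ...  # Custom teardown steps go here
#             super(MyTestClass, self).tearDown()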
def __set_last_page_screenshot(self):
""" self.__last_page_screenshot is only for pytest html report logs
self.__last_page_screenshot_png is for all screenshot log files """
if not self.__last_page_screenshot and (
not self.__last_page_screenshot_png):
try:
element = self.driver.find_element(
by=By.TAG_NAME, value="body")
if self.is_pytest and self.report_on:
self.__last_page_screenshot_png = (
self.driver.get_screenshot_as_png())
self.__last_page_screenshot = element.screenshot_as_base64
else:
self.__last_page_screenshot_png = element.screenshot_as_png
except Exception:
if not self.__last_page_screenshot:
if self.is_pytest and self.report_on:
try:
self.__last_page_screenshot = (
self.driver.get_screenshot_as_base64())
except Exception:
pass
if not self.__last_page_screenshot_png:
try:
self.__last_page_screenshot_png = (
self.driver.get_screenshot_as_png())
except Exception:
pass
def __set_last_page_url(self):
if not self.__last_page_url:
try:
self.__last_page_url = log_helper.get_last_page(self.driver)
except Exception:
self.__last_page_url = None
def __set_last_page_source(self):
if not self.__last_page_source:
try:
self.__last_page_source = (
log_helper.get_html_source_with_base_href(
self.driver, self.driver.page_source))
except Exception:
self.__last_page_source = None
def __insert_test_result(self, state, err):
data_payload = TestcaseDataPayload()
data_payload.runtime = int(time.time() * 1000) - self.case_start_time
data_payload.guid = self.testcase_guid
data_payload.execution_guid = self.execution_guid
data_payload.state = state
if err:
import traceback
tb_string = traceback.format_exc()
if "Message: " in tb_string:
data_payload.message = "Message: " + tb_string.split(
"Message: ")[-1]
elif "Exception: " in tb_string:
data_payload.message = tb_string.split("Exception: ")[-1]
elif "Error: " in tb_string:
data_payload.message = tb_string.split("Error: ")[-1]
else:
data_payload.message = "Unknown Error: See Stacktrace"
self.testcase_manager.update_testcase_data(data_payload)
def __add_pytest_html_extra(self):
if not self.__added_pytest_html_extra:
try:
if self.with_selenium:
if not self.__last_page_screenshot:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
if self.report_on:
extra_url = {}
extra_url['name'] = 'URL'
extra_url['format'] = 'url'
extra_url['content'] = self.get_current_url()
extra_url['mime_type'] = None
extra_url['extension'] = None
extra_image = {}
extra_image['name'] = 'Screenshot'
extra_image['format'] = 'image'
extra_image['content'] = self.__last_page_screenshot
extra_image['mime_type'] = 'image/png'
extra_image['extension'] = 'png'
self.__added_pytest_html_extra = True
self._html_report_extra.append(extra_url)
self._html_report_extra.append(extra_image)
except Exception:
pass
def __quit_all_drivers(self):
if self._reuse_session and sb_config.shared_driver:
if len(self._drivers_list) > 0:
sb_config.shared_driver = self._drivers_list[0]
self._default_driver = self._drivers_list[0]
self.switch_to_default_driver()
if len(self._drivers_list) > 1:
self._drivers_list = self._drivers_list[1:]
else:
self._drivers_list = []
# Close all open browser windows
self._drivers_list.reverse() # Last In, First Out
for driver in self._drivers_list:
try:
driver.quit()
except AttributeError:
pass
except Exception:
pass
self.driver = None
self._default_driver = None
self._drivers_list = []
def __has_exception(self):
has_exception = False
if sys.version_info[0] >= 3 and hasattr(self, '_outcome'):
if hasattr(self._outcome, 'errors') and self._outcome.errors:
has_exception = True
else:
has_exception = sys.exc_info()[1] is not None
return has_exception
def __get_test_id(self):
test_id = "%s.%s.%s" % (self.__class__.__module__,
self.__class__.__name__,
self._testMethodName)
return test_id
def __create_log_path_as_needed(self, test_logpath):
if not os.path.exists(test_logpath):
try:
os.makedirs(test_logpath)
except Exception:
pass # Only reachable during multi-threaded runs
def save_teardown_screenshot(self):
""" (Should ONLY be used at the start of custom tearDown() methods.)
This method takes a screenshot of the current web page for a
failing test (or when running your tests with --save-screenshot).
That way your tearDown() method can navigate away from the last
page where the test failed, and still get the correct screenshot
before performing tearDown() steps on other pages. If this method
is not included in your custom tearDown() method, a screenshot
will still be taken after the last step of your tearDown(), where
you should be calling "super(SubClassOfBaseCase, self).tearDown()"
"""
if self.__has_exception() or self.save_screenshot_after_test:
test_id = self.__get_test_id()
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
if self.is_pytest:
self.__add_pytest_html_extra()
def tearDown(self):
"""
Be careful if a subclass of BaseCase overrides tearDown()
You'll need to add the following line to the subclass's tearDown():
super(SubClassOfBaseCase, self).tearDown()
"""
self.__slow_mode_pause_if_active()
has_exception = self.__has_exception()
if self.__delayed_assert_failures:
print(
"\nWhen using self.delayed_assert_*() methods in your tests, "
"remember to call self.process_delayed_asserts() afterwards. "
"Now calling in tearDown()...\nFailures Detected:")
if not has_exception:
self.process_delayed_asserts()
else:
self.process_delayed_asserts(print_only=True)
if self.is_pytest:
# pytest-specific code
test_id = self.__get_test_id()
try:
with_selenium = self.with_selenium
except Exception:
sub_class_name = str(
self.__class__.__bases__[0]).split('.')[-1].split("'")[0]
sub_file_name = str(self.__class__.__bases__[0]).split('.')[-2]
sub_file_name = sub_file_name + ".py"
class_name = str(self.__class__).split('.')[-1].split("'")[0]
file_name = str(self.__class__).split('.')[-2] + ".py"
class_name_used = sub_class_name
file_name_used = sub_file_name
if sub_class_name == "BaseCase":
class_name_used = class_name
file_name_used = file_name
fix_setup = "super(%s, self).setUp()" % class_name_used
fix_teardown = "super(%s, self).tearDown()" % class_name_used
message = ("You're overriding SeleniumBase's BaseCase setUp() "
"method with your own setUp() method, which breaks "
"SeleniumBase. You can fix this by going to your "
"%s class located in your %s file and adding the "
"following line of code AT THE BEGINNING of your "
"setUp() method:\n%s\n\nAlso make sure "
"you have added the following line of code AT THE "
"END of your tearDown() method:\n%s\n"
% (class_name_used, file_name_used,
fix_setup, fix_teardown))
raise Exception(message)
if with_selenium:
# Save a screenshot if logging is on when an exception occurs
if has_exception:
self.__add_pytest_html_extra()
if self.with_testing_base and not has_exception and (
self.save_screenshot_after_test):
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
self.__add_pytest_html_extra()
if self.with_testing_base and has_exception:
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
if ((not self.with_screen_shots) and (
not self.with_basic_test_info) and (
not self.with_page_source)):
# Log everything if nothing specified (if testing_base)
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
log_helper.log_test_failure_data(
self, test_logpath, self.driver, self.browser,
self.__last_page_url)
log_helper.log_page_source(
test_logpath, self.driver, self.__last_page_source)
else:
if self.with_screen_shots:
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
if self.with_basic_test_info:
log_helper.log_test_failure_data(
self, test_logpath, self.driver, self.browser,
self.__last_page_url)
if self.with_page_source:
log_helper.log_page_source(
test_logpath, self.driver,
self.__last_page_source)
# (Pytest) Finally close all open browser windows
self.__quit_all_drivers()
if self.headless:
if self.headless_active:
try:
self.display.stop()
except AttributeError:
pass
except Exception:
pass
self.display = None
if self.with_db_reporting:
if has_exception:
self.__insert_test_result(constants.State.ERROR, True)
else:
self.__insert_test_result(constants.State.PASS, False)
runtime = int(time.time() * 1000) - self.execution_start_time
self.testcase_manager.update_execution_data(
self.execution_guid, runtime)
if self.with_s3_logging and has_exception:
""" If enabled, upload logs to S3 during test exceptions. """
from seleniumbase.core.s3_manager import S3LoggingBucket
s3_bucket = S3LoggingBucket()
guid = str(uuid.uuid4().hex)
path = "%s/%s" % (self.log_path, test_id)
uploaded_files = []
for logfile in os.listdir(path):
logfile_name = "%s/%s/%s" % (guid,
test_id,
logfile.split(path)[-1])
s3_bucket.upload_file(logfile_name,
"%s/%s" % (path, logfile))
uploaded_files.append(logfile_name)
s3_bucket.save_uploaded_file_names(uploaded_files)
index_file = s3_bucket.upload_index_file(test_id, guid)
print("\n\n*** Log files uploaded: ***\n%s\n" % index_file)
logging.info(
"\n\n*** Log files uploaded: ***\n%s\n" % index_file)
if self.with_db_reporting:
self.testcase_manager = TestcaseManager(self.database_env)
data_payload = TestcaseDataPayload()
data_payload.guid = self.testcase_guid
data_payload.logURL = index_file
self.testcase_manager.update_testcase_log_url(data_payload)
else:
# (Nosetests)
if has_exception:
test_id = self.__get_test_id()
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
log_helper.log_test_failure_data(
self, test_logpath, self.driver, self.browser,
self.__last_page_url)
if len(self._drivers_list) > 0:
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
log_helper.log_page_source(
test_logpath, self.driver, self.__last_page_source)
elif self.save_screenshot_after_test:
test_id = self.__get_test_id()
test_logpath = self.log_path + "/" + test_id
self.__create_log_path_as_needed(test_logpath)
if not self.__last_page_screenshot_png:
self.__set_last_page_screenshot()
self.__set_last_page_url()
self.__set_last_page_source()
log_helper.log_screenshot(
test_logpath,
self.driver,
self.__last_page_screenshot_png)
if self.report_on:
self._last_page_screenshot = self.__last_page_screenshot_png
try:
self._last_page_url = self.get_current_url()
except Exception:
self._last_page_url = "(Error: Unknown URL)"
# Finally close all open browser windows
self.__quit_all_drivers()
|
from __future__ import annotations
import re
from checkov.common.bridgecrew.integration_features.features.policy_metadata_integration import (
integration as metadata_integration,
)
from checkov.common.comment.enum import COMMENT_REGEX
from checkov.common.models.enums import CheckResult
from checkov.common.typing import _CheckResult, _SkippedCheck
def collect_suppressions_for_report(code_lines: list[tuple[int, str]]) -> dict[str, _CheckResult]:
"""Searches for suppressions in a config block to be used in a report"""
suppressions = {}
for _, line in code_lines:
skip_search = re.search(COMMENT_REGEX, line)
if skip_search:
check_result: _CheckResult = {
"result": CheckResult.SKIPPED,
"suppress_comment": skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided",
}
suppressions[skip_search.group(2)] = check_result
return suppressions
def collect_suppressions_for_context(code_lines: list[tuple[int, int | str]]) -> list[_SkippedCheck]:
"""Searches for suppressions in a config block to be used in a context"""
skipped_checks = []
bc_id_mapping = metadata_integration.bc_to_ckv_id_mapping
for _, line in code_lines:
skip_search = re.search(COMMENT_REGEX, str(line))
if skip_search:
skipped_check: _SkippedCheck = {
"id": skip_search.group(2),
"suppress_comment": skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided",
}
# No matter which ID was used to skip, save the pair of IDs in the appropriate fields
if bc_id_mapping and skipped_check["id"] in bc_id_mapping:
skipped_check["bc_id"] = skipped_check["id"]
skipped_check["id"] = bc_id_mapping[skipped_check["id"]]
elif metadata_integration.check_metadata:
skipped_check["bc_id"] = metadata_integration.get_bc_id(skipped_check["id"])
skipped_checks.append(skipped_check)
return skipped_checks
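
# Usage sketch (hypothetical sample; assumes checkov's inline-skip syntax
# "#checkov:skip=<check_id>:<optional comment>", which COMMENT_REGEX matches):
#
#     code_lines = [
#         (1, 'resource "aws_s3_bucket" "b" {'),
#         (2, '  #checkov:skip=CKV_AWS_18:Access logging not required here'),
#         (3, '}'),
#     ]
#     collect_suppressions_for_report(code_lines)
#     # -> {'CKV_AWS_18': {'result': CheckResult.SKIPPED,
#     #                    'suppress_comment': 'Access logging not required here'}}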
|
import unittest
from MusicTheory.pitch.Accidental import Accidental
import Framework.ConstMeta
"""
Degreeのテスト。
"""
class TestAccidental(unittest.TestCase):
def test_Accidentals(self):
self.assertEqual(Accidental.Accidentals, {'♯': 1, '#': 1, '+': 1, '♭': -1, 'b': -1, '-': -1})
def test_Accidentals_NotSet(self):
with self.assertRaises(Framework.ConstMeta.ConstMeta.ConstError) as e:
Accidental.Accidentals = 'some value.'
self.assertEqual('readonly。再代入禁止です。', str(e.exception))
def test_Get(self):
for count in range(1, 4):
for name, interval in Accidental.Accidentals.items():
if not name: continue
                with self.subTest(accidental=name, count=count):
self.assertEqual(Accidental.Get(name * count), interval * count)
def test_Get_None(self): self.assertEqual(Accidental.Get(None), 0)
def test_Get_Blank(self): self.assertEqual(Accidental.Get(''), 0)
def test_Get_int(self):
with self.assertRaises(TypeError) as e:
Accidental.Get(100)
self.assertIn('引数accidentalは文字列型にしてください。', str(e.exception))
def test_Get_NotSameChars(self):
with self.assertRaises(ValueError) as e:
Accidental.Get('無効な文字')
self.assertIn('引数accidentalは同じ文字のみ連続使用を許されます。異なる文字を混在させることはできません。', str(e.exception))
def test_Get_Invalid(self):
with self.assertRaises(ValueError) as e:
Accidental.Get('無無無')
self.assertIn('引数accidentalに使える文字は次のものだけです。', str(e.exception))
def test_Get_Valid_NotSameChars(self):
with self.assertRaises(ValueError) as e:
Accidental.Get('+-')
self.assertIn('引数accidentalは同じ文字のみ連続使用を許されます。異なる文字を混在させることはできません。', str(e.exception))
if __name__ == '__main__':
unittest.main()
|
import numpy as np
def map_reg_to_text(reg_code):
reg_dict = ("rip", "rsp", "rax", "rbx", "rcx", "rdx", "cs", "ss", "eflags", "rbp", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15", "rsi", "rdi", "orig_rax", "fs_base", "gs_base",
"ds", "es", "fs", "gs")
return reg_dict[reg_code]
class ucXception_fi_parser:
def parse(self, inj_time, reg, bit, chosen_thread, stdout, stderr):
row = {}
row["inj_time"] = inj_time
row["reg"] = map_reg_to_text(reg)
row["reg_d"] = np.int32(reg)
row["bit"] = np.int32(bit)
row["pid"] = np.int32(chosen_thread)
# Get the values of old and new registers
prefix = "none"
for line in stdout.split("\n")[:-1]:
if "Old register values" in line:
prefix="old_"
elif "New register values" in line:
prefix="new_"
else:
(reg_name, reg_val) = line.split(": ")
reg_name = reg_name.rstrip().lower()
reg_val = "0x%s" % reg_val.rstrip()
                row[prefix + reg_name] = reg_val
                # We also add the register value in decimal
                row[prefix + reg_name + "_d"] = int(reg_val, 16)  # np.int was just an alias of the builtin int and was removed in NumPy 1.24
return row
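
# Hypothetical usage sketch: "sample_stdout" mimics the injector output parsed above
# (header lines followed by "NAME: hexvalue" lines); all values are made up.
if __name__ == "__main__":
    sample_stdout = ("Old register values\n"
                     "RIP: 00000000004005d0\n"
                     "New register values\n"
                     "RIP: 00000000004005d1\n")
    parser = ucXception_fi_parser()
    print(parser.parse(inj_time=0.5, reg=0, bit=3, chosen_thread=1234,
                       stdout=sample_stdout, stderr=""))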
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azext_databricks._help import helps # pylint: disable=unused-import
class DatabricksClientCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
from azext_databricks._client_factory import cf_databricks
databricks_custom = CliCommandType(
operations_tmpl='azext_databricks.custom#{}',
client_factory=cf_databricks)
super().__init__(cli_ctx=cli_ctx,
custom_command_type=databricks_custom)
def load_command_table(self, args):
from azext_databricks.commands import load_command_table
load_command_table(self, args)
return self.command_table
def load_arguments(self, command):
from azext_databricks._params import load_arguments
load_arguments(self, command)
COMMAND_LOADER_CLS = DatabricksClientCommandsLoader
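
# The Azure CLI discovers this extension by importing the package and reading the
# COMMAND_LOADER_CLS attribute above; no further registration is required.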
|
from django.shortcuts import render,HttpResponse,redirect
from .models import books
from django.contrib.auth.models import User
from django.contrib.auth import login,logout,authenticate
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from . forms import sellbookform, ContactForm
from .models import Order,TrackUpdate
from django.core.mail import send_mail
def loginsignup(request):
return render(request,'home/loginlink.html')
def home(request):
allProds = []
book = books.objects.all()
categories = books.objects.values('category')
ca = {item['category'] for item in categories}
cats = list(ca)
for cat in cats:
prod = books.objects.filter(category = cat)
allProds.append([prod,range(len(prod))])
params = {'books':book, 'cats':cats, 'allProds':allProds}
return render(request,'home/home.html',params)
def handleSignup(request):
if request.method =='POST':
username = request.POST['username']
email = request.POST['signupemail']
fname = request.POST['fname']
lname = request.POST['lname']
pass1 = request.POST['pass1']
pass2 = request.POST['pass2']
if len(username) > 25:
messages.error(request, "User name must be under 25 Characters")
return redirect('/')
if pass1 != pass2:
messages.error(request, "Password do not match")
return redirect('/')
myuser = User.objects.create_user(username=username,email=email,password=pass2)
myuser.first_name = fname
myuser.last_name = lname
myuser.save()
        messages.success(request, 'Your account has been created successfully')
return redirect('/')
else:
return HttpResponse('NOT ALLOWED')
def handleLogin(request):
loginusername = request.POST['loginusername']
loginpass = request.POST['loginpass']
user = authenticate(username=loginusername,password=loginpass)
if user is not None:
login(request,user)
messages.success(request,"Successfully Logged In ")
return redirect('/')
else:
messages.error(request,"Please Enter the username or password correctly!")
return redirect('/')
@login_required(login_url='/loginsignup')
def handleLogout(request):
logout(request)
messages.success(request,"Successfully logged out")
return redirect('/')
@login_required(login_url='/loginsignup')
def sellbook(request):
context ={'form': sellbookform()}
return render(request, "home/sellbook.html", context)
@login_required(login_url='/loginsignup')
def savebook(request):
sellername = request.user.username
book_name = request.POST.get('book_name')
category = request.POST.get('category')
price = request.POST.get('price')
image = request.FILES['image']
pickuplocation = request.POST.get('pickuplocation')
slug = book_name.replace(" ", "+") + "+by+" + str(sellername)
newbook = books.objects.create(sellername=sellername, book_name = book_name, category = category, price= price,image= image,pickuplocation = pickuplocation, slug= slug)
    try:
        newbook.save()
        messages.success(request, 'Your post has been added successfully. Thank you for your great effort.')
    except Exception:
        messages.error(request, "Sorry! Unable to process your request.")
return redirect('/')
@login_required(login_url='/loginsignup')
def checkout(request):
if request.method=="POST":
items_json = request.POST.get('itemsJson', '')
name = request.POST.get('name', '')
email = request.POST.get('email', '')
address = request.POST.get('address1', '') + " " + request.POST.get('address2', '')
city = request.POST.get('city', '')
state = request.POST.get('state', '')
zip_code = request.POST.get('zip_code', '')
phone = request.POST.get('phone', '')
order = Order(items_json=items_json, name=name, email=email, address=address, city=city,
state=state, zip_code=zip_code, phone=phone)
order.save()
updateorder = TrackUpdate(order_id=order.order_id,update="Your Order Is Placed")
updateorder.save()
thank = True
id = order.order_id
return render(request, 'home/checkout.html', {'thank':thank, 'id': id})
return render(request, 'home/checkout.html')
def TrackOrder(request):
if request.method == "POST":
order_id = request.POST.get('order_id')
updates = TrackUpdate.objects.filter(order_id=order_id)
context = {'updates':updates}
return render(request,'home/updatepage.html',context)
return render(request,'home/trackorder.html')
def search(request):
    searchquery = request.GET['search']
    if len(searchquery) > 200 or len(searchquery) < 4:
        messages.error(request, 'Please enter a search query of 4 to 200 characters')
        return redirect('/')
    allpoststitle = books.objects.filter(book_name__icontains=searchquery)
    allpostscontent = books.objects.filter(category__icontains=searchquery)
    allposts = allpoststitle.union(allpostscontent)
    context = {'allposts': allposts, 'search': searchquery}
    return render(request, 'home/search.html', context)
def contact(request):
name=''
email=''
comment=''
form= ContactForm(request.POST or None)
if form.is_valid():
name= form.cleaned_data.get("name")
email= form.cleaned_data.get("email")
comment=form.cleaned_data.get("comment")
subject= "A Visitor's Comment"
comment= name + " with the email, " + email + ", sent the following message:\n\n" + comment;
send_mail(subject, comment, email , ['gkaur2_be19@thapar.edu'])
context= {'name': name}
return render(request, 'home/contact.html', context)
else:
context= {'form': form}
return render(request, 'home/contact.html', context)
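
# Hypothetical URL wiring for the views above (paths and names are illustrative):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.home, name='home'),
#         path('search/', views.search, name='search'),
#         path('contact/', views.contact, name='contact'),
#     ]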
|
import datetime
import json
import os
import sys
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from utils.flags import FLAGS
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from model.vqa_model import ModelParams, VQAModel
import demo.demo_dataset as dataset
import demo.visualize as visualize
class Inference:
def __init__(self):
self.model = self._load_model()
self.demo_data = dataset.VQAFeatureDataset()
def _get_answer(self, p, dataloader):
_m, idx = p.max(1)
return dataloader.dataset.label2ans[idx.item()]
def _load_model(self):
data_params = json.load(open(FLAGS.data_params_path))
model_params = ModelParams(
add_self_attention=FLAGS.add_self_attention,
fusion_method=FLAGS.fusion_method,
question_sequence_length=dataset.MAX_QUES_SEQ_LEN,
number_of_objects=dataset.NO_OBJECTS,
word_embedding_dimension=data_params["word_feat_dimension"],
object_embedding_dimension=data_params["image_feat_dimension"],
vocabulary_size=data_params["vocabulary_size"],
num_ans_candidates=data_params["number_of_answer_candidiates"],
)
model = VQAModel(
glove_path=FLAGS.glove_path,
model_params=model_params,
hidden_dimension=FLAGS.hidden_dimension,
).cuda()
FLAGS.snapshot_path = (
"/home/rachana/Documents/vizwiz/save_folder/self_cross_3/final"
)
model_path = FLAGS.snapshot_path
print("loading %s" % model_path)
model_data = torch.load(model_path)
model = nn.DataParallel(model).cuda()
model.load_state_dict(model_data.get("model_state", model_data))
model.train(False)
return model
def get_prediction(self, image_id, question, batch_size=1):
self.demo_data.set_input(image_id, question)
demo_data_loader = DataLoader(
self.demo_data,
batch_size,
shuffle=False,
num_workers=1,
)
        visual_feature, bboxes, question = next(iter(demo_data_loader))
visual_feature = Variable(visual_feature).cuda()
bboxes = Variable(bboxes).cuda()
question = Variable(question).cuda()
pred, i_att, q_att = self.model(visual_feature, question)
answer = self._get_answer(pred.data, demo_data_loader)
return (
answer,
i_att,
q_att,
bboxes,
)
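
# Usage sketch (hypothetical image id and question; requires a CUDA device and the
# snapshot/data paths configured through FLAGS):
#
#     inference = Inference()
#     answer, i_att, q_att, bboxes = inference.get_prediction(
#         "VizWiz_val_00000001.jpg", "What is this?")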
|
# This program uses a pie chart to display the percentage of the overall grade contributed by
# projects, quizzes, the midterm exam, and the final exam. Projects are worth 20 percent of the
# grade and are displayed in red, quizzes are 10 percent and displayed in blue, the midterm exam
# is 30 percent and displayed in green, and the final exam is 40 percent and displayed in orange.
from tkinter import * # Import tkinter
import math
radius = 100
width = 300
height = 300
class PieChart:
def __init__(self):
window = Tk() # Create a window
window.title("Pie Chart") # Set a title
self.canvas = Canvas(window, bg="white", width=width, height=height)
self.canvas.pack()
self.drawAPie(0, 360 * 0.2, "red", "Project -- 20%")
self.drawAPie(360 * 0.2, 360 * 0.1, "blue", "Quizzes -- 10%")
self.drawAPie(360 * 0.2 + 360 * 0.1, 360 * 0.3, "green", "Midterm -- 30%")
self.drawAPie(360 * 0.2 + 360 * 0.1 + 360 * 0.3, 360 * 0.4, "orange", "Final -- 40%")
window.mainloop() # Create an event loop
def drawAPie(self, start, extent, color, title):
self.canvas.create_arc(width / 2 - radius, height / 2 - radius,
width / 2 + radius, height / 2 + radius,
start=start, extent=extent, fill=color)
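        # Place the label at the arc's mid-angle (start + extent / 2) on the circle's edge;
        # y is subtracted because canvas y coordinates grow downwards.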
x = width / 2 + radius * math.cos(math.radians(extent / 2 + start))
y = height / 2 - radius * math.sin(math.radians(extent / 2 + start))
self.canvas.create_text(x, y, text=title)
PieChart()
|
import uuid
from typing import Optional, Union
from datetime import datetime, timedelta
from fastapi import Request, HTTPException, status, Depends
from fastapi.security import OAuth2
from fastapi.openapi.models import OAuthFlows as OAuthFlowsModel
from fastapi.security.utils import get_authorization_scheme_param
from jose import JWTError, jwt
from . import schema, db
from .config import settings
class OAuth2PasswordBearerWithCookie(OAuth2):
def __init__(
self,
tokenUrl: str,
        scheme_name: Optional[str] = None,
        scopes: Optional[dict] = None,
auto_error: bool = True,
):
if not scopes:
scopes = {}
flows = OAuthFlowsModel(password={"tokenUrl": tokenUrl, "scopes": scopes})
super().__init__(flows=flows, scheme_name=scheme_name, auto_error=auto_error)
async def __call__(self, request: Request) -> Optional[str]:
authorization: Optional[str] = request.cookies.get("access_token")
if not authorization:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="No cookie",
headers={"WWW-Authenticate": "Bearer"},
)
scheme, param = get_authorization_scheme_param(authorization)
if scheme.lower() != "bearer":
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
return param
oauth2_scheme = OAuth2PasswordBearerWithCookie(tokenUrl="/api/users/login")
async def authenticate_user(username: str, password: str) -> Optional[schema.User]:
user = await db.get_user(username)
if not user:
return None
if user.password_hash is None or password != user.password_hash:
return None
return user
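
# NOTE: authenticate_user() above compares the plaintext password directly against
# password_hash, which only works if passwords are stored in the clear. A real deployment
# would verify a salted hash instead, e.g. with passlib:
#     CryptContext(schemes=["bcrypt"]).verify(password, user.password_hash)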
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, settings.JWT_SECRET_KEY, algorithm=settings.JWT_ALGORITHM)
return encoded_jwt
def create_user_token(user: schema.User):
access_token_expires = timedelta(minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(data={"user_id": str(user.user_id)}, expires_delta=access_token_expires)
return access_token
async def get_current_user(token: str = Depends(oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
user_id: Union[str, uuid.UUID, None] = None
try:
payload = jwt.decode(token, settings.JWT_SECRET_KEY, algorithms=[settings.JWT_ALGORITHM]) # no "alg:none"
user_id = payload.get("user_id")
if user_id is None:
raise credentials_exception
except JWTError:
raise credentials_exception
user_id = uuid.UUID(user_id)
user = await db.get_user_uuid(uuid=user_id)
if user is None:
raise credentials_exception
return user
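
# Usage sketch (hypothetical wiring): protect a route by depending on get_current_user.
#
#     from fastapi import FastAPI
#     app = FastAPI()
#
#     @app.get("/api/users/me")
#     async def read_me(user: schema.User = Depends(get_current_user)):
#         return {"user_id": str(user.user_id)}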
|
class BinaryTree:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def branchSums(root, sum=0, sumsList=None):
if root is None:
return
if sumsList is None: #refer https://stackoverflow.com/a/60202340/6699913
sumsList = []
sum += root.value
if root.left is None and root.right is None:
sumsList.append(sum)
branchSums(root.left, sum, sumsList)
branchSums(root.right, sum, sumsList)
return sumsList
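
if __name__ == "__main__":
    # Quick example: the tree below has root-to-leaf paths 1->2->4 and 1->3,
    # so the expected branch sums are [7, 4].
    root = BinaryTree(1)
    root.left = BinaryTree(2)
    root.left.left = BinaryTree(4)
    root.right = BinaryTree(3)
    print(branchSums(root))  # [7, 4]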
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from collections import OrderedDict
from copy import deepcopy
import nnunet
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.configuration import default_num_threads
from nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2
from nnunet.experiment_planning.utils import create_lists_from_splitted_dataset
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
from nnunet.preprocessing.cropping import get_case_identifier_from_npz
from nnunet.training.model_restore import recursive_find_python_class
class ExperimentPlanner(object):
def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
self.folder_with_cropped_data = folder_with_cropped_data
self.preprocessed_output_folder = preprocessed_output_folder
self.list_of_cropped_npz_files = subfiles(self.folder_with_cropped_data, True, None, ".npz", True)
self.preprocessor_name = "GenericPreprocessor"
assert isfile(join(self.folder_with_cropped_data, "dataset_properties.pkl")), \
"folder_with_cropped_data must contain dataset_properties.pkl"
self.dataset_properties = load_pickle(join(self.folder_with_cropped_data, "dataset_properties.pkl"))
self.plans_per_stage = OrderedDict()
self.plans = OrderedDict()
self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "fixed_plans_3D.pkl")
self.data_identifier = default_data_identifier
self.transpose_forward = [0, 1, 2]
self.transpose_backward = [0, 1, 2]
self.unet_base_num_features = Generic_UNet.BASE_NUM_FEATURES_3D
self.unet_max_num_filters = 320
self.unet_max_numpool = 999
self.unet_min_batch_size = 2
self.unet_featuremap_min_edge_length = 4
self.target_spacing_percentile = 50
self.anisotropy_threshold = 3
self.how_much_of_a_patient_must_the_network_see_at_stage0 = 4 # 1/4 of a patient
self.batch_size_covers_max_percent_of_dataset = 0.05 # all samples in the batch together cannot cover more
# than 5% of the entire dataset
self.conv_per_stage = 2
def get_target_spacing(self):
spacings = self.dataset_properties['all_spacings']
# target = np.median(np.vstack(spacings), 0)
# if target spacing is very anisotropic we may want to not downsample the axis with the worst spacing
# uncomment after mystery task submission
"""worst_spacing_axis = np.argmax(target)
if max(target) > (2.5 * min(target)):
spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 5)
target[worst_spacing_axis] = target_spacing_of_that_axis"""
target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0)
return target
def save_my_plans(self):
with open(self.plans_fname, 'wb') as f:
pickle.dump(self.plans, f)
def load_my_plans(self):
self.plans = load_pickle(self.plans_fname)
self.plans_per_stage = self.plans['plans_per_stage']
self.dataset_properties = self.plans['dataset_properties']
self.transpose_forward = self.plans['transpose_forward']
self.transpose_backward = self.plans['transpose_backward']
def determine_postprocessing(self):
pass
"""
Spoiler: This is unused, postprocessing was removed. Ignore it.
:return:
print("determining postprocessing...")
props_per_patient = self.dataset_properties['segmentation_props_per_patient']
all_region_keys = [i for k in props_per_patient.keys() for i in props_per_patient[k]['only_one_region'].keys()]
all_region_keys = list(set(all_region_keys))
only_keep_largest_connected_component = OrderedDict()
for r in all_region_keys:
all_results = [props_per_patient[k]['only_one_region'][r] for k in props_per_patient.keys()]
only_keep_largest_connected_component[tuple(r)] = all(all_results)
print("Postprocessing: only_keep_largest_connected_component", only_keep_largest_connected_component)
all_classes = self.dataset_properties['all_classes']
classes = [i for i in all_classes if i > 0]
props_per_patient = self.dataset_properties['segmentation_props_per_patient']
min_size_per_class = OrderedDict()
for c in classes:
all_num_voxels = []
for k in props_per_patient.keys():
all_num_voxels.append(props_per_patient[k]['volume_per_class'][c])
if len(all_num_voxels) > 0:
min_size_per_class[c] = np.percentile(all_num_voxels, 1) * MIN_SIZE_PER_CLASS_FACTOR
else:
min_size_per_class[c] = np.inf
min_region_size_per_class = OrderedDict()
for c in classes:
region_sizes = [l for k in props_per_patient for l in props_per_patient[k]['region_volume_per_class'][c]]
if len(region_sizes) > 0:
min_region_size_per_class[c] = min(region_sizes)
# we don't need that line but better safe than sorry, right?
min_region_size_per_class[c] = min(min_region_size_per_class[c], min_size_per_class[c])
else:
min_region_size_per_class[c] = 0
print("Postprocessing: min_size_per_class", min_size_per_class)
print("Postprocessing: min_region_size_per_class", min_region_size_per_class)
return only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class
"""
def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
num_modalities, num_classes):
"""
Computation of input patch size starts out with the new median shape (in voxels) of a dataset. This is
opposed to prior experiments where I based it on the median size in mm. The rationale behind this is that
for some organ of interest the acquisition method will most likely be chosen such that the field of view and
voxel resolution go hand in hand to show the doctor what they need to see. This assumption may be violated
        for some modalities with anisotropy (cine MRI) but we will have to live with that. In future experiments I
        will try to 1) base the input patch size on the aspect ratio of the input size in mm (instead of voxels) and
        2) try to enforce that we see the same 'distance' in all directions (i.e. maintain an equal patch size in mm).
        The patches created here attempt to keep the aspect ratio of the new_median_shape.
:param current_spacing:
:param original_spacing:
:param original_shape:
:param num_cases:
:return:
"""
new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
dataset_num_voxels = np.prod(new_median_shape) * num_cases
        # the next line is what we had before as a default. The patch size had the same aspect ratio as the median
        # shape of a patient; we swapped that for the spacing-based initialization below.
# input_patch_size = new_median_shape
# compute how many voxels are one mm
input_patch_size = 1 / np.array(current_spacing)
# normalize voxels per mm
input_patch_size /= input_patch_size.mean()
# create an isotropic patch of size 512x512x512mm
input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value
input_patch_size = np.round(input_patch_size).astype(int)
        # clip it to the median shape of the dataset because patches larger than that do not make much sense
input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]
network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool,
current_spacing)
ref = Generic_UNet.use_this_for_batch_size_computation_3D
here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
self.unet_base_num_features,
self.unet_max_num_filters, num_modalities,
num_classes,
pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
while here > ref:
axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]
tmp = deepcopy(new_shp)
tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
_, _, _, _, shape_must_be_divisible_by_new = \
get_pool_and_conv_props_poolLateV2(tmp,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool,
current_spacing)
new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
# we have to recompute numpool now:
network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool,
current_spacing)
here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
self.unet_base_num_features,
self.unet_max_num_filters, num_modalities,
num_classes, pool_op_kernel_sizes,
conv_per_stage=self.conv_per_stage)
# print(new_shp)
input_patch_size = new_shp
batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D # This is what works with 128**3
batch_size = int(np.floor(max(ref / here, 1) * batch_size))
# check if batch size is too large
max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
np.prod(input_patch_size, dtype=np.int64)).astype(int)
max_batch_size = max(max_batch_size, self.unet_min_batch_size)
batch_size = max(1, min(batch_size, max_batch_size))
do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
0]) > self.anisotropy_threshold
plan = {
'batch_size': batch_size,
'num_pool_per_axis': network_num_pool_per_axis,
'patch_size': input_patch_size,
'median_patient_size_in_voxels': new_median_shape,
'current_spacing': current_spacing,
'original_spacing': original_spacing,
'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
'pool_op_kernel_sizes': pool_op_kernel_sizes,
'conv_kernel_sizes': conv_kernel_sizes,
}
return plan
def plan_experiment(self):
use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()
print("Are we using the nonzero mask for normalizaion?", use_nonzero_mask_for_normalization)
spacings = self.dataset_properties['all_spacings']
sizes = self.dataset_properties['all_sizes']
all_classes = self.dataset_properties['all_classes']
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
target_spacing = self.get_target_spacing()
new_shapes = [np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)]
max_spacing_axis = np.argmax(target_spacing)
remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis]
self.transpose_forward = [max_spacing_axis] + remaining_axes
self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)]
# we base our calculations on the median shape of the datasets
median_shape = np.median(np.vstack(new_shapes), 0)
print("the median shape of the dataset is ", median_shape)
max_shape = np.max(np.vstack(new_shapes), 0)
print("the max shape in the dataset is ", max_shape)
min_shape = np.min(np.vstack(new_shapes), 0)
print("the min shape in the dataset is ", min_shape)
print("we don't want feature maps smaller than ", self.unet_featuremap_min_edge_length, " in the bottleneck")
# how many stages will the image pyramid have?
self.plans_per_stage = list()
target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]
median_shape_transposed = np.array(median_shape)[self.transpose_forward]
print("the transposed median shape of the dataset is ", median_shape_transposed)
print("generating configuration for 3d_fullres")
self.plans_per_stage.append(self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed,
median_shape_transposed,
len(self.list_of_cropped_npz_files),
num_modalities, len(all_classes) + 1))
# thanks Zakiyi (https://github.com/MIC-DKFZ/nnUNet/issues/61) for spotting this bug :-)
# if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \
# architecture_input_voxels < HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0:
architecture_input_voxels_here = np.prod(self.plans_per_stage[-1]['patch_size'], dtype=np.int64)
if np.prod(median_shape) / architecture_input_voxels_here < \
self.how_much_of_a_patient_must_the_network_see_at_stage0:
more = False
else:
more = True
if more:
print("generating configuration for 3d_lowres")
# if we are doing more than one stage then we want the lowest stage to have exactly
# HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0 (this is 4 by default so the number of voxels in the
# median shape of the lowest stage must be 4 times as much as the network can process at once (128x128x128 by
# default). Problem is that we are downsampling higher resolution axes before we start downsampling the
# out-of-plane axis. We could probably/maybe do this analytically but I am lazy, so here
# we do it the dumb way
lowres_stage_spacing = deepcopy(target_spacing)
num_voxels = np.prod(median_shape, dtype=np.float64)
while num_voxels > self.how_much_of_a_patient_must_the_network_see_at_stage0 * architecture_input_voxels_here:
max_spacing = max(lowres_stage_spacing)
if np.any((max_spacing / lowres_stage_spacing) > 2):
lowres_stage_spacing[(max_spacing / lowres_stage_spacing) > 2] \
*= 1.01
else:
lowres_stage_spacing *= 1.01
num_voxels = np.prod(target_spacing / lowres_stage_spacing * median_shape, dtype=np.float64)
lowres_stage_spacing_transposed = np.array(lowres_stage_spacing)[self.transpose_forward]
new = self.get_properties_for_stage(lowres_stage_spacing_transposed, target_spacing_transposed,
median_shape_transposed,
len(self.list_of_cropped_npz_files),
num_modalities, len(all_classes) + 1)
architecture_input_voxels_here = np.prod(new['patch_size'], dtype=np.int64)
if 2 * np.prod(new['median_patient_size_in_voxels'], dtype=np.int64) < np.prod(
self.plans_per_stage[0]['median_patient_size_in_voxels'], dtype=np.int64):
self.plans_per_stage.append(new)
self.plans_per_stage = self.plans_per_stage[::-1]
self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))} # convert to dict
print(self.plans_per_stage)
print("transpose forward", self.transpose_forward)
print("transpose backward", self.transpose_backward)
normalization_schemes = self.determine_normalization_scheme()
only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None
# removed training data based postprocessing. This is deprecated
# these are independent of the stage
plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities,
'modalities': modalities, 'normalization_schemes': normalization_schemes,
'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files,
'original_spacings': spacings, 'original_sizes': sizes,
'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes),
'all_classes': all_classes, 'base_num_features': self.unet_base_num_features,
'use_mask_for_norm': use_nonzero_mask_for_normalization,
'keep_only_largest_region': only_keep_largest_connected_component,
'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class,
'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward,
'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage,
'preprocessor_name': self.preprocessor_name,
'conv_per_stage': self.conv_per_stage,
}
self.plans = plans
self.save_my_plans()
def determine_normalization_scheme(self):
schemes = OrderedDict()
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
for i in range(num_modalities):
if modalities[i] == "CT" or modalities[i] == 'ct':
schemes[i] = "CT"
elif modalities[i] == 'noNorm':
schemes[i] = "noNorm"
else:
schemes[i] = "nonCT"
return schemes
def save_properties_of_cropped(self, case_identifier, properties):
with open(join(self.folder_with_cropped_data, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
def load_properties_of_cropped(self, case_identifier):
with open(join(self.folder_with_cropped_data, "%s.pkl" % case_identifier), 'rb') as f:
properties = pickle.load(f)
return properties
def determine_whether_to_use_mask_for_norm(self):
        # only use the nonzero mask for normalization if the cropping based on it resulted in a decrease in
        # image size (this is an indication that the data is something like BraTS/ISLES and then we want to
        # normalize in the brain region only)
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
use_nonzero_mask_for_norm = OrderedDict()
for i in range(num_modalities):
if "CT" in modalities[i]:
use_nonzero_mask_for_norm[i] = False
else:
all_size_reductions = []
for k in self.dataset_properties['size_reductions'].keys():
all_size_reductions.append(self.dataset_properties['size_reductions'][k])
if np.median(all_size_reductions) < 3 / 4.:
print("using nonzero mask for normalization")
use_nonzero_mask_for_norm[i] = True
else:
print("not using nonzero mask for normalization")
use_nonzero_mask_for_norm[i] = False
for c in self.list_of_cropped_npz_files:
case_identifier = get_case_identifier_from_npz(c)
properties = self.load_properties_of_cropped(case_identifier)
properties['use_nonzero_mask_for_norm'] = use_nonzero_mask_for_norm
self.save_properties_of_cropped(case_identifier, properties)
use_nonzero_mask_for_normalization = use_nonzero_mask_for_norm
return use_nonzero_mask_for_normalization
def write_normalization_scheme_to_patients(self):
"""
This is used for test set preprocessing
:return:
"""
for c in self.list_of_cropped_npz_files:
case_identifier = get_case_identifier_from_npz(c)
properties = self.load_properties_of_cropped(case_identifier)
properties['use_nonzero_mask_for_norm'] = self.plans['use_mask_for_norm']
self.save_properties_of_cropped(case_identifier, properties)
def run_preprocessing(self, num_threads):
if os.path.isdir(join(self.preprocessed_output_folder, "gt_segmentations")):
shutil.rmtree(join(self.preprocessed_output_folder, "gt_segmentations"))
shutil.copytree(join(self.folder_with_cropped_data, "gt_segmentations"),
join(self.preprocessed_output_folder, "gt_segmentations"))
normalization_schemes = self.plans['normalization_schemes']
use_nonzero_mask_for_normalization = self.plans['use_mask_for_norm']
intensityproperties = self.plans['dataset_properties']['intensityproperties']
preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], "preprocessing")],
self.preprocessor_name, current_module="nnunet.preprocessing")
assert preprocessor_class is not None
preprocessor = preprocessor_class(normalization_schemes, use_nonzero_mask_for_normalization,
self.transpose_forward,
intensityproperties)
target_spacings = [i["current_spacing"] for i in self.plans_per_stage.values()]
if self.plans['num_stages'] > 1 and not isinstance(num_threads, (list, tuple)):
num_threads = (default_num_threads, num_threads)
elif self.plans['num_stages'] == 1 and isinstance(num_threads, (list, tuple)):
num_threads = num_threads[-1]
preprocessor.run(target_spacings, self.folder_with_cropped_data, self.preprocessed_output_folder,
self.plans['data_identifier'], num_threads)
def maybe_mkdir_p(directory):
directory = os.path.abspath(directory)
splits = directory.split("\\")[1:]
base = directory.split('\\')[0]
for i in range(0, len(splits)):
if not os.path.isdir(join(base, join("\\", *splits[:i+1]))):
try:
os.mkdir(join(base, join("\\", *splits[:i+1])))
except FileExistsError:
# this can sometimes happen when two jobs try to create the same directory at the same time,
# especially on network drives.
print("WARNING: Folder %s already existed and does not need to be created" % directory)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--task_ids", nargs="+", help="list of int")
parser.add_argument("-p", action="store_true", help="set this if you actually want to run the preprocessing. If "
"this is not set then this script will only create the plans file")
parser.add_argument("-tl", type=int, required=False, default=8, help="num_threads_lowres")
parser.add_argument("-tf", type=int, required=False, default=8, help="num_threads_fullres")
args = parser.parse_args()
task_ids = args.task_ids
run_preprocessing = args.p
tl = args.tl
tf = args.tf
tasks = []
for i in task_ids:
i = int(i)
candidates = subdirs(nnUNet_cropped_data, prefix="Task%03.0d" % i, join=False)
assert len(candidates) == 1
tasks.append(candidates[0])
for t in tasks:
try:
print("\n\n\n", t)
cropped_out_dir = os.path.join(nnUNet_cropped_data, t)
preprocessing_output_dir_this_task = os.path.join(preprocessing_output_dir, t)
splitted_4d_output_dir_task = os.path.join(nnUNet_raw_data, t)
lists, modalities = create_lists_from_splitted_dataset(splitted_4d_output_dir_task)
dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=False)
_ = dataset_analyzer.analyze_dataset() # this will write output files that will be used by the ExperimentPlanner
maybe_mkdir_p(preprocessing_output_dir_this_task)
shutil.copy(join(cropped_out_dir, "dataset_properties.pkl"), preprocessing_output_dir_this_task)
shutil.copy(join(nnUNet_raw_data, t, "dataset.json"), preprocessing_output_dir_this_task)
threads = (tl, tf)
print("number of threads: ", threads, "\n")
exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task)
exp_planner.plan_experiment()
if run_preprocessing:
exp_planner.run_preprocessing(threads)
except Exception as e:
print(e)
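
# Example invocation (the script name is illustrative; -p actually runs the preprocessing):
#     python experiment_planner.py -t 4 -p -tl 8 -tf 8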
|
"""
dataset.py
"""
from six import (
iteritems,
with_metaclass,
)
from zipline.modelling.term import Term
class Column(object):
"""
An abstract column of data, not yet associated with a dataset.
"""
def __init__(self, dtype):
self.dtype = dtype
def bind(self, dataset, name):
"""
Bind a column to a concrete dataset.
"""
return BoundColumn(dtype=self.dtype, dataset=dataset, name=name)
class BoundColumn(Term):
"""
A Column of data that's been concretely bound to a particular dataset.
"""
def __new__(cls, dtype, dataset, name):
return super(BoundColumn, cls).__new__(
cls,
inputs=(),
window_length=0,
domain=dataset.domain,
dtype=dtype,
dataset=dataset,
name=name,
)
def _init(self, dataset, name, *args, **kwargs):
self._dataset = dataset
self._name = name
return super(BoundColumn, self)._init(*args, **kwargs)
@classmethod
def static_identity(cls, dataset, name, *args, **kwargs):
return (
super(BoundColumn, cls).static_identity(*args, **kwargs),
dataset,
name,
)
@property
def dataset(self):
return self._dataset
@property
def name(self):
return self._name
@property
def qualname(self):
"""
        Fully qualified name of this column.
"""
return '.'.join([self.dataset.__name__, self.name])
def __repr__(self):
return "{qualname}::{dtype}".format(
qualname=self.qualname,
dtype=self.dtype.__name__,
)
class DataSetMeta(type):
"""
Metaclass for DataSets
Supplies name and dataset information to Column attributes.
"""
def __new__(mcls, name, bases, dict_):
newtype = type.__new__(mcls, name, bases, dict_)
_columns = []
for maybe_colname, maybe_column in iteritems(dict_):
if isinstance(maybe_column, Column):
bound_column = maybe_column.bind(newtype, maybe_colname)
setattr(newtype, maybe_colname, bound_column)
_columns.append(bound_column)
newtype._columns = _columns
return newtype
@property
def columns(self):
return self._columns
class DataSet(with_metaclass(DataSetMeta)):
domain = None
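
# Sketch: declaring a DataSet subclass binds each Column through DataSetMeta
# (the dtype below is illustrative):
#
#     class Prices(DataSet):
#         close = Column(float)
#
#     Prices.close           # a BoundColumn; repr() gives "Prices.close::float"
#     Prices.close.qualname  # "Prices.close"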
|
# -*- coding: utf-8 -*-
__author__ = 'zhnlk'
import os
from setuptools import setup
import cores
def getSubpackages(name):
"""获取该模块下所有的子模块名称"""
splist = []
for dirpath, _dirnames, _filenames in os.walk(name):
if os.path.isfile(os.path.join(dirpath, '__init__.py')):
splist.append(".".join(dirpath.split(os.sep)))
return splist
setup(
name='nbcrawler',
version=cores.__version__,
author=cores.__author__,
author_email='tomleader0828@gmail.com',
license='MIT',
url='http://github.com/zhnlk/nbcrawler',
description='A crawler framework for NewBanker',
long_description=__doc__,
keywords='crawler newbanker spider distribute ',
classifiers=['Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python :: Implementation :: CPython',
'License :: OSI Approved :: MIT License'],
    packages=getSubpackages('cores'),  # the package directory for this project is 'cores', not 'vnpy'
)
|
# sping:: pyart
from pidPyart import *
|