# This is an example of firing up PyMOL inside of a subordinate
# process via an "import pymol"
#
# NOTE: for this to work, PyMOL must be installed in a
# Python-dependent fashion (e.g. pymol-0_98-bin-win32-py23) etc.
#
# WARNING: stability issues have been known to occur with this
# approach, so anticipate problems; take-down in particular is messy.
#
# WARNING: Right now, there is no way for the main process to know
# when PyMOL is actually initialized and ready to go, so we simply
# sleep a second after importing.
import __main__
# note that passing in a "-z" option would keep the window hidden
# until you called pymol.cmd.window("show").
__main__.pymol_argv = "pymol -qxiF -X 300 -Y 100 -H 400 -W 400".split()
import pymol
# give PyMOL enough time to initialize (we still need a safe and
# robust alternative to this fixed delay, especially since the
# pymol.finish_launching() method currently appears to be broken;
# one possible polling alternative is sketched below)
import time
time.sleep(1)
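# A possible alternative (a sketch only, not the original approach): poll until a
# trivial command succeeds instead of sleeping for a fixed amount of time. This
# assumes that pymol.cmd.get_version() exists in your PyMOL build and raises
# while the command interpreter is still starting up.
def wait_for_pymol(timeout=10.0, interval=0.1):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            pymol.cmd.get_version()  # any cheap, side-effect-free command would do
            return True
        except Exception:
            time.sleep(interval)
    return False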
# put up some content
if 1:
pymol.cmd.set("sweep_mode",3)
pymol.cmd.rock()
pymol.cmd.turn("x",180)
pymol.cmd.load("$TUT/1hpv.pdb")
pymol.preset.pretty("1hpv")
pymol.cmd.orient()
pymol.cmd.turn("y",85)
pymol.cmd.zoom("all",20)
pymol.cmd.orient("organic & e. N+O",animate=10)
pymol.cmd.show("sticks","organic")
# play peek-a-boo with the window
if 1:
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
time.sleep(5)
pymol.cmd.window("hide")
print("Peek-a-boo!")
time.sleep(1)
pymol.cmd.window("show")
# now quit
print("Quitting...")
time.sleep(1)
print("3...")
time.sleep(1)
print("2...")
time.sleep(1)
print("1...")
time.sleep(1)
print("Die!")
# note: we cannot let the main thread terminate without first calling
# pymol.cmd.quit(), which takes down PyMOL cleanly
pymol.cmd.quit()
|
import unittest
def linear_sum(S, n):
"""Return the sum of the first n numbers of sequence S."""
if n == 0:
return 0
else:
return linear_sum(S, n - 1) + S[n - 1]
class TestLinearSum(unittest.TestCase):
def test_linear_sum(self):
S = [4, 3, 6, 2, 8]
self.assertEqual(23, linear_sum(S, 5))
if __name__ == '__main__':
unittest.main()
|
from django.urls import path
from . import views
# These are our app-level URL patterns (they apply only to this app, not to the whole project).
# Each pattern routes a matching request to a view.
# They must be hooked into the root URLconf, because that is where requests arrive first
# (see the sketch after the urlpatterns list below).
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/result/', views.ResultView.as_view(), name='result'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
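# For these patterns to receive requests, the project-level URLconf has to
# include them. A minimal sketch (the project package name 'mysite' is an
# assumption), typically placed in mysite/urls.py:
#
#   from django.urls import include, path
#
#   urlpatterns = [
#       path('polls/', include('polls.urls')),
#   ]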
|
"""
Menu handling file
- Every menu is of the Menu class
- Menus are initialized with an array of options
- What a menu option does is determined by the following table:
- "set_state_map": s.set_state('map')
- "exit": exit()
"""
from config import *
import sys
class Menu:
def __init__(self, options, sel_index, results):
self.options = options # Array of strings
self.results = results # Array of strings
self._sel_index = sel_index
self.first_print = True
@property
def sel_index(self):
return self._sel_index
@sel_index.setter
def sel_index(self, value):
length = len(self.options)
if value > length:
self._sel_index = 1
elif value < 1:
self._sel_index = length
else:
self._sel_index = value
@sel_index.deleter
def sel_index(self):
del self._sel_index
def print_menu_center(self):
if not self.first_print:
print(t.move_up(len(self.options) + 1))
for _ in range(len(self.options) + 1):
print(t.clear_eol)
print(t.move_up(len(self.options) + 2))
count = 1
for option in self.options:
if self.sel_index == count:
print(t.center("> " + str(count) + ". " + option))
else:
print(t.center(str(count) + ". " + option))
count += 1
self.first_print = False
# Returns the menu as a list of option strings for the caller to render
# Specifically meant for use in the 'battle' state
def battle_menu(self):
output = []
count = 1
for option in self.options:
if self.sel_index == count:
output.append("> " + str(count) + ". " + option)
else:
output.append(str(count) + ". " + option)
count += 1
return output
def decision(self):
choice = self.results[(self.sel_index-1)]
if choice == "set_state_map":
s.set_state('map')
elif choice == "exit":
sys.exit()
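# Illustrative usage (a sketch, not part of the original module): option and
# result strings follow the table in the module docstring above.
if __name__ == '__main__':
    main_menu = Menu(["Enter map", "Exit"], 1, ["set_state_map", "exit"])
    main_menu.sel_index += 1          # wraps around via the property setter
    print(main_menu.battle_menu())    # ['1. Enter map', '> 2. Exit']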
|
# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization, often at the expense of readability and what might be
# considered good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2" # Table version
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates a
# 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility function for python 2.6/3.0
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# Compatibility
try:
MAXINT = sys.maxint
except AttributeError:
MAXINT = sys.maxsize
# Python 2.x/3.0 compatibility.
def load_ply_lex():
if sys.version_info[0] < 3:
import lex
else:
import ply.lex as lex
return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def debug(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
info = debug
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
critical = debug
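# Illustrative usage (a sketch, not part of PLY itself): a PlyLogger simply
# forwards formatted messages to any file-like object, so redirecting PLY's
# diagnostic output is a one-liner.
def _plylogger_example():
    log = PlyLogger(sys.stderr)
    log.warning("unused token %s", "FOO")   # writes "WARNING: unused token FOO\n"
    return log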
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception): pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str: repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit]+" ..."
result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str: repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return "<%s @ 0x%x>" % (type(r).__name__,id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self): return self.type
def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self,s,stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser= None
def __getitem__(self,n):
if n >= 0: return self.slice[n].value
else: return self.stack[n].value
def __setitem__(self,n,v):
self.slice[n].value = v
def __getslice__(self,i,j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self,n):
return getattr(self.slice[n],"lineno",0)
def set_lineno(self,n,lineno):
self.slice[n].lineno = lineno
def linespan(self,n):
startline = getattr(self.slice[n],"lineno",0)
endline = getattr(self.slice[n],"endlineno",startline)
return startline,endline
def lexpos(self,n):
return getattr(self.slice[n],"lexpos",0)
def lexspan(self,n):
startpos = getattr(self.slice[n],"lexpos",0)
endpos = getattr(self.slice[n],"endlexpos",startpos)
return startpos,endpos
def error(self):
raise SyntaxError
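# Illustrative usage (a sketch, not part of PLY itself): indexing a
# YaccProduction reads and writes the .value of the wrapped YaccSymbol objects,
# which is exactly what user grammar rules do with p[0], p[1], ...
def _yaccproduction_example():
    lhs = YaccSymbol(); lhs.type = 'expr'; lhs.value = None
    rhs = YaccSymbol(); rhs.type = 'NUM';  rhs.value = 3
    p = YaccProduction([lhs, rhs])
    p[0] = p[1] * 2        # stores 6 on lhs.value via __setitem__
    return lhs.value       # -> 6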
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self,lrtab,errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
def errok(self):
self.errorok = 1
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug,int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
elif tracking:
return self.parseopt(input,lexer,debug,tracking,tokenfunc)
else:
return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. For the non-debugging version,
# copy this code to a method parseopt() and delete all of the sections
# enclosed in:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# --! DEBUG
debug.info("PLY: PARSE DEBUG START")
# --! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = "$end"
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
# --! DEBUG
debug.debug('')
debug.debug('State : %s', state)
# --! DEBUG
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = "$end"
# --! DEBUG
debug.debug('Stack : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
# --! DEBUG
debug.debug("Action : Shift and goto state %s", t)
# --! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
# --! DEBUG
if plen:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
else:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
# --! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n,"value",None)
# --! DEBUG
debug.info("Done : Returning %s", format_result(result))
debug.info("PLY: PARSE DEBUG END")
# --! DEBUG
return result
if t == None:
# --! DEBUG
debug.error('Error : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == "$end":
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != "$end":
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == "$end":
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
# Edit the debug version above, then copy any modifications to the method
# below while removing #--! DEBUG sections.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
# code in the #--! TRACKING sections
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
if t == None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = [ ]
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = "%s -> %s" % (self.name," ".join(self.prod))
else:
self.str = "%s -> <empty>" % self.name
def __str__(self):
return self.str
def __repr__(self):
return "Production("+str(self)+")"
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self,index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self,n):
if n > len(self.prod): return None
p = LRItem(self,n)
# Precompute the list of productions immediately following. Hack. Remove later
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError,KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
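# Illustrative only (a sketch, not part of PLY itself): in user code, a
# production such as "expr : expr PLUS term" is normally supplied as a function
# whose docstring holds the rule; yacc turns each such rule into a Production
# object like the one above. The token PLUS and the nonterminals expr/term are
# assumed to be defined elsewhere in the user's grammar.
#
#   def p_expr_plus(p):
#       'expr : expr PLUS term'
#       p[0] = p[1] + p[3]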
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self,str,name,len,func,file,line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return "MiniProduction(%s)" % self.str
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here are
# the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next - Next LR item. For example, if we are 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self,p,n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = { }
self.prod.insert(n,".")
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = "%s -> %s" % (self.name," ".join(self.prod))
else:
s = "%s -> <empty>" % self.name
return s
def __repr__(self):
return "LRItem("+str(self)+")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError): pass
class Grammar(object):
def __init__(self,terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = { } # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = { } # A dictionary of precomputed FIRST(x) symbols
self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
        self.UsedPrecedence = { } # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self,index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self,term,assoc,level):
assert self.Productions == [None],"Must call set_precedence() before add_production()"
if term in self.Precedence:
raise GrammarError("Precedence already specified for terminal '%s'" % term)
if assoc not in ['left','right','nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc,level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most non-terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self,prodname,syms,func=None,file='',line=0):
if prodname in self.Terminals:
raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
if prodname == 'error':
raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
if not _is_identifier.match(prodname):
raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
# Look for literal tokens
for n,s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
if not c in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
if syms[-2] != '%prec':
raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
precname = syms[-1]
prodprec = self.Precedence.get(precname,None)
if not prodprec:
raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
else:
self.UsedPrecedence[precname] = 1
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms,self.Terminals)
prodprec = self.Precedence.get(precname,('right',0))
# See if the rule is already in the rulemap
map = "%s -> %s" % (prodname,syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
"Previous definition at %s:%d" % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if not prodname in self.Nonterminals:
self.Nonterminals[prodname] = [ ]
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if not t in self.Nonterminals:
self.Nonterminals[t] = [ ]
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber,prodname,syms,prodprec,func,file,line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [ p ]
return 0
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self,start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError("start symbol %s undefined" % start)
self.Productions[0] = Production(0,"S'",[start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if reachable[s]:
# We've already reached symbol s.
return
reachable[s] = 1
for p in self.Prodnames.get(s,[]):
for r in p.prod:
mark_reachable_from(r)
reachable = { }
for s in list(self.Terminals) + list(self.Nonterminals):
reachable[s] = 0
mark_reachable_from( self.Productions[0].prod[0] )
return [s for s in list(self.Nonterminals)
if not reachable[s]]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = 1
terminates['$end'] = 1
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = 0
# Then propagate termination until no change:
while 1:
some_change = 0
for (n,pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = 0
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = 1
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = 1
some_change = 1
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s,term) in terminates.items():
if not term:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p: continue
for s in p.prod:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
result.append((s,p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s,v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s,v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname,self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self,beta):
# We are computing First(x1,x2,x3,...,xn)
result = [ ]
for x in beta:
x_produces_empty = 0
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = 1
else:
if f not in result: result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while 1:
some_change = 0
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append( f )
some_change = 1
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self,start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = [ ]
if not start:
start = self.Productions[1].name
self.Follow[start] = [ '$end' ]
while 1:
didadd = 0
for p in self.Productions[1:]:
# Here is the production set
for i in range(len(p.prod)):
B = p.prod[i]
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = 0
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if f == '<empty>':
hasempty = 1
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if not didadd: break
return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while 1:
if i > len(p):
lri = None
else:
lri = LRItem(p,i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError,KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri: break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
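# Illustrative usage (a sketch, not part of PLY itself): building a tiny grammar
# by hand and computing its FIRST and FOLLOW sets with the machinery above.
def _first_follow_example():
    g = Grammar(['NUM', 'PLUS'])
    g.add_production('expr', ['expr', 'PLUS', 'term'])
    g.add_production('expr', ['term'])
    g.add_production('term', ['NUM'])
    g.set_start()                  # start symbol defaults to 'expr'
    first = g.compute_first()      # first['expr'] == first['term'] == ['NUM']
    follow = g.compute_follow()    # follow['expr'] == ['$end', 'PLUS']
    return first, follow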
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError): pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self,module):
if isinstance(module,types.ModuleType):
parsetab = module
else:
if sys.version_info[0] < 3:
exec("import %s as parsetab" % module)
else:
env = { }
exec("import %s as parsetab" % module, env, env)
parsetab = env['parsetab']
if parsetab._tabversion != __tabversion__:
raise VersionError("yacc table file version is out of date")
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self,filename):
try:
import cPickle as pickle
except ImportError:
import pickle
in_f = open(filename,"rb")
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError("yacc table file version is out of date")
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self,pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
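# A small worked example: with
#
#     X  = ['a', 'b']
#     R  = lambda x: ['b'] if x == 'a' else []
#     FP = lambda x: [x.upper()]
#
# digraph(X, R, FP) returns {'a': ['A', 'B'], 'b': ['B']} -- since 'a' is
# related to 'b', F('a') absorbs F('b') in addition to its own FP('a') value.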
def digraph(X,R,FP):
N = { }
for x in X:
N[x] = 0
stack = []
F = { }
for x in X:
if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
return F
def traverse(x,N,stack,F,X,R,FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y,N,stack,F,X,R,FP)
N[x] = min(N[x],N[y])
for a in F.get(y,[]):
if a not in F[x]: F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self,grammar,method='LALR',log=None):
if method not in ['SLR','LALR']:
raise LALRError("Unsupported method %s" % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self,I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = 1
while didadd:
didadd = 0
for j in J:
for x in j.lr_after:
if getattr(x,"lr0_added",0) == self._add_count: continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = 1
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self,I,x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I),x),None)
if g: return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x,None)
if not s:
s = { }
self.lr_goto_cache[x] = s
gs = [ ]
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n),None)
if not s1:
s1 = { }
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end',None)
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I),x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbol
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = { }
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I,x)
if not g: continue
if id(g) in self.lr0_cidhash: continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
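# For illustration, with the rules
#
#     opt_semi : SEMI
#     opt_semi : empty
#     empty    :
#
# 'empty' is marked nullable because its production has length 0, and
# 'opt_semi' follows once 'empty' is known to be nullable, because its second
# production consists only of nullable symbols.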
def compute_nullable_nonterminals(self):
nullable = {}
num_nullable = 0
while 1:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not t in nullable: break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
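# For example, if state 3 contains the item 'statement -> ID EQUALS . expression',
# the pair (3, 'expression') is recorded; an item whose dot sits in front of a
# terminal (or at the very end of the production) contributes nothing.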
def find_nonterminal_transitions(self,C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
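# For example, if goto(C[3], 'expression') contains the item
# 'statement -> ID EQUALS expression . SEMI', the terminal SEMI sits directly
# to the right of the dot and is therefore added to DR(3, 'expression').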
def dr_relation(self,C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = self.lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state],N)
j = self.lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
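# A concrete illustration: for a production B -> A C where C is nullable, any
# state p' holding the item 'B -> . A C' gives (p', A) INCLUDES (p', B), since
# L is empty and T = C derives empty. The LOOKBACK entry for (p', B) pairs the
# finished item 'B -> A C .' with the state reached after shifting the whole
# right-hand side from p'.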
def compute_lookback_includes(self,C,trans,nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state,N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N: continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j,t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation;
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals: break # No, forget it
if not p.prod[li] in nullable: break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j,t))
g = self.lr0_goto(C[j],t) # Go to next set
j = self.lr0_cidhash.get(id(g),-1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name: continue
if r.len != p.len: continue
i = 0
# This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]: break
i = i + 1
else:
lookb.append((j,r))
for i in includes:
if not i in includedict: includedict[i] = []
includedict[i].append((state,N))
lookdict[(state,N)] = lookb
return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
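# In the DeRemer-Pennello notation this solves
#
#     Read(p,A) = DR(p,A) U U{ Read(r,C) | (p,A) READS (r,C) }
#
# by plugging dr_relation() in as FP and reads_relation() in as R in the
# generic digraph() solver above.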
def compute_read_sets(self,C, ntrans, nullable):
FP = lambda x: self.dr_relation(C,x,nullable)
R = lambda x: self.reads_relation(C,x,nullable)
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self,ntrans,readsets,inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x,[])
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self,lookbacks,followset):
for trans,lb in lookbacks.items():
# Loop over productions in lookback
for state,p in lb:
if not state in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans,[])
for a in f:
if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self,C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C,trans,nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C,trans,nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans,readsets,included)
# Add all of the lookaheads
self.add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = { } # Action production array (temporary)
log.info("Parsing method: %s", self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [ ] # List of actions
st_action = { }
st_actionp = { }
st_goto = { }
log.info("")
log.info("state %d", st)
log.info("")
for p in I:
log.info(" (%d) %s", p.number, str(p))
log.info("")
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action["$end"] = 0
st_actionp["$end"] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
r = st_action.get(a,None)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
sprec,slevel = Productions[st_actionp[a].number].prec
rprec,rlevel = Precedence.get(a,('right',0))
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
self.sr_conflicts.append((st,a,'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as shift",a)
self.sr_conflicts.append((st,a,'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp,rejectp = pp,oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp,rejectp = oldp,pp
self.rr_conflicts.append((st,chosenp,rejectp))
log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
else:
raise LALRError("Unknown conflict in state %d" % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I,a)
j = self.lr0_cidhash.get(id(g),-1)
if j >= 0:
# We are in a shift state
actlist.append((a,p,"shift and go to state %d" % j))
r = st_action.get(a,None)
if r is not None:
# Whoa. Have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError("Shift/shift conflict in state %d" % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
rprec,rlevel = Productions[st_actionp[a].number].prec
sprec,slevel = Precedence.get(a,('right',0))
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as shift",a)
self.sr_conflicts.append((st,a,'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
self.sr_conflicts.append((st,a,'reduce'))
else:
raise LALRError("Unknown conflict in state %d" % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = { }
for a,p,m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(" %-15s %s",a,m)
_actprint[(a,m)] = 1
log.info("")
# Print the actions that were not used. (debugging)
not_used = 0
for a,p,m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a,m) in _actprint:
log.debug(" ! %-15s [ %s ]",a,m)
not_used = 1
_actprint[(a,m)] = 1
if not_used:
log.debug("")
# Construct the goto table for this state
nkeys = { }
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I,n)
j = self.lr0_cidhash.get(id(g),-1)
if j >= 0:
st_goto[n] = j
log.info(" %-30s shift and go to state %d",n,j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
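# With the 'smaller' encoding used below, the generated module stores the
# tables in a factored form, roughly
#
#     _lr_action_items = {'PLUS': ([0, 4, 7], [-2, 5, -3]), ...}
#
# (state lists paired with action lists; the values here are purely
# illustrative), which the generated module unpacks back into
# _lr_action[state][token].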
def write_table(self,modulename,outputdir='',signature=""):
basemodulename = modulename.split(".")[-1]
filename = os.path.join(outputdir,basemodulename) + ".py"
try:
f = open(filename,"w")
f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = { }
for s,nd in self.lr_action.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_action_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
""")
else:
f.write("\n_lr_action = { ");
for k,v in self.lr_action.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
if smaller:
# Factor out names to try and make smaller
items = { }
for s,nd in self.lr_goto.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_goto_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
""")
else:
f.write("\n_lr_goto = { ");
for k,v in self.lr_goto.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
# Write production table
f.write("_lr_productions = [\n")
for p in self.lr_productions:
if p.func:
f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
else:
f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
f.write("]\n")
f.close()
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("Unable to create '%s'\n" % filename)
sys.stderr.write(str(e)+"\n")
return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self,filename,signature=""):
try:
import cPickle as pickle
except ImportError:
import pickle
outf = open(filename,"wb")
pickle.dump(__tabversion__,outf,pickle_protocol)
pickle.dump(self.lr_method,outf,pickle_protocol)
pickle.dump(signature,outf,pickle_protocol)
pickle.dump(self.lr_action,outf,pickle_protocol)
pickle.dump(self.lr_goto,outf,pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
else:
outp.append((str(p),p.name,p.len,None,None,None))
pickle.dump(outp,outf,pickle_protocol)
outf.close()
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
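# For example, a rule docstring such as
#
#     expression : expression PLUS term
#                | term
#
# yields entries of the form (file, lineno, 'expression',
# ['expression', 'PLUS', 'term']) and (file, lineno, 'expression', ['term']).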
def parse_grammar(doc,file,line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p: continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))
grammar.append((file,dline,prodname,syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))
return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self,pdict,log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.files = {}
self.grammar = []
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_files()
return self.error
# Compute a signature over the grammar
def signature(self):
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
sig = md5()
if self.start:
sig.update(self.start.encode('latin-1'))
if self.prec:
sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
if self.tokens:
sig.update(" ".join(self.tokens).encode('latin-1'))
for f in self.pfuncs:
if f[3]:
sig.update(f[3].encode('latin-1'))
except (TypeError,ValueError):
pass
return sig.digest()
# -----------------------------------------------------------------------------
# validate_files()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_files(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for filename in self.files.keys():
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea. Assume it's okay.
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
continue
counthash = { }
for linen,l in enumerate(lines):
linen += 1
m = fre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start,str):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func,types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = 1
return
eline = func_code(self.error_func).co_firstlineno
efile = func_code(self.error_func).co_filename
self.files[efile] = 1
if (func_code(self.error_func).co_argcount != 1+ismethod):
self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
self.error = 1
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = 1
return
terminals = {}
for n in self.tokens:
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get("precedence",None)
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec,(list,tuple)):
self.log.error("precedence must be a list or tuple")
self.error = 1
return
for level,p in enumerate(self.prec):
if not isinstance(p,(list,tuple)):
self.log.error("Bad precedence table")
self.error = 1
return
if len(p) < 2:
self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
self.error = 1
return
assoc = p[0]
if not isinstance(assoc,str):
self.log.error("precedence associativity must be a string")
self.error = 1
return
for term in p[1:]:
if not isinstance(term,str):
self.log.error("precedence items must be strings")
self.error = 1
return
preclist.append((term,assoc,level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if name[:2] != 'p_': continue
if name == 'p_error': continue
if isinstance(item,(types.FunctionType,types.MethodType)):
line = func_code(item).co_firstlineno
file = func_code(item).co_filename
p_functions.append((line,file,name,item.__doc__))
# Sort all of the actions by line number
p_functions.sort()
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error("no rules of the form p_rulename are defined")
self.error = 1
return
for line, file, name, doc in self.pfuncs:
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func_code(func).co_argcount > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
self.error = 1
elif func_code(func).co_argcount < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
self.error = 1
elif not func.__doc__:
self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
else:
try:
parsed_g = parse_grammar(doc,file,line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError:
e = sys.exc_info()[1]
self.log.error(str(e))
self.error = 1
# Looks like a valid grammar rule
# Mark the file in which it was defined.
self.files[file] = 1
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n,v in self.pdict.items():
if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
if n[0:2] == 't_': continue
if n[0:2] == 'p_' and n != 'p_error':
self.log.warning("'%s' not defined as a function", n)
if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
(isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
try:
doc = v.__doc__.split(" ")
if doc[1] == ':':
self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
func_code(v).co_filename, func_code(v).co_firstlineno,n)
except Exception:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
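# Typical usage (sketch only; the calling module is expected to define
# 'tokens', optional 'precedence', the p_rulename() action functions and
# p_error()):
#
#     import ply.yacc as yacc
#     parser = yacc.yacc()                  # build or reload the parse tables
#     result = parser.parse(data, lexer=mylexer)
#
# where 'data' and 'mylexer' stand in for the caller's input text and lexer.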
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
debuglog=None, errorlog = None, picklefile=None):
global parse # Reference to the parsing method of the last built parser
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
pdict = dict(_items)
else:
pdict = get_caller_module_dict(2)
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict,log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError("Unable to build parser")
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr,pinfo.error_func)
parse = parser.parse
return parser
except Exception:
e = sys.exc_info()[1]
errorlog.warning("There was a problem loading the table file: %s", repr(e))
except VersionError:
e = sys.exc_info()
errorlog.warning(str(e))
except Exception:
pass
if debuglog is None:
if debug:
debuglog = PlyLogger(open(debugfile,"w"))
else:
debuglog = NullLogger()
debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
errors = 0
# Validate the parser information
if pinfo.validate_all():
raise YaccError("Unable to build parser")
if not pinfo.error_func:
errorlog.warning("no p_error() function is defined")
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term,assoc,level)
except GrammarError:
e = sys.exc_info()[1]
errorlog.warning("%s",str(e))
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname,syms,funcname,file,line)
except GrammarError:
e = sys.exc_info()[1]
errorlog.error("%s",str(e))
errors = 1
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError:
e = sys.exc_info()[1]
errorlog.error(str(e))
errors = 1
if errors:
raise YaccError("Unable to build parser")
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
errors = 1
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info("")
debuglog.info("Unused terminals:")
debuglog.info("")
for term in unused_terminals:
errorlog.warning("Token '%s' defined, but not used", term)
debuglog.info(" %s", term)
# Print out all productions to the debug log
if debug:
debuglog.info("")
debuglog.info("Grammar")
debuglog.info("")
for n,p in enumerate(grammar.Productions):
debuglog.info("Rule %-5d %s", n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning("There is 1 unused token")
if len(unused_terminals) > 1:
errorlog.warning("There are %d unused tokens", len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning("There is 1 unused rule")
if len(unused_rules) > 1:
errorlog.warning("There are %d unused rules", len(unused_rules))
if debug:
debuglog.info("")
debuglog.info("Terminals, with rules where they appear")
debuglog.info("")
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
debuglog.info("")
debuglog.info("Nonterminals, with rules where they appear")
debuglog.info("")
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info("")
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning("Symbol '%s' is unreachable",u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error("Infinite recursion detected for symbol '%s'", inf)
errors = 1
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
errors = 1
if errors:
raise YaccError("Unable to build parser")
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug("Generating %s tables", method)
lr = LRGeneratedTable(grammar,method,debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning("1 shift/reduce conflict")
elif num_sr > 1:
errorlog.warning("%d shift/reduce conflicts", num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning("1 reduce/reduce conflict")
elif num_rr > 1:
errorlog.warning("%d reduce/reduce conflicts", num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning("")
debuglog.warning("Conflicts:")
debuglog.warning("")
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
already_reported = {}
for state, rule, rejected in lr.rr_conflicts:
if (state,id(rule),id(rejected)) in already_reported:
continue
debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
debuglog.warning("rejected rule (%s) in state %d", rejected,state)
errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
errorlog.warning("rejected rule (%s) in state %d", rejected, state)
already_reported[state,id(rule),id(rejected)] = 1
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning("Rule (%s) is never reduced", rejected)
errorlog.warning("Rule (%s) is never reduced", rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
lr.write_table(tabmodule,outputdir,signature)
# Write a pickled version of the tables
if picklefile:
lr.pickle_table(picklefile,signature)
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr,pinfo.error_func)
parse = parser.parse
return parser
|
from enum import Enum
class GameScenes(Enum):
FIRST_LEVEL = 1
SECOND_LEVEL = 2
THIRD_LEVEL = 3
FOURTH_LEVEL = 4
FIFTH_LEVEL = 5
|
"""
login app
"""
from zoom.apps import App
class MyApp(App):
pass
app = MyApp()
|
from cohere import Diffractometer
class Diffractometer_34idc(Diffractometer):
"""
Subclass of Diffractometer. Encapsulates "34idc" diffractometer.
"""
name = "34idc"
sampleaxes = ('y+', 'z-', 'y+') # in xrayutilities notation
detectoraxes = ('y+', 'x-')
incidentaxis = (0, 0, 1)
sampleaxes_name = ('th', 'chi', 'phi') # using the spec mnemonics for scan id.
detectoraxes_name = ('delta', 'gamma')
def __init__(self):
super(Diffractometer_34idc, self).__init__('34idc')
def create_diffractometer(diff_name):
if diff_name == '34idc':
return Diffractometer_34idc()
else:
print('diffractometer ' + diff_name + ' not defined.')
def verify_diffractometer(diff_name):
if diff_name == '34idc':
return True
else:
return False
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CheckDomainSunriseClaimRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'CheckDomainSunriseClaim')
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
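# Minimal usage sketch (assumes an AcsClient configured with valid credentials):
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = CheckDomainSunriseClaimRequest()
#     request.set_DomainName('example.com')
#     response = client.do_action_with_exception(request)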
|
# -*- coding: utf-8 -*-
"""
Statement pre-processors.
"""
def clean_whitespace(chatbot, statement):
"""
Remove any consecutive whitespace characters from the statement text.
"""
import re
# Replace linebreaks and tabs with spaces
statement.text = statement.text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
# Remove any leading or trailing whitespace
statement.text = statement.text.strip()
# Remove consecutive spaces
statement.text = re.sub(' +', ' ', statement.text)
return statement
def unescape_html(chatbot, statement):
"""
Convert escaped html characters into unescaped html characters.
For example: "&lt;b&gt;" becomes "<b>".
"""
import sys
# Replace HTML escape characters
if sys.version_info[0] < 3:
from HTMLParser import HTMLParser
html = HTMLParser()
else:
import html
statement.text = html.unescape(statement.text)
return statement
def convert_to_ascii(chatbot, statement):
"""
Converts unicode characters to ASCII character equivalents.
For example: "på fédéral" becomes "pa federal".
"""
import unicodedata
import sys
# Normalize unicode characters
if sys.version_info[0] < 3:
statement.text = unicode(statement.text) # NOQA
text = unicodedata.normalize('NFKD', statement.text)
text = text.encode('ascii', 'ignore').decode('utf-8')
statement.text = str(text)
return statement
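# Usage sketch: each preprocessor takes the chatbot instance (unused here) and
# any object with a mutable 'text' attribute, so a minimal stand-in suffices:
#
#     class _Stmt:                      # hypothetical stand-in for a Statement
#         def __init__(self, text):
#             self.text = text
#
#     s = clean_whitespace(None, _Stmt('hello   world\n'))
#     # s.text is now 'hello world'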
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class NaluWind(CMakePackage):
"""Nalu-Wind: Wind energy focused variant of Nalu."""
homepage = "https://github.com/exawind/nalu-wind"
git = "https://github.com/exawind/nalu-wind.git"
maintainers = ['jrood-nrel']
tags = ['ecp', 'ecp-apps']
version('master', branch='master')
# Options
variant('shared', default=(sys.platform != 'darwin'),
description='Build dependencies as shared libraries')
variant('pic', default=True,
description='Position independent code')
# Third party libraries
variant('cuda', default=False,
description='Compile with CUDA support')
variant('openfast', default=False,
description='Compile with OpenFAST support')
variant('tioga', default=False,
description='Compile with Tioga support')
variant('hypre', default=False,
description='Compile with Hypre support')
variant('catalyst', default=False,
description='Compile with Catalyst support')
variant('fftw', default=False,
description='Compile with FFTW support')
# Required dependencies
depends_on('mpi')
depends_on('yaml-cpp@0.5.3:', when='+shared')
depends_on('yaml-cpp~shared@0.5.3:', when='~shared')
# Cannot build Trilinos as a shared library with STK on Darwin
# which is why we have a 'shared' variant for Nalu-Wind
# https://github.com/trilinos/Trilinos/issues/2994
depends_on('trilinos+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='+shared')
depends_on('trilinos~shared+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='~shared')
depends_on('trilinos~shared+cuda+exodus+tpetra+muelu+belos+ifpack2+amesos2+zoltan+stk+boost~superlu-dist+superlu+hdf5+zlib+pnetcdf+shards~hypre@master,develop', when='+cuda')
# Optional dependencies
depends_on('openfast+cxx', when='+openfast+shared')
depends_on('openfast+cxx~shared', when='+openfast~shared')
depends_on('tioga', when='+tioga+shared')
depends_on('tioga~shared', when='+tioga~shared')
depends_on('hypre+mpi+int64', when='+hypre+shared')
depends_on('hypre+mpi+int64~shared', when='+hypre~shared')
depends_on('trilinos-catalyst-ioss-adapter', when='+catalyst')
# FFTW doesn't have a 'shared' variant at this moment
depends_on('fftw+mpi', when='+fftw')
depends_on('cuda', when='+cuda')
def setup_environment(self, spack_env, run_env):
if '+cuda' in self.spec:
spack_env.set('NVCC_WRAPPER_DEFAULT_COMPILER', spack_cxx)
def cmake_args(self):
spec = self.spec
options = []
options.extend([
'-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
])
if '+cuda' in self.spec:
options.extend([
'-DCMAKE_CXX_COMPILER=%s' % join_path(self.spec['trilinos'].prefix, 'bin', 'nvcc_wrapper'),
])
else:
options.extend([
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
])
options.extend([
'-DTrilinos_DIR:PATH=%s' % spec['trilinos'].prefix,
'-DYAML_DIR:PATH=%s' % spec['yaml-cpp'].prefix,
'-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=%s' % (
'ON' if '+pic' in spec else 'OFF'),
])
if '+openfast' in spec:
options.extend([
'-DENABLE_OPENFAST:BOOL=ON',
'-DOpenFAST_DIR:PATH=%s' % spec['openfast'].prefix
])
else:
options.append('-DENABLE_OPENFAST:BOOL=OFF')
if '+tioga' in spec:
options.extend([
'-DENABLE_TIOGA:BOOL=ON',
'-DTIOGA_DIR:PATH=%s' % spec['tioga'].prefix
])
else:
options.append('-DENABLE_TIOGA:BOOL=OFF')
if '+hypre' in spec:
options.extend([
'-DENABLE_HYPRE:BOOL=ON',
'-DHYPRE_DIR:PATH=%s' % spec['hypre'].prefix
])
else:
options.append('-DENABLE_HYPRE:BOOL=OFF')
if '+catalyst' in spec:
options.extend([
'-DENABLE_PARAVIEW_CATALYST:BOOL=ON',
'-DPARAVIEW_CATALYST_INSTALL_PATH:PATH=%s' %
spec['trilinos-catalyst-ioss-adapter'].prefix
])
else:
options.append('-DENABLE_PARAVIEW_CATALYST:BOOL=OFF')
if '+fftw' in spec:
options.extend([
'-DENABLE_FFTW:BOOL=ON',
'-DFFTW_DIR:PATH=%s' % spec['fftw'].prefix
])
else:
options.append('-DENABLE_FFTW:BOOL=OFF')
if '+cuda' in spec:
options.extend([
'-DENABLE_CUDA:BOOL=ON',
])
if 'darwin' in spec.architecture:
options.append('-DCMAKE_MACOSX_RPATH:BOOL=ON')
return options
|
# Count the words that contain the letter pair 'ae'
sentence = input().split()
ae = 0
for word in sentence:
if 'ae' in word:
ae += 1
# If at least 40% of the words contain 'ae', print the dialect phrase;
# otherwise report that standard Swedish ('rikssvenska') is spoken
if ae/len(sentence) >= 0.4:
print("dae ae ju traeligt va")
else:
print("haer talar vi rikssvenska")
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import with_metaclass, viewkeys, PY3
import types
try:
from collections import OrderedDict
except ImportError:
from pip._vendor.ordereddict import OrderedDict
from . import _inputstream
from . import _tokenizer
from . import treebuilders
from .treebuilders.base import Marker
from . import _utils
from .constants import (
spaceCharacters, asciiUpper2Lower,
specialElements, headingElements, cdataElements, rcdataElements,
tokenTypes, tagTokenTypes,
namespaces,
htmlIntegrationPointElements, mathmlTextIntegrationPointElements,
adjustForeignAttributes as adjustForeignAttributesMap,
adjustMathMLAttributes, adjustSVGAttributes,
E,
ReparseException
)
def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, **kwargs)
def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, **kwargs)
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.scripting = scripting
self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs)
self.reset()
try:
self.mainLoop()
except ReparseException:
self.reset()
self.mainLoop()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False # pylint:disable=redefined-variable-type
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
@property
def documentEncoding(self):
"""The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.
"""
if not hasattr(self, 'tokenizer'):
return None
return self.tokenizer.stream.charEncoding[0].name
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
prev_token = None
new_token = token
while new_token is not None:
prev_token = new_token
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
type == StartTagToken and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and prev_token["selfClosing"] and
not prev_token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": prev_token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, *args, **kwargs):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
scripting - treat noscript elements as if javascript was turned on
"""
self._parse(stream, False, None, *args, **kwargs)
return self.tree.getDocument()
def parseFragment(self, stream, *args, **kwargs):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
scripting - treat noscript elements as if javascript was turned on
"""
self._parse(stream, True, *args, **kwargs)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars=None):
# XXX The idea is to make errorcode mandatory.
if datavars is None:
datavars = {}
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError(E[errorcode] % datavars)
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
raw = token["data"]
token["data"] = OrderedDict(raw)
if len(raw) > len(token["data"]):
# we had some duplicated attribute, fix so first wins
token["data"].update(raw[::-1])
return token
def adjustMathMLAttributes(self, token):
adjust_attributes(token, adjustMathMLAttributes)
def adjustSVGAttributes(self, token):
adjust_attributes(token, adjustSVGAttributes)
def adjustForeignAttributes(self, token):
adjust_attributes(token, adjustForeignAttributesMap)
def reparseTokenNormal(self, token):
# pylint:disable=unused-argument
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
@_utils.memoize
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
info = {"type": type_names[token['type']]}
if token['type'] in tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
# pylint:disable=unused-argument
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
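# A missing or mangled doctype, or a public identifier starting with one
# of the prefixes below (compared lowercased), forces quirks mode; the
# later elif covers the "limited quirks" identifiers.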
if (not correct or token["name"] != "html" or
publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//")) or
publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html") or
publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None or
systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//")) or
publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noframes", "style"), self.startTagNoFramesStyle),
("noscript", self.startTagNoscript),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and, as an ASCII superset, it works.
data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = _inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagNoscript(self, token):
if self.parser.scripting:
self.parser.parseRCDataRawtext(token, "RAWTEXT")
else:
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inHeadNoscript"]
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
class InHeadNoscriptPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand),
(("head", "noscript"), self.startTagHeadNoscript),
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("noscript", self.endTagNoscript),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.parseError("eof-in-head-noscript")
self.anythingElse()
return True
def processComment(self, token):
return self.parser.phases["inHead"].processComment(token)
def processCharacters(self, token):
self.parser.parseError("char-in-head-noscript")
self.anythingElse()
return token
def processSpaceCharacters(self, token):
return self.parser.phases["inHead"].processSpaceCharacters(token)
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBaseLinkCommand(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagHeadNoscript(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
self.anythingElse()
return token
def endTagNoscript(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "noscript", "Expected noscript got %s" % node.name
self.parser.phase = self.parser.phases["inHead"]
def endTagBr(self, token):
self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
# Caller must raise parse error first!
self.endTagNoscript(impliedTagToken("noscript"))
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Set this to the default handler
self.processSpaceCharacters = self.processSpaceCharactersNonPre
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
("noscript", self.startTagNoscript),
(("noembed", "noframes"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
return (node1.name == node2.name and
node1.namespace == node2.namespace and
node1.attributes == node2.attributes)
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
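# "Noah's Ark" clause: at most three identical entries (same name,
# namespace and attributes) may sit between the last scope marker and the
# end of the list of active formatting elements.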
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea") and
not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharactersNonPre(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1 or
self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
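# Close any still-open li (or dd/dt) before inserting the new list item,
# stopping the search at the first special element other than address,
# div or p.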
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagNoscript(self, token):
if self.parser.scripting:
self.startTagRawtext(token)
else:
self.startTagOther(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
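# No <p> in button scope: synthesise one so the end tag has something to
# close, report the error, then reprocess this end tag.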
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"gotName": "body", "expectedName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster
# parent the lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes </optgroup>
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name.translate(asciiUpper2Lower) != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def endTagHtml(self, name):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
# If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
# pylint:enable=unused-argument
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
"inHeadNoscript": InHeadNoscriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def adjust_attributes(token, replacements):
if PY3 or _utils.PY27:
needs_adjustment = viewkeys(token['data']) & viewkeys(replacements)
else:
needs_adjustment = frozenset(token['data']) & frozenset(replacements)
if needs_adjustment:
token['data'] = OrderedDict((replacements.get(k, k), v)
for k, v in token['data'].items())
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
|
from docutils.parsers.rst import roles
from docutils import nodes
from docutils.parsers.rst.states import Inliner
import docutils.parsers.rst.roles
def strike_role(role, rawtext, text, lineno, inliner: Inliner, options={}, content=[]):
"""
USAGE: :del:`your context`
:param role: my-strike
:param rawtext: :my-strike:`your context`
:param text: your context
:param lineno:
:param inliner:
:param options:
:param content:
:return:
"""
# roles.set_classes(options)
# options.setdefault('classes', []).append("mys")
node = nodes.inline(rawtext, text, **dict(classes=['strike']))
return [node], []
def setup(app):
roles.register_canonical_role('del', strike_role)
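# --- Usage sketch ---
# This module acts as a small local Sphinx/docutils extension: once registered, the
# role makes ``:del:`some text``` render as an inline element carrying the ``strike``
# CSS class. A minimal, hypothetical wiring in a project's conf.py could look like the
# lines below; the module name ``strike_role_ext`` and the CSS file name are assumptions,
# not anything defined in this file.
#
#     # conf.py
#     import os, sys
#     sys.path.insert(0, os.path.abspath('.'))  # directory containing this module
#     extensions = ['strike_role_ext']          # hypothetical name for this module
#     html_css_files = ['custom.css']           # e.g. .strike { text-decoration: line-through; }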
|
print("Hello Github!")
|
# Author: Khalid - you must have heard the name
# Steps to run ->
# :~$ python yoyo.py
from flask import Flask
from flask import request
from flask import render_template
import stringComparison
app = Flask(__name__)
@app.route('/')
def my_form():
return render_template("my-form.html")
@app.route('/', methods=['POST'])
def my_form_post():
text1 = request.form['text1']
text2 = request.form['text2']
plagiarismPercent = stringComparison.extremelySimplePlagiarismChecker(text1,text2)
if plagiarismPercent > 50 :
return "<h1>Plagiarism Detected !</h1>"
else :
return "<h1>No Plagiarism Detected !</h1>"
if __name__ == '__main__':
app.run()
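# --- Usage sketch ---
# With the server started via "python yoyo.py" (Flask's default development port 5000
# is assumed here), the comparison endpoint can also be exercised without the HTML form:
#
#     curl -X POST http://127.0.0.1:5000/ -d "text1=the quick brown fox" -d "text2=the quick brown dog"
#
# The response is an HTML heading stating whether the similarity reported by
# stringComparison.extremelySimplePlagiarismChecker exceeded the 50 percent threshold.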
|
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
import io
import textwrap
import threading
import types
import sys
from pysnooper.utils import truncate
from python_toolbox import sys_tools, temp_file_tools
import pytest
import pysnooper
from pysnooper.variables import needs_parentheses
from .utils import (assert_output, assert_sample_output, VariableEntry,
CallEntry, LineEntry, ReturnEntry, OpcodeEntry,
ReturnValueEntry, ExceptionEntry)
def test_string_io():
string_io = io.StringIO()
@pysnooper.snoop(string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function('baba')
assert result == 15
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_multi_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
my_function('baba')
t1 = threading.Thread(target=my_function, name="test123",args=['bubu'])
t1.start()
t1.join()
t1 = threading.Thread(target=my_function, name="bibi",args=['bibi'])
t1.start()
t1.join()
output = output_capturer.string_io.getvalue()
calls = [line for line in output.split("\n") if "call" in line]
main_thread = calls[0]
assert len(main_thread) == len(calls[1])
assert len(main_thread) == len(calls[2])
main_thread_call_str = main_thread.find("call")
assert main_thread_call_str == calls[1].find("call")
assert main_thread_call_str == calls[2].find("call")
thread_info_regex = '([0-9]+-{name}+[ ]+)'
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bubu'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="test123")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="test123")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'bibi'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(name='bibi')),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(name='bibi')),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_callable():
string_io = io.StringIO()
def write(msg):
string_io.write(msg)
@pysnooper.snoop(write)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_watch():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch=(
'foo.x',
'io.__name__',
'len(foo.__dict__["x"] * "abc")',
))
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
VariableEntry('io.__name__', "'io'"),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
VariableEntry('foo.x', '2'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '6'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
VariableEntry('foo.x', '4'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '12'),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
VariableEntry('foo.x', '16'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '48'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_watch_explode():
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
@pysnooper.snoop(watch_explode=('_d', '_point', 'lst + []'))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_point = Foo(x=3, y=4)
lst = [7, 8, 9]
lst.append(10)
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
VariableEntry("_d['c']", "'ignore'"),
LineEntry(),
VariableEntry('_point'),
VariableEntry('_point.x', '3'),
VariableEntry('_point.y', '4'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[0]', '7'),
VariableEntry('(lst + [])[1]', '8'),
VariableEntry('(lst + [])[2]', '9'),
VariableEntry('lst + []'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[3]', '10'),
VariableEntry('lst + []'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_variables_classes():
class WithSlots(object):
__slots__ = ('x', 'y')
def __init__(self):
self.x = 3
self.y = 4
@pysnooper.snoop(watch=(
pysnooper.Keys('_d', exclude='c'),
pysnooper.Attrs('_d'), # doesn't have attributes
pysnooper.Attrs('_s'),
pysnooper.Indices('_lst')[-3:],
))
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_s = WithSlots()
_lst = list(range(1000))
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('WithSlots'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
LineEntry(),
VariableEntry('_s'),
VariableEntry('_s.x', '3'),
VariableEntry('_s.y', '4'),
LineEntry(),
VariableEntry('_lst'),
VariableEntry('_lst[997]', '997'),
VariableEntry('_lst[998]', '998'),
VariableEntry('_lst[999]', '999'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_single_watch_no_comma():
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch='foo')
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_long_variable():
@pysnooper.snoop()
def my_function():
foo = list(range(1000))
return foo
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex)
)
)
def test_repr_exception():
class Bad(object):
def __repr__(self):
1 / 0
@pysnooper.snoop()
def my_function():
bad = Bad()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('Bad'),
CallEntry('def my_function():'),
LineEntry('bad = Bad()'),
VariableEntry('bad', value='REPR FAILED'),
ReturnEntry(),
ReturnValueEntry('None')
)
)
def test_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
@pysnooper.snoop(string_io, depth=3)
def f1(x1):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
CallEntry('def f1(x1):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_method_and_prefix():
class Baz(object):
def __init__(self):
self.x = 2
@pysnooper.snoop(watch=('self.x',), prefix='ZZZ')
def square(self):
foo = 7
self.x **= 2
return self
baz = Baz()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = baz.square()
assert result is baz
assert result.x == 4
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
VariableEntry('self', prefix='ZZZ'),
VariableEntry('self.x', '2', prefix='ZZZ'),
CallEntry('def square(self):', prefix='ZZZ'),
LineEntry('foo = 7', prefix='ZZZ'),
VariableEntry('foo', '7', prefix='ZZZ'),
LineEntry('self.x **= 2', prefix='ZZZ'),
VariableEntry('self.x', '4', prefix='ZZZ'),
LineEntry(prefix='ZZZ'),
ReturnEntry(prefix='ZZZ'),
ReturnValueEntry(prefix='ZZZ'),
),
prefix='ZZZ'
)
def test_file_output():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
@pysnooper.snoop(path)
def my_function(_foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert_output(
output,
(
VariableEntry('_foo', value_regex="u?'baba'"),
CallEntry('def my_function(_foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_confusing_decorator_lines():
string_io = io.StringIO()
def empty_decorator(function):
return function
@empty_decorator
@pysnooper.snoop(string_io,
depth=2) # Multi-line decorator for extra confusion!
@empty_decorator
@empty_decorator
def my_function(foo):
x = lambda bar: 7
y = 8
return y + x(foo)
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
LineEntry(),
# inside lambda
VariableEntry('bar', value_regex="u?'baba'"),
CallEntry('x = lambda bar: 7'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
# back in my_function
ReturnEntry(),
ReturnValueEntry('15'),
)
)
def test_lambda():
string_io = io.StringIO()
my_function = pysnooper.snoop(string_io)(lambda x: x ** 2)
result = my_function(7)
assert result == 49
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x', '7'),
CallEntry(source_regex='^my_function = pysnooper.*'),
LineEntry(source_regex='^my_function = pysnooper.*'),
ReturnEntry(source_regex='^my_function = pysnooper.*'),
ReturnValueEntry('49'),
)
)
def test_unavailable_source():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder, \
sys_tools.TempSysPathAdder(str(folder)):
module_name = 'iaerojajsijf'
python_file_path = folder / ('%s.py' % (module_name,))
content = textwrap.dedent(u'''
import pysnooper
@pysnooper.snoop()
def f(x):
return x
''')
with python_file_path.open('w') as python_file:
python_file.write(content)
module = __import__(module_name)
python_file_path.unlink()
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = getattr(module, 'f')(7)
assert result == 7
output = output_capturer.output
assert_output(
output,
(
VariableEntry(stage='starting'),
CallEntry('SOURCE IS UNAVAILABLE'),
LineEntry('SOURCE IS UNAVAILABLE'),
ReturnEntry('SOURCE IS UNAVAILABLE'),
ReturnValueEntry('7'),
)
)
def test_no_overwrite_by_default():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path))
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert output.startswith('lala')
shortened_output = output[4:]
assert_output(
shortened_output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_overwrite():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path), overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert 'lala' not in output
assert_output(
output,
(
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
)
)
def test_error_in_overwrite_argument():
with temp_file_tools.create_temp_folder(prefix='pysnooper') as folder:
with pytest.raises(Exception, match='can only be used when writing'):
@pysnooper.snoop(overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
def test_needs_parentheses():
assert not needs_parentheses('x')
assert not needs_parentheses('x.y')
assert not needs_parentheses('x.y.z')
assert not needs_parentheses('x.y.z[0]')
assert not needs_parentheses('x.y.z[0]()')
assert not needs_parentheses('x.y.z[0]()(3, 4 * 5)')
assert not needs_parentheses('foo(x)')
assert not needs_parentheses('foo(x+y)')
assert not needs_parentheses('(x+y)')
assert not needs_parentheses('[x+1 for x in ()]')
assert needs_parentheses('x + y')
assert needs_parentheses('x * y')
assert needs_parentheses('x and y')
assert needs_parentheses('x if z else y')
def test_with_block():
# Testing that a single Tracer can handle many mixed uses
snoop = pysnooper.snoop()
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
return 9 # not traced, mustn't show up
with sys_tools.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
# In first with
VariableEntry('x', '2'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# In with in recursive call
VariableEntry('x', '1'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# Call to bar1 from if block outside with
VariableEntry('_x', '0'),
VariableEntry('qux'),
CallEntry('def bar1(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '1'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in recursive call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# -- Similar to previous few sections,
# -- but from first call to foo
# In with in first call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '2'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
# In with in first call
LineEntry('qux()'),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
),
)
def test_with_block_depth():
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
def f1(x1):
str(3)
with pysnooper.snoop(string_io, depth=3):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(x1)'),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
)
)
def test_cellvars():
string_io = io.StringIO()
def f2(a):
def f3(a):
x = 0
x += 1
def f4(a):
y = x
return 42
return f4(a)
return f3(a)
def f1(a):
with pysnooper.snoop(string_io, depth=4):
result1 = f2(a)
return result1
result = f1(42)
assert result == 42
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(a)'),
VariableEntry(),
CallEntry('def f2(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry("a"),
CallEntry('def f3(a):'),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
VariableEntry("x"),
CallEntry('def f4(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_var_order():
string_io = io.StringIO()
def f(one, two, three, four):
five = None
six = None
seven = None
five, six, seven = 5, 6, 7
with pysnooper.snoop(string_io, depth=2):
result = f(1, 2, 3, 4)
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry(),
VariableEntry(),
LineEntry('result = f(1, 2, 3, 4)'),
VariableEntry("one", "1"),
VariableEntry("two", "2"),
VariableEntry("three", "3"),
VariableEntry("four", "4"),
CallEntry('def f(one, two, three, four):'),
LineEntry(),
VariableEntry("five"),
LineEntry(),
VariableEntry("six"),
LineEntry(),
VariableEntry("seven"),
LineEntry(),
VariableEntry("five", "5"),
VariableEntry("six", "6"),
VariableEntry("seven", "7"),
ReturnEntry(),
ReturnValueEntry(),
)
)
def test_truncate():
max_length = 20
for i in range(max_length * 2):
string = i * 'a'
truncated = truncate(string, max_length)
if len(string) <= max_length:
assert string == truncated
else:
assert truncated == 'aaaaaaaa...aaaaaaaaa'
assert len(truncated) == max_length
def test_indentation():
from .samples import indentation, recursion
assert_sample_output(indentation)
assert_sample_output(recursion)
def test_exception():
from .samples import exception
assert_sample_output(exception)
def test_generator():
string_io = io.StringIO()
original_tracer = sys.gettrace()
original_tracer_active = lambda: (sys.gettrace() is original_tracer)
@pysnooper.snoop(string_io)
def f(x1):
assert not original_tracer_active()
x2 = (yield x1)
assert not original_tracer_active()
x3 = 'foo'
assert not original_tracer_active()
x4 = (yield 2)
assert not original_tracer_active()
return
assert original_tracer_active()
generator = f(0)
assert original_tracer_active()
first_item = next(generator)
assert original_tracer_active()
assert first_item == 0
second_item = generator.send('blabla')
assert original_tracer_active()
assert second_item == 2
with pytest.raises(StopIteration) as exc_info:
generator.send('looloo')
assert original_tracer_active()
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x1', '0'),
VariableEntry(),
CallEntry(),
LineEntry(),
VariableEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('0'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x2', "'blabla'"),
LineEntry(),
LineEntry(),
VariableEntry('x3', "'foo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('2'),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x4', "'looloo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(None),
)
)
def test_custom_repr():
string_io = io.StringIO()
def large(l):
return isinstance(l, list) and len(l) > 5
def print_list_size(l):
return 'list(size={})'.format(len(l))
def print_dict(d):
return 'dict(keys={})'.format(sorted(list(d.keys())))
def evil_condition(x):
return large(x) or isinstance(x, dict)
@pysnooper.snoop(string_io, custom_repr=(
(large, print_list_size),
(dict, print_dict),
(evil_condition, lambda x: 'I am evil')))
def sum_to_x(x):
l = list(range(x))
a = {'1': 1, '2': 2}
return sum(l)
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'list(size=10000)'),
LineEntry(),
VariableEntry('a', "dict(keys=['1', '2'])"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('49995000'),
)
)
|
import logging
import pathlib
from unittest import mock
from cabinetry import templates
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.builder._Builder")
def test_build(mock_builder, mock_apply):
config = {"General": {"HistogramFolder": "path/", "InputPath": "file.root"}}
method = "uproot"
# no router
templates.build(config, method=method)
assert mock_builder.call_args_list == [
((pathlib.Path("path/"), "file.root", method), {})
]
assert mock_apply.call_count == 1
config_call, func_call = mock_apply.call_args[0]
assert config_call == config
assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {"match_func": None}
# including a router
mock_router = mock.MagicMock()
templates.build(config, method=method, router=mock_router)
# verify wrapper was set
assert (
mock_router.template_builder_wrapper._extract_mock_name()
== "_Builder()._wrap_custom_template_builder"
)
assert mock_apply.call_count == 2 # 1 from before
config_call, func_call = mock_apply.call_args[0]
assert config_call == config
assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {
"match_func": mock_router._find_template_builder_match
}
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.collector._collector", return_value="func")
def test_collect(mock_collector, mock_apply, caplog):
caplog.set_level(logging.DEBUG)
config = {
"General": {
"HistogramFolder": "path/",
"InputPath": "f.root:{VariationPath}",
"VariationPath": "nominal",
}
}
method = "uproot"
templates.collect(config, method=method)
assert mock_collector.call_args_list == [
((pathlib.Path("path/"), "f.root:{VariationPath}", "nominal", method), {})
]
assert mock_apply.call_args_list == [((config, "func"), {})]
caplog.clear()
# no VariationPath in general settings
config = {
"General": {"HistogramFolder": "path/", "InputPath": "f.root:{VariationPath}"}
}
templates.collect(config, method=method)
assert 'no VariationPath specified in general settings, defaulting to ""' in [
rec.message for rec in caplog.records
]
assert mock_collector.call_args == (
(pathlib.Path("path/"), "f.root:{VariationPath}", "", method),
{},
)
caplog.set_level(logging.DEBUG)
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.postprocessor._postprocessor", return_value="func")
def test_run(mock_postprocessor, mock_apply):
config = {"General": {"HistogramFolder": "path/"}}
templates.postprocess(config)
assert mock_postprocessor.call_args_list == [((pathlib.Path("path/"),), {})]
assert mock_apply.call_args_list == [((config, "func"), {})]
|
from flask import Blueprint, jsonify
from flask import request
from flask import abort
from services.fault_injector import FaultInjector
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime, timedelta
import time
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from services.k8s_observer import K8sObserver
from services.message_queue import RabbitMq
chaosblade = Blueprint('chaosblade', __name__)
@chaosblade.route('/tool/api/v1.0/chaosblade/inject-cpu', methods=['POST'])
def chaos_inject_cpu():
if not request.json or 'host' not in request.json or 'timeout' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto = {
'host': request.json['host'],
'timeout': request.json['timeout'],
'open': mq_control
}
return jsonify(FaultInjector.chaos_inject_cpu(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/inject-random/with_time', methods=['POST'])
def chaos_inject_cpu_with_time():
if not request.json or 'host' not in request.json or 'second' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto_time = {
'time': request.json['second']
}
dto = {
'host': request.json['host'],
'open': mq_control
}
scheduler = BackgroundScheduler()
now = datetime.now()
delta = timedelta(seconds=int(dto_time['time'].encode('raw_unicode_escape')))
scheduler.add_job(func=lambda: FaultInjector.chaos_inject_random(dto), trigger='date', next_run_time=(now + delta))
scheduler.start()
time.sleep(delta.total_seconds() + 1)
return "success"
@chaosblade.route('/tool/api/v1.0/chaosblade/inject-mem', methods=['POST'])
def chaos_inject_mem():
if not request.json or 'host' not in request.json or 'percent' not in request.json or 'timeout' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto = {
'host': request.json['host'],
'percent': request.json['percent'],
'timeout': request.json['timeout'],
'open': mq_control
}
return jsonify(FaultInjector.chaos_inject_mem(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/inject-disk', methods=['POST'])
def chaos_inject_disk():
if not request.json or 'host' not in request.json or 'type' not in request.json or 'timeout' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto = {
'host': request.json['host'],
'type': request.json['type'],
'timeout': request.json['timeout'],
'open': mq_control
}
return jsonify(FaultInjector.chaos_inject_disk(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/inject-network', methods=['POST'])
def chaos_inject_network():
if not request.json or 'host' not in request.json or 'time' not in request.json or 'timeout' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto = {
'host': request.json['host'],
'time': request.json['time'],
'timeout': request.json['timeout'],
'open': mq_control
}
return jsonify(FaultInjector.chaos_inject_network(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/inject-random', methods=['POST'])
def chaos_inject_random():
if not request.json or 'host' not in request.json or 'timeout' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto = {
'host': request.json['host'],
'timeout': request.json['timeout'],
'open': mq_control
}
return jsonify(FaultInjector.chaos_inject_random(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/inject-pod-single', methods=['POST'])
def chaos_inject_pod_single():
if not request.json or 'host' not in request.json or 'pod' not in request.json or \
'timeout' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto = {
'host': request.json['host'],
'pod': request.json['pod'],
'timeout': request.json['timeout'],
'open': mq_control
}
return jsonify(FaultInjector.chaos_inject_pod_single(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/stop-specific-inject', methods=['POST'])
def stop_specific_inject():
if not request.json or 'tag' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto = {
'tag': request.json['tag'],
'open': mq_control
}
return jsonify(FaultInjector.stop_specific_chaos_inject(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/stop-all-inject-on-specific-node', methods=['POST'])
def stop_all_inject():
if not request.json or 'host' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto = {
'host': request.json['host'],
'open': mq_control
}
return jsonify(FaultInjector.stop_all_on_specific_node(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/stop-all-inject-on-all-nodes', methods=['POST'])
def stop_all_inject_on_all_nodes():
mq_control = RabbitMq.control()
    dto = {
        'open': mq_control
    }
    return jsonify(FaultInjector.stop_all_chaos_inject_on_all_nodes(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/view-inject-info', methods=['GET'])
def view_inject_info():
return jsonify(FaultInjector.view_chaos_inject())
@chaosblade.route('/tool/api/v1.0/chaosblade/view-inject-on-host-by-status', methods=['POST'])
def view_all_create_success_inject_info():
if not request.json or 'host' not in request.json or 'status' not in request.json:
abort(400)
status = str(request.json['status']).capitalize()
if status not in ['Success', 'Destroyed', 'Error']:
abort(400)
dto = {
'host': request.json['host'],
'status': request.json['status']
}
return jsonify(FaultInjector.view_inject_on_host_by_status(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/delete-specific-service', methods=['POST'])
def delete_specific_kind_pods():
if not request.json or 'service' not in request.json:
abort(400)
mq_control = RabbitMq.control()
dto = {
'service': request.json['service'],
'open': mq_control
}
print dto
return jsonify(FaultInjector.delete_all_pods_for_service(dto))
@chaosblade.route('/tool/api/v1.0/chaosblade/get-service-log', methods=['POST'])
def get_pod_log():
if not request.json or 'service' not in request.json or 'namespace' not in request.json:
abort(400)
pod_name = request.json['service']
namespace = request.json['namespace']
return jsonify(K8sObserver.get_service_log(pod_name, namespace))
@chaosblade.route('/tool/api/v1.0/chaosblade/test-config-default-cmd', methods=['GET'])
def test_config():
return jsonify(FaultInjector.test_config())
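# --- Usage sketch ---
# Most routes above expect a JSON body naming the target node and the fault parameters;
# the host and port in the example are placeholders for wherever this blueprint is served.
# A CPU fault on one node could be requested with:
#
#     curl -X POST http://<tool-host>:<port>/tool/api/v1.0/chaosblade/inject-cpu \
#          -H "Content-Type: application/json" \
#          -d '{"host": "node-1", "timeout": 60}'
#
# and later cleared via stop-all-inject-on-specific-node with the same "host" field.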
|
#!/usr/bin/env python
import socket
import re
class RobotFeedback:
"""Class for the Mecademic Robot allowing for live positional
feedback of the Mecademic Robot.
Attributes
----------
address : string
The IP address associated to the Mecademic robot.
socket : socket
Socket connecting to physical Mecademic Robot.
robot_status : tuple of boolean
        Status bits of the robot.
    gripper_status : tuple of boolean
        Status bits of the gripper.
joints : tuple of floats
Joint angle in degrees of each joint starting from
        joint 1 going all the way to joint 6.
cartesian : tuple of floats
The cartesian values in mm and degrees of the TRF.
    joints_vel : tuple of floats
        Velocity of the joints.
torque : tuple of floats
Torque of joints.
accelerometer : tuple of floats
Acceleration of joints.
last_msg_chunk : string
Buffer of received messages.
version : string
Firmware version of the Mecademic Robot.
version_regex : list of int
        Major, minor and patch numbers parsed from the firmware version.
"""
def __init__(self, address, firmware_version):
"""Constructor for an instance of the class Mecademic robot.
Parameters
----------
address : string
The IP address associated to the Mecademic robot.
firmware_version : string
Firmware version of the Mecademic Robot.
"""
self.address = address
self.socket = None
self.robot_status = ()
self.gripper_status = ()
self.joints = () #Joint Angles, angles in degrees | [theta_1, theta_2, ... theta_n]
self.cartesian = () #Cartesian coordinates, distances in mm, angles in degrees | [x,y,z,alpha,beta,gamma]
        self.joints_vel = ()
        self.torque = ()
        self.accelerometer = ()
self.last_msg_chunk = ''
a = re.search(r'(\d+)\.(\d+)\.(\d+)', firmware_version)
self.version = a.group(0)
self.version_regex = [int(a.group(1)), int(a.group(2)), int(a.group(3))]
def connect(self):
"""Connects Mecademic Robot object communication to the physical Mecademic Robot.
Returns
-------
status : boolean
Return whether the connection is established.
"""
try:
self.socket = socket.socket()
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,1)
self.socket.settimeout(1) #1s
try:
self.socket.connect((self.address, 10001)) #connect to the robot's address
except socket.timeout: #catch if the robot is not connected to in time
raise RuntimeError
# Receive confirmation of connection
if self.socket is None: #check that socket is not connected to nothing
raise RuntimeError
self.socket.settimeout(1) #1s
try:
if(self.version_regex[0] <= 7):
self.get_data()
elif(self.version_regex[0] > 7): #RobotStatus and GripperStatus are sent on 10001 upon connecting from 8.x firmware
msg = self.socket.recv(256).decode('ascii') #read message from robot
self._get_robot_status(msg)
self._get_gripper_status(msg)
return True
except socket.timeout:
raise RuntimeError
except RuntimeError:
return False
def disconnect(self):
"""Disconnects Mecademic Robot object from physical Mecademic Robot.
"""
if self.socket is not None:
self.socket.close()
self.socket = None
def get_data(self, delay=0.1):
"""Receives message from the Mecademic Robot and
saves the values in appropriate variables.
Parameters
----------
delay: int or float
Time to set for timeout of the socket.
"""
if self.socket is None: #check that the connection is established
return #if no connection, nothing to receive
self.socket.settimeout(delay) #set read timeout to desired delay
try:
raw_msg = self.socket.recv(256).decode('ascii') #read message from robot
raw_response = raw_msg.split('\x00') # Split the data at \x00 to manage fragmented data
raw_response[0] = self.last_msg_chunk + raw_response[0] # Merge the first data with last fragment from previous data stream
self.last_msg_chunk = raw_response[-1]
for response in raw_response[:-1]:
if(self.version_regex[0] <= 7):
self._get_joints(response)
self._get_cartesian(response)
elif(self.version_regex[0] > 7):
self._get_joints(response)
self._get_cartesian(response)
self._get_joints_vel(response)
self._get_torque_ratio(response)
self._get_accelerometer(response)
        except socket.timeout:
pass
def _get_robot_status(self, response):
"""Gets the values of RobotStatus bits from the message sent by
the Robot upon connecting.
        Values saved to the robot_status attribute of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('RobotStatus')
for resp_code in code:
if response.find(resp_code) != -1:
self.robot_status = self._decode_msg(response, resp_code)
def _get_gripper_status(self, response):
"""Gets the values of GripperStatus bits from the message sent by
the Robot upon connecting.
        Values saved to the gripper_status attribute of the object.
Parameters
----------
response : string
Message received from the robot.
"""
code = None
code = self._get_response_code('GripperStatus')
for resp_code in code:
if response.find(resp_code) != -1:
self.gripper_status = self._decode_msg(response,resp_code)
def _get_joints(self, response):
"""Gets the joint values of the variables from the message sent by the Robot.
Values saved to attribute joints of the object.
Parameters
----------
response: string
Message received from the Robot.
"""
code = None
code = self._get_response_code('JointsPose')
for resp_code in code:
if response.find(resp_code) != -1:
self.joints = self._decode_msg(response, resp_code)
def _get_cartesian(self, response):
"""Gets the cartesian values of the variables from the message sent by the Robot.
Values saved to attribute cartesian of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('CartesianPose')
for resp_code in code:
if response.find(resp_code) != -1:
self.cartesian = self._decode_msg(response,resp_code)
def _get_joints_vel(self, response):
"""Gets the velocity values of the Joints from the message sent by the Robot.
        Values saved to the joints_vel attribute of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('JointsVel')
for resp_code in code:
if response.find(resp_code) != -1:
self.joints_vel = self._decode_msg(response,resp_code)
def _get_torque_ratio(self, response):
"""Gets the torque ratio values of the Joints from the message sent by the Robot.
Values saved to attribute torque of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('TorqueRatio')
for resp_code in code:
if response.find(resp_code) != -1:
self.torque = self._decode_msg(response,resp_code)
def _get_accelerometer(self,response):
"""Gets the accelerometers values from the message sent by the Robot.
Values saved to attribute accelerometer of the object.
Parameters
----------
response : string
Message received from the Robot.
"""
code = None
code = self._get_response_code('AccelerometerData')
for resp_code in code:
if response.find(resp_code) != -1:
self.accelerometer = self._decode_msg(response,resp_code)
def _get_response_code(self, param):
"""Retreives the response code for the parameters being streamed on port 100001.
Parameters
----------
param : string
Parameter that needs to be extracted from the raw data stream from the Mecademic Robot.
1. Robot Status {sent only once upon connecting on 10001}.
2. Gripper Status {sent only once upon connecting on 10001}.
3. Joints Pose feedback.
4. Cartesian Pose feedback.
5. Joints Velocity feedback.
6. Torque Ratio.
7. Accelerometer data.
Returns
--------
answer_list : list of strings
List of response codes to search for in the raw data stream.
"""
if param.find('RobotStatus') != -1:
return ['[2007]']
elif param.find('GripperStatus')!= -1:
return ['[2079]']
elif param.find('JointsPose') != -1:
if(self.version_regex[0] <= 7):
return ['[2102]']
elif(self.version_regex[0] > 7):
return ['[2026]','[2210]']
elif param.find('CartesianPose') != -1:
if(self.version_regex[0] <= 7):
return ['[2103]']
elif(self.version_regex[0] > 7):
return ['[2027]','[2211]']
elif param.find('JointsVel') != -1:
return ['[2212]']
elif param.find('TorqueRatio') != -1:
return ['[2213]']
elif param.find('AccelerometerData') != -1:
return ['[2220]']
else:
return ['Invalid']
def _decode_msg(self, response, resp_code):
"""
Parameters
----------
response : string
Message received from the Robot.
resp_code : string
Message to decode
Returns
--------
params : tuple of float
Message decoded.
"""
response = response.replace(resp_code+'[','').replace(']','')
params = ()
if response != '':
param_str = response.split(',')
if len(param_str) == 6:
params = tuple((float(x) for x in param_str))
elif len(param_str) == 7:
params = tuple((float(x) for x in param_str[1:])) # remove timestamp
else:
params =()
return params
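# Hedged illustration (not part of the original file): how _decode_msg parses a
# raw feedback chunk, assuming the firmware-8 joint-pose response code '[2026]'
# and hypothetical joint values.
if __name__ == '__main__':
    _sample = '[2026][10.0,20.0,30.0,0.0,0.0,0.0]'
    _payload = _sample.replace('[2026][', '').replace(']', '')
    print(tuple(float(x) for x in _payload.split(',')))  # (10.0, 20.0, 30.0, 0.0, 0.0, 0.0)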
|
"""
Regression tests for Model inheritance behavior.
"""
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from unittest import expectedFailure
from django import forms
from django.test import TestCase
from .models import (
ArticleWithAuthor, BachelorParty, BirthdayParty, BusStation, Child,
DerivedM, InternalCertificationAudit, ItalianRestaurant, M2MChild,
MessyBachelorParty, ParkingLot, ParkingLot2, ParkingLot3, ParkingLot4A,
ParkingLot4B, Person, Place, Profile, QualityControl, Restaurant,
SelfRefChild, SelfRefParent, Senator, Supplier, TrainStation, User,
Wholesaler,
)
class ModelInheritanceTest(TestCase):
def test_model_inheritance(self):
# Regression for #7350, #7202
# Check that when you create a Parent object with a specific reference
# to an existent child instance, saving the Parent doesn't duplicate
# the child. This behavior is only activated during a raw save - it
# is mostly relevant to deserialization, but any sort of CORBA style
# 'narrow()' API would require a similar approach.
# Create a child-parent-grandparent chain
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
# Create a child-parent chain with an explicit parent link
place2 = Place(name='Main St', address='111 Main St')
place2.save_base(raw=True)
park = ParkingLot(parent=place2, capacity=100)
park.save_base(raw=True)
# Check that no extra parent objects have been created.
places = list(Place.objects.all())
self.assertEqual(places, [place1, place2])
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_hot_dogs': True
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's House of Pasta",
'serves_gnocchi': True,
'serves_hot_dogs': True,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 100,
'name': 'Main St',
}])
# You can also update objects when using a raw save.
place1.name = "Guido's All New House of Pasta"
place1.save_base(raw=True)
restaurant.serves_hot_dogs = False
restaurant.save_base(raw=True)
italian_restaurant.serves_gnocchi = False
italian_restaurant.save_base(raw=True)
place2.name = 'Derelict lot'
place2.save_base(raw=True)
park.capacity = 50
park.save_base(raw=True)
# No extra parent objects after an update, either.
places = list(Place.objects.all())
self.assertEqual(places, [place2, place1])
self.assertEqual(places[0].name, 'Derelict lot')
self.assertEqual(places[1].name, "Guido's All New House of Pasta")
dicts = list(Restaurant.objects.values('name', 'serves_hot_dogs'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_hot_dogs': False,
}])
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
dicts = list(ParkingLot.objects.values('name', 'capacity'))
self.assertEqual(dicts, [{
'capacity': 50,
'name': 'Derelict lot',
}])
# If you try to raw_save a parent attribute onto a child object,
# the attribute will be ignored.
italian_restaurant.name = "Lorenzo's Pasta Hut"
italian_restaurant.save_base(raw=True)
# Note that the name has not changed
# - name is an attribute of Place, not ItalianRestaurant
dicts = list(ItalianRestaurant.objects.values(
'name', 'serves_hot_dogs', 'serves_gnocchi'))
self.assertEqual(dicts, [{
'name': "Guido's All New House of Pasta",
'serves_gnocchi': False,
'serves_hot_dogs': False,
}])
def test_issue_7105(self):
# Regression tests for #7105: dates() queries should be able to use
# fields from the parent model as easily as the child.
Child.objects.create(
name='child',
created=datetime.datetime(2008, 6, 26, 17, 0, 0))
datetimes = list(Child.objects.datetimes('created', 'month'))
self.assertEqual(datetimes, [datetime.datetime(2008, 6, 1, 0, 0)])
def test_issue_7276(self):
# Regression test for #7276: calling delete() on a model with
# multi-table inheritance should delete the associated rows from any
# ancestor tables, as well as any descendent objects.
place1 = Place(
name="Guido's House of Pasta",
address='944 W. Fullerton')
place1.save_base(raw=True)
restaurant = Restaurant(
place_ptr=place1,
serves_hot_dogs=True,
serves_pizza=False)
restaurant.save_base(raw=True)
italian_restaurant = ItalianRestaurant(
restaurant_ptr=restaurant,
serves_gnocchi=True)
italian_restaurant.save_base(raw=True)
ident = ItalianRestaurant.objects.all()[0].id
self.assertEqual(Place.objects.get(pk=ident), place1)
Restaurant.objects.create(
name='a',
address='xx',
serves_hot_dogs=True,
serves_pizza=False)
# This should delete both Restaurants, plus the related places, plus
# the ItalianRestaurant.
Restaurant.objects.all().delete()
with self.assertRaises(Place.DoesNotExist):
Place.objects.get(pk=ident)
with self.assertRaises(ItalianRestaurant.DoesNotExist):
ItalianRestaurant.objects.get(pk=ident)
def test_issue_6755(self):
"""
Regression test for #6755
"""
r = Restaurant(serves_pizza=False, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, r.place_ptr_id)
orig_id = r.id
r = Restaurant(place_ptr_id=orig_id, serves_pizza=True, serves_hot_dogs=False)
r.save()
self.assertEqual(r.id, orig_id)
self.assertEqual(r.id, r.place_ptr_id)
def test_issue_7488(self):
# Regression test for #7488. This looks a little crazy, but it's the
# equivalent of what the admin interface has to do for the edit-inline
# case.
suppliers = Supplier.objects.filter(
restaurant=Restaurant(name='xx', address='yy'))
suppliers = list(suppliers)
self.assertEqual(suppliers, [])
def test_issue_11764(self):
"""
Regression test for #11764
"""
wholesalers = list(Wholesaler.objects.all().select_related())
self.assertEqual(wholesalers, [])
def test_issue_7853(self):
"""
Regression test for #7853
If the parent class has a self-referential link, make sure that any
updates to that link via the child update the right table.
"""
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
obj.delete()
def test_get_next_previous_by_date(self):
"""
Regression tests for #8076
get_(next/previous)_by_date should work
"""
c1 = ArticleWithAuthor(
headline='ArticleWithAuthor 1',
author="Person 1",
pub_date=datetime.datetime(2005, 8, 1, 3, 0))
c1.save()
c2 = ArticleWithAuthor(
headline='ArticleWithAuthor 2',
author="Person 2",
pub_date=datetime.datetime(2005, 8, 1, 10, 0))
c2.save()
c3 = ArticleWithAuthor(
headline='ArticleWithAuthor 3',
author="Person 3",
pub_date=datetime.datetime(2005, 8, 2))
c3.save()
self.assertEqual(c1.get_next_by_pub_date(), c2)
self.assertEqual(c2.get_next_by_pub_date(), c3)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c3.get_next_by_pub_date()
self.assertEqual(c3.get_previous_by_pub_date(), c2)
self.assertEqual(c2.get_previous_by_pub_date(), c1)
with self.assertRaises(ArticleWithAuthor.DoesNotExist):
c1.get_previous_by_pub_date()
def test_inherited_fields(self):
"""
Regression test for #8825 and #9390
Make sure all inherited fields (esp. m2m fields, in this case) appear
on the child class.
"""
m2mchildren = list(M2MChild.objects.filter(articles__isnull=False))
self.assertEqual(m2mchildren, [])
# Ordering should not include any database column more than once (this
# is most likely to occur naturally with model inheritance, so we
# check it here). Regression test for #9390. This necessarily pokes at
# the SQL string for the query, since the duplicate problems are only
# apparent at that late stage.
qs = ArticleWithAuthor.objects.order_by('pub_date', 'pk')
sql = qs.query.get_compiler(qs.db).as_sql()[0]
fragment = sql[sql.find('ORDER BY'):]
pos = fragment.find('pub_date')
self.assertEqual(fragment.find('pub_date', pos + 1), -1)
def test_queryset_update_on_parent_model(self):
"""
Regression test for #10362
It is possible to call update() and only change a field in
an ancestor model.
"""
article = ArticleWithAuthor.objects.create(
author="fred",
headline="Hey there!",
pub_date=datetime.datetime(2009, 3, 1, 8, 0, 0))
update = ArticleWithAuthor.objects.filter(
author="fred").update(headline="Oh, no!")
self.assertEqual(update, 1)
update = ArticleWithAuthor.objects.filter(
pk=article.pk).update(headline="Oh, no!")
self.assertEqual(update, 1)
derivedm1 = DerivedM.objects.create(
customPK=44,
base_name="b1",
derived_name="d1")
self.assertEqual(derivedm1.customPK, 44)
self.assertEqual(derivedm1.base_name, 'b1')
self.assertEqual(derivedm1.derived_name, 'd1')
derivedms = list(DerivedM.objects.all())
self.assertEqual(derivedms, [derivedm1])
def test_use_explicit_o2o_to_parent_as_pk(self):
"""
Regression tests for #10406
If there's a one-to-one link between a child model and the parent and
no explicit pk declared, we can use the one-to-one link as the pk on
the child.
"""
self.assertEqual(ParkingLot2._meta.pk.name, "parent")
# However, the connector from child to parent need not be the pk on
# the child at all.
self.assertEqual(ParkingLot3._meta.pk.name, "primary_key")
# the child->parent link
self.assertEqual(
ParkingLot3._meta.get_ancestor_link(Place).name,
"parent")
def test_use_explicit_o2o_to_parent_from_abstract_model(self):
self.assertEqual(ParkingLot4A._meta.pk.name, "parent")
ParkingLot4A.objects.create(
name="Parking4A",
address='21 Jump Street',
)
self.assertEqual(ParkingLot4B._meta.pk.name, "parent")
ParkingLot4B.objects.create(
name="Parking4B",
address='21 Jump Street',
)
def test_all_fields_from_abstract_base_class(self):
"""
Regression tests for #7588
"""
# All fields from an ABC, including those inherited non-abstractly
# should be available on child classes (#7588). Creating this instance
# should work without error.
QualityControl.objects.create(
headline="Problems in Django",
pub_date=datetime.datetime.now(),
quality=10,
assignee="adrian")
def test_abstract_base_class_m2m_relation_inheritance(self):
# Check that many-to-many relations defined on an abstract base class
# are correctly inherited (and created) on the child class.
p1 = Person.objects.create(name='Alice')
p2 = Person.objects.create(name='Bob')
p3 = Person.objects.create(name='Carol')
p4 = Person.objects.create(name='Dave')
birthday = BirthdayParty.objects.create(
name='Birthday party for Alice')
birthday.attendees.set([p1, p3])
bachelor = BachelorParty.objects.create(name='Bachelor party for Bob')
bachelor.attendees.set([p2, p4])
parties = list(p1.birthdayparty_set.all())
self.assertEqual(parties, [birthday])
parties = list(p1.bachelorparty_set.all())
self.assertEqual(parties, [])
parties = list(p2.bachelorparty_set.all())
self.assertEqual(parties, [bachelor])
# Check that a subclass of a subclass of an abstract model doesn't get
# its own accessor.
self.assertFalse(hasattr(p2, 'messybachelorparty_set'))
# ... but it does inherit the m2m from its parent
messy = MessyBachelorParty.objects.create(
name='Bachelor party for Dave')
messy.attendees.set([p4])
messy_parent = messy.bachelorparty_ptr
parties = list(p4.bachelorparty_set.all())
self.assertEqual(parties, [bachelor, messy_parent])
def test_abstract_verbose_name_plural_inheritance(self):
"""
verbose_name_plural correctly inherited from ABC if inheritance chain
includes an abstract model.
"""
# Regression test for #11369: verbose_name_plural should be inherited
# from an ABC even when there are one or more intermediate
# abstract models in the inheritance chain, for consistency with
# verbose_name.
self.assertEqual(
InternalCertificationAudit._meta.verbose_name_plural,
'Audits'
)
def test_inherited_nullable_exclude(self):
obj = SelfRefChild.objects.create(child_data=37, parent_data=42)
self.assertQuerysetEqual(
SelfRefParent.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
self.assertQuerysetEqual(
SelfRefChild.objects.exclude(self_data=72), [
obj.pk
],
attrgetter("pk")
)
def test_concrete_abstract_concrete_pk(self):
"""
Primary key set correctly with concrete->abstract->concrete inheritance.
"""
# Regression test for #13987: Primary key is incorrectly determined
# when more than one model has a concrete->abstract->concrete
# inheritance hierarchy.
self.assertEqual(
len([field for field in BusStation._meta.local_fields if field.primary_key]),
1
)
self.assertEqual(
len([field for field in TrainStation._meta.local_fields if field.primary_key]),
1
)
self.assertIs(BusStation._meta.pk.model, BusStation)
self.assertIs(TrainStation._meta.pk.model, TrainStation)
def test_inherited_unique_field_with_form(self):
"""
Test that a model which has different primary key for the parent model
passes unique field checking correctly. Refs #17615.
"""
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = '__all__'
User.objects.create(username="user_only")
p = Profile.objects.create(username="user_with_profile")
form = ProfileForm({'username': "user_with_profile", 'extra': "hello"},
instance=p)
self.assertTrue(form.is_valid())
def test_inheritance_joins(self):
# Test for #17502 - check that filtering through two levels of
# inheritance chain doesn't generate extra joins.
qs = ItalianRestaurant.objects.all()
self.assertEqual(str(qs.query).count('JOIN'), 2)
qs = ItalianRestaurant.objects.filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 2)
@expectedFailure
def test_inheritance_values_joins(self):
# It would be nice (but not too important) to skip the middle join in
# this case. Skipping is possible as nothing from the middle model is
# used in the qs and top contains direct pointer to the bottom model.
qs = ItalianRestaurant.objects.values_list('serves_gnocchi').filter(name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_issue_21554(self):
senator = Senator.objects.create(
name='John Doe', title='X', state='Y'
)
senator = Senator.objects.get(pk=senator.pk)
self.assertEqual(senator.name, 'John Doe')
self.assertEqual(senator.title, 'X')
self.assertEqual(senator.state, 'Y')
def test_inheritance_resolve_columns(self):
Restaurant.objects.create(name='Bobs Cafe', address="Somewhere",
serves_pizza=True, serves_hot_dogs=True)
p = Place.objects.all().select_related('restaurant')[0]
self.assertIsInstance(p.restaurant.serves_pizza, bool)
def test_inheritance_select_related(self):
# Regression test for #7246
r1 = Restaurant.objects.create(
name="Nobu", serves_hot_dogs=True, serves_pizza=False
)
r2 = Restaurant.objects.create(
name="Craft", serves_hot_dogs=False, serves_pizza=True
)
Supplier.objects.create(name="John", restaurant=r1)
Supplier.objects.create(name="Jane", restaurant=r2)
self.assertQuerysetEqual(
Supplier.objects.order_by("name").select_related(), [
"Jane",
"John",
],
attrgetter("name")
)
jane = Supplier.objects.order_by("name").select_related("restaurant")[0]
self.assertEqual(jane.restaurant.name, "Craft")
def test_related_filtering_query_efficiency_ticket_15844(self):
r = Restaurant.objects.create(
name="Guido's House of Pasta",
address='944 W. Fullerton',
serves_hot_dogs=True,
serves_pizza=False,
)
s = Supplier.objects.create(restaurant=r)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
Supplier.objects.filter(restaurant=r),
[s], lambda x: x,
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
r.supplier_set.all(),
[s], lambda x: x,
)
|
import os
# import torch
import argparse
import base64
import sys
import io
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def fullmodel2base64(model):
buffer = io.BytesIO()
torch.save(model, buffer)
bg = buffer.getvalue()
return base64.b64encode(bg).decode()
def base642fullmodel(modbase64):
inputrpc = bytes(modbase64.encode())
inputrpc_ = base64.b64decode(inputrpc)
loadmodel = torch.load(io.BytesIO(inputrpc_))
return loadmodel
model_list = []
f = open(sys.argv[1], "r")
models = f.read().split(",")
f.close()
print(models)
for m in models:
model_list.append(base642fullmodel(m))
new_model_state = model_list[0].state_dict()
# Sum the weights of all the models
for m in model_list[1:]:
state_m = m.state_dict()
for key in state_m:
new_model_state[key] += state_m[key]
# Average the summed weights
for key in new_model_state:
new_model_state[key] /= len(model_list)
new_model = model_list[0]
new_model.load_state_dict(new_model_state)
output = fullmodel2base64(new_model)
print(output)
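# Usage sketch (inferred from the code above, not part of the original script;
# the script and file names here are hypothetical):
#   python average_models.py models.txt
# where models.txt holds a comma-separated list of base64-encoded torch models
# (as produced by fullmodel2base64). The averaged model is printed to stdout as
# a single base64 string.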
|
# Question No 6
# Cumulative ocean rise for each of the next 25 years
year = 1
millimeter = 1.6
while year <= 25:
rise = year * millimeter
print("After year", year, "the ocean will have risen", rise, "millimeters")
year += 1
|
from fastapi import FastAPI
import uvicorn
from src.routes import (
user,
employee,
car,
inventory,
product,
service,
dealership,
department,
)
from fastapi.middleware.cors import CORSMiddleware
from src.settings.envvariables import Settings
Settings().check_variables()
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Include/define our routes
app.include_router(user.app, prefix="/users", tags=["Users"])
app.include_router(employee.app, prefix="/employees", tags=["Employees"])
app.include_router(car.app, prefix="/cars", tags=["Cars"])
app.include_router(inventory.app, prefix="/inventory", tags=["Inventory"])
app.include_router(product.app, prefix="/products", tags=["Product"])
app.include_router(service.app, prefix="/services/requests", tags=["Service"])
app.include_router(dealership.app, prefix="/dealerships", tags=["Dealership"])
app.include_router(department.app, prefix="/departments", tags=["Department"])
# Launch the app with uvicorn and handle environment
# if Settings().ENV == "prod":
# if __name__ == "__main__":
# print("Launching Production Environment")
# uvicorn.run("main:app", host="0.0.0.0", port=Settings().PORT, reload=False, workers=3)
# else:
# if __name__ == "__main__":
# print("Launching Development Environment")
# uvicorn.run("main:app", host="0.0.0.0", port=Settings().PORT, reload=True, workers=1)
|
from django.db.models import Q
from apps.configattribute.models import ConfigAttribute
from apps.property.models import GenericProperty
from apps.utils.data_helpers.manager import DataManager
from apps.utils.iotile.variable import SYSTEM_VID
from apps.utils.timezone_utils import display_formatted_ts
class TripInfo(object):
block = None
data = {}
slug = None
last_update = None
def __init__(self, block):
self.block = block
self.slug = block.slug
self.data = {
'summary': {},
'properties': {}
}
self.last_update = None
def add_property(self, key, value):
self.data['properties'][key] = value
def add_summary_event(self, event):
if 'summary' in self.data:
if self.last_update and self.last_update > event.timestamp:
return
self.data['summary'] = event.extra_data
# Trip Summary should win over Trip Update
self.last_update = event.timestamp
def to_representation(self):
data = {
'slug': self.slug,
'label': self.block.title,
'summary_date': display_formatted_ts(self.last_update) if self.last_update else '',
'data': self.data
}
return data
class TripOrgQualityReport(object):
org = None
results = {}
config = {}
def __init__(self, org):
self.org = org
self.results = {}
self.config = self._get_config_attributes()
def _get_config_attributes(self):
config_name = ':report:trip_quality:config'
attribute = ConfigAttribute.objects.get_attribute_by_priority(name=config_name, target_slug=self.org.obj_target_slug)
if attribute:
return attribute.data
# Fall back to a default report configuration if no config attribute exists
return {
'summary_keys': [
"Device",
"START (UTC)",
"END (UTC)",
"Duration (Days)",
"Event Count",
"First event at (UTC)",
"Last event at (UTC)",
"Max Humidity (% RH)",
"Min Humidity (% RH)",
"Median Humidity (% RH)",
"Max Pressure (Mbar)",
"Min Pressure (Mbar)",
"Median Pressure (Mbar)",
"Max Temp (C)",
"Min Temp (C)",
"Median Temp (C)",
"Above 30C",
"Below 17C",
"Max Peak (G)",
"TimeStamp(MaxPeak) (UTC)",
"DeltaV at Max Peak (in/s)",
"MaxDeltaV (in/s)",
"TimeStamp(MaxDeltaV) (UTC)",
"Peak at MaxDeltaV (G)"
],
'property_keys': []
}
def analyze(self):
"""
Get all archives for an organization and fill a TripInfo object for each with the following
- Selected trip properties (based on project's configAttribute)
- Last Update Event, if any
- Last Trip Summary Event, if any
:return: Nothing
"""
blocks = self.org.data_blocks.all()
for block in blocks:
self.results[block.slug] = TripInfo(block)
block_slugs = [block.slug for block in blocks]
if self.config and 'property_keys' in self.config:
for property_item in self.config['property_keys']:
properties = GenericProperty.objects.filter(target__in=block_slugs, name=property_item)
for p in properties:
self.results[p.target].add_property(property_item, p.value)
# Not great, but we seem to have blocks with an empty project_slug as well as blocks under 'p--0000-0000'
q = Q(project_slug='') | Q(project_slug='p--0000-0000')
q = q & Q(device_slug__in=block_slugs, variable_slug__icontains=SYSTEM_VID['TRIP_SUMMARY'])
events = DataManager.filter_qs_using_q(
'event',
q=q
)
for event in events:
self.results[event.device_slug].add_summary_event(event)
# Cleanup reports that don't look complete (No Summary or Properties)
to_delete = []
for slug, trip in self.results.items():
if trip.data['summary'] == {}:
# Delete Archive that does not represent a real trip
to_delete.append(slug)
for slug in to_delete:
del self.results[slug]
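# Usage sketch (hypothetical, for illustration only):
#   report = TripOrgQualityReport(org)
#   report.analyze()
#   rows = [trip.to_representation() for trip in report.results.values()]
# Each row carries the archive slug and label, the latest summary timestamp, and
# the summary/property data selected by the ':report:trip_quality:config' attribute.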
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import collections
import contextlib
import copy
import inspect
import itertools
import json
import logging
import os
import re
import unittest
from constants import constants
from core.controllers import base
from core.domain import auth_domain
from core.domain import caching_domain
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from core.domain import interaction_registry
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.search import elastic_search_services
from core.platform.taskqueue import cloud_tasks_emulator
import feconf
import main
import main_mail
import main_taskqueue
from proto import text_classifier_pb2
import python_utils
import schema_utils
import utils
import contextlib2
import elasticsearch
from google.appengine.api import mail
from google.appengine.ext import deferred
from google.appengine.ext import testbed
import requests_mock
import webtest
(
auth_models, exp_models, feedback_models, question_models, skill_models,
story_models, suggestion_models, topic_models,) = (
models.Registry.import_models([
models.NAMES.auth, models.NAMES.exploration, models.NAMES.feedback,
models.NAMES.question, models.NAMES.skill, models.NAMES.story,
models.NAMES.suggestion, models.NAMES.topic]))
current_user_services = models.Registry.import_current_user_services()
datastore_services = models.Registry.import_datastore_services()
email_services = models.Registry.import_email_services()
memory_cache_services = models.Registry.import_cache_services()
platform_auth_services = models.Registry.import_auth_services()
platform_taskqueue_services = models.Registry.import_taskqueue_services()
# Prefix to append to all lines printed by tests to the console.
# We are using the b' prefix as all the stdouts are in bytes.
LOG_LINE_PREFIX = b'LOG_INFO_TEST: '
# List of model classes that don't have Wipeout- or Takeout-related class
# methods defined, because they're not used directly but only as
# base classes for the other models.
BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES = (
'BaseCommitLogEntryModel',
'BaseHumanMaintainedModel',
'BaseMapReduceBatchResultsModel',
'BaseModel',
'BaseSnapshotContentModel',
'BaseSnapshotMetadataModel',
'VersionedModel',
)
def get_filepath_from_filename(filename, rootdir):
"""Returns filepath using the filename. Different files are present in
different subdirectories in the rootdir. So, we walk through the rootdir and
match all the filenames with the given filename. When a match is found
the function returns the complete path of the filename by using
os.path.join(root, filename).
For example signup-page.mainpage.html is present in
core/templates/pages/signup-page and error-page.mainpage.html is present in
core/templates/pages/error-pages. So we walk through core/templates/pages
and a match for signup-page.component.html is found in signup-page
subdirectory and a match for error-page.directive.html is found in
error-pages subdirectory.
Args:
filename: str. The name of the file.
rootdir: str. The directory to search the file in.
Returns:
str | None. The path of the file if file is found otherwise
None.
"""
# This is required since error files are served according to error status
# code. The file served is error-page.mainpage.html but it is compiled and
# stored as error-page-{status_code}.mainpage.html. So, we need to swap the
# name here to obtain the correct filepath.
if filename.startswith('error-page'):
filename = 'error-page.mainpage.html'
matches = list(itertools.chain.from_iterable(
(os.path.join(subdir, f) for f in filenames if f == filename)
for subdir, _, filenames in os.walk(rootdir)))
if len(matches) > 1:
raise Exception('Multiple files found with name: %s' % filename)
return matches[0] if matches else None
def mock_load_template(filename):
"""Mock for load_template function. This mock is required for backend tests
since we do not have webpack compilation before backend tests. The folder to
search templates is webpack_bundles which is generated after webpack
compilation. Since this folder will be missing, load_template function will
return an error. So, we use a mock for load_template which returns the html
file from the source directory instead.
Args:
filename: str. The name of the file for which template is to be
returned.
Returns:
str. The contents of the given file.
"""
filepath = get_filepath_from_filename(
filename, os.path.join('core', 'templates', 'pages'))
with python_utils.open_file(filepath, 'r') as f:
return f.read()
def check_image_png_or_webp(image_string):
"""Checks if the image is in png or webp format only.
Args:
image_string: str. Image url in base64 format.
Returns:
bool. Returns true if image is in WebP format.
"""
return image_string.startswith(('data:image/png', 'data:image/webp'))
def get_storage_model_module_names():
"""Get all module names in storage."""
# As models.NAMES is an enum, it cannot be iterated over. So we use the
# __dict__ property which can be iterated over.
for name in models.NAMES.__dict__:
if '__' not in name:
yield name
def get_storage_model_classes():
"""Get all model classes in storage."""
for module_name in get_storage_model_module_names():
(module,) = models.Registry.import_models([module_name])
for member_name, member_obj in inspect.getmembers(module):
if inspect.isclass(member_obj):
clazz = getattr(module, member_name)
all_base_classes = [
base_class.__name__ for base_class in inspect.getmro(
clazz)]
if 'Model' in all_base_classes:
yield clazz
class ElasticSearchStub(python_utils.OBJECT):
"""This stub class mocks the functionality of ES in
elastic_search_services.py.
IMPORTANT NOTE TO DEVELOPERS: These mock functions are NOT guaranteed to
be exact implementations of elasticsearch functionality. If the results of
this mock and the local dev elasticsearch instance differ, the mock
functions should be updated so that their behaviour matches what a local
dev instance would return. (For example, this mock always has a 'version'
of 1 in the return dict and an arbitrary '_seq_no', although the version
number increments with every PUT in the elasticsearch Python client
library and the '_seq_no' increments with every operation.)
"""
_DB = {}
def reset(self):
"""Helper method that clears the mock database."""
self._DB.clear()
def _generate_index_not_found_error(self, index_name):
"""Helper method that generates an elasticsearch 'index not found' 404
error.
Args:
index_name: str. The index that was not found.
Returns:
elasticsearch.NotFoundError. A manually-constructed error
indicating that the index was not found.
"""
raise elasticsearch.NotFoundError(
404, 'index_not_found_exception', {
'status': 404,
'error': {
'reason': 'no such index [%s]' % index_name,
'root_cause': [{
'reason': 'no such index [%s]' % index_name,
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}],
'index': index_name,
'index_uuid': '_na_',
'type': 'index_not_found_exception',
'resource.type': 'index_or_alias',
'resource.id': index_name
}
}
)
def mock_create_index(self, index_name):
"""Creates an index with the given name.
Args:
index_name: str. The name of the index to create.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
elasticsearch.RequestError. An index with the given name already
exists.
"""
if index_name in self._DB:
raise elasticsearch.RequestError(
400, 'resource_already_exists_exception',
'index [%s/RaNdOmStRiNgOfAlPhAs] already exists' % index_name)
self._DB[index_name] = []
return {
'index': index_name,
'acknowledged': True,
'shards_acknowledged': True
}
def mock_index(self, index_name, document, id=None): # pylint: disable=redefined-builtin
"""Adds a document with the given ID to the index.
Note that, unfortunately, we have to keep the name of "id" for the
last kwarg, although it conflicts with a Python builtin. This is
because the name is an existing part of the API defined at
https://elasticsearch-py.readthedocs.io/en/v7.10.1/api.html
Args:
index_name: str. The name of the index to create.
document: dict. The document to store.
id: str. The unique identifier of the document.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
elasticsearch.RequestError. An index with the given name already
exists.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
self._DB[index_name] = [
d for d in self._DB[index_name] if d['id'] != id]
self._DB[index_name].append(document)
return {
'_index': index_name,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0,
},
'_seq_no': 96,
'_primary_term': 1,
'result': 'created',
'_id': id,
'_version': 1,
'_type': '_doc',
}
def mock_exists(self, index_name, doc_id):
"""Checks whether a document with the given ID exists in the mock
database.
Args:
index_name: str. The name of the index to check.
doc_id: str. The document id to check.
Returns:
bool. Whether the document exists in the index.
Raises:
elasticsearch.NotFoundError: The given index name was not found.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
return any([d['id'] == doc_id for d in self._DB[index_name]])
def mock_delete(self, index_name, doc_id):
"""Deletes a document from an index in the mock database. Does nothing
if the document is not in the index.
Args:
index_name: str. The name of the index to delete the document from.
doc_id: str. The document id to be deleted from the index.
Returns:
dict. A dict representing the ElasticSearch API response.
Raises:
Exception. The document does not exist in the index.
elasticsearch.NotFoundError. The given index name was not found, or
the given doc_id was not found in the given index.
"""
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
docs = [d for d in self._DB[index_name] if d['id'] != doc_id]
if len(self._DB[index_name]) != len(docs):
self._DB[index_name] = docs
return {
'_type': '_doc',
'_seq_no': 99,
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'result': 'deleted',
'_primary_term': 1,
'_index': index_name,
'_version': 4,
'_id': '0'
}
raise elasticsearch.NotFoundError(
404, {
'_index': index_name,
'_type': '_doc',
'_id': doc_id,
'_version': 1,
'result': 'not_found',
'_shards': {
'total': 2,
'successful': 1,
'failed': 0
},
'_seq_no': 103,
'_primary_term': 1
})
def mock_delete_by_query(self, index_name, query):
"""Deletes documents from an index based on the given query.
Note that this mock only supports a specific form for the query, i.e. the
one which clears the entire index. It asserts that all calls to this
function use that query format.
Args:
index_name: str. The name of the index to delete the documents from.
query: dict. The query that defines which documents to delete.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The query is not in the correct form.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert query.keys() == ['query']
assert query['query'] == {
'match_all': {}
}
if index_name not in self._DB:
raise self._generate_index_not_found_error(index_name)
index_size = len(self._DB[index_name])
del self._DB[index_name][:]
return {
'took': 72,
'version_conflicts': 0,
'noops': 0,
'throttled_until_millis': 0,
'failures': [],
'throttled_millis': 0,
'total': index_size,
'batches': 1,
'requests_per_second': -1.0,
'retries': {u'search': 0, u'bulk': 0},
'timed_out': False,
'deleted': index_size
}
def mock_search(self, body=None, index=None, params=None):
"""Searches and returns documents that match the given query.
Args:
body: dict. A dictionary search definition that uses Query DSL.
index: str. The name of the index to search.
params: dict. A dict with two keys: `size` and `from`. The
corresponding values are ints which represent the number of
results to fetch, and the offset from which to fetch them,
respectively.
Returns:
dict. A dict representing the ElasticSearch response.
Raises:
AssertionError. The given arguments are not supported by this mock.
elasticsearch.NotFoundError. The given index name was not found.
"""
assert body is not None
# "_all" and "" are special index names that are used to search across
# all indexes. We do not allow their use.
assert index not in ['_all', '', None]
assert sorted(params.keys()) == ['from', 'size']
if index not in self._DB:
raise self._generate_index_not_found_error(index)
result_docs = []
result_doc_ids = set([])
for doc in self._DB[index]:
if not doc['id'] in result_doc_ids:
result_docs.append(doc)
result_doc_ids.add(doc['id'])
filters = body['query']['bool']['filter']
terms = body['query']['bool']['must']
for f in filters:
for k, v in f['match'].items():
result_docs = [doc for doc in result_docs if doc[k] in v]
if terms:
filtered_docs = []
for term in terms:
for _, v in term.items():
values = v['query'].split(' ')
for doc in result_docs:
strs = [val for val in doc.values() if isinstance(
val, python_utils.BASESTRING)]
words = []
for s in strs:
words += s.split(' ')
if all([value in words for value in values]):
filtered_docs.append(doc)
result_docs = filtered_docs
formatted_result_docs = [{
'_id': doc['id'],
'_score': 0.0,
'_type': '_doc',
'_index': index,
'_source': doc
} for doc in result_docs[
params['from']: params['from'] + params['size']
]]
return {
'timed_out': False,
'_shards': {
'failed': 0,
'total': 1,
'successful': 1,
'skipped': 0
},
'took': 4,
'hits': {
'hits': formatted_result_docs
},
'total': {
'value': len(formatted_result_docs),
'relation': 'eq'
},
'max_score': max(
[0.0] + [d['_score'] for d in formatted_result_docs]),
}
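# Illustrative query shape accepted by mock_search (an assumption derived from
# the parsing above, using hypothetical field names and search terms):
#   body = {'query': {'bool': {
#       'filter': [{'match': {'category': 'math'}}],
#       'must': [{'multi_match': {'query': 'fractions division'}}]}}}
#   params = {'from': 0, 'size': 20}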
class AuthServicesStub(python_utils.OBJECT):
"""Test-only implementation of the public API in core.platform.auth."""
def __init__(self):
"""Initializes a new instance that emulates an empty auth server."""
self._user_id_by_auth_id = {}
self._external_user_id_associations = set()
@classmethod
def install_stub(cls, test):
"""Installs a new instance of the stub onto the given test instance.
Args:
test: GenericTestBase. The test instance to install the stub on.
Returns:
callable. A function that will uninstall the stub when called.
"""
with contextlib2.ExitStack() as stack:
stub = cls()
stack.enter_context(test.swap(
platform_auth_services, 'establish_auth_session',
stub.establish_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'destroy_auth_session',
stub.destroy_auth_session))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_claims_from_request',
stub.get_auth_claims_from_request))
stack.enter_context(test.swap(
platform_auth_services, 'mark_user_for_deletion',
stub.mark_user_for_deletion))
stack.enter_context(test.swap(
platform_auth_services, 'delete_external_auth_associations',
stub.delete_external_auth_associations))
stack.enter_context(test.swap(
platform_auth_services,
'verify_external_auth_associations_are_deleted',
stub.verify_external_auth_associations_are_deleted))
stack.enter_context(test.swap(
platform_auth_services, 'get_auth_id_from_user_id',
stub.get_auth_id_from_user_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_user_id_from_auth_id',
stub.get_user_id_from_auth_id))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_user_ids_from_auth_ids',
stub.get_multi_user_ids_from_auth_ids))
stack.enter_context(test.swap(
platform_auth_services, 'get_multi_auth_ids_from_user_ids',
stub.get_multi_auth_ids_from_user_ids))
stack.enter_context(test.swap(
platform_auth_services, 'associate_auth_id_with_user_id',
stub.associate_auth_id_with_user_id))
stack.enter_context(test.swap(
platform_auth_services,
'associate_multi_auth_ids_with_user_ids',
stub.associate_multi_auth_ids_with_user_ids))
# Standard usage of ExitStack: enter a bunch of context managers
# from the safety of an ExitStack's context. Once they've all been
# opened, pop_all() of them off of the original context so they can
# *stay* open. Calling the function returned will exit all of them
# in reverse order.
# https://docs.python.org/3/library/contextlib.html#cleaning-up-in-an-enter-implementation
return stack.pop_all().close
@classmethod
def establish_auth_session(cls, unused_request, unused_response):
"""Sets login cookies to maintain a user's sign-in session.
Args:
unused_request: webapp2.Request. Unused because os.environ handles
sessions.
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def destroy_auth_session(cls, unused_response):
"""Clears login cookies from the given response headers.
Args:
unused_response: webapp2.Response. Unused because os.environ handles
sessions.
"""
pass
@classmethod
def get_auth_claims_from_request(cls, unused_request):
"""Authenticates the request and returns claims about its authorizer.
This stub obtains authorization information from os.environ. To make the
operation more authentic, this method also creates a new "external"
association for the user to simulate a genuine "provided" value.
Args:
unused_request: webapp2.Request. The HTTP request to authenticate.
Unused because auth-details are extracted from environment
variables.
Returns:
AuthClaims|None. Claims about the currently signed in user. If no
user is signed in, then returns None.
"""
auth_id = os.environ.get('USER_ID', '')
email = os.environ.get('USER_EMAIL', '')
role_is_super_admin = os.environ.get('USER_IS_ADMIN', '0') == '1'
if auth_id:
return auth_domain.AuthClaims(auth_id, email, role_is_super_admin)
return None
def mark_user_for_deletion(self, user_id):
"""Marks the user, and all of their auth associations, as deleted.
Since the stub does not use models, this operation actually deletes the
user's association. The "external" associations, however, are not
deleted yet.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._user_id_by_auth_id = {
a: u for a, u in self._user_id_by_auth_id.items() if u != user_id
}
def delete_external_auth_associations(self, user_id):
"""Deletes all associations that refer to the user outside of Oppia.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
self._external_user_id_associations.discard(user_id)
def verify_external_auth_associations_are_deleted(self, user_id):
"""Returns true if and only if we have successfully verified that all
external associations have been deleted.
Args:
user_id: str. The unique ID of the user whose associations should be
checked.
Returns:
bool. True if and only if we have successfully verified that all
external associations have been deleted.
"""
return user_id not in self._external_user_id_associations
def get_auth_id_from_user_id(self, user_id):
"""Returns the auth ID associated with the given user ID.
Args:
user_id: str. The user ID.
Returns:
str|None. The auth ID associated with the given user ID, or None if
no association exists.
"""
return python_utils.NEXT(
(a for a, u in self._user_id_by_auth_id.items() if u == user_id),
None)
def get_user_id_from_auth_id(self, auth_id):
"""Returns the user ID associated with the given auth ID.
Args:
auth_id: str. The auth ID.
Returns:
str|None. The user ID associated with the given auth ID, or None if
no association exists.
"""
return self._user_id_by_auth_id.get(auth_id, None)
def get_multi_user_ids_from_auth_ids(self, auth_ids):
"""Returns the user IDs associated with the given auth IDs.
Args:
auth_ids: list(str). The auth IDs.
Returns:
list(str|None). The user IDs associated with each of the given auth
IDs, or None for associations which don't exist.
"""
return [self._user_id_by_auth_id.get(a, None) for a in auth_ids]
def get_multi_auth_ids_from_user_ids(self, user_ids):
"""Returns the auth IDs associated with the given user IDs.
Args:
user_ids: list(str). The user IDs.
Returns:
list(str|None). The auth IDs associated with each of the given user
IDs, or None for associations which don't exist.
"""
auth_id_by_user_id = {u: a for a, u in self._user_id_by_auth_id.items()}
return [auth_id_by_user_id.get(u, None) for u in user_ids]
def associate_auth_id_with_user_id(self, auth_id_user_id_pair):
"""Commits the association between auth ID and user ID.
This method also adds the user to the "external" set of associations.
Args:
auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The association
to commit.
Raises:
Exception. The IDs are already associated with a value.
"""
auth_id, user_id = auth_id_user_id_pair
if auth_id in self._user_id_by_auth_id:
raise Exception(
'auth_id=%r is already associated with user_id=%r' % (
auth_id, self._user_id_by_auth_id[auth_id]))
auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id).put()
self._external_user_id_associations.add(user_id)
self._user_id_by_auth_id[auth_id] = user_id
def associate_multi_auth_ids_with_user_ids(self, auth_id_user_id_pairs):
"""Commits the associations between auth IDs and user IDs.
This method also adds the users to the "external" set of associations.
Args:
auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
associations to commit.
Raises:
Exception. One or more auth associations already exist.
"""
collisions = ', '.join(
'{auth_id=%r: user_id=%r}' % (a, self._user_id_by_auth_id[a])
for a, _ in auth_id_user_id_pairs if a in self._user_id_by_auth_id)
if collisions:
raise Exception('already associated: %s' % collisions)
datastore_services.put_multi(
[auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id)
for auth_id, user_id in auth_id_user_id_pairs])
self._external_user_id_associations.update(
u for _, u in auth_id_user_id_pairs)
self._user_id_by_auth_id.update(auth_id_user_id_pairs)
class TaskqueueServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.taskqueue taskqueue services API.
"""
def __init__(self, test_base):
"""Initializes a taskqueue services stub that replaces the API
functionality of core.platform.taskqueue.
Args:
test_base: GenericTestBase. The current test base.
"""
self._test_base = test_base
self._client = cloud_tasks_emulator.Emulator(
task_handler=self._task_handler, automatic_task_handling=False)
def _task_handler(self, url, payload, queue_name, task_name=None):
"""Makes a POST request to the task URL in the test app.
Args:
url: str. URL of the handler function.
payload: dict(str : *). Payload to pass to the request. Defaults
to None if no payload is required.
queue_name: str. The name of the queue to add the task to.
task_name: str|None. Optional. The name of the task.
"""
headers = {
'X-Appengine-QueueName': python_utils.convert_to_bytes(queue_name),
'X-Appengine-TaskName': (
# Maps empty strings to None so the output can become 'None'.
python_utils.convert_to_bytes(task_name or None)),
'X-AppEngine-Fake-Is-Admin': python_utils.convert_to_bytes(1),
}
csrf_token = self._test_base.get_new_csrf_token()
self._test_base.post_task(url, payload, headers, csrf_token=csrf_token)
def create_http_task(
self, queue_name, url, payload=None, scheduled_for=None,
task_name=None):
"""Creates a Task in the corresponding queue that will be executed when
the 'scheduled_for' countdown expires using the cloud tasks emulator.
Args:
queue_name: str. The name of the queue to add the task to.
url: str. URL of the handler function.
payload: dict(str : *). Payload to pass to the request. Defaults to
None if no payload is required.
scheduled_for: datetime|None. The naive datetime object for the time
to execute the task. Ignored by this stub.
task_name: str|None. Optional. The name of the task.
"""
# Causes the task to execute immediately by setting the scheduled_for
# time to 0. If we allow scheduled_for to be non-zero, then tests that
# rely on the actions made by the task will become unreliable.
scheduled_for = 0
self._client.create_task(
queue_name, url, payload, scheduled_for=scheduled_for,
task_name=task_name)
def count_jobs_in_taskqueue(self, queue_name=None):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_number_of_tasks(queue_name=queue_name)
def process_and_flush_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._client.process_and_flush_tasks(queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._client.get_tasks(queue_name=queue_name)
class MemoryCacheServicesStub(python_utils.OBJECT):
"""The stub class that mocks the API functionality offered by the platform
layer, namely the platform.cache cache services API.
"""
_CACHE_DICT = {}
def get_memory_cache_stats(self):
"""Returns a mock profile of the cache dictionary. This mock does not
have the functionality to test for peak memory usage and total memory
usage so the values for those attributes will be 0.
Returns:
MemoryCacheStats. MemoryCacheStats object containing the total
number of keys in the cache dictionary.
"""
return caching_domain.MemoryCacheStats(0, 0, len(self._CACHE_DICT))
def flush_cache(self):
"""Wipes the cache dictionary clean."""
self._CACHE_DICT.clear()
def get_multi(self, keys):
"""Looks up a list of keys in cache dictionary.
Args:
keys: list(str). A list of keys (strings) to look up.
Returns:
list(str). A list of values in the cache dictionary corresponding to
the keys that are passed in.
"""
assert isinstance(keys, list)
return [self._CACHE_DICT.get(key, None) for key in keys]
def set_multi(self, key_value_mapping):
"""Sets multiple keys' values at once in the cache dictionary.
Args:
key_value_mapping: dict(str, str). Both the key and value are
strings. The value can either be a primitive binary-safe string
or the JSON-encoded string version of the object.
Returns:
bool. Whether the set action succeeded.
"""
assert isinstance(key_value_mapping, dict)
self._CACHE_DICT.update(key_value_mapping)
return True
def delete_multi(self, keys):
"""Deletes multiple keys in the cache dictionary.
Args:
keys: list(str). The keys to delete.
Returns:
int. Number of successfully deleted keys.
"""
assert all(isinstance(key, python_utils.BASESTRING) for key in keys)
keys_to_delete = [key for key in keys if key in self._CACHE_DICT]
for key in keys_to_delete:
del self._CACHE_DICT[key]
return len(keys_to_delete)
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
# A test unicode string.
UNICODE_TEST_STRING = 'unicode ¡马!'
def _get_unicode_test_string(self, suffix):
"""Returns a string that contains unicode characters and ends with the
given suffix. This is used to test that functions behave correctly when
handling strings with unicode characters.
Args:
suffix: str. The suffix to append to the UNICODE_TEST_STRING.
Returns:
str. A string that contains unicode characters and ends with the
given suffix.
"""
return '%s%s' % (self.UNICODE_TEST_STRING, suffix)
def _assert_validation_error(self, item, error_substring):
"""Checks that the given item passes default validation."""
with self.assertRaisesRegexp(utils.ValidationError, error_substring):
item.validate()
def log_line(self, line):
"""Print the line with a prefix that can be identified by the script
that calls the test.
"""
# We are using the b' prefix as all the stdouts are in bytes.
python_utils.PRINT(
b'%s%s' % (LOG_LINE_PREFIX, python_utils.convert_to_bytes(line)))
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_updated_param_dict(
self, param_dict, param_changes, exp_param_specs):
"""Updates a param dict using the given list of param_changes.
Note that the list of parameter changes is ordered. Parameter changes
later in the list may depend on parameter changes that have been set
earlier in the same list.
"""
new_param_dict = copy.deepcopy(param_dict)
for param_change in param_changes:
try:
obj_type = exp_param_specs[param_change.name].obj_type
except:
raise Exception('Parameter %s not found' % param_change.name)
new_param_dict[param_change.name] = (
param_change.get_normalized_value(obj_type, new_param_dict))
return new_param_dict
def get_static_asset_filepath(self):
"""Returns filepath to the static files on disk ('' or 'build/')."""
return '' if constants.DEV_MODE else os.path.join('build')
def get_static_asset_url(self, asset_suffix):
"""Returns the relative path for the asset, appending it to the
corresponding cache slug. asset_suffix should have a leading slash.
"""
return '/assets%s%s' % (utils.get_asset_dir_prefix(), asset_suffix)
@contextlib.contextmanager
def capture_logging(self, min_level=logging.NOTSET):
"""Context manager that captures logs into a list.
Strips whitespace from messages for convenience.
https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
Args:
min_level: int. The minimum logging level captured by the context
manager. By default, all logging levels are captured. Values
should be one of the following values from the logging module:
NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL.
Yields:
list(str). A live-feed of the logging messages captured so-far.
"""
captured_logs = []
class ListStream(python_utils.OBJECT):
"""Stream-like object that appends writes to the captured logs."""
def write(self, msg):
"""Appends stripped messages to captured logs."""
captured_logs.append(msg.strip())
def flush(self):
"""Does nothing."""
pass
list_stream_handler = logging.StreamHandler(stream=ListStream())
logger = logging.getLogger()
old_level = logger.level
logger.addHandler(list_stream_handler)
logger.setLevel(min_level)
try:
yield captured_logs
finally:
logger.setLevel(old_level)
logger.removeHandler(list_stream_handler)
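# Rough usage sketch (illustrative, not from the original file):
#   with self.capture_logging(min_level=logging.INFO) as logs:
#       logging.info('something happened')
#   self.assertEqual(logs, ['something happened'])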
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
Example usage:
import math
with self.swap(math, 'sqrt', lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
To mock class methods, pass the function to the classmethod decorator
first, for example:
import types
with self.swap(
SomePythonClass, 'some_classmethod',
classmethod(new_classmethod)):
NOTE: self.swap and other context managers that are created using
contextlib.contextmanager use generators that yield exactly once. This
means that you can only use them once after construction, otherwise,
the generator will immediately raise StopIteration, and contextlib will
raise a RuntimeError.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
@contextlib.contextmanager
def swap_to_always_return(self, obj, attr, value=None):
"""Swap obj.attr with a function that always returns the given value."""
def function_that_always_returns(*unused_args, **unused_kwargs):
"""Returns the input value."""
return value
with self.swap(obj, attr, function_that_always_returns):
yield
@contextlib.contextmanager
def swap_to_always_raise(self, obj, attr, error=Exception):
"""Swap obj.attr with a function that always raises the given error."""
def function_that_always_raises(*unused_args, **unused_kwargs):
"""Raises the input exception."""
raise error
with self.swap(obj, attr, function_that_always_raises):
yield
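# Sketch of how the two convenience swaps above are typically used; the
# swapped attributes below are hypothetical examples, not ones required by
# these tests.
#
#     with self.swap_to_always_return(os.path, 'exists', value=True):
#         ...  # Code under test sees os.path.exists() return True.
#     with self.swap_to_always_raise(os, 'remove', error=OSError('disk full')):
#         ...  # Code under test sees os.remove() raise OSError.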
@contextlib.contextmanager
def swap_with_checks(
self, obj, attr, new_value, expected_args=None,
expected_kwargs=None, called=True):
"""Swap an object's function value within the context of a 'with'
statement. The object can be anything that supports getattr and setattr,
such as class instances, modules, etc.
Examples:
If you want to check subprocess.Popen is invoked twice like
`subprocess.Popen(['python'], shell=True)` and
`subprocess.Popen(['python2'], shell=False)`, you can first define the
mock function, then the swap, and just run the target function in
context, as follows:
def mock_popen(command, shell):
return
popen_swap = self.swap_with_checks(
subprocess, 'Popen', mock_popen,
expected_args=[(['python'],), (['python2'],)],
expected_kwargs=[{'shell': True}, {'shell': False}])
with popen_swap:
function_that_invokes_popen()
Args:
obj: *. The Python object whose attribute you want to swap.
attr: str. The name of the function to be swapped.
new_value: function. The new function you want to use.
expected_args: None|list(tuple). The expected args that you want
this function to be invoked with. When its value is None, args
will not be checked. If the value type is list, the function
will check whether the called args is the first element in the
list. If matched, this tuple will be removed from the list.
expected_kwargs: None|list(dict). The expected keyword args you want
this function to be invoked with. Similar to expected_args.
called: bool. Whether the function is expected to be invoked. This
will always be checked.
Yields:
context. The context with function replaced.
"""
original = getattr(obj, attr)
# The actual error message will also include the detailed assert error
# message via the `self.longMessage` setting below.
msg = 'Expected checks failed when swapping out in %s.%s tests.' % (
obj.__name__, attr)
def wrapper(*args, **kwargs):
"""Wrapper function for the new value. This function will do the
check before the wrapped function is invoked. After the function
finished, the wrapper will update how many times this function is
invoked.
Args:
*args: list(*). The args passed into `attr` function.
**kwargs: dict. The key word args passed into `attr` function.
Returns:
*. Result of `new_value`.
"""
wrapper.called = True
if expected_args is not None:
self.assertEqual(args, expected_args[0], msg=msg)
expected_args.pop(0)
if expected_kwargs is not None:
self.assertEqual(kwargs, expected_kwargs[0], msg=msg)
expected_kwargs.pop(0)
result = new_value(*args, **kwargs)
return result
wrapper.called = False
setattr(obj, attr, wrapper)
error_occurred = False
try:
# This will show the detailed assert message.
self.longMessage = True
yield
except Exception:
error_occurred = True
# Raise issues thrown by the called function or assert error.
raise
finally:
setattr(obj, attr, original)
if not error_occurred:
self.assertEqual(wrapper.called, called, msg=msg)
self.assertFalse(expected_args, msg=msg)
self.assertFalse(expected_kwargs, msg=msg)
self.longMessage = False
def assertRaises(self, *args, **kwargs):
raise NotImplementedError(
'self.assertRaises should not be used in these tests. Please use '
'self.assertRaisesRegexp instead.')
def assertRaisesRegexp( # pylint: disable=keyword-arg-before-vararg
self, expected_exception, expected_regexp, callable_obj=None,
*args, **kwargs):
if not expected_regexp:
raise Exception(
'Please provide a sufficiently strong regexp string to '
'validate that the correct error is being raised.')
return super(TestBase, self).assertRaisesRegexp(
expected_exception, expected_regexp,
callable_obj=callable_obj, *args, **kwargs)
def assert_matches_regexps(self, items, regexps, full_match=False):
"""Asserts that each item matches the corresponding regexp.
If there are any missing or extra items that do not correspond to a
regexp element, then the assertion fails.
Args:
items: list(str). The string elements being matched.
regexps: list(str|RegexObject). The patterns that each item is
expected to match.
full_match: bool. Whether to require items to match exactly with the
corresponding pattern.
Raises:
AssertionError. At least one item does not match its corresponding
pattern, or the number of items does not match the number of
regexp patterns.
"""
get_match = re.match if full_match else re.search
differences = [
'~ [i=%d]:\t%r does not match: %r' % (i, item, regexp)
for i, (regexp, item) in enumerate(python_utils.ZIP(regexps, items))
if get_match(regexp, item, re.DOTALL) is None
]
if len(items) < len(regexps):
extra_regexps = regexps[len(items):]
differences.extend(
'- [i=%d]:\tmissing item expected to match: %r' % (i, regexp)
for i, regexp in enumerate(extra_regexps, start=len(items)))
if len(regexps) < len(items):
extra_items = items[len(regexps):]
differences.extend(
'+ [i=%d]:\textra item %r' % (i, item)
for i, item in enumerate(extra_items, start=len(regexps)))
if differences:
error_message = 'Lists differ:\n\t%s' % '\n\t'.join(differences)
raise AssertionError(error_message)
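# A minimal sketch of assert_matches_regexps: each item must match (via
# re.search by default, or re.match when full_match=True) the regexp at the
# same index.
#
#     self.assert_matches_regexps(
#         ['Job 1 finished', 'Job 2 failed'],
#         [r'Job \d+ finished', r'Job \d+ (finished|failed)'])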
class AppEngineTestBase(TestBase):
"""Minimal base class for tests that need Google App Engine functionality.
This class is primarily designed for unit tests in core.platform, where we
write adapters around Oppia's third-party dependencies. Generally, our unit
tests depend on stub implementations of these adapters to protect them from
platform-specific behavior. Such stubs are installed in the
GenericTestBase.run() method.
Most of the unit tests in our code base do, and should, inherit from
`GenericTestBase` to stay platform-agnostic. The platform layer itself,
however, can _not_ mock out platform-specific behavior. Those unit tests
need to interact with a real implementation. This base class provides the
bare-minimum functionality and stubs necessary to do so.
"""
# Environment values that our tests depend on.
AUTH_DOMAIN = 'example.com'
HTTP_HOST = 'localhost'
SERVER_NAME = 'localhost'
SERVER_PORT = '8080'
DEFAULT_VERSION_HOSTNAME = '%s:%s' % (HTTP_HOST, SERVER_PORT)
def __init__(self, *args, **kwargs):
super(AppEngineTestBase, self).__init__(*args, **kwargs)
# Defined outside of setUp() because we access it from methods, but can
# only install it during the run() method. Defining it in __init__
# satisfies pylint's attribute-defined-outside-init warning.
self._platform_taskqueue_services_stub = TaskqueueServicesStub(self)
def setUp(self):
super(AppEngineTestBase, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(
overwrite=True,
auth_domain=self.AUTH_DOMAIN, http_host=self.HTTP_HOST,
server_name=self.SERVER_NAME, server_port=self.SERVER_PORT,
default_version_hostname=self.DEFAULT_VERSION_HOSTNAME)
# Google App Engine service stubs.
self.testbed.init_app_identity_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_files_stub()
self.testbed.init_memcache_stub()
self.testbed.init_search_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_user_stub()
policy = (
datastore_services.make_instantaneous_global_consistency_policy())
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
# The root path tells the testbed where to find the queue.yaml file.
self.testbed.init_taskqueue_stub(root_path=os.getcwd())
self._testbed_taskqueue_stub = (
self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME))
# Set up apps for testing.
self.testapp = webtest.TestApp(main.app)
self.taskqueue_testapp = webtest.TestApp(main_taskqueue.app)
self.mail_testapp = webtest.TestApp(main_mail.app)
def tearDown(self):
self.testbed.deactivate()
super(AppEngineTestBase, self).tearDown()
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
AppEngineTestBase's override of run() wraps super().run() in "swap"
contexts which stub out the platform taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
platform_taskqueue_services_swap = self.swap(
platform_taskqueue_services, 'create_http_task',
self._platform_taskqueue_services_stub.create_http_task)
with platform_taskqueue_services_swap:
super(AppEngineTestBase, self).run(result=result)
def _get_all_queue_names(self):
"""Returns a list of all queue names."""
return [q['name'] for q in self._testbed_taskqueue_stub.GetQueues()]
def count_jobs_in_taskqueue(self, queue_name):
"""Returns the total number of tasks in a single queue if a queue name
is specified or the entire taskqueue if no queue name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
int. The total number of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.count_jobs_in_taskqueue(
queue_name=queue_name)
def process_and_flush_pending_tasks(self, queue_name=None):
"""Executes all of the tasks in a single queue if a queue name is
specified or all of the tasks in the taskqueue if no queue name is
specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
"""
self._platform_taskqueue_services_stub.process_and_flush_tasks(
queue_name=queue_name)
def get_pending_tasks(self, queue_name=None):
"""Returns a list of the tasks in a single queue if a queue name is
specified or a list of all of the tasks in the taskqueue if no queue
name is specified.
Args:
queue_name: str|None. Name of the queue. Pass in None if no specific
queue is designated.
Returns:
list(Task). List of tasks in a single queue or in the entire
taskqueue.
"""
return self._platform_taskqueue_services_stub.get_pending_tasks(
queue_name=queue_name)
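# Sketch of the usual taskqueue assertions in handler tests (the queue name
# 'emails' is a hypothetical example): enqueue work via the code under test,
# then count and flush it through the stub.
#
#     self.assertEqual(self.count_jobs_in_taskqueue('emails'), 1)
#     self.process_and_flush_pending_tasks('emails')
#     self.assertEqual(self.count_jobs_in_taskqueue('emails'), 0)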
def count_jobs_in_mapreduce_taskqueue(self, queue_name):
"""Counts the jobs in the given MapReduce taskqueue."""
return len(self.get_pending_mapreduce_tasks(queue_name=queue_name))
def get_pending_mapreduce_tasks(self, queue_name=None):
"""Returns the jobs in the given MapReduce taskqueue. If queue_name is
None, defaults to returning the jobs in all available queues.
"""
queue_names = None if queue_name is None else [queue_name]
return self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
def _execute_mapreduce_tasks(self, tasks):
"""Execute MapReduce queued tasks.
Args:
tasks: list(google.appengine.api.taskqueue.taskqueue.Task). The
queued tasks.
"""
for task in tasks:
if task.url == '/_ah/queue/deferred':
deferred.run(task.payload)
else:
# All other tasks will be for MapReduce or taskqueue.
params = task.payload or ''
headers = {
'Content-Length': python_utils.convert_to_bytes(len(params))
}
headers.update(
(key, python_utils.convert_to_bytes(val))
for key, val in task.headers.items())
app = (
self.taskqueue_testapp if task.url.startswith('/task') else
self.testapp)
response = app.post(
task.url, params=params, headers=headers,
expect_errors=True)
if response.status_code != 200:
raise RuntimeError('MapReduce task failed: %r' % task)
def process_and_flush_pending_mapreduce_tasks(self, queue_name=None):
"""Runs and flushes pending MapReduce tasks. If queue_name is None, does
so for all queues; otherwise, this only runs and flushes tasks for the
specified queue.
For more information on taskqueue_stub, see:
https://code.google.com/p/googleappengine/source/browse/trunk/python/google/appengine/api/taskqueue/taskqueue_stub.py
"""
queue_names = (
self._get_all_queue_names() if queue_name is None else [queue_name])
get_enqueued_tasks = lambda: list(
self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names))
# Loops until get_enqueued_tasks() returns an empty list.
for tasks in iter(get_enqueued_tasks, []):
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
def run_but_do_not_flush_pending_mapreduce_tasks(self):
""""Runs, but does not flush, the pending MapReduce tasks."""
queue_names = self._get_all_queue_names()
tasks = self._testbed_taskqueue_stub.get_filtered_tasks(
queue_names=queue_names)
for queue in queue_names:
self._testbed_taskqueue_stub.FlushQueue(queue)
self._execute_mapreduce_tasks(tasks)
class GenericTestBase(AppEngineTestBase):
"""Base test class with common/generic helper methods.
Unless a class is testing for "platform"-specific behavior (e.g., testing
third-party library code or database model implementations), always inherit
from this base class. Otherwise, inherit from unittest.TestCase (preferred)
or AppEngineTestBase if Google App Engine services/behavior is needed.
TODO(#12135): Split this enormous test base into smaller, focused pieces.
"""
# NOTE: For tests that do not/can not use the default super-admin, authors
# can override the following class-level constant.
AUTO_CREATE_DEFAULT_SUPERADMIN_USER = True
# This is the value that gets returned by default when
# app_identity.get_application_id() is called during tests.
EXPECTED_TEST_APP_ID = 'dummy-cloudsdk-project-id'
SUPER_ADMIN_EMAIL = 'tmpsuperadmin@example.com'
SUPER_ADMIN_USERNAME = 'tmpsuperadm1n'
# Dummy strings representing user attributes. Note that it is up to the
# individual test to actually register these users as editors, admins, etc.
ADMIN_EMAIL = 'admin@example.com'
# Usernames containing the string 'admin' are reserved, so we use 'adm'
# instead.
ADMIN_USERNAME = 'adm'
MODERATOR_EMAIL = 'moderator@example.com'
MODERATOR_USERNAME = 'moderator'
OWNER_EMAIL = 'owner@example.com'
OWNER_USERNAME = 'owner'
EDITOR_EMAIL = 'editor@example.com'
EDITOR_USERNAME = 'editor'
TOPIC_MANAGER_EMAIL = 'topicmanager@example.com'
TOPIC_MANAGER_USERNAME = 'topicmanager'
VOICE_ARTIST_EMAIL = 'voiceartist@example.com'
VOICE_ARTIST_USERNAME = 'voiceartist'
VIEWER_EMAIL = 'viewer@example.com'
VIEWER_USERNAME = 'viewer'
NEW_USER_EMAIL = 'new.user@example.com'
NEW_USER_USERNAME = 'newuser'
DEFAULT_END_STATE_NAME = 'End'
PSEUDONYMOUS_ID = 'pid_%s' % ('a' * 32)
VERSION_0_STATES_DICT = {
feconf.DEFAULT_INIT_STATE_NAME: {
'content': [{'type': 'text', 'value': ''}],
'param_changes': [],
'interaction': {
'customization_args': {},
'id': 'Continue',
'handlers': [{
'name': 'submit',
'rule_specs': [{
'dest': 'END',
'feedback': [],
'param_changes': [],
'definition': {'rule_type': 'default'},
}],
}],
},
},
}
VERSION_27_STATE_DICT = {
'content': {'content_id': 'content', 'html': ''},
'param_changes': [],
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {},
'hint_1': {},
'solution': {},
},
},
'interaction': {
'solution': {
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>Solution explanation</p>',
},
'answer_is_exclusive': False,
},
'answer_groups': [],
'default_outcome': {
'param_changes': [],
'feedback': {
'content_id': 'default_outcome',
'html': '',
},
'dest': None,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None,
'labelled_as_correct': True,
},
'customization_args': {
'rows': {'value': 1},
'placeholder': {'value': 'Enter text here'},
},
'confirmed_unclassified_answers': [],
'id': 'TextInput',
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hint 1</p>',
},
}],
},
'classifier_model_id': None,
}
VERSION_21_STATE_DICT = {
'END': {
'classifier_model_id': None,
'content': {
'content_id': 'content',
'html': 'Congratulations, you have finished!',
},
'content_ids_to_audio_translations': {'content': {}},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {
'recommendedExplorationIds': {'value': []},
},
'default_outcome': None,
'hints': [],
'id': 'EndExploration',
'solution': None,
},
'param_changes': [],
},
'Introduction': {
'classifier_model_id': None,
'content': {'content_id': 'content', 'html': ''},
'content_ids_to_audio_translations': {
'content': {},
'default_outcome': {},
'feedback_1': {},
},
'interaction': {
'answer_groups': [{
'outcome': {
'dest': 'END',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Correct!</p>',
},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'rule_specs': [{
'inputs': {'x': 'InputString'},
'rule_type': 'Equals',
}],
'tagged_misconception_id': None,
'training_data': ['answer1', 'answer2', 'answer3'],
}],
'confirmed_unclassified_answers': [],
'customization_args': {
'placeholder': {'value': ''},
'rows': {'value': 1},
},
'default_outcome': {
'dest': 'Introduction',
'feedback': {'content_id': 'default_outcome', 'html': ''},
'labelled_as_correct': False,
'missing_prerequisite_skill_id': None,
'param_changes': [],
'refresher_exploration_id': None,
},
'hints': [],
'id': 'TextInput',
'solution': None,
},
'param_changes': [],
},
}
VERSION_1_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_2_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_3_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math '
'raw_latex-with-value="&quot;+,-,-,+&quot;">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_4_STORY_CONTENTS_DICT = {
'nodes': [{
'outline': (
'<p>Value</p>'
'<oppia-noninteractive-math math_content-with-value="{'
'&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, '
'&quot;svg_filename&quot;: &quot;&quot;'
'}">'
'</oppia-noninteractive-math>'),
'exploration_id': None,
'destination_node_ids': [],
'outline_is_finalized': False,
'acquired_skill_ids': [],
'id': 'node_1',
'title': 'Chapter 1',
'description': '',
'prerequisite_skill_ids': [],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
}],
'initial_node_id': 'node_1',
'next_node_id': 'node_2',
}
VERSION_1_SUBTOPIC_DICT = {
'skill_ids': ['skill_1'],
'id': 1,
'title': 'A subtitle',
}
# Dictionary-like data structures within the sample YAML must have their keys
# in alphabetical order so that string equivalence holds in the YAML generation
# tests. The indentation is also important, since it defines nesting (just
# like Python).
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
next_content_id_index: 0
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_STATE_SCHEMA_VERSION)
def run(self, result=None):
"""Run the test, collecting the result into the specified TestResult.
Reference URL:
https://docs.python.org/3/library/unittest.html#unittest.TestCase.run
GenericTestBase's override of run() wraps super().run() in swap
contexts to mock out the cache and taskqueue services.
Args:
result: TestResult | None. Holds onto the results of each test. If
None, a temporary result object is created (by calling the
defaultTestResult() method) and used instead.
"""
memory_cache_services_stub = MemoryCacheServicesStub()
memory_cache_services_stub.flush_cache()
es_stub = ElasticSearchStub()
es_stub.reset()
with contextlib2.ExitStack() as stack:
stack.callback(AuthServicesStub.install_stub(self))
stack.enter_context(self.swap(
elastic_search_services.ES.indices, 'create',
es_stub.mock_create_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'index',
es_stub.mock_index))
stack.enter_context(self.swap(
elastic_search_services.ES, 'exists',
es_stub.mock_exists))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete',
es_stub.mock_delete))
stack.enter_context(self.swap(
elastic_search_services.ES, 'delete_by_query',
es_stub.mock_delete_by_query))
stack.enter_context(self.swap(
elastic_search_services.ES, 'search',
es_stub.mock_search))
stack.enter_context(self.swap(
memory_cache_services, 'flush_cache',
memory_cache_services_stub.flush_cache))
stack.enter_context(self.swap(
memory_cache_services, 'get_multi',
memory_cache_services_stub.get_multi))
stack.enter_context(self.swap(
memory_cache_services, 'set_multi',
memory_cache_services_stub.set_multi))
stack.enter_context(self.swap(
memory_cache_services, 'get_memory_cache_stats',
memory_cache_services_stub.get_memory_cache_stats))
stack.enter_context(self.swap(
memory_cache_services, 'delete_multi',
memory_cache_services_stub.delete_multi))
super(GenericTestBase, self).run(result=result)
def setUp(self):
super(GenericTestBase, self).setUp()
if self.AUTO_CREATE_DEFAULT_SUPERADMIN_USER:
self.signup_superadmin_user()
def tearDown(self):
datastore_services.delete_multi(
datastore_services.query_everything().iter(keys_only=True))
super(GenericTestBase, self).tearDown()
def login(self, email, is_super_admin=False):
"""Sets the environment variables to simulate a login.
Args:
email: str. The email of the user who is to be logged in.
is_super_admin: bool. Whether the user is a super admin.
"""
self.testbed.setup_env(
overwrite=True,
user_email=email, user_id=self.get_auth_id_from_email(email),
user_is_admin=('1' if is_super_admin else '0'))
def logout(self):
"""Simulates a logout by resetting the environment variables."""
self.testbed.setup_env(
overwrite=True, user_email='', user_id='', user_is_admin='0')
@contextlib.contextmanager
def mock_datetime_utcnow(self, mocked_datetime):
"""Mocks response from datetime.datetime.utcnow method.
Example usage:
import datetime
mocked_datetime_utcnow = (
datetime.datetime.utcnow() - datetime.timedelta(days=1))
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
print datetime.datetime.utcnow() # prints time reduced by 1 day
print datetime.datetime.utcnow() # prints current time.
Args:
mocked_datetime: datetime.datetime. The datetime which will be used
instead of the current UTC datetime.
Yields:
None. Empty yield statement.
"""
with datastore_services.mock_datetime_for_datastore(mocked_datetime):
yield
@contextlib.contextmanager
def login_context(self, email, is_super_admin=False):
"""Log in with the given email under the context of a 'with' statement.
Args:
email: str. An email associated with a user account.
is_super_admin: bool. Whether the user is a super admin.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
self.login(email, is_super_admin=is_super_admin)
try:
yield self.get_user_id_from_email(email)
finally:
self.logout()
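# Sketch of the login_context pattern (the handler URL is hypothetical): run a
# request as a signed-up user and get that user's id from the context manager.
#
#     self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
#     with self.login_context(self.EDITOR_EMAIL) as editor_id:
#         response = self.get_json('/some/handler')  # hypothetical URL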
@contextlib.contextmanager
def super_admin_context(self):
"""Log in as a global admin under the context of a 'with' statement.
Yields:
str. The id of the user associated with the given email, who is now
'logged in'.
"""
email = self.SUPER_ADMIN_EMAIL
with self.login_context(email, is_super_admin=True) as user_id:
yield user_id
def signup(self, email, username):
"""Complete the signup process for the user with the given username.
Args:
email: str. Email of the given user.
username: str. Username of the given user.
"""
user_services.create_new_user(self.get_auth_id_from_email(email), email)
with self.login_context(email), requests_mock.Mocker() as m:
# We mock out all HTTP requests while trying to signup to avoid
# calling out to real backend services.
m.request(requests_mock.ANY, requests_mock.ANY)
response = self.get_html_response(feconf.SIGNUP_URL)
self.assertEqual(response.status_int, 200)
response = self.testapp.post(feconf.SIGNUP_DATA_URL, params={
'csrf_token': self.get_new_csrf_token(),
'payload': json.dumps(
{'username': username, 'agreed_to_terms': True}),
})
self.assertEqual(response.status_int, 200)
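# Typical usage of signup (sketch): register a user, then resolve their id.
#
#     self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
#     owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)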
def signup_superadmin_user(self):
"""Signs up a superadmin user. Must be called at the end of setUp()."""
self.signup(self.SUPER_ADMIN_EMAIL, self.SUPER_ADMIN_USERNAME)
def set_config_property(self, config_obj, new_config_value):
"""Sets a given configuration object's value to the new value specified
using a POST request.
"""
with self.super_admin_context():
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_obj.name: new_config_value,
},
}, csrf_token=self.get_new_csrf_token())
def set_user_role(self, username, user_role):
"""Sets the given role for this user.
Args:
username: str. Username of the given user.
user_role: str. Role of the given user.
"""
with self.super_admin_context():
self.post_json('/adminrolehandler', {
'username': username,
'role': user_role,
}, csrf_token=self.get_new_csrf_token())
def set_admins(self, admin_usernames):
"""Sets role of given users as ADMIN.
Args:
admin_usernames: list(str). List of usernames.
"""
for name in admin_usernames:
self.set_user_role(name, feconf.ROLE_ID_ADMIN)
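# Sketch of the role helpers: sign up the user first, then promote them.
#
#     self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
#     self.set_admins([self.ADMIN_USERNAME])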
def set_topic_managers(self, topic_manager_usernames):
"""Sets role of given users as TOPIC_MANAGER.
Args:
topic_manager_usernames: list(str). List of usernames.
"""
for name in topic_manager_usernames:
self.set_user_role(name, feconf.ROLE_ID_TOPIC_MANAGER)
def set_moderators(self, moderator_usernames):
"""Sets role of given users as MODERATOR.
Args:
moderator_usernames: list(str). List of usernames.
"""
for name in moderator_usernames:
self.set_user_role(name, feconf.ROLE_ID_MODERATOR)
def set_banned_users(self, banned_usernames):
"""Sets role of given users as BANNED_USER.
Args:
banned_usernames: list(str). List of usernames.
"""
for name in banned_usernames:
self.set_user_role(name, feconf.ROLE_ID_BANNED_USER)
def set_collection_editors(self, collection_editor_usernames):
"""Sets role of given users as COLLECTION_EDITOR.
Args:
collection_editor_usernames: list(str). List of usernames.
"""
for name in collection_editor_usernames:
self.set_user_role(name, feconf.ROLE_ID_COLLECTION_EDITOR)
def get_user_id_from_email(self, email):
"""Gets the user ID corresponding to the given email.
Args:
email: str. A valid email stored in the App Engine database.
Returns:
str|None. ID of the user possessing the given email, or None if
the user does not exist.
"""
user_settings = user_services.get_user_settings_by_auth_id(
self.get_auth_id_from_email(email))
return user_settings and user_settings.user_id
@classmethod
def get_auth_id_from_email(cls, email):
"""Returns a mock auth ID corresponding to the given email.
This method can use any algorithm to produce results as long as, during
the runtime of each test case/method, it is:
1. Pure (same input always returns the same output).
2. One-to-one (no two distinct inputs return the same output).
3. An integer byte-string (integers are always valid in auth IDs).
Args:
email: str. The email address of the user.
Returns:
bytes. The mock auth ID of a user possessing the given email.
"""
# Although the hash function doesn't guarantee a one-to-one mapping, in
# practice it is sufficient for our tests. We make it a positive integer
# because those are always valid auth IDs.
return python_utils.convert_to_bytes(abs(hash(email)))
def _get_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
if expect_errors:
self.assertTrue(response.status_int >= 400)
else:
self.assertTrue(200 <= response.status_int < 400)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(response.status_int, expected_status_int)
self.assertEqual(response.content_type, expected_content_type)
return response
def get_html_response(self, url, params=None, expected_status_int=200):
"""Get a HTML response, transformed to a Python object.
Args:
url: str. The URL to fetch the response.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will
be 200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
return self._get_response(
url, 'text/html', params=params,
expected_status_int=expected_status_int)
def get_custom_response(
self, url, expected_content_type, params=None,
expected_status_int=200):
"""Get a response other than HTML or JSON as a Python object.
Args:
url: str. The URL to fetch the response.
expected_content_type: str. The content type to expect.
params: dict. A dictionary that will be encoded into a query string.
expected_status_int: int. The integer status code to expect. Will be
200 if not specified.
Returns:
webtest.TestResponse. The test response.
"""
self.assertNotIn(
expected_content_type, ['text/html', 'application/json'])
return self._get_response(
url, expected_content_type, params=params,
expected_status_int=expected_status_int)
def get_response_without_checking_for_errors(
self, url, expected_status_int_list, params=None):
"""Get a response, transformed to a Python object and checks for a list
of status codes.
Args:
url: str. The URL to fetch the response.
expected_status_int_list: list(int). A list of integer status code
to expect.
params: dict. A dictionary that will be encoded into a query string.
Returns:
webtest.TestResponse. The test response.
"""
if params is not None:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
# This swap is required to ensure that the templates are fetched from
# source directory instead of webpack_bundles since webpack_bundles is
# only produced after webpack compilation which is not performed during
# backend tests.
with self.swap(base, 'load_template', mock_load_template):
response = self.testapp.get(url, params=params, expect_errors=True)
self.assertIn(response.status_int, expected_status_int_list)
return response
def _parse_json_response(self, json_response, expect_errors):
"""Convert a JSON server response to an object (such as a dict)."""
if expect_errors:
self.assertTrue(json_response.status_int >= 400)
else:
self.assertTrue(200 <= json_response.status_int < 400)
self.assertEqual(json_response.content_type, 'application/json')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url, params=None, expected_status_int=200):
"""Get a JSON response, transformed to a Python object."""
if params is not None:
self.assertIsInstance(params, dict)
expect_errors = expected_status_int >= 400
json_response = self.testapp.get(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
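# Sketch of get_json (the URLs and params are hypothetical): fetch a handler's
# JSON payload, or assert on an expected error status.
#
#     result = self.get_json('/some/handler', params={'key': 'value'})
#     self.get_json('/some/missing/handler', expected_status_int=404)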
def post_json(
self, url, payload, csrf_token=None, expected_status_int=200,
upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self._send_post_request(
self.testapp, url, data, expect_errors,
expected_status_int=expected_status_int, upload_files=upload_files)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
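# Sketch of post_json with a CSRF token (hypothetical URL and payload):
#
#     csrf_token = self.get_new_csrf_token()
#     self.post_json(
#         '/some/handler', {'field': 'value'}, csrf_token=csrf_token)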
def delete_json(self, url, params='', expected_status_int=200):
"""Delete object on the server using a JSON call."""
if params:
self.assertIsInstance(
params, dict,
msg='Expected params to be a dict, received %s' % params)
expect_errors = expected_status_int >= 400
json_response = self.testapp.delete(
url, params=params, expect_errors=expect_errors,
status=expected_status_int)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def _send_post_request(
self, app, url, data, expect_errors, expected_status_int=200,
upload_files=None, headers=None):
"""Sends a post request with the data provided to the url specified.
Args:
app: TestApp. The WSGI application which receives the request and
produces response.
url: str. The URL to send the POST request to.
data: *. To be put in the body of the request. If params is an
iterator, it will be urlencoded. If it is a string, it will not
be encoded, but placed in the body directly. Can be a
collections.OrderedDict with webtest.forms.Upload fields
included.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code.
upload_files: list(tuple). List of
(fieldname, filename, file_content) tuples. Can also provide
just (fieldname, filename) to have the file contents read from
disk.
headers: dict(str, *). Extra headers to send.
Returns:
webtest.TestResponse. The response of the POST request.
"""
# Convert the files to bytes.
if upload_files is not None:
upload_files = tuple(
tuple(python_utils.convert_to_bytes(f) for f in upload_file)
for upload_file in upload_files)
return app.post(
url, params=data, headers=headers, status=expected_status_int,
upload_files=upload_files, expect_errors=expect_errors)
def post_email(
self, recipient_email, sender_email, subject, body, html_body=None,
expect_errors=False, expected_status_int=200):
"""Post an email from the sender to the recipient.
Args:
recipient_email: str. The email of the recipient.
sender_email: str. The email of the sender.
subject: str. The subject of the email.
body: str. The body of the email.
html_body: str. The HTML body of the email.
expect_errors: bool. Whether errors are expected.
expected_status_int: int. The expected status code of the JSON
response.
Returns:
json. A JSON response generated by _send_post_request function.
"""
email = mail.EmailMessage(
sender=sender_email, to=recipient_email, subject=subject, body=body)
if html_body is not None:
email.html = html_body
mime_email = email.to_mime_message()
headers = {
'Content-Type': mime_email.get_content_type(),
}
data = mime_email.as_string()
incoming_email_url = '/_ah/mail/%s' % recipient_email
return self._send_post_request(
self.mail_testapp, incoming_email_url, data, expect_errors,
headers=headers, expected_status_int=expected_status_int)
def post_task(
self, url, payload, headers, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Posts an object to the server by JSON with the specific headers
specified; return the received object.
"""
if csrf_token:
payload['csrf_token'] = csrf_token
return self.taskqueue_testapp.post(
url, params=json.dumps(payload), headers=headers,
status=expected_status_int, expect_errors=expect_errors,
content_type='application/json')
def put_json(self, url, payload, csrf_token=None, expected_status_int=200):
"""PUT an object to the server with JSON and return the response."""
params = {'payload': json.dumps(payload)}
if csrf_token:
params['csrf_token'] = csrf_token
expect_errors = expected_status_int >= 400
json_response = self.testapp.put(
url, params=params, expect_errors=expect_errors)
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
#
# Reference URL:
# https://github.com/Pylons/webtest/blob/bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(json_response, expect_errors)
def get_new_csrf_token(self):
"""Generates CSRF token for test."""
response = self.get_json('/csrfhandler')
return response['token']
def save_new_default_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category='Algebra')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def set_interaction_for_state(self, state, interaction_id):
"""Sets the interaction_id, sets the fully populated default interaction
customization arguments, and increments next_content_id_index as needed.
Args:
state: State. The state domain object to set the interaction for.
interaction_id: str. The interaction id to set. Also sets the
default customization args for the given interaction id.
"""
# We wrap next_content_id_index in a dict so that modifying it in the
# inner function modifies the value.
next_content_id_index_dict = {'value': state.next_content_id_index}
def traverse_schema_and_assign_content_ids(value, schema, contentId):
"""Generates content_id from recursively traversing the schema, and
assigning to the current value.
Args:
value: *. The current traversed value in customization
arguments.
schema: dict. The current traversed schema.
contentId: str. The content_id generated so far.
"""
is_subtitled_html_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML)
is_subtitled_unicode_spec = (
schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and
schema['obj_type'] ==
schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE)
if is_subtitled_html_spec or is_subtitled_unicode_spec:
value['content_id'] = '%s_%i' % (
contentId, next_content_id_index_dict['value'])
next_content_id_index_dict['value'] += 1
elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST:
for x in value:
traverse_schema_and_assign_content_ids(
x, schema['items'], contentId)
elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT:
for schema_property in schema['properties']:
traverse_schema_and_assign_content_ids(
value[schema_property['name']],
schema_property['schema'],
'%s_%s' % (contentId, schema_property['name']))
interaction = (
interaction_registry.Registry.get_interaction_by_id(interaction_id))
ca_specs = interaction.customization_arg_specs
customization_args = {}
for ca_spec in ca_specs:
ca_name = ca_spec.name
ca_value = ca_spec.default_value
traverse_schema_and_assign_content_ids(
ca_value, ca_spec.schema, 'ca_%s' % ca_name)
customization_args[ca_name] = {'value': ca_value}
state.update_interaction_id(interaction_id)
state.update_interaction_customization_args(customization_args)
state.update_next_content_id_index(next_content_id_index_dict['value'])
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE, end_state_name=None,
interaction_id='TextInput', correctness_feedback_enabled=False):
"""Saves a new strictly-validated exploration.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
end_state_name: str. The name of the end state for the exploration.
interaction_id: str. The id of the interaction.
correctness_feedback_enabled: bool. Whether correctness feedback is
enabled for the exploration.
Returns:
Exploration. The exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, category=category,
language_code=language_code)
self.set_interaction_for_state(
exploration.states[exploration.init_state_name], interaction_id)
exploration.objective = objective
exploration.correctness_feedback_enabled = correctness_feedback_enabled
# If an end state name is provided, add terminal node with that name.
if end_state_name is not None:
exploration.add_states([end_state_name])
end_state = exploration.states[end_state_name]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
# Link first state to ending state (to maintain validity).
init_state = exploration.states[exploration.init_state_name]
init_interaction = init_state.interaction
init_interaction.default_outcome.dest = end_state_name
if correctness_feedback_enabled:
init_interaction.default_outcome.labelled_as_correct = True
exp_services.save_new_exploration(owner_id, exploration)
return exploration
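# Sketch of creating and publishing a small exploration in a test; the ids
# below are hypothetical.
#
#     exploration = self.save_new_valid_exploration(
#         'exp_1', owner_id, end_state_name='End')
#     self.publish_exploration(owner_id, 'exp_1')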
def save_new_linear_exp_with_state_names_and_interactions(
self, exploration_id, owner_id, state_names, interaction_ids,
title='A title', category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new strictly-validated exploration with a sequence of states.
Args:
exploration_id: str. The id of the new validated exploration.
owner_id: str. The user_id of the creator of the exploration.
state_names: list(str). The names of states to be linked
sequentially in the exploration. Must be a non-empty list and
contain no duplicates.
interaction_ids: list(str). The names of the interaction ids to be
assigned to each state. Values will be cycled, so it doesn't
need to be the same size as state_names, but it must be
non-empty.
title: str. The title of the exploration.
category: str. The category this exploration belongs to.
objective: str. The objective of this exploration.
language_code: str. The language_code of this exploration.
Returns:
Exploration. The exploration domain object.
"""
if not state_names:
raise ValueError('must provide at least one state name')
if not interaction_ids:
raise ValueError('must provide at least one interaction type')
interaction_ids = itertools.cycle(interaction_ids)
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title=title, init_state_name=state_names[0],
category=category, objective=objective, language_code=language_code)
exploration.add_states(state_names[1:])
for from_state_name, dest_state_name in (
python_utils.ZIP(state_names[:-1], state_names[1:])):
from_state = exploration.states[from_state_name]
self.set_interaction_for_state(
from_state, python_utils.NEXT(interaction_ids))
from_state.interaction.default_outcome.dest = dest_state_name
end_state = exploration.states[state_names[-1]]
self.set_interaction_for_state(end_state, 'EndExploration')
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(owner_id, exploration)
return exploration
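# Sketch of a three-state linear exploration (ids and state names are
# hypothetical); the interaction ids are cycled over the non-terminal states.
#
#     exploration = self.save_new_linear_exp_with_state_names_and_interactions(
#         'exp_2', owner_id, ['Intro', 'Middle', 'End'], ['TextInput'])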
def save_new_exp_with_states_schema_v0(self, exp_id, user_id, title):
"""Saves a new default exploration with a default version 0 states dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
title: str. The title of the exploration.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title=title,
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=0,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
states=self.VERSION_0_STATES_DICT, param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'%s\'.' % title
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title=title, category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
# Create an ExplorationIssues model to match the behavior of creating
# new explorations.
stats_services.create_exp_issues_for_new_exploration(exp_id, 1)
def save_new_exp_with_custom_states_schema_version(
self, exp_id, user_id, states_dict, version):
"""Saves a new default exploration with the given version of state dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
states_dict: dict. The dict representation of all the states.
version: int. Custom states schema version.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title='title',
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=version,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME, states=states_dict,
param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'title\'.'
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title='title', category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
def save_new_exp_with_states_schema_v21(self, exp_id, user_id, title):
"""Saves a new default exploration with a default version 21 states
dictionary. Version 21 is where training data of exploration is stored
with the states dict.
This function should only be used for creating explorations in tests
involving migration of datastore explorations that use an old states
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating explorations. This is
because the latter approach would result in an exploration with the
*current* states schema version.
Args:
exp_id: str. The exploration ID.
user_id: str. The user_id of the creator.
title: str. The title of the exploration.
"""
exp_model = exp_models.ExplorationModel(
id=exp_id, category='category', title=title,
objective='Old objective', language_code='en', tags=[], blurb='',
author_notes='', states_schema_version=21,
init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
states=self.VERSION_21_STATE_DICT, param_specs={}, param_changes=[])
rights_manager.create_new_exploration_rights(exp_id, user_id)
commit_message = 'New exploration created with title \'%s\'.' % title
exp_model.commit(user_id, commit_message, [{
'cmd': 'create_new',
'title': 'title',
'category': 'category',
}])
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exp_id)
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_id, title=title, category='category',
objective='Old objective', language_code='en', tags=[],
ratings=feconf.get_empty_ratings(),
scaled_average_rating=feconf.EMPTY_SCALED_AVERAGE_RATING,
status=exp_rights.status,
community_owned=exp_rights.community_owned,
owner_ids=exp_rights.owner_ids, contributor_ids=[],
contributors_summary={})
exp_summary_model.put()
def publish_exploration(self, owner_id, exploration_id):
"""Publish the exploration with the given exploration_id.
Args:
owner_id: str. The user_id of the owner of the exploration.
exploration_id: str. The ID of the new exploration.
"""
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_exploration(committer, exploration_id)
def save_new_default_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default collection written by owner_id.
Args:
collection_id: str. The id of the new default collection.
owner_id: str. The user_id of the creator of the collection.
title: str. The title of the collection.
category: str. The category this collection belongs to.
objective: str. The objective of this collection.
language_code: str. The language_code of this collection.
Returns:
Collection. The collection domain object.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
collection_services.save_new_collection(owner_id, collection)
return collection
def save_new_valid_collection(
self, collection_id, owner_id, title='A title',
category='A category', objective='An objective',
language_code=constants.DEFAULT_LANGUAGE_CODE,
exploration_id='an_exploration_id',
end_state_name=DEFAULT_END_STATE_NAME):
"""Creates an Oppia collection and adds a node saving the exploration
details.
Args:
collection_id: str. ID for the collection to be created.
owner_id: str. The user_id of the creator of the collection.
title: str. Title for the collection.
category: str. The category of the exploration.
objective: str. Objective for the exploration.
language_code: str. The language code for the exploration.
exploration_id: str. The exploration_id for the Oppia exploration.
end_state_name: str. The name of the end state for the exploration.
Returns:
Collection. A newly-created collection containing the corresponding
exploration details.
"""
collection = collection_domain.Collection.create_default_collection(
collection_id, title=title, category=category, objective=objective,
language_code=language_code)
# Check whether exploration with given exploration_id exists or not.
exploration = (
exp_fetchers.get_exploration_by_id(exploration_id, strict=False))
if exploration is None:
exploration = self.save_new_valid_exploration(
exploration_id, owner_id, title=title, category=category,
objective=objective, end_state_name=end_state_name)
collection.add_node(exploration.id)
collection_services.save_new_collection(owner_id, collection)
return collection
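# Sketch of creating a collection that wraps a (possibly pre-existing)
# exploration; the ids are hypothetical.
#
#     collection = self.save_new_valid_collection(
#         'col_1', owner_id, exploration_id='exp_1')
#     self.publish_collection(owner_id, 'col_1')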
def publish_collection(self, owner_id, collection_id):
"""Publish the collection with the given collection_id.
Args:
owner_id: str. The user_id of the owner of the collection.
collection_id: str. ID of the collection to be published.
"""
committer = user_services.UserActionsInfo(owner_id)
rights_manager.publish_collection(committer, collection_id)
def save_new_story(
self, story_id, owner_id, corresponding_topic_id,
title='Title', description='Description', notes='Notes',
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='title', meta_tag_content='story meta tag content'):
"""Creates an Oppia Story and saves it.
NOTE: Callers are responsible for ensuring that the
'corresponding_topic_id' provided is valid, unless a test explicitly
requires it to be invalid.
Args:
story_id: str. ID for the story to be created.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
notes: str. A set of notes that describe the characters,
main storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The url fragment of the story.
meta_tag_content: str. The meta tag content of the story.
Returns:
Story. A newly-created story.
"""
story = story_domain.Story.create_default_story(
story_id, title, description, corresponding_topic_id, url_fragment)
story.title = title
story.description = description
story.notes = notes
story.language_code = language_code
story.url_fragment = url_fragment
story.meta_tag_content = meta_tag_content
story_services.save_new_story(owner_id, story)
return story
def save_new_story_with_story_contents_schema_v1(
self, story_id, thumbnail_filename, thumbnail_bg_color,
owner_id, title, description, notes, corresponding_topic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
url_fragment='story-frag',
meta_tag_content='story meta tag content'):
"""Saves a new story with a default version 1 story contents data dict.
This function should only be used for creating stories in tests
involving migration of datastore stories that use an old story contents
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating stories. This is because
the latter approach would result in a story with the *current* story
contents schema version.
Args:
story_id: str. ID for the story to be created.
thumbnail_filename: str|None. Thumbnail filename for the story.
thumbnail_bg_color: str|None. Thumbnail background color for the
story.
owner_id: str. The user_id of the creator of the story.
title: str. The title of the story.
description: str. The high level description of the story.
notes: str. A set of notes that describe the characters, main
storyline, and setting.
corresponding_topic_id: str. The id of the topic to which the story
belongs.
language_code: str. The ISO 639-1 code for the language this story
is written in.
url_fragment: str. The URL fragment for the story.
meta_tag_content: str. The meta tag content of the story.
"""
story_model = story_models.StoryModel(
id=story_id, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color, description=description,
title=title, language_code=language_code,
story_contents_schema_version=1, notes=notes,
corresponding_topic_id=corresponding_topic_id,
story_contents=self.VERSION_1_STORY_CONTENTS_DICT,
url_fragment=url_fragment, meta_tag_content=meta_tag_content)
commit_message = 'New story created with title \'%s\'.' % title
story_model.commit(
owner_id, commit_message,
[{'cmd': story_domain.CMD_CREATE_NEW, 'title': title}])
def save_new_subtopic(self, subtopic_id, owner_id, topic_id):
"""Creates an Oppia subtopic and saves it.
Args:
subtopic_id: str. ID for the subtopic to be created.
owner_id: str. The user_id of the creator of the topic.
topic_id: str. ID for the topic that the subtopic belongs to.
Returns:
SubtopicPage. A newly-created subtopic.
"""
subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
subtopic_id, topic_id))
subtopic_changes = [
subtopic_page_domain.SubtopicPageChange({
'cmd': subtopic_page_domain.CMD_CREATE_NEW,
'topic_id': topic_id,
'subtopic_id': subtopic_id,
})
]
subtopic_page_services.save_subtopic_page(
owner_id, subtopic_page, 'Create new subtopic', subtopic_changes)
return subtopic_page
def save_new_topic(
self, topic_id, owner_id, name='topic', abbreviated_name='topic',
url_fragment='topic',
thumbnail_filename='topic.svg',
thumbnail_bg_color=(
constants.ALLOWED_THUMBNAIL_BG_COLORS['topic'][0]),
description='description', canonical_story_ids=None,
additional_story_ids=None, uncategorized_skill_ids=None,
subtopics=None, next_subtopic_id=0,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Creates an Oppia Topic and saves it.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
thumbnail_filename: str|None. The thumbnail filename of the topic.
thumbnail_bg_color: str|None. The thumbnail background color of the
topic.
description: str. The description of the topic.
canonical_story_ids: list(str). The list of ids of canonical stories
that are part of the topic.
additional_story_ids: list(str). The list of ids of additional
stories that are part of the topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
subtopics: list(Subtopic). The different subtopics that are part of
this topic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
Returns:
Topic. A newly-created topic.
"""
canonical_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (canonical_story_ids or [])
]
additional_story_references = [
topic_domain.StoryReference.create_default_story_reference(story_id)
for story_id in (additional_story_ids or [])
]
uncategorized_skill_ids = uncategorized_skill_ids or []
subtopics = subtopics or []
topic = topic_domain.Topic(
topic_id, name, abbreviated_name, url_fragment, thumbnail_filename,
thumbnail_bg_color, description, canonical_story_references,
additional_story_references, uncategorized_skill_ids, subtopics,
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION, next_subtopic_id,
language_code, 0, feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION,
meta_tag_content, practice_tab_is_displayed,
page_title_fragment_for_web)
topic_services.save_new_topic(owner_id, topic)
return topic
def save_new_topic_with_subtopic_schema_v1(
self, topic_id, owner_id, name, abbreviated_name, url_fragment,
canonical_name, description, thumbnail_filename, thumbnail_bg_color,
canonical_story_references, additional_story_references,
uncategorized_skill_ids, next_subtopic_id,
language_code=constants.DEFAULT_LANGUAGE_CODE,
meta_tag_content='topic meta tag content',
practice_tab_is_displayed=False,
page_title_fragment_for_web='topic page title'):
"""Saves a new topic with a default version 1 subtopic data dict.
This function should only be used for creating topics in tests involving
migration of datastore topics that use an old subtopic schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating topics. This is because
the latter approach would result in a topic with the *current* subtopic
schema version.
Args:
topic_id: str. ID for the topic to be created.
owner_id: str. The user_id of the creator of the topic.
name: str. The name of the topic.
abbreviated_name: str. The abbreviated name of the topic.
url_fragment: str. The url fragment of the topic.
canonical_name: str. The canonical name (lowercase) of the topic.
description: str. The description of the topic.
thumbnail_filename: str. The thumbnail file name of the topic.
thumbnail_bg_color: str. The thumbnail background color of the
topic.
canonical_story_references: list(StoryReference). A set of story
reference objects representing the canonical stories that are
part of this topic.
additional_story_references: list(StoryReference). A set of story
reference object representing the additional stories that are
part of this topic.
uncategorized_skill_ids: list(str). The list of ids of skills that
are not part of any subtopic.
next_subtopic_id: int. The id for the next subtopic.
language_code: str. The ISO 639-1 code for the language this topic
is written in.
meta_tag_content: str. The meta tag content for the topic.
practice_tab_is_displayed: bool. Whether the practice tab should be
displayed.
page_title_fragment_for_web: str. The page title fragment for the
topic.
"""
topic_rights_model = topic_models.TopicRightsModel(
id=topic_id, manager_ids=[], topic_is_published=True)
topic_model = topic_models.TopicModel(
id=topic_id, name=name, abbreviated_name=abbreviated_name,
url_fragment=url_fragment, thumbnail_filename=thumbnail_filename,
thumbnail_bg_color=thumbnail_bg_color,
canonical_name=canonical_name, description=description,
language_code=language_code,
canonical_story_references=canonical_story_references,
additional_story_references=additional_story_references,
uncategorized_skill_ids=uncategorized_skill_ids,
subtopic_schema_version=1,
story_reference_schema_version=(
feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
next_subtopic_id=next_subtopic_id,
subtopics=[self.VERSION_1_SUBTOPIC_DICT],
meta_tag_content=meta_tag_content,
practice_tab_is_displayed=practice_tab_is_displayed,
page_title_fragment_for_web=page_title_fragment_for_web)
commit_message = 'New topic created with name \'%s\'.' % name
topic_rights_model.commit(
committer_id=owner_id,
commit_message='Created new topic rights',
commit_cmds=[{'cmd': topic_domain.CMD_CREATE_NEW}])
topic_model.commit(
owner_id, commit_message,
[{'cmd': topic_domain.CMD_CREATE_NEW, 'name': name}])
def save_new_question(
self, question_id, owner_id, question_state_data,
linked_skill_ids, inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Creates an Oppia Question and saves it.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
question_state_data: State. The state data for the question.
linked_skill_ids: list(str). List of skill IDs linked to the
question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconceptions ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
Returns:
Question. A newly-created question.
"""
# Mutable objects should not be used as default argument values, so the
# empty-list default is applied here instead.
question = question_domain.Question(
question_id, question_state_data,
feconf.CURRENT_STATE_SCHEMA_VERSION, language_code, 0,
linked_skill_ids, inapplicable_skill_misconception_ids or [])
question_services.add_question(owner_id, question)
return question
def save_new_question_with_state_data_schema_v27(
self, question_id, owner_id, linked_skill_ids,
inapplicable_skill_misconception_ids=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default question with a default version 27 state data
dict.
This function should only be used for creating questions in tests
involving migration of datastore questions that use an old state data
schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
the latter approach would result in a question with the *current* state
data schema version.
Args:
question_id: str. ID for the question to be created.
owner_id: str. The id of the user creating the question.
linked_skill_ids: list(str). The skill IDs linked to the question.
inapplicable_skill_misconception_ids: list(str). List of skill
misconceptions ids that are not applicable to the question.
language_code: str. The ISO 639-1 code for the language this
question is written in.
"""
# Mutable objects should not be used as default argument values, so the
# empty-list default is applied here instead.
question_model = question_models.QuestionModel(
id=question_id, question_state_data=self.VERSION_27_STATE_DICT,
language_code=language_code, version=1,
question_state_data_schema_version=27,
linked_skill_ids=linked_skill_ids,
inapplicable_skill_misconception_ids=(
inapplicable_skill_misconception_ids or []))
question_model.commit(
owner_id, 'New question created',
[{'cmd': question_domain.CMD_CREATE_NEW}])
def save_new_question_suggestion_with_state_data_schema_v27(
self, author_id, skill_id, suggestion_id=None,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new question suggestion with a default version 27 state data
dict.
This function should only be used for creating question suggestions in
tests involving migration of datastore question suggestions that use an
old state data schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating questions. This is because
the latter approach would result in a question with the *current* state
data schema version.
"""
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + skill_id)
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': 27,
'language_code': language_code,
'linked_skill_ids': [skill_id],
'inapplicable_skill_misconception_ids': []
},
'skill_id': skill_id,
'skill_difficulty': 0.3
}
if suggestion_id is None:
suggestion_id = (
feedback_models.GeneralFeedbackThreadModel.
generate_new_thread_id(
feconf.ENTITY_TYPE_SKILL, skill_id))
suggestion_models.GeneralSuggestionModel.create(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL, skill_id, 1,
suggestion_models.STATUS_IN_REVIEW, author_id, None, change,
score_category, suggestion_id, language_code)
return suggestion_id
def save_new_skill(
self, skill_id, owner_id, description='description',
misconceptions=None, rubrics=None, skill_contents=None,
language_code=constants.DEFAULT_LANGUAGE_CODE,
prerequisite_skill_ids=None):
"""Creates an Oppia Skill and saves it.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
misconceptions: list(Misconception)|None. A list of Misconception
objects that contains the various misconceptions of the skill.
rubrics: list(Rubric)|None. A list of Rubric objects that contain
the rubric for each difficulty of the skill.
skill_contents: SkillContents|None. A SkillContents object
containing the explanation and examples of the skill.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
prerequisite_skill_ids: list(str)|None. The prerequisite skill IDs
for the skill.
Returns:
Skill. A newly-created skill.
"""
skill = (
skill_domain.Skill.create_default_skill(skill_id, description, []))
if misconceptions is not None:
skill.misconceptions = misconceptions
skill.next_misconception_id = len(misconceptions) + 1
if skill_contents is not None:
skill.skill_contents = skill_contents
if prerequisite_skill_ids is not None:
skill.prerequisite_skill_ids = prerequisite_skill_ids
if rubrics is not None:
skill.rubrics = rubrics
else:
skill.rubrics = [
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[2], ['Explanation 3']),
]
skill.language_code = language_code
skill.version = 0
skill_services.save_new_skill(owner_id, skill)
return skill
def save_new_skill_with_defined_schema_versions(
self, skill_id, owner_id, description, next_misconception_id,
misconceptions=None, rubrics=None, skill_contents=None,
misconceptions_schema_version=1, rubric_schema_version=1,
skill_contents_schema_version=1,
language_code=constants.DEFAULT_LANGUAGE_CODE):
"""Saves a new default skill with the given versions for misconceptions
and skill contents.
This function should only be used for creating skills in tests involving
migration of datastore skills that use an old schema version.
Note that it makes an explicit commit to the datastore instead of using
the usual functions for updating and creating skills. This is because
the latter approach would result in a skill with the *current* schema
version.
Args:
skill_id: str. ID for the skill to be created.
owner_id: str. The user_id of the creator of the skill.
description: str. The description of the skill.
next_misconception_id: int. The misconception id to be used by the
next misconception added.
misconceptions: list(Misconception.to_dict()). The list of
misconception dicts associated with the skill.
rubrics: list(Rubric.to_dict()). The list of rubric dicts associated
with the skill.
skill_contents: SkillContents.to_dict(). A SkillContents dict
containing the explanation and examples of the skill.
misconceptions_schema_version: int. The schema version for the
misconceptions object.
rubric_schema_version: int. The schema version for the rubric
object.
skill_contents_schema_version: int. The schema version for the
skill_contents object.
language_code: str. The ISO 639-1 code for the language this skill
is written in.
"""
skill_model = skill_models.SkillModel(
id=skill_id, description=description, language_code=language_code,
misconceptions=misconceptions, rubrics=rubrics,
skill_contents=skill_contents,
next_misconception_id=next_misconception_id,
misconceptions_schema_version=misconceptions_schema_version,
rubric_schema_version=rubric_schema_version,
skill_contents_schema_version=skill_contents_schema_version,
superseding_skill_id=None, all_questions_merged=False)
skill_model.commit(
owner_id, 'New skill created.',
[{'cmd': skill_domain.CMD_CREATE_NEW}])
def _create_valid_question_data(self, default_dest_state_name):
"""Creates a valid question_data dict.
Args:
default_dest_state_name: str. The default destination state.
Returns:
dict. The default question_data dict.
"""
state = state_domain.State.create_default_state(
default_dest_state_name, is_initial_state=True)
state.update_interaction_id('TextInput')
solution_dict = {
'answer_is_exclusive': False,
'correct_answer': 'Solution',
'explanation': {
'content_id': 'solution',
'html': '<p>This is a solution.</p>',
},
}
hints_list = [
state_domain.Hint(
state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')),
]
solution = state_domain.Solution.from_dict(
state.interaction.id, solution_dict)
state.update_interaction_solution(solution)
state.update_interaction_hints(hints_list)
state.update_interaction_customization_args({
'placeholder': {
'value': {
'content_id': 'ca_placeholder',
'unicode_str': 'Enter text here',
},
},
'rows': {'value': 1},
})
state.update_next_content_id_index(2)
state.interaction.default_outcome.labelled_as_correct = True
state.interaction.default_outcome.dest = None
return state
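# Minimal usage sketch inside a test method (hypothetical ids; assumes the
# test class mixes in this base class):
#     state = self._create_valid_question_data('End State')
#     question = self.save_new_question(
#         'question_id', owner_id, state, ['skill_id'])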
class LinterTestBase(GenericTestBase):
"""Base class for linter tests."""
def setUp(self):
super(LinterTestBase, self).setUp()
self.linter_stdout = []
def mock_print(*args):
"""Mock for python_utils.PRINT. Append the values to print to
linter_stdout list.
Args:
*args: list(*). Variable length argument list of values to print
in the same line of output.
"""
self.linter_stdout.append(
' '.join(python_utils.UNICODE(arg) for arg in args))
self.print_swap = self.swap(python_utils, 'PRINT', mock_print)
def assert_same_list_elements(self, phrases, stdout):
"""Checks to see if all of the phrases appear in at least one of the
stdout outputs.
Args:
phrases: list(str). A list of phrases we are trying to find in one
of the stdout outputs. For example, python linting outputs a
success string that includes data we don't have easy access to,
like how long the test took, so we may want to search for a
substring of that success string in stdout.
stdout: list(str). A list of the output results from the method's
execution.
"""
self.assertTrue(
any(all(p in output for p in phrases) for output in stdout))
def assert_failed_messages_count(self, stdout, expected_failed_count):
"""Assert number of expected failed checks to actual number of failed
checks.
Args:
stdout: list(str). A list of linter output messages.
expected_failed_count: int. Expected number of failed messages.
"""
failed_count = sum(msg.startswith('FAILED') for msg in stdout)
self.assertEqual(failed_count, expected_failed_count)
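# Illustrative sketch of the two asserts above (assumed linter output lines):
#     stdout = ['FAILED  check_one', 'SUCCESS  check_two took 0.3s']
#     self.assert_same_list_elements(['SUCCESS', 'check_two'], stdout)
#     self.assert_failed_messages_count(stdout, 1)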
class AuditJobsTestBase(GenericTestBase):
"""Base class for audit jobs tests."""
def run_job_and_check_output(
self, expected_output, sort=False, literal_eval=False):
"""Helper function to run job and compare output.
Args:
expected_output: list(*). The expected result of the job.
sort: bool. Whether to sort the outputs before comparison.
literal_eval: bool. Whether to use ast.literal_eval before
comparison.
"""
self.process_and_flush_pending_tasks()
job_id = self.job_class.create_new()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
self.job_class.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
self.process_and_flush_pending_tasks()
actual_output = self.job_class.get_output(job_id)
if literal_eval:
actual_output_dict = {}
expected_output_dict = {}
for item in (ast.literal_eval(value) for value in actual_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
actual_output_dict[item[0]] = value
for item in (ast.literal_eval(value) for value in expected_output):
value = item[1]
if isinstance(value, list):
value = sorted(value)
expected_output_dict[item[0]] = value
self.assertItemsEqual(actual_output_dict, expected_output_dict)
for key in actual_output_dict:
self.assertEqual(
actual_output_dict[key], expected_output_dict[key])
elif sort:
self.assertEqual(sorted(actual_output), sorted(expected_output))
else:
self.assertEqual(actual_output, expected_output)
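# Illustrative sketch of the literal_eval branch above: each expected entry
# is the repr of a [key, value] pair, and list values are compared without
# regard to order (assumed data; also assumes self.job_class is set by the
# concrete test class):
#     expected_output = ["['key_a', ['x', 'y']]", "['key_b', 3]"]
#     self.run_job_and_check_output(expected_output, literal_eval=True)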
class EmailMessageMock(python_utils.OBJECT):
"""Mock for core.platform.models email services messages."""
def __init__(
self, sender_email, recipient_email, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Inits a mock email message with all the necessary data.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_email: str. The email address of the recipient. Must be
utf-8.
subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Emails
must be utf-8.
reply_to: str|None. Optional argument. Reply address formatted like
'reply+<reply_id>@<incoming_email_domain_name>', where reply_id is
the unique id of the sender.
recipient_variables: dict|None. Optional argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'bob@example.com': {'first': 'Bob', 'id': 1},
'alice@example.com': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
"""
self.sender = sender_email
self.to = recipient_email
self.subject = subject
self.body = plaintext_body
self.html = html_body
self.bcc = bcc
self.reply_to = reply_to
self.recipient_variables = recipient_variables
class GenericEmailTestBase(GenericTestBase):
"""Base class for tests requiring email services."""
emails_dict = collections.defaultdict(list)
def run(self, result=None):
"""Adds a context swap on top of the test_utils.run() method so that
test classes extending GenericEmailTestBase will automatically have a
mailgun api key, mailgun domain name and mocked version of
send_email_to_recipients().
"""
with self.swap(
email_services, 'send_email_to_recipients',
self._send_email_to_recipients):
super(EmailTestBase, self).run(result=result)
def setUp(self):
super(GenericEmailTestBase, self).setUp()
self._wipe_emails_dict()
def _wipe_emails_dict(self):
"""Reset email dictionary for a new test."""
self.emails_dict = collections.defaultdict(list)
def _send_email_to_recipients(
self, sender_email, recipient_emails, subject, plaintext_body,
html_body, bcc=None, reply_to=None, recipient_variables=None):
"""Mocks sending an email to each email in recipient_emails.
Args:
sender_email: str. The email address of the sender. This should be
in the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Must be utf-8.
recipient_emails: list(str). The email addresses of the recipients.
Must be utf-8.
subject: str. The subject line of the email. Must be utf-8.
plaintext_body: str. The plaintext body of the email. Must be utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Must be utf-8.
bcc: list(str)|None. Optional argument. List of bcc emails. Must be
utf-8.
reply_to: str|None. Optional argument. Reply address formatted like
'reply+<reply_id>@<incoming_email_domain_name>', where reply_id is
the unique id of the sender.
recipient_variables: dict|None. Optional Argument. If batch sending
requires differentiating each email based on the recipient, we
assign a unique id to each recipient, including info relevant to
that recipient so that we can reference it when composing the
email like so:
recipient_variables = {
'bob@example.com': {'first': 'Bob', 'id': 1},
'alice@example.com': {'first': 'Alice', 'id': 2},
}
subject = 'Hey, %recipient.first%'
For more information about this format, see:
https://documentation.mailgun.com/en/latest/user_manual.html#batch-sending
Returns:
bool. Whether the emails are sent successfully.
"""
bcc_emails = None
if bcc:
bcc_emails = bcc[0] if len(bcc) == 1 else bcc
new_email = EmailMessageMock(
sender_email, recipient_emails, subject, plaintext_body, html_body,
bcc=bcc_emails, reply_to=(reply_to if reply_to else None),
recipient_variables=(
recipient_variables if recipient_variables else None))
for recipient_email in recipient_emails:
self.emails_dict[recipient_email].append(new_email)
return True
def _get_sent_email_messages(self, to):
"""Gets messages to a single recipient email.
Args:
to: str. The recipient email address.
Returns:
list(EmailMessageMock). The list of email messages corresponding to
that recipient email.
"""
return self.emails_dict[to] if to in self.emails_dict else []
def _get_all_sent_email_messages(self):
"""Gets the entire messages dictionary.
Returns:
dict(str, list(EmailMessageMock)). The dict keyed by recipient
email. Each value contains a list of EmailMessageMock objects
corresponding to that recipient email; in other words, all
individual emails sent to that specific recipient email.
"""
return self.emails_dict
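# Minimal usage sketch inside a test method (hypothetical addresses; the
# send_email_to_recipients swap is installed by run() above, and the
# argument order follows _send_email_to_recipients):
#     email_services.send_email_to_recipients(
#         'sender@example.com', ['a@example.com'], 'Subject',
#         'plain body', '<p>html body</p>')
#     messages = self._get_sent_email_messages('a@example.com')
#     self.assertEqual(len(messages), 1)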
EmailTestBase = GenericEmailTestBase
class ClassifierTestBase(GenericEmailTestBase):
"""Base class for classifier test classes that need common functions
for related to reading classifier data and mocking the flow of the
storing the trained models through post request.
This class is derived from GenericEmailTestBase because the
TrainedClassifierHandlerTests test suite requires email services test
functions in addition to the classifier functions defined below.
"""
def post_blob(self, url, payload, expected_status_int=200):
"""Post a BLOB object to the server; return the received object.
Note that this method should only be used for the
classifier.TrainedClassifierHandler handler and nothing else, because
we don't have any general security mechanism for transferring binary
data. TrainedClassifierHandler implements a specific mechanism that is
restricted to that handler.
Args:
url: str. The URL to which BLOB object in payload should be sent
through a post request.
payload: bytes. Binary data which needs to be sent.
expected_status_int: int. The status expected as a response of post
request.
Returns:
dict. Parsed JSON response received upon invoking the post request.
"""
data = payload
expect_errors = False
if expected_status_int >= 400:
expect_errors = True
response = self._send_post_request(
self.testapp, url, data,
expect_errors, expected_status_int=expected_status_int,
headers={b'content-type': b'application/octet-stream'})
# Testapp takes in a status parameter which is the expected status of
# the response. However this expected status is verified only when
# expect_errors=False. For other situations we need to explicitly check
# the status.
# Reference URL:
# https://github.com/Pylons/webtest/blob/
# bf77326420b628c9ea5431432c7e171f88c5d874/webtest/app.py#L1119 .
self.assertEqual(response.status_int, expected_status_int)
return self._parse_json_response(response, expect_errors)
def _get_classifier_data_from_classifier_training_job(
self, classifier_training_job):
"""Retrieves classifier training job from GCS using metadata stored in
classifier_training_job.
Args:
classifier_training_job: ClassifierTrainingJob. Domain object
containing metadata of the training job which is used to
retrieve the trained model.
Returns:
FrozenModel. Protobuf object containing classifier data.
"""
filename = classifier_training_job.classifier_data_filename
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(
feconf.ENTITY_TYPE_EXPLORATION, classifier_training_job.exp_id))
classifier_data = utils.decompress_from_zlib(fs.get(filename))
classifier_data_proto = text_classifier_pb2.TextClassifierFrozenModel()
classifier_data_proto.ParseFromString(classifier_data)
return classifier_data_proto
class FunctionWrapper(python_utils.OBJECT):
"""A utility for making function wrappers. Create a subclass and override
any or both of the pre_call_hook and post_call_hook methods. See these
methods for more info.
"""
def __init__(self, func):
"""Creates a new FunctionWrapper instance.
Args:
func: a callable, or data descriptor. If it's a descriptor, then
__get__ should return a bound method. For example, func can be
a function, a method, a static or class method, but not a
@property.
"""
self._func = func
self._instance = None
def __call__(self, *args, **kwargs):
"""Overrides the call method for the function to call pre_call_hook
method which would be called before the function is executed and
post_call_hook which would be called after the function is executed.
"""
if self._instance is not None:
args = [self._instance] + list(args)
args_dict = inspect.getcallargs(self._func, *args, **kwargs)
self.pre_call_hook(args_dict)
result = self._func(*args, **kwargs)
self.post_call_hook(args_dict, result)
return result
def __get__(self, instance, owner):
# We have to implement __get__ because otherwise, we don't have a chance
# to bind to the instance self._func was bound to. See the following SO
# answer: https://stackoverflow.com/a/22555978/675311
self._instance = instance
return self
def pre_call_hook(self, args):
"""Override this to do tasks that should be executed before the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
"""
pass
def post_call_hook(self, args, result):
"""Override this to do tasks that should be executed after the actual
function call.
Args:
args: list(*). Set of arguments that the function accepts.
result: *. Result returned from the function.
"""
pass
class CallCounter(FunctionWrapper):
"""A function wrapper that keeps track of how often the function is called.
Note that the counter is incremented before each call, so it is also
increased when the function raises an exception.
"""
def __init__(self, f):
"""Counts the number of times the given function has been called. See
FunctionWrapper for arguments.
"""
super(CallCounter, self).__init__(f)
self._times_called = 0
@property
def times_called(self):
"""Property that returns the number of times the wrapped function has
been called.
Returns:
int. The number of times the wrapped function has been called.
"""
return self._times_called
def pre_call_hook(self, args):
"""Method that is called before each function call to increment the
counter tracking the number of times a function is called. This will
also be called even when the function raises an exception.
Args:
args: list(*). Set of arguments that the function accepts.
"""
self._times_called += 1
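# Minimal usage sketch (assumed toy function):
#     counter = CallCounter(lambda x: x * 2)
#     counter(3)
#     counter(4)
#     assert counter.times_called == 2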
class FailingFunction(FunctionWrapper):
"""A function wrapper that makes a function fail, raising a given exception.
It can be set to succeed after a given number of calls.
"""
INFINITY = 'infinity'
def __init__(self, f, exception, num_tries_before_success):
"""Create a new Failing function.
Args:
f: func. See FunctionWrapper.
exception: Exception. The exception to be raised.
num_tries_before_success: int. The number of times to raise an
exception before a call succeeds. If this is 0, all calls will
succeed; if it is FailingFunction.INFINITY, all calls will
fail.
"""
super(FailingFunction, self).__init__(f)
self._exception = exception
self._num_tries_before_success = num_tries_before_success
self._always_fail = (
self._num_tries_before_success == FailingFunction.INFINITY)
self._times_called = 0
if not (self._always_fail or self._num_tries_before_success >= 0):
raise ValueError(
'num_tries_before_success should either be an '
'integer greater than or equal to 0, '
'or FailingFunction.INFINITY')
def pre_call_hook(self, args):
"""Method that is called each time before the actual function call to
check if the exception is to be raised based on the number of tries
before success.
Args:
args: list(*). Set of arguments this function accepts.
"""
self._times_called += 1
call_should_fail = self._always_fail or (
self._num_tries_before_success >= self._times_called)
if call_should_fail:
raise self._exception
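# Minimal usage sketch (assumed toy function and exception); with
# num_tries_before_success=2 the first two calls raise and the third
# succeeds:
#     flaky = FailingFunction(lambda: 'ok', ValueError('fail'), 2)
#     flaky()  # raises ValueError
#     flaky()  # raises ValueError
#     flaky()  # returns 'ok'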
|
"""
:author: Thomas Delaet <thomas@delaet.org>
"""
from velbus.modules.vmb4ry import VMB4RYModule
from velbus.modules.vmbin import VMB6INModule
from velbus.modules.vmbin import VMB7INModule
|
from unittest.mock import MagicMock
from django.urls import reverse
from hijack.contrib.admin import HijackUserAdminMixin
from hijack.tests.test_app.models import Post
class TestHijackUserAdminMixin:
def test_user_admin(self, admin_client):
url = reverse("admin:test_app_customuser_changelist")
response = admin_client.get(url)
assert response.status_code == 200
assert (
b'<button type="submit" class="button">HIJACK</button>' in response.content
)
def test_related_user(self, admin_client, admin_user):
url = reverse("admin:test_app_post_changelist")
Post.objects.create(author=admin_user)
response = admin_client.get(url)
assert response.status_code == 200
assert b"Hijack admin" in response.content
def test_get_hijack_success_url__obj_absolute_url(self, rf):
obj = Post()
obj.get_absolute_url = MagicMock(return_value="/path/to/obj/")
admin = HijackUserAdminMixin()
assert admin.get_hijack_success_url(None, obj) == "/path/to/obj/"
def test_get_hijack_success_url__obj_no_absolute_url(self, rf):
obj = Post()
admin = HijackUserAdminMixin()
assert admin.get_hijack_success_url(None, obj) == "/accounts/profile/"
def test_get_hijack_success_url__hijack_success_url(self, rf):
obj = Post()
obj.get_absolute_url = MagicMock(return_value="/path/to/obj/")
admin = HijackUserAdminMixin()
admin.hijack_success_url = "/custom/success/path/"
assert admin.get_hijack_success_url(None, obj) == "/custom/success/path/"
|
"""
Django settings for modelpractice project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ligk%x$+)qey=q+&d_nca7%s-_@zn4%g=kg_4+p!ga7n)-4nb@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'modelpractice.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'modelpractice.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
import os
import numpy as np
import pandas as pd
import binascii
import warnings
import tempfile
from math import ceil
from multiprocessing import cpu_count, sharedctypes
from multiprocessing.pool import Pool
from sklearn.metrics import r2_score
from deepimpute.net import Net
from deepimpute.normalizer import Normalizer
from deepimpute.util import get_input_genes,get_target_genes
from deepimpute.util import score_model
def newCoreInitializer(arr_to_populate):
global sharedArray
sharedArray = arr_to_populate
def trainNet(in_out, NN_param_i, data_i, labels):
features, targets = in_out
net = Net(**NN_param_i)
net.fit(data_i, targetGenes=targets, predictorGenes=features, labels=labels)
# retrieve the array
params = list(NN_param_i.keys()) + ['targetGenes', 'NNid', 'predictorGenes']
args2return = [(attr, getattr(net, attr)) for attr in params]
return {k: v if k[0] != '_' else (k[1:], v) for k, v in args2return}
def predictNet(data_i, NN_param_i, labels):
net = Net(**NN_param_i)
data_i_ok = pd.DataFrame(np.reshape(data_i, list(map(len, labels))),
index=labels[0], columns=labels[1])
return net.predict(data_i_ok)
def trainOrPredict(args):
in_out, NN_param_i, labels, mode = args
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
data_i = np.ctypeslib.as_array(sharedArray)
if mode == "predict":
return predictNet(data_i, NN_param_i, labels)
return trainNet(in_out, NN_param_i, data_i, labels)
class MultiNet(object):
def __init__(self, n_cores=4, predictorLimit=10, preproc='log_or_exp', runDir=os.path.join(tempfile.gettempdir(),'run'), seed=0, **NN_params):
self._maxcores = n_cores
self.predictorLimit = predictorLimit
self.norm = Normalizer.fromName(preproc)
self.runDir = runDir
self.seed = seed
self.NN_params = NN_params
self.NN_params['seed'] = seed
if 'dims' not in self.NN_params.keys():
self.NN_params['dims'] = [20,500]
@property
def maxcores(self):
if self._maxcores == 'all':
return cpu_count()
else:
return self._maxcores
@maxcores.setter
def maxcores(self, value):
self._maxcores = value
def get_params(self, deep=False):
return self.__dict__
def setIDandRundir(self,data):
# set runID
runID = binascii.b2a_hex(os.urandom(5))
if type(runID) is bytes:
runID = runID.decode()
self.NN_params['runDir'] = os.path.join(self.runDir, str(runID))
def getCores(self,NN_genes):
n_runs = int(ceil(1.*len(NN_genes) / self.NN_params['dims'][1]))
n_cores = min(self.maxcores, n_runs)
self.NN_params['n_cores'] = max(1, int(self.maxcores / n_cores))
return n_runs,n_cores
def fit(self, data, NN_lim='auto', cell_subset=None):
np.random.seed(seed=self.seed)
df = pd.DataFrame(data)
self.setIDandRundir(df)
# Change the output dimension if the data has too few genes
if df.shape[1] < self.NN_params['dims'][1]:
self.NN_params['dims'][1] = df.shape[1]
# Choose genes to impute
genes_sort = df.quantile(.99).sort_values(ascending=False)
NN_genes = get_target_genes(genes_sort,NN_lim=NN_lim)
df_to_impute = df[NN_genes]
n_runs,n_cores = self.getCores(NN_genes)
# ------------------------# Subnetworks #------------------------#
predictors = np.intersect1d(genes_sort.index[genes_sort>self.predictorLimit], NN_genes)
print('Using {} genes as potential predictors'.format(len(predictors)))
n_choose = int(len(NN_genes)/self.NN_params['dims'][1])
subGenelists = np.random.choice(NN_genes,
[n_choose, self.NN_params['dims'][1]],
replace=False).tolist()
if n_choose < n_runs:
# Special case: for the last run, the output layer will have less nodes
selectedGenes = np.reshape(subGenelists, -1)
subGenelists.append(np.setdiff1d(NN_genes, selectedGenes).tolist())
# ------------------------# Extracting input genes #------------------------#
corrMatrix = 1 - np.abs(pd.DataFrame(np.corrcoef(df_to_impute.T),
index=NN_genes, columns=NN_genes)[predictors])
in_out_genes = get_input_genes(df_to_impute,self.NN_params['dims'],distanceMatrix=corrMatrix,
targets=subGenelists,predictorLimit=self.predictorLimit)
# ------------------------# Subsets for fitting #------------------------#
n_cells = df_to_impute.shape[0]
if type(cell_subset) is float or cell_subset == 1:
n_cells = int(cell_subset * n_cells)
elif type(cell_subset) is int:
n_cells = cell_subset
self.trainCells = df_to_impute.sample(n_cells,replace=False).index
print('Starting training with {} cells ({:.1%}) on {} threads ({} cores/thread).'.
format(n_cells, 1.*n_cells/df_to_impute.shape[0], n_cores, self.NN_params['n_cores']))
# -------------------# Preprocessing (if any) #--------------------#
df_to_impute = self.norm.fit(df_to_impute).transform(df_to_impute)
# -------------------# Share matrix between subprocesses #--------------------#
''' Create memory chunk and put the matrix in it '''
idx, cols = self.trainCells, df_to_impute.columns
trainData = df_to_impute.loc[self.trainCells, :].values
''' Parallelize process with shared array '''
childJobs = [(in_out, self.NN_params, (idx, cols), 'train')
for in_out in in_out_genes]
output_dicts = self.runOnMultipleCores(n_cores, trainData.flatten(), childJobs)
self.networks = []
for dictionary in output_dicts:
self.networks.append(Net(**dictionary))
return self
def runOnMultipleCores(self, cores, data, childJobs):
sharedArray = sharedctypes.RawArray('d', data)
pool = Pool(processes=cores, initializer=newCoreInitializer, initargs=(sharedArray,))
output_dicts = pool.map(trainOrPredict, childJobs)
pool.close()
pool.join()
return output_dicts
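# Sketch of the shared-memory pattern used above (standard-library API, values
# illustrative): the flattened matrix is copied once into a ctypes RawArray and
# each worker rebuilds a NumPy view on it instead of pickling the data per job.
#     shared = sharedctypes.RawArray('d', data)  # flat buffer of doubles
#     pool = Pool(processes=cores, initializer=newCoreInitializer,
#                 initargs=(shared,))
#     # inside a worker: np.ctypeslib.as_array(sharedArray)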
def predict(self, data, imputed_only=False, restore_pos_values=True):
df = pd.DataFrame(data)
''' Create memory chunk and put the matrix in it '''
idx, cols = df.index, df.columns
df_norm = self.norm.fit(df).transform(df).values.flatten()
''' Parallelize process with shared array '''
childJobs = [((12, 15), net.__dict__, (idx, cols), 'predict')
for net in self.networks]
output_dicts = self.runOnMultipleCores(self.maxcores, df_norm, childJobs)
Y_imputed = pd.concat(output_dicts, axis=1)
Y_not_imputed = df[[gene for gene in df.columns if gene not in Y_imputed.columns]]
Y_total = self.norm.transform(pd.concat([Y_imputed, Y_not_imputed], axis=1)[df.columns],
rev=True)
if restore_pos_values:
Y_total = Y_total.mask(df>0,df)
if imputed_only:
Y_total = Y_total[Y_imputed.columns]
if type(data) == type(pd.DataFrame()):
return Y_total
else:
return Y_total.values
def score(self, data, metric=r2_score):
imputedGenes = list(zip(*[ net.targetGenes for net in self.networks ]))
return score_model(self,pd.DataFrame(data),metric=r2_score, cols=imputedGenes)
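# Minimal usage sketch (assumed `raw_counts` is a cells x genes pandas
# DataFrame; parameter values are illustrative):
#     model = MultiNet(n_cores=4, seed=0)
#     model.fit(raw_counts, NN_lim='auto', cell_subset=1)
#     imputed = model.predict(raw_counts, imputed_only=False)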
|
# -*- coding: utf-8 -*-
""" Command line configuration parser """
import sys
import os.path
import argparse
import configparser
def parse():
""" Parse command line options """
parser = argparse.ArgumentParser(
description='Dynamic DynamoDB - Auto provisioning AWS DynamoDB')
parser.add_argument(
'-c', '--config',
help='Read configuration from a configuration file')
parser.add_argument(
'--dry-run',
action='store_true',
help='Run without making any changes to your DynamoDB table')
parser.add_argument(
'--run-once',
action='store_true',
help='Run once and then exit Dynamic DynamoDB, instead of looping')
parser.add_argument(
'--show-config',
action='store_true',
help='Parse config files, print parsed data and then exit Dynamic DynamoDB')
parser.add_argument(
'--check-interval',
type=int,
help="""How many seconds should we wait between
the checks (default: 300)""")
parser.add_argument(
'--log-file',
help='Send output to the given log file')
parser.add_argument(
'--log-level',
choices=['debug', 'info', 'warning', 'error'],
help='Log level to use (default: info)')
parser.add_argument(
'--log-config-file',
help=(
'Use a custom Python logging configuration file. Overrides both '
'--log-level and --log-file.'
))
parser.add_argument(
'--version',
action='store_true',
help='Print current version number')
parser.add_argument(
'--aws-access-key-id',
help="Override Boto configuration with the following AWS access key")
parser.add_argument(
'--aws-secret-access-key',
help="Override Boto configuration with the following AWS secret key")
daemon_ag = parser.add_argument_group('Daemon options')
daemon_ag.add_argument(
'--daemon',
help=(
'Run Dynamic DynamoDB in daemon mode. Valid modes are '
'[start|stop|restart|foreground]'))
daemon_ag.add_argument(
'--instance',
default='default',
help=(
'Name of the Dynamic DynamoDB instance. '
'Used to run multiple instances of Dynamic DynamoDB. '
'Give each instance a unique name and control them separately '
'with the --daemon flag. (default: default)'))
daemon_ag.add_argument(
'--pid-file-dir',
default='/tmp',
help='Directory where pid file is located in. Defaults to /tmp')
dynamodb_ag = parser.add_argument_group('DynamoDB options')
dynamodb_ag.add_argument(
'-r', '--region',
help='AWS region to operate in (default: us-east-1)')
dynamodb_ag.add_argument(
'-t', '--table-name',
help=(
'Table(s) to target. '
'The name is treated as a regular expression. '
'E.g. "^my_table.*$" or "my_table"'))
r_scaling_ag = parser.add_argument_group('Read units scaling properties')
r_scaling_ag.add_argument(
'--reads-upper-threshold',
type=int,
help="""Scale up the reads with --increase-reads-with if
the currently consumed read units reaches this many
percent (default: 90)""")
r_scaling_ag.add_argument(
'--throttled-reads-upper-threshold',
type=int,
help="""Scale up the reads with --increase-reads-with if
the count of throttled read events exceeds this
count (default: 0)""")
r_scaling_ag.add_argument(
'--reads-lower-threshold',
type=int,
help="""Scale down the reads with --decrease-reads-with if the
currently consumed read units is as low as this
percentage (default: 30)""")
r_scaling_ag.add_argument(
'--increase-reads-with',
type=int,
help="""How much should we increase the read units with?
(default: 50, max: 100 if --increase-reads-unit = percent)""")
r_scaling_ag.add_argument(
'--decrease-reads-with',
type=int,
help="""How much should we decrease the read units with?
(default: 50)""")
r_scaling_ag.add_argument(
'--increase-reads-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
r_scaling_ag.add_argument(
'--decrease-reads-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
r_scaling_ag.add_argument(
'--min-provisioned-reads',
type=int,
help="""Minimum number of provisioned reads""")
r_scaling_ag.add_argument(
'--max-provisioned-reads',
type=int,
help="""Maximum number of provisioned reads""")
r_scaling_ag.add_argument(
'--num-read-checks-before-scale-down',
type=int,
help="""Number of consecutive checks that must meet criteria
before a scale down event occurs""")
r_scaling_ag.add_argument(
'--num-read-checks-reset-percent',
type=int,
help="""Percentage Value that will cause the num_read_checks_before
scale_down var to reset back to 0""")
w_scaling_ag = parser.add_argument_group('Write units scaling properties')
w_scaling_ag.add_argument(
'--writes-upper-threshold',
type=int,
help="""Scale up the writes with --increase-writes-with
if the currently consumed write units reaches this
many percent (default: 90)""")
w_scaling_ag.add_argument(
'--throttled-writes-upper-threshold',
type=int,
help="""Scale up the reads with --increase-writes-with if
the count of throttled write events exceeds this
count (default: 0)""")
w_scaling_ag.add_argument(
'--writes-lower-threshold',
type=int,
help="""Scale down the writes with --decrease-writes-with
if the currently consumed write units is as low as this
percentage (default: 30)""")
w_scaling_ag.add_argument(
'--increase-writes-with',
type=int,
help="""How much should we increase the write units with?
(default: 50,
max: 100 if --increase-writes-unit = 'percent')""")
w_scaling_ag.add_argument(
'--decrease-writes-with',
type=int,
help="""How much should we decrease the write units with?
(default: 50)""")
w_scaling_ag.add_argument(
'--increase-writes-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
w_scaling_ag.add_argument(
'--decrease-writes-unit',
type=str,
help='Do you want to scale in percent or units? (default: percent)')
w_scaling_ag.add_argument(
'--min-provisioned-writes',
type=int,
help="""Minimum number of provisioned writes""")
w_scaling_ag.add_argument(
'--max-provisioned-writes',
type=int,
help="""Maximum number of provisioned writes""")
w_scaling_ag.add_argument(
'--num-write-checks-before-scale-down',
type=int,
help="""Number of consecutive checks that must meet criteria
before a scale down event occurs""")
w_scaling_ag.add_argument(
'--num-write-checks-reset-percent',
type=int,
help="""Percentage Value that will cause the num_write_checks_before
scale_down var to reset back to 0""")
args = parser.parse_args()
# Print the version and quit
if args.version:
# Read the dynamic-dynamodb.conf configuration file
internal_config_file = configparser.RawConfigParser()
internal_config_file.optionxform = lambda option: option
internal_config_file.read(
os.path.abspath(
os.path.join(
os.path.dirname(__file__), '../dynamic-dynamodb.conf')))
print('Dynamic DynamoDB version: {0}'.format(
internal_config_file.get('general', 'version')))
sys.exit(0)
# Replace any new values in the configuration
configuration = {}
for arg in args.__dict__:
if args.__dict__.get(arg) is not None:
configuration[arg] = args.__dict__.get(arg)
return configuration
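# Minimal usage sketch (assumed defaults; the daemon merges these CLI values
# with values read from the --config file elsewhere):
#     options = {'check_interval': 300, 'region': 'us-east-1'}
#     options.update(parse())  # CLI flags override the defaults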
|
# -*- coding: utf-8 -*-
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
import json
from time import sleep, time
from pprint import pprint
from itertools import cycle
from .storage import nodes, api_total
#from .proxy import Proxy
class Http():
http = Session()
proxies = None
class RpcClient(Http):
""" Simple Steem JSON-RPC API
This class serves as an abstraction layer for easy use of the Steem API.
rpc = RpcClient(nodes=nodes) or rpc = RpcClient()
Args:
nodes (list): A list of Steem HTTP RPC nodes to connect to.
Any call available to a node can be issued through the instance:
rpc.call('command', *parameters)
"""
RPS_DELAY = 0.10 # minimum delay between requests, in seconds
last_request = 0.0
headers = {'User-Agent': 'thallid', 'content-type': 'application/json'}
def __init__(self, report=False, **kwargs):
self.api_total = api_total
self.report = report
self.PROXY = kwargs.get("PROXY", False)
if self.PROXY: self.proxies = Proxy()
self.nodes = cycle(kwargs.get("nodes", nodes)) # Cycle through the available nodes
self.url = next(self.nodes)
self.num_retries = kwargs.get("num_retries", 3) # Number of connection attempts per node
adapter = HTTPAdapter(max_retries=self.num_retries)
for node in nodes:
self.http.mount(node, adapter)
def get_response(self, payload):
data = json.dumps(payload, ensure_ascii=False).encode('utf8')
while True:
n = 1
proxies = self.proxies.get_http() if self.PROXY else None
while n < self.num_retries:
try:
# Rate limiting: enforce a minimum delay between requests
delay = self.RPS_DELAY - (time() - self.last_request)
if delay > 0: sleep(delay)
#response = self.http.post(self.url, data=data, headers=self.headers, proxies=proxies, auth=auth)
response = self.http.post(self.url, data=data, headers=self.headers, proxies=proxies, timeout=30)
self.last_request = time()
if response.status_code == 503:
proxies = self.proxies.new_http() if self.PROXY else None # next proxy
print('new proxy', proxies)
else:
return response
#except ConnectionError as ce:
except:
#print('ce', ce)
sleeptime = (n - 1) * 2
if self.report:
print("Lost connection to node during rpcconnect(): %s (%d/%d) " % (self.url, n, self.num_retries))
print("Retrying in %d seconds" % sleeptime)
sleep(sleeptime)
n += 1
self.url = next(self.nodes) # next node
print("Trying to connect to node %s" % self.url, 'error in get_response rpc_client', proxies)
return False
def call(self, name, *params, **kwargs):
# Determine which API group the given method name belongs to
api = self.api_total[name]
#method = kwargs.get('method', 'condenser_api.') #steem
method = kwargs.get('method', 'call')
parameters = kwargs.get('params', [api, name, params])
#payload = {"method": method + name, "params": parameters, "id": 1, "jsonrpc": '2.0'} #steem
payload = {"method": method, "params": parameters, "id": 1, "jsonrpc": '2.0'}
result = None
n = 1
while n < self.num_retries:
response = self.get_response(payload)
if response:
if response.status_code == 200:
try:
res = response.json()
if 'error' in res:
if self.report:
#pprint(res["error"]["message"])
print('ERROR IN RES', res["error"]["message"])
else:
result = res["result"]
break
except:
print('ERROR JSON', response)
#elif response.status_code == 503:
# proxies = self.proxies.new_http() if self.PROXY else None # next proxy
# print('new proxy', proxies)
else:
if self.report:
print(n, 'ERROR status_code', response.status_code, response.text)
else:
print('not connection to node', self.url)
print('response', response)
n += 1
self.url = next(self.nodes) # next node
sleep(n * 2)
print("Trying to connect to node %s" % self.url, 'for method', name)
return result
#----- main -----
if __name__ == '__main__':
pass
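# Minimal usage sketch (hypothetical method name; any key defined in
# api_total from .storage can be called the same way):
#     rpc = RpcClient(report=True)
#     result = rpc.call('get_dynamic_global_properties')
#     pprint(result)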
|
import json
import pathlib
import sys
import time
from typing import Sequence
import bentoml
from bentoml.adapters import (
DataframeInput,
FileInput,
ImageInput,
JsonInput,
MultiImageInput,
)
from bentoml.frameworks.sklearn import SklearnModelArtifact
from bentoml.handlers import DataframeHandler # deprecated
from bentoml.service.artifacts.pickle import PickleArtifact
from bentoml.types import InferenceResult, InferenceTask
@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([PickleArtifact("model"), SklearnModelArtifact('sk_model')])
class ExampleService(bentoml.BentoService):
"""
Example BentoService class made for testing purpose
"""
@bentoml.api(
input=DataframeInput(dtype={"col1": "int"}),
mb_max_latency=1000,
mb_max_batch_size=2000,
batch=True,
)
def predict_dataframe(self, df):
return self.artifacts.model.predict_dataframe(df)
@bentoml.api(DataframeHandler, dtype={"col1": "int"}, batch=True) # deprecated
def predict_dataframe_v1(self, df):
return self.artifacts.model.predict_dataframe(df)
@bentoml.api(
input=MultiImageInput(input_names=('original', 'compared')), batch=True
)
def predict_multi_images(self, originals, compareds):
return self.artifacts.model.predict_multi_images(originals, compareds)
@bentoml.api(input=ImageInput(), batch=True)
def predict_image(self, images):
return self.artifacts.model.predict_image(images)
@bentoml.api(
input=JsonInput(), mb_max_latency=1000, mb_max_batch_size=2000, batch=True,
)
def predict_with_sklearn(self, jsons):
return self.artifacts.sk_model.predict(jsons)
@bentoml.api(input=FileInput(), batch=True)
def predict_file(self, files):
return self.artifacts.model.predict_file(files)
@bentoml.api(input=JsonInput(), batch=True)
def predict_json(self, input_datas):
return self.artifacts.model.predict_json(input_datas)
@bentoml.api(input=JsonInput(), batch=True)
def predict_strict_json(self, input_datas, tasks: Sequence[InferenceTask] = None):
filtered_jsons = []
for j, t in zip(input_datas, tasks):
if t.http_headers.content_type != "application/json":
t.discard(http_status=400, err_msg="application/json only")
else:
filtered_jsons.append(j)
return self.artifacts.model.predict_json(filtered_jsons)
@bentoml.api(input=JsonInput(), batch=True)
def predict_direct_json(self, input_datas, tasks: Sequence[InferenceTask] = None):
filtered_jsons = []
for j, t in zip(input_datas, tasks):
if t.http_headers.content_type != "application/json":
t.discard(http_status=400, err_msg="application/json only")
else:
filtered_jsons.append(j)
rets = self.artifacts.model.predict_json(filtered_jsons)
return [
InferenceResult(http_status=200, data=json.dumps(result)) for result in rets
]
@bentoml.api(input=JsonInput(), mb_max_latency=10000 * 1000, batch=True)
def echo_with_delay(self, input_datas):
data = input_datas[0]
time.sleep(data['b'] + data['a'] * len(input_datas))
return input_datas
if __name__ == "__main__":
artifacts_path = sys.argv[1]
bento_dist_path = sys.argv[2]
service = ExampleService()
service.artifacts.load_all(artifacts_path)
pathlib.Path(bento_dist_path).mkdir(parents=True, exist_ok=True)
service.save_to_dir(bento_dist_path)
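# Once saved, the bundle in bento_dist_path can be served or containerized with the
# BentoML CLI; the exact invocation (e.g. "bentoml serve <bento_dist_path>") is an
# assumption and depends on the installed BentoML version.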
|
from PIL import Image
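# Assemble the nine rendered frames into an animated GIF
# (duration=100 ms per frame, loop=0 makes the GIF loop forever).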
images = []
for i in range(9):
images.append(Image.open(f"../examples/lf/results/render_0{i}_{i}.0.png"))
images[0].save("../examples/lf/out.gif", save_all=True, append_images=images[1:], duration=100, loop=0)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import FieldTracker
from waldur_core.core import models as core_models
from waldur_core.structure import models as structure_models
class VirtualMachineMixin(models.Model):
class Meta:
abstract = True
guest_os = models.CharField(
max_length=50,
help_text=_(
'Defines the valid guest operating system '
'types used for configuring a virtual machine'
),
)
cores = models.PositiveSmallIntegerField(
default=0, help_text=_('Number of cores in a VM')
)
cores_per_socket = models.PositiveSmallIntegerField(
default=1, help_text=_('Number of cores per socket in a VM')
)
ram = models.PositiveIntegerField(
default=0, help_text=_('Memory size in MiB'), verbose_name=_('RAM')
)
disk = models.PositiveIntegerField(default=0, help_text=_('Disk size in MiB'))
class VirtualMachine(
VirtualMachineMixin, core_models.RuntimeStateMixin, structure_models.BaseResource
):
class RuntimeStates:
POWERED_OFF = 'POWERED_OFF'
POWERED_ON = 'POWERED_ON'
SUSPENDED = 'SUSPENDED'
CHOICES = (
(POWERED_OFF, 'Powered off'),
(POWERED_ON, 'Powered on'),
(SUSPENDED, 'Suspended'),
)
class GuestPowerStates:
RUNNING = 'RUNNING'
SHUTTING_DOWN = 'SHUTTING_DOWN'
RESETTING = 'RESETTING'
STANDBY = 'STANDBY'
NOT_RUNNING = 'NOT_RUNNING'
UNAVAILABLE = 'UNAVAILABLE'
CHOICES = (
(RUNNING, 'Running'),
(SHUTTING_DOWN, 'Shutting down'),
(RESETTING, 'Resetting'),
(STANDBY, 'Standby'),
(NOT_RUNNING, 'Not running'),
(UNAVAILABLE, 'Unavailable'),
)
class ToolsStates:
STARTING = 'STARTING'
RUNNING = 'RUNNING'
NOT_RUNNING = 'NOT_RUNNING'
CHOICES = (
(STARTING, 'Starting'),
(RUNNING, 'Running'),
(NOT_RUNNING, 'Not running'),
)
template = models.ForeignKey('Template', null=True, on_delete=models.SET_NULL)
cluster = models.ForeignKey('Cluster', null=True, on_delete=models.SET_NULL)
datastore = models.ForeignKey('Datastore', null=True, on_delete=models.SET_NULL)
folder = models.ForeignKey('Folder', null=True, on_delete=models.SET_NULL)
networks = models.ManyToManyField('Network', blank=True)
guest_power_enabled = models.BooleanField(
default=False,
help_text='Flag indicating if the virtual machine is ready to process soft power operations.',
)
guest_power_state = models.CharField(
'The power state of the guest operating system.',
max_length=150,
blank=True,
choices=GuestPowerStates.CHOICES,
)
tools_installed = models.BooleanField(default=False)
tools_state = models.CharField(
'Current running status of VMware Tools running in the guest operating system.',
max_length=50,
blank=True,
choices=ToolsStates.CHOICES,
)
tracker = FieldTracker()
@classmethod
def get_backend_fields(cls):
return super(VirtualMachine, cls).get_backend_fields() + (
'runtime_state',
'cores',
'cores_per_socket',
'ram',
'disk',
'tools_installed',
'tools_state',
)
@classmethod
def get_url_name(cls):
return 'vmware-virtual-machine'
@property
def total_disk(self):
return self.disks.aggregate(models.Sum('size'))['size__sum'] or 0
def __str__(self):
return self.name
class Port(core_models.RuntimeStateMixin, structure_models.BaseResource):
vm = models.ForeignKey(on_delete=models.CASCADE, to=VirtualMachine)
network = models.ForeignKey(on_delete=models.CASCADE, to='Network')
mac_address = models.CharField(
max_length=32, blank=True, verbose_name=_('MAC address')
)
@classmethod
def get_backend_fields(cls):
return super(Port, cls).get_backend_fields() + ('name', 'mac_address')
@classmethod
def get_url_name(cls):
return 'vmware-port'
def __str__(self):
return self.name
class Disk(structure_models.BaseResource):
size = models.PositiveIntegerField(help_text=_('Size in MiB'))
vm = models.ForeignKey(
on_delete=models.CASCADE, to=VirtualMachine, related_name='disks'
)
@classmethod
def get_url_name(cls):
return 'vmware-disk'
def __str__(self):
return self.name
@classmethod
def get_backend_fields(cls):
return super(Disk, cls).get_backend_fields() + ('name', 'size')
class Template(
VirtualMachineMixin, core_models.DescribableMixin, structure_models.ServiceProperty
):
created = models.DateTimeField()
modified = models.DateTimeField()
@classmethod
def get_url_name(cls):
return 'vmware-template'
def __str__(self):
return self.name
class Cluster(structure_models.ServiceProperty):
@classmethod
def get_url_name(cls):
return 'vmware-cluster'
def __str__(self):
return '%s / %s' % (self.settings, self.name)
class CustomerCluster(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
cluster = models.ForeignKey('Cluster', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.cluster)
class Meta:
unique_together = ('customer', 'cluster')
class Network(structure_models.ServiceProperty):
type = models.CharField(max_length=255)
@classmethod
def get_url_name(cls):
return 'vmware-network'
def __str__(self):
return '%s / %s' % (self.settings, self.name)
class CustomerNetwork(models.Model):
    # This model allows specifying which networks are allowed for VM provisioning
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
network = models.ForeignKey('Network', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.network)
class Meta:
unique_together = ('customer', 'network')
class CustomerNetworkPair(models.Model):
    # This model allows specifying which networks are allowed when provisioning a NIC on an existing VM
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
network = models.ForeignKey('Network', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.network)
class Meta:
unique_together = ('customer', 'network')
class Datastore(structure_models.ServiceProperty):
type = models.CharField(max_length=255)
capacity = models.PositiveIntegerField(
help_text="Capacity, in MB.", null=True, blank=True
)
free_space = models.PositiveIntegerField(
help_text="Available space, in MB.", null=True, blank=True
)
@classmethod
def get_url_name(cls):
return 'vmware-datastore'
def __str__(self):
return '%s / %s' % (self.settings, self.name)
class CustomerDatastore(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
datastore = models.ForeignKey('Datastore', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.datastore)
class Meta:
unique_together = ('customer', 'datastore')
class Folder(structure_models.ServiceProperty):
def __str__(self):
return '%s / %s' % (self.settings, self.name)
@classmethod
def get_url_name(cls):
return 'vmware-folder'
class CustomerFolder(models.Model):
customer = models.ForeignKey(structure_models.Customer, on_delete=models.CASCADE)
folder = models.ForeignKey('Folder', on_delete=models.CASCADE)
def __str__(self):
return '%s / %s' % (self.customer, self.folder)
class Meta:
unique_together = ('customer', 'folder')
|
# -*- coding: utf-8 -*-
# Meta
__version__ = "0.0.4"
__author__ = 'Rhys Elsmore'
__email__ = 'me@rhys.io'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2014 Rhys Elsmore'
# Module Namespace
from .core import MetricsLogger, GroupMetricsLogger
from .api import timer, increment, sample, measure, unique, group
|
## Copyright 2019 The Rules Protobuf Authors. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
load("//proto:repositories.bzl", "rules_proto_dependencies")
load("//proto:repositories.bzl", "rules_proto_toolchains")
_DEPRECATED_REPOSITORY_RULE_MESSAGE = " ".join([
"{old_rule}() is deprecated.",
"Please import @build_bazel_rules_proto//proto:repositories.bzl and use {new_rule}().",
"See https://github.com/Yannic/rules_proto/issues/6",
])
def proto_import_dependencies():
print(_DEPRECATED_REPOSITORY_RULE_MESSAGE.format(
old_rule = "proto_import_dependencies",
new_rule = "rules_proto_dependencies",
))
rules_proto_dependencies()
def proto_register_toolchains():
print(_DEPRECATED_REPOSITORY_RULE_MESSAGE.format(
old_rule = "proto_register_toolchains",
new_rule = "rules_proto_toolchains",
))
rules_proto_toolchains()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# =============================================================================
from __future__ import division
import math
import unittest
import random
import numpy as np
from singa import tensor
from singa import singa_wrap as singa_api
from singa import autograd
from cuda_helper import gpu_dev, cpu_dev
class TestTensorMethods(unittest.TestCase):
def setUp(self):
self.shape = (2, 3)
self.t = tensor.Tensor(self.shape)
self.s = tensor.Tensor(self.shape)
self.t.set_value(0)
self.s.set_value(0)
def test_tensor_fields(self):
t = self.t
shape = self.shape
self.assertTupleEqual(t.shape, shape)
self.assertEqual(t.shape[0], shape[0])
self.assertEqual(t.shape[1], shape[1])
self.assertEqual(tensor.product(shape), 2 * 3)
self.assertEqual(t.ndim(), 2)
self.assertEqual(t.size(), 2 * 3)
self.assertEqual(t.memsize(), 2 * 3 * tensor.sizeof(tensor.float32))
self.assertFalse(t.is_transpose())
def test_unary_operators(self):
t = self.t
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 0.0)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
t -= 0.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23 - 0.23)
t *= 2.5
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5)
t /= 2
self.assertAlmostEqual(
tensor.to_numpy(t)[0, 0], (1.23 - 0.23) * 2.5 / 2)
def test_binary_operators(self):
t = self.t
t += 3.2
s = self.s
s += 2.1
a = t + s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 + 2.1, 5)
a = t - s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 - 2.1, 5)
a = t * s
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], 3.2 * 2.1, 5)
''' not implemented yet
a = t / s
self.assertAlmostEqual(tensor.to_numpy(a)[0,0], 3.2/2.1, 5)
'''
def test_comparison_operators(self):
t = self.t
t += 3.45
a = t < 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t <= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t > 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = t >= 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = t == 3.45
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.lt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.le(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.gt(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 0)
a = tensor.ge(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
a = tensor.eq(t, 3.45)
self.assertEqual(tensor.to_numpy(a)[0, 0], 1)
def test_tensor_copy(self):
t = tensor.Tensor((2, 3))
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tc = t.copy()
tdc = t.deepcopy()
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 1.23)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
t += 1.23
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tc)[0, 0], 2.46)
self.assertAlmostEqual(tensor.to_numpy(tdc)[0, 0], 1.23)
def test_copy_data(self):
t = self.t
t += 1.23
s = self.s
s += 5.43
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 1.23)
tensor.copy_data_to_from(t, s, 2)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 0], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 1], 5.43, 5)
self.assertAlmostEqual(tensor.to_numpy(t)[0, 2], 1.23)
def test_global_method(self):
t = self.t
t += 12.34
a = tensor.log(t)
self.assertAlmostEqual(tensor.to_numpy(a)[0, 0], math.log(12.34))
def test_random(self):
x = tensor.Tensor((1000,))
x.gaussian(1, 0.01)
self.assertAlmostEqual(tensor.average(x), 1, 3)
def test_radd(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 + x
self.assertEqual(tensor.average(y), 2.)
def test_rsub(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 1 - x
self.assertEqual(tensor.average(y), 0.)
def test_rmul(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 * x
self.assertEqual(tensor.average(y), 2.)
def test_rdiv(self):
x = tensor.Tensor((3,))
x.set_value(1)
y = 2 / x
self.assertEqual(tensor.average(y), 2.)
def matmul_high_dim_helper(self, dev):
configs = [
[(1, 12, 7, 64), (1, 12, 64, 7)],
[(1, 7, 768), (768, 768)],
]
print()
for config in configs:
X = np.random.random(config[0]).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random(config[1]).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
y_t = np.matmul(X, W)
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), y_t, 3)
def test_matmul_high_dim_cpu(self):
self.matmul_high_dim_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_high_dim_gpu(self):
self.matmul_high_dim_helper(gpu_dev)
def test_tensor_inplace_api(self):
""" tensor inplace methods alter internal state and also return self
"""
x = tensor.Tensor((3,))
y = x.set_value(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.uniform(1, 2)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.bernoulli(1)
self.assertTrue(y is x)
x = tensor.Tensor((3,))
y = x.gaussian(1, 2)
self.assertTrue(y is x)
def test_numpy_convert(self):
        a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=int)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0)
a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.float32)
t = tensor.from_numpy(a)
b = tensor.to_numpy(t)
self.assertEqual(np.sum(a - b), 0.)
def test_transpose(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
A1 = np.transpose(a)
tA1 = tensor.transpose(ta)
TA1 = tensor.to_numpy(tA1)
A2 = np.transpose(a, [0, 2, 1])
tA2 = tensor.transpose(ta, [0, 2, 1])
TA2 = tensor.to_numpy(tA2)
np.testing.assert_array_almost_equal(TA1, A1)
np.testing.assert_array_almost_equal(TA2, A2)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
    def test_gpu_6d_transpose(self, dev=gpu_dev):
        s0 = (2, 3, 4, 5, 6, 7)
        axes1 = [5, 4, 3, 2, 1, 0]
        s1 = (2, 7, 6, 5, 4, 3)
        s2 = (2, 4, 3, 5, 7, 6)
        a = np.random.random(s1)
        ta = tensor.from_numpy(a)
        ta.to_device(dev)
        ta = tensor.reshape(ta, s1)
        ta = tensor.transpose(ta, axes1)
        ta = tensor.reshape(ta, s2)
        a = np.reshape(a, s1)
        a = np.transpose(a, axes1)
        a = np.reshape(a, s2)
np.testing.assert_array_almost_equal(tensor.to_numpy(ta), a)
def test_einsum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.einsum('kij,kij->kij', a, a)
tres1 = tensor.einsum('kij,kij->kij', ta, ta)
Tres1 = tensor.to_numpy(tres1)
res2 = np.einsum('kij,kih->kjh', a, a)
tres2 = tensor.einsum('kij,kih->kjh', ta, ta)
Tres2 = tensor.to_numpy(tres2)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
self.assertAlmostEqual(np.sum(Tres2 - res2), 0., places=3)
def test_repeat(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
ta_repeat1 = tensor.repeat(ta, 2, axis=None)
a_repeat1 = np.repeat(a, 2, axis=None)
Ta_repeat1 = tensor.to_numpy(ta_repeat1)
ta_repeat2 = tensor.repeat(ta, 4, axis=1)
a_repeat2 = np.repeat(a, 4, axis=1)
Ta_repeat2 = tensor.to_numpy(ta_repeat2)
self.assertAlmostEqual(np.sum(Ta_repeat1 - a_repeat1), 0., places=3)
self.assertAlmostEqual(np.sum(Ta_repeat2 - a_repeat2), 0., places=3)
def test_sum(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
a_sum0 = np.sum(a)
ta_sum0 = tensor.sum(ta)
Ta_sum0 = tensor.to_numpy(ta_sum0)
a_sum1 = np.sum(a, axis=1)
ta_sum1 = tensor.sum(ta, axis=1)
Ta_sum1 = tensor.to_numpy(ta_sum1)
a_sum2 = np.sum(a, axis=2)
ta_sum2 = tensor.sum(ta, axis=2)
Ta_sum2 = tensor.to_numpy(ta_sum2)
self.assertAlmostEqual(np.sum(a_sum0 - Ta_sum0), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum1 - Ta_sum1), 0., places=3)
self.assertAlmostEqual(np.sum(a_sum2 - Ta_sum2), 0., places=3)
def test_tensordot(self):
a = np.array(
[1.1, 1.1, 1.1, 1.1, 1.4, 1.3, 1.1, 1.6, 1.1, 1.1, 1.1, 1.2])
a = np.reshape(a, (2, 3, 2))
ta = tensor.from_numpy(a)
res1 = np.tensordot(a, a, axes=1)
tres1 = tensor.tensordot(ta, ta, axes=1)
Tres1 = tensor.to_numpy(tres1)
self.assertAlmostEqual(np.sum(Tres1 - res1), 0., places=3)
np.testing.assert_array_almost_equal(Tres1, res1)
res2 = np.tensordot(a, a, axes=([0, 1], [2, 1]))
tres2 = tensor.tensordot(ta, ta, axes=([0, 1], [2, 1]))
np.testing.assert_array_almost_equal(tensor.to_numpy(tres2), res2)
def test_reshape(self):
a = np.array([[[1.1, 1.1, 1.4], [1.1, 1.1, 1.1]],
[[1.1, 1.1, 1.3], [1.6, 1.1, 1.2]]])
ta = tensor.from_numpy(a)
tb = tensor.reshape(ta, [2, 6])
self.assertAlmostEqual(tb.shape[0], 2., places=3)
self.assertAlmostEqual(tb.shape[1], 6., places=3)
np.testing.assert_array_almost_equal(tensor.to_numpy(tb),
a.reshape((2, 6)))
def test_transpose_then_reshape(self):
a = np.array([[[1.1, 1.1], [1.1, 1.1], [1.4, 1.3]],
[[1.1, 1.6], [1.1, 1.1], [1.1, 1.2]]])
TRANSPOSE_AXES = (2, 0, 1)
RESHAPE_DIMS = (2, 6)
ta = tensor.from_numpy(a)
ta = ta.transpose(TRANSPOSE_AXES)
ta = ta.reshape(RESHAPE_DIMS)
np.testing.assert_array_almost_equal(
tensor.to_numpy(ta),
np.reshape(a.transpose(TRANSPOSE_AXES), RESHAPE_DIMS))
def _concatenate_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
np3 = np.concatenate((np1, np2), axis=3)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.Tensor(device=dev, data=np2)
t3 = tensor.concatenate((t1, t2), 3)
np.testing.assert_array_almost_equal(tensor.to_numpy(t3), np3)
def test_concatenate_cpu(self):
self._concatenate_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_concatenate_gpu(self):
self._concatenate_helper(gpu_dev)
def _subscription_helper(self, dev):
np1 = np.random.random((5, 5, 5, 5)).astype(np.float32)
sg_tensor = tensor.Tensor(device=dev, data=np1)
sg_tensor_ret = sg_tensor[1:3, :, 1:, :-1]
np.testing.assert_array_almost_equal((tensor.to_numpy(sg_tensor_ret)),
np1[1:3, :, 1:, :-1])
def test_subscription_cpu(self):
self._subscription_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_subscription_gpu(self):
self._subscription_helper(gpu_dev)
def _ceil_helper(self, dev):
np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
np1 = np1 * 10
np2 = np.ceil(np1)
t1 = tensor.Tensor(device=dev, data=np1)
t2 = tensor.ceil(t1)
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np2)
def test_ceil_cpu(self):
self._ceil_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_ceil_gpu(self):
self._ceil_helper(gpu_dev)
def _astype_helper(self, dev):
shape1 = [2, 3]
shape2 = [3, 2]
np_flt = np.random.random(shape1).astype(np.float32)
np_flt = np_flt * 10 - 5
np_int = np_flt.astype(np.int32)
np_flt2 = np_int.astype(np.float32)
t2 = tensor.Tensor(device=dev, data=np_flt)
t2 = t2.as_type('int')
np.testing.assert_array_almost_equal(tensor.to_numpy(t2), np_int)
t1 = t2.reshape(shape2)
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_int.reshape(shape2))
t1 = t1.as_type('float')
np.testing.assert_array_almost_equal(tensor.to_numpy(t1),
np_flt2.reshape(shape2))
def test_astype_cpu(self):
self._astype_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_astype_gpu(self):
self._astype_helper(gpu_dev)
def _3d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 3).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 3, 4).astype(np.float32)
np_x2 = np.random.randn(2, 4, 5).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_3d_matmul_cpu(self):
self._3d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_3d_matmul_gpu(self):
self._3d_matmul_helper(gpu_dev)
def _4d_matmul_helper(self, dev):
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 256).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
np_x1 = np.random.randn(2, 12, 256, 64).astype(np.float32)
np_x2 = np.random.randn(2, 12, 64, 1024).astype(np.float32)
x1 = tensor.from_numpy(np_x1)
x1.to_device(dev)
x2 = tensor.from_numpy(np_x2)
x2.to_device(dev)
y = autograd.matmul(x1, x2)
np_y = np.matmul(np_x1, np_x2)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), np_y)
def test_4d_matmul_cpu(self):
self._4d_matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_4d_matmul_gpu(self):
self._4d_matmul_helper(gpu_dev)
def _matmul_transpose_helper(self, dev):
X = np.random.random((1, 256, 12, 64)).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
W = np.random.random((1, 256, 12, 64)).astype(np.float32)
w = tensor.from_numpy(W)
w.to_device(dev)
X = np.transpose(X, (0, 2, 1, 3))
W = np.transpose(W, (0, 2, 1, 3))
W = np.transpose(W, (0, 1, 3, 2))
Y = np.matmul(X, W)
x = autograd.transpose(x, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 2, 1, 3))
w = autograd.transpose(w, (0, 1, 3, 2))
y = autograd.matmul(x, w)
np.testing.assert_array_almost_equal(tensor.to_numpy(x), X)
np.testing.assert_array_almost_equal(tensor.to_numpy(w), W)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), Y)
def test_matmul_transpose_cpu(self):
self._matmul_transpose_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_transpose_gpu(self):
self._matmul_transpose_helper(gpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_gaussian_gpu(self, dev=gpu_dev):
x = tensor.Tensor((3, 5, 3, 5), device=dev)
x.gaussian(0, 1)
x = tensor.Tensor((4, 5, 3, 2), device=dev)
x.gaussian(0, 1)
def _kfloat32_int(self, dev=gpu_dev):
np.random.seed(0)
x_val = np.random.random((2, 3)).astype(np.float32) * 10
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = np.random.random((1,))[0] * 100
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kfloat32_int_gpu(self):
self._kfloat32_int(gpu_dev)
def test_kfloat32_int_cpu(self):
self._kfloat32_int(cpu_dev)
def _kint_float(self, dev=gpu_dev):
np.random.seed(0)
x_val = np.random.randint(0, 10, (2, 3))
x = tensor.from_numpy(x_val)
x.to_device(dev)
scalar = random.random() * 100
y = x + scalar
self.assertEqual(y.dtype, tensor.float32)
np.testing.assert_array_almost_equal(tensor.to_numpy(y), x_val + scalar, 5)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_float_gpu(self):
self._kint_float(gpu_dev)
def test_kint_float_cpu(self):
self._kint_float(cpu_dev)
def _kint_kint(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
[[-11, 9, 4, -15, 14], [18, 11, -1, -10, 10],
[-4, 12, 2, 9, 3], [7, 0, 17, 1, 4]],
[[18, -13, -12, 9, -11], [19, -4, -7, 19, 14],
[18, 9, -8, 19, -2], [8, 9, -1, 6, 9]]],
dtype=np.int32)
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_cpu(self, dev=cpu_dev):
self._kint_kint(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_gpu(self, dev=gpu_dev):
self._kint_kint(gpu_dev)
def _kint_kint_bc(self, dev=gpu_dev):
a_np = np.array([[[17, 4, 9, 22, 18], [-9, 9, -1, -1, 4],
[1, 14, 7, 1, 4], [3, 14, -2, 3, -8]],
[[-25, 6, 8, -7, 22], [-14, 0, -1, 15, 14],
[1, 3, -8, -19, -3], [1, 12, 12, -3, -3]],
[[-10, -14, -17, 19, -5], [-4, -12, 7, -16, -2],
[-8, 3, -5, -11, 0], [4, 0, 3, -6, -3]]],
dtype=np.int32)
b_np = np.array([[-6, -3, -8, -17, 1], [-4, -16, 4, -9, 0],
[7, 1, 11, -12, 4], [-6, -8, -5, -3, 0]],
dtype=np.int32)
ta = tensor.from_numpy(a_np)
tb = tensor.from_numpy(b_np)
ta.to_device(dev)
tb.to_device(dev)
y = ta - tb
np.testing.assert_array_almost_equal(tensor.to_numpy(y), a_np - b_np)
def test_kint_kint_bc_cpu(self, dev=cpu_dev):
self._kint_kint_bc(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_kint_kint_bc_gpu(self, dev=gpu_dev):
self._kint_kint_bc(gpu_dev)
if __name__ == '__main__':
unittest.main()
|
import json
from rest_framework.test import APITestCase
from django.urls import reverse
from rest_framework import status
from django.contrib.auth import get_user_model
from authors.apps.articles.models import Articles
from authors.apps.profiles.models import Profile
class TestGetEndpoint(APITestCase):
def setUp(self):
""" Prepares table for tests """
self.token = self.get_user_token()
self.slug = "life_love_death"
self.title = "Life Love and Death"
self.description = "What is life?"
self.body = "This is the real life body."
self.tagList = "life,love,death"
self.author = 'TestAuthor'
self.article = Articles(
slug=self.slug,
title=self.title,
description=self.description,
body=self.body,
tagList=self.tagList,
author=Profile.objects.get(username=self.author))
self.article.save()
def test_get_all_articles(self):
"""
This tests getting all articles successfully
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_successfully_not_getting_articles_if_token_not_used(self):
"""
Unauthorized error returned if no token is passed in
"""
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_get_article_id(self):
"""
Tests the pk of the article is true
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertIn(b"1", response.content)
def test_articles_are_paginated(self):
"""
This tests if the returned articles are paginated
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
# this checks the number of articles in the database
self.assertIn(b"1", response.content)
        # "next" is null since there is only one article posted
        self.assertIn(b"null", response.content)
        # "previous" is also null since only one article has been posted
        # (the page_size setting holds ten articles per page)
        self.assertIn(b"null", response.content)  # previous
def test_get_specific_article(self):
"""
This gets a specific article
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articleSpecific', kwargs={'slug': 'life_love_death'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_getting_and_checking_articles_content(self):
"""
This checks if the right content of an article is returned
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
# checks if the body passed during posting is the one returned
self.assertIn(b"This is the real life body.", response.content)
# checks if id returned is 1
self.assertIn(b"1", response.content)
def test_wrong_request(self):
"""
Checks request for a non existing article
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse(
'articleSpecific', kwargs={
'slug': 'life_love_death_live'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response.render()
self.assertIn(b"Article does not exist", response.content)
def get_user_token(self):
user = {
"user": {
"username": "TestAuthor",
"email": "test_user@email.com",
"password": "test123user#Password"
}
}
response = self.client.post(
reverse('register'), data=user, format='json')
user = get_user_model()
user = user.objects.get(username="TestAuthor")
user.is_active = True
user.save()
response.render()
data = response.content
token = json.loads(data.decode('utf-8'))['user']['token']
return token
|
from abc import ABCMeta, abstractmethod
from frozendict import frozendict
class ResourceManager(metaclass=ABCMeta):
def __init__(self):
self.wv_filename = ""
self.parsed_filename = ""
@abstractmethod
def write(self):
"""
parse the raw file/files and write the data to disk
:return:
"""
pass
@abstractmethod
def read(self):
"""
read the parsed file from disk
:return:
"""
pass
def read_hashable(self):
return frozendict(self.read())
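# A minimal sketch of a concrete subclass, purely illustrative: the file names, the
# JSON on-disk format and the toy "parsing" below are assumptions, not part of the original API.
import json
class JsonResourceManager(ResourceManager):
    def __init__(self, raw_filename, parsed_filename):
        super().__init__()
        self.wv_filename = raw_filename
        self.parsed_filename = parsed_filename
    def write(self):
        # "parse" the raw file (here: record each line's length) and persist it as JSON
        with open(self.wv_filename) as raw, open(self.parsed_filename, "w") as out:
            json.dump({line.strip(): len(line) for line in raw}, out)
    def read(self):
        with open(self.parsed_filename) as f:
            return json.load(f)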
|
import torch.utils.data as data
import os
import os.path
from numpy.random import randint
from ops.io import load_proposal_file
from transforms import *
from ops.utils import temporal_iou
class SSNInstance:
def __init__(
self,
start_frame,
end_frame,
video_frame_count,
fps=1,
label=None,
best_iou=None,
overlap_self=None,
):
self.start_frame = start_frame
self.end_frame = min(end_frame, video_frame_count)
self._label = label
self.fps = fps
self.coverage = (end_frame - start_frame) / video_frame_count
self.best_iou = best_iou
self.overlap_self = overlap_self
self.loc_reg = None
self.size_reg = None
def compute_regression_targets(self, gt_list, fg_thresh):
if self.best_iou < fg_thresh:
# background proposals do not need this
return
# find the groundtruth instance with the highest IOU
ious = [
temporal_iou(
(self.start_frame, self.end_frame), (gt.start_frame, gt.end_frame)
)
for gt in gt_list
]
best_gt_id = np.argmax(ious)
best_gt = gt_list[best_gt_id]
prop_center = (self.start_frame + self.end_frame) / 2
gt_center = (best_gt.start_frame + best_gt.end_frame) / 2
prop_size = self.end_frame - self.start_frame + 1
gt_size = best_gt.end_frame - best_gt.start_frame + 1
# get regression target:
        # (1) center shift proportional to the proposal duration
        # (2) logarithm of the ground-truth duration over the proposal duration
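        # worked example (illustrative numbers): a proposal spanning frames [10, 30]
        # matched to a ground truth [15, 35] gives prop_center = 20, gt_center = 25,
        # prop_size = gt_size = 21, so loc_reg = 5 / 21 (about 0.238) and
        # size_reg = log(21 / 21) = 0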
self.loc_reg = (gt_center - prop_center) / prop_size
try:
self.size_reg = math.log(gt_size / prop_size)
except:
print((gt_size, prop_size, self.start_frame, self.end_frame))
raise
@property
def start_time(self):
return self.start_frame / self.fps
@property
def end_time(self):
return self.end_frame / self.fps
@property
def label(self):
return self._label if self._label is not None else -1
@property
def regression_targets(self):
return [self.loc_reg, self.size_reg] if self.loc_reg is not None else [0, 0]
class SSNVideoRecord:
def __init__(self, prop_record):
self._data = prop_record
frame_count = int(self._data[1])
# build instance record
self.gt = [
SSNInstance(
int(x[1]), int(x[2]), frame_count, label=int(x[0]), best_iou=1.0
)
for x in self._data[2]
if int(x[2]) > int(x[1])
]
self.gt = list([x for x in self.gt if x.start_frame < frame_count])
self.proposals = [
SSNInstance(
int(x[3]),
int(x[4]),
frame_count,
label=int(x[0]),
best_iou=float(x[1]),
overlap_self=float(x[2]),
)
for x in self._data[3]
if int(x[4]) > int(x[3])
]
self.proposals = list(
[x for x in self.proposals if x.start_frame < frame_count]
)
@property
def id(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
def get_fg(self, fg_thresh, with_gt=True):
fg = [p for p in self.proposals if p.best_iou > fg_thresh]
if with_gt:
fg.extend(self.gt)
for x in fg:
x.compute_regression_targets(self.gt, fg_thresh)
return fg
def get_negatives(
self,
incomplete_iou_thresh,
bg_iou_thresh,
bg_coverage_thresh=0.01,
incomplete_overlap_thresh=0.7,
):
tag = [0] * len(self.proposals)
incomplete_props = []
background_props = []
for i in range(len(tag)):
if (
self.proposals[i].best_iou < incomplete_iou_thresh
and self.proposals[i].overlap_self > incomplete_overlap_thresh
):
tag[i] = 1 # incomplete
incomplete_props.append(self.proposals[i])
for i in range(len(tag)):
if (
tag[i] == 0
and self.proposals[i].best_iou < bg_iou_thresh
and self.proposals[i].coverage > bg_coverage_thresh
):
background_props.append(self.proposals[i])
return incomplete_props, background_props
class SSNDataSet(data.Dataset):
def __init__(
self,
root_path,
prop_file=None,
body_seg=5,
aug_seg=2,
video_centric=True,
new_length=1,
modality="RGB",
image_tmpl="img_{:05d}.jpg",
transform=None,
random_shift=True,
test_mode=False,
prop_per_video=8,
fg_ratio=1,
bg_ratio=1,
incomplete_ratio=6,
fg_iou_thresh=0.7,
bg_iou_thresh=0.01,
incomplete_iou_thresh=0.3,
bg_coverage_thresh=0.02,
incomplete_overlap_thresh=0.7,
gt_as_fg=True,
reg_stats=None,
test_interval=6,
verbose=True,
exclude_empty=True,
epoch_multiplier=1,
):
self.root_path = root_path
self.prop_file = prop_file
self.verbose = verbose
self.body_seg = body_seg
self.aug_seg = aug_seg
self.video_centric = video_centric
self.exclude_empty = exclude_empty
self.epoch_multiplier = epoch_multiplier
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.test_interval = test_interval
self.fg_iou_thresh = fg_iou_thresh
self.incomplete_iou_thresh = incomplete_iou_thresh
self.bg_iou_thresh = bg_iou_thresh
self.bg_coverage_thresh = bg_coverage_thresh
self.incomplete_overlap_thresh = incomplete_overlap_thresh
self.starting_ratio = 0.5
self.ending_ratio = 0.5
self.gt_as_fg = gt_as_fg
denum = fg_ratio + bg_ratio + incomplete_ratio
self.fg_per_video = int(prop_per_video * (fg_ratio / denum))
self.bg_per_video = int(prop_per_video * (bg_ratio / denum))
self.incomplete_per_video = (
prop_per_video - self.fg_per_video - self.bg_per_video
)
self._parse_prop_file(stats=reg_stats)
def _load_image(self, directory, idx):
if self.modality == "RGB" or self.modality == "RGBDiff":
return [
Image.open(
os.path.join(directory, self.image_tmpl.format(idx))
).convert("RGB")
]
elif self.modality == "Flow":
x_img = Image.open(
os.path.join(directory, self.image_tmpl.format("x", idx))
).convert("L")
y_img = Image.open(
os.path.join(directory, self.image_tmpl.format("y", idx))
).convert("L")
return [x_img, y_img]
def _parse_prop_file(self, stats=None):
prop_info = load_proposal_file(self.prop_file)
self.video_list = [SSNVideoRecord(p) for p in prop_info]
if self.exclude_empty:
self.video_list = list([x for x in self.video_list if len(x.gt) > 0])
self.video_dict = {v.id: v for v in self.video_list}
# construct three pools:
# 1. Foreground
# 2. Background
# 3. Incomplete
self.fg_pool = []
self.bg_pool = []
self.incomp_pool = []
for v in self.video_list:
self.fg_pool.extend(
[(v.id, prop) for prop in v.get_fg(self.fg_iou_thresh, self.gt_as_fg)]
)
incomp, bg = v.get_negatives(
self.incomplete_iou_thresh,
self.bg_iou_thresh,
self.bg_coverage_thresh,
self.incomplete_overlap_thresh,
)
self.incomp_pool.extend([(v.id, prop) for prop in incomp])
self.bg_pool.extend([(v.id, prop) for prop in bg])
if stats is None:
self._compute_regresssion_stats()
else:
self.stats = stats
if self.verbose:
print(
(
"""
SSNDataset: Proposal file {prop_file} parsed.
There are {pnum} usable proposals from {vnum} videos.
{fnum} foreground proposals
{inum} incomplete_proposals
{bnum} background_proposals
Sampling config:
FG/BG/INC: {fr}/{br}/{ir}
Video Centric: {vc}
Epoch size multiplier: {em}
Regression Stats:
Location: mean {stats[0][0]:.05f} std {stats[1][0]:.05f}
Duration: mean {stats[0][1]:.05f} std {stats[1][1]:.05f}
""".format(
prop_file=self.prop_file,
pnum=len(self.fg_pool)
+ len(self.bg_pool)
+ len(self.incomp_pool),
fnum=len(self.fg_pool),
inum=len(self.incomp_pool),
bnum=len(self.bg_pool),
fr=self.fg_per_video,
br=self.bg_per_video,
ir=self.incomplete_per_video,
vnum=len(self.video_dict),
vc=self.video_centric,
stats=self.stats,
em=self.epoch_multiplier,
)
)
)
else:
print(
(
"""
SSNDataset: Proposal file {prop_file} parsed.
""".format(
prop_file=self.prop_file
)
)
)
def _video_centric_sampling(self, video):
fg = video.get_fg(self.fg_iou_thresh, self.gt_as_fg)
incomp, bg = video.get_negatives(
self.incomplete_iou_thresh,
self.bg_iou_thresh,
self.bg_coverage_thresh,
self.incomplete_overlap_thresh,
)
def sample_video_proposals(
proposal_type, video_id, video_pool, requested_num, dataset_pool
):
if len(video_pool) == 0:
# if there is nothing in the video pool, go fetch from the dataset pool
return [
(dataset_pool[x], proposal_type)
for x in np.random.choice(
len(dataset_pool), requested_num, replace=False
)
]
else:
replicate = len(video_pool) < requested_num
idx = np.random.choice(
len(video_pool), requested_num, replace=replicate
)
return [((video_id, video_pool[x]), proposal_type) for x in idx]
out_props = []
out_props.extend(
sample_video_proposals(0, video.id, fg, self.fg_per_video, self.fg_pool)
) # sample foreground
out_props.extend(
sample_video_proposals(
1, video.id, incomp, self.incomplete_per_video, self.incomp_pool
)
) # sample incomp.
out_props.extend(
sample_video_proposals(2, video.id, bg, self.bg_per_video, self.bg_pool)
) # sample background
return out_props
def _random_sampling(self):
out_props = []
out_props.extend(
[
(x, 0)
for x in np.random.choice(
self.fg_pool, self.fg_per_video, replace=False
)
]
)
out_props.extend(
[
(x, 1)
for x in np.random.choice(
self.incomp_pool, self.incomplete_per_video, replace=False
)
]
)
out_props.extend(
[
(x, 2)
for x in np.random.choice(
self.bg_pool, self.bg_per_video, replace=False
)
]
)
return out_props
def _sample_indices(self, valid_length, num_seg):
"""
:param record: VideoRecord
:return: list
"""
average_duration = (valid_length + 1) // num_seg
if average_duration > 0:
# normal cases
offsets = np.multiply(list(range(num_seg)), average_duration) + randint(
average_duration, size=num_seg
)
elif valid_length > num_seg:
offsets = np.sort(randint(valid_length, size=num_seg))
else:
offsets = np.zeros((num_seg,))
return offsets
def _get_val_indices(self, valid_length, num_seg):
if valid_length > num_seg:
tick = valid_length / float(num_seg)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(num_seg)])
else:
offsets = np.zeros((num_seg,))
return offsets
def _sample_ssn_indices(self, prop, frame_cnt):
start_frame = prop.start_frame + 1
end_frame = prop.end_frame
duration = end_frame - start_frame + 1
assert duration != 0, (prop.start_frame, prop.end_frame, prop.best_iou)
valid_length = duration - self.new_length
valid_starting = max(1, start_frame - int(duration * self.starting_ratio))
valid_ending = min(
frame_cnt - self.new_length + 1,
end_frame + int(duration * self.ending_ratio),
)
valid_starting_length = start_frame - valid_starting - self.new_length + 1
valid_ending_length = valid_ending - end_frame - self.new_length + 1
starting_scale = (valid_starting_length + self.new_length - 1) / (
duration * self.starting_ratio
)
ending_scale = (valid_ending_length + self.new_length - 1) / (
duration * self.ending_ratio
)
# get starting
starting_offsets = (
self._sample_indices(valid_starting_length, self.aug_seg)
if self.random_shift
else self._get_val_indices(valid_starting_length, self.aug_seg)
) + valid_starting
course_offsets = (
self._sample_indices(valid_length, self.body_seg)
if self.random_shift
else self._get_val_indices(valid_length, self.body_seg)
) + start_frame
ending_offsets = (
self._sample_indices(valid_ending_length, self.aug_seg)
if self.random_shift
else self._get_val_indices(valid_ending_length, self.aug_seg)
) + end_frame
offsets = np.concatenate((starting_offsets, course_offsets, ending_offsets))
stage_split = [
self.aug_seg,
self.aug_seg + self.body_seg,
self.aug_seg * 2 + self.body_seg,
]
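        # e.g. with the constructor defaults body_seg=5 and aug_seg=2 this yields
        # 2 + 5 + 2 = 9 offsets and stage_split = [2, 7, 9]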
return offsets, starting_scale, ending_scale, stage_split
def _load_prop_data(self, prop):
# read frame count
frame_cnt = self.video_dict[prop[0][0]].num_frames
# sample segment indices
prop_indices, starting_scale, ending_scale, stage_split = self._sample_ssn_indices(
prop[0][1], frame_cnt
)
# turn prop into standard format
# get label
if prop[1] == 0:
label = prop[0][1].label
elif prop[1] == 1:
label = prop[0][1].label # incomplete
elif prop[1] == 2:
label = 0 # background
else:
raise ValueError()
frames = []
for idx, seg_ind in enumerate(prop_indices):
p = int(seg_ind)
for x in range(self.new_length):
frames.extend(self._load_image(prop[0][0], min(frame_cnt, p + x)))
# get regression target
if prop[1] == 0:
reg_targets = prop[0][1].regression_targets
reg_targets = (
(reg_targets[0] - self.stats[0][0]) / self.stats[1][0],
(reg_targets[1] - self.stats[0][1]) / self.stats[1][1],
)
else:
reg_targets = (0.0, 0.0)
return (
frames,
label,
reg_targets,
starting_scale,
ending_scale,
stage_split,
prop[1],
)
def _compute_regresssion_stats(self):
if self.verbose:
print("computing regression target normalizing constants")
targets = []
for video in self.video_list:
fg = video.get_fg(self.fg_iou_thresh, False)
for p in fg:
targets.append(list(p.regression_targets))
self.stats = np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))
def get_test_data(self, video, test_interval, gen_batchsize=4):
props = video.proposals
video_id = video.id
frame_cnt = video.num_frames
frame_ticks = (
            np.arange(0, frame_cnt - self.new_length, test_interval, dtype=int) + 1
)
num_sampled_frames = len(frame_ticks)
# avoid empty proposal list
if len(props) == 0:
props.append(SSNInstance(0, frame_cnt - 1, frame_cnt))
# process proposals to subsampled sequences
rel_prop_list = []
proposal_tick_list = []
scaling_list = []
for proposal in props:
rel_prop = proposal.start_frame / frame_cnt, proposal.end_frame / frame_cnt
rel_duration = rel_prop[1] - rel_prop[0]
rel_starting_duration = rel_duration * self.starting_ratio
rel_ending_duration = rel_duration * self.ending_ratio
rel_starting = rel_prop[0] - rel_starting_duration
rel_ending = rel_prop[1] + rel_ending_duration
real_rel_starting = max(0.0, rel_starting)
real_rel_ending = min(1.0, rel_ending)
starting_scaling = (rel_prop[0] - real_rel_starting) / rel_starting_duration
ending_scaling = (real_rel_ending - rel_prop[1]) / rel_ending_duration
proposal_ticks = (
int(real_rel_starting * num_sampled_frames),
int(rel_prop[0] * num_sampled_frames),
int(rel_prop[1] * num_sampled_frames),
int(real_rel_ending * num_sampled_frames),
)
rel_prop_list.append(rel_prop)
proposal_tick_list.append(proposal_ticks)
scaling_list.append((starting_scaling, ending_scaling))
# load frames
        # Since there are many frames per video during testing, instead of returning the
        # decoded frames directly we return a generator that yields them in small batches;
        # this lowers the memory burden and runtime overhead. Setting batchsize=4 fits most cases.
def frame_gen(batchsize):
frames = []
cnt = 0
for idx, seg_ind in enumerate(frame_ticks):
p = int(seg_ind)
for x in range(self.new_length):
frames.extend(self._load_image(video_id, min(frame_cnt, p + x)))
cnt += 1
if cnt % batchsize == 0:
frames = self.transform(frames)
yield frames
frames = []
if len(frames):
frames = self.transform(frames)
yield frames
return (
frame_gen(gen_batchsize),
len(frame_ticks),
torch.from_numpy(np.array(rel_prop_list)),
torch.from_numpy(np.array(proposal_tick_list)),
torch.from_numpy(np.array(scaling_list)),
)
def get_training_data(self, index):
if self.video_centric:
video = self.video_list[index]
props = self._video_centric_sampling(video)
else:
props = self._random_sampling()
out_frames = []
out_prop_len = []
out_prop_scaling = []
out_prop_type = []
out_prop_labels = []
out_prop_reg_targets = []
out_stage_split = []
for idx, p in enumerate(props):
prop_frames, prop_label, reg_targets, starting_scale, ending_scale, stage_split, prop_type = self._load_prop_data(
p
)
processed_frames = self.transform(prop_frames)
out_frames.append(processed_frames)
out_prop_len.append(self.body_seg + 2 * self.aug_seg)
out_prop_scaling.append([starting_scale, ending_scale])
out_prop_labels.append(prop_label)
out_prop_reg_targets.append(reg_targets)
out_prop_type.append(prop_type)
out_stage_split.append(stage_split)
out_prop_len = torch.from_numpy(np.array(out_prop_len))
out_prop_scaling = torch.from_numpy(
np.array(out_prop_scaling, dtype=np.float32)
)
out_prop_labels = torch.from_numpy(np.array(out_prop_labels))
out_prop_reg_targets = torch.from_numpy(
np.array(out_prop_reg_targets, dtype=np.float32)
)
out_prop_type = torch.from_numpy(np.array(out_prop_type))
out_stage_split = torch.from_numpy(np.array(out_stage_split))
out_frames = torch.cat(out_frames)
return (
out_frames,
out_prop_len,
out_prop_scaling,
out_prop_type,
out_prop_labels,
out_prop_reg_targets,
out_stage_split,
)
def get_all_gt(self):
gt_list = []
for video in self.video_list:
vid = video.id
gt_list.extend(
[
[
vid,
x.label - 1,
x.start_frame / video.num_frames,
x.end_frame / video.num_frames,
]
for x in video.gt
]
)
return gt_list
def __getitem__(self, index):
real_index = index % len(self.video_list)
if self.test_mode:
return self.get_test_data(self.video_list[real_index], self.test_interval)
else:
return self.get_training_data(real_index)
def __len__(self):
return len(self.video_list) * self.epoch_multiplier
|
from .routes import app as websockets_routes
|
import json
from server import db
from sqlalchemy.ext import mutable
class JsonEncodedDict(db.TypeDecorator):
impl = db.Text
def process_bind_param(self, value, dialect):
if value is None:
return '{}'
else:
return json.dumps(value)
def process_result_value(self, value, dialect):
if value is None:
return {}
else:
return json.loads(value)
mutable.MutableDict.associate_with(JsonEncodedDict)
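# Illustrative usage sketch (the model and column names are assumptions): a column
# declared with this type stores a plain dict transparently as JSON text, and in-place
# mutations are tracked thanks to the MutableDict association above.
#
#   class ExamplePreferences(db.Model):
#       id = db.Column(db.Integer, primary_key=True)
#       preferences = db.Column(JsonEncodedDict, default=dict)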
user_location_table = db.Table('user_location_table',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), nullable=False),
db.Column('location_id',db.Integer, db.ForeignKey('location.id'), nullable=False),
)
|
import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import scipy.stats
import numpy.ma
import Stats
import Histogram
from cgatReport.Tracker import *
from cpgReport import *
##########################################################################
class replicatedIntervalSummary(cpgTracker):
"""Summary stats of intervals called by the peak finder. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getRow(
"SELECT COUNT(*) as Intervals, round(AVG(length),0) as Mean_length, round(AVG(nprobes),0) as Mean_reads FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalLengths(cpgTracker):
"""Distribution of interval length. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT length FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalPeakValues(cpgTracker):
"""Distribution of maximum interval coverage (the number of reads at peak). """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT peakval FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalAverageValues(cpgTracker):
"""Distribution of average coverage (the average number of reads within the interval) """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT avgval FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
class replicatedIntervalFoldChange(cpgTracker):
"""return fold changes for all intervals. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll(
"SELECT fold FROM %(track)s_replicated_intervals" % locals())
return data
##########################################################################
##########################################################################
##########################################################################
class replicatedIntervalPeakLocation(cpgTracker):
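    """Position of the peak relative to the interval, normalised by interval length
    (0 corresponds to the interval centre, +/-0.5 to the interval boundaries)."""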
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT (PeakCenter - start) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
data2 = self.getValues(
"SELECT (end - PeakCenter) / CAST( Length as FLOAT) - 0.5 FROM %(track)s_replicated_intervals" % locals())
return {"distance": data1 + data2}
##########################################################################
class replicatedIntervalPeakDistance(cpgTracker):
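    """Distance between the peak centre and each interval boundary."""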
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT PeakCenter - start FROM %(track)s_replicated_intervals" % locals())
data2 = self.getValues(
"SELECT end - PeakCenter FROM %(track)s_replicated_intervals" % locals())
return {"distance": data1 + data2}
##########################################################################
##########################################################################
##########################################################################
class replicatedIntervalCpGDensity(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT pCpG FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
##########################################################################
class replicatedIntervalCpGObsExp1(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT CpG_ObsExp1 FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
##########################################################################
class replicatedIntervalCpGObsExp2(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT CpG_ObsExp FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
##########################################################################
class replicatedIntervalGCContent(cpgTracker):
pattern = "(.*)_replicated_composition"
def __call__(self, track, slice=None):
data1 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition" % locals())
data2 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_control" % locals())
data3 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_flanking5" % locals())
data4 = self.getValues(
"SELECT pGC FROM %(track)s_replicated_composition_flanking3" % locals())
return odict(list(zip(("CAPseq composition", "Control composition", "5` Flank Composition", "3` Flank Composition"), (data1, data2, data3, data4))))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-07-04 00:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('library', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='module',
name='professions',
),
migrations.AddField(
model_name='module',
name='profession',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='library.Profession'),
preserve_default=False,
),
]
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
ROOT_URLCONF = 'django_autoconfig.autourlconf'
INSTALLED_APPS = [
'django.contrib.auth',
'nuit',
]
STATIC_URL = '/static/'
STATIC_ROOT = '.static'
from django_autoconfig.autoconfig import configure_settings
configure_settings(globals())
|
"""
Django settings for my_blog project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@7+q1q@_=iniipvuc%nfs)5qauaax2g0cnc1fxzos52t-9ml=m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'sarah.1024z@gmail.com'
EMAIL_HOST_PASSWORD = 'rzan2015'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
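# Hedged usage sketch (not part of the generated settings): with the SMTP
# configuration above, application code can send mail through the configured
# backend; the recipient address below is a placeholder.
# from django.core.mail import send_mail
# send_mail("Subject", "Message body", EMAIL_HOST_USER, ["recipient@example.com"])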
|
"""
Logic for uploading to s3 based on supplied template file and s3 bucket
"""
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import logging
import os
import boto3
import click
import docker
from botocore.config import Config
from samcli.commands.package.exceptions import PackageFailedError
from samcli.lib.package.artifact_exporter import Template
from samcli.lib.package.ecr_uploader import ECRUploader
from samcli.lib.package.code_signer import CodeSigner
from samcli.lib.package.s3_uploader import S3Uploader
from samcli.lib.utils.botoconfig import get_boto_config_with_user_agent
from samcli.yamlhelper import yaml_dump
LOG = logging.getLogger(__name__)
class PackageContext:
MSG_PACKAGED_TEMPLATE_WRITTEN = (
"\nSuccessfully packaged artifacts and wrote output template "
"to file {output_file_name}."
"\n"
"Execute the following command to deploy the packaged template"
"\n"
"sam deploy --template-file {output_file_path} "
"--stack-name <YOUR STACK NAME>"
"\n"
)
def __init__(
self,
template_file,
s3_bucket,
image_repository,
s3_prefix,
kms_key_id,
output_template_file,
use_json,
force_upload,
no_progressbar,
metadata,
region,
profile,
on_deploy=False,
signing_profiles=None,
):
self.template_file = template_file
self.s3_bucket = s3_bucket
self.image_repository = image_repository
self.s3_prefix = s3_prefix
self.kms_key_id = kms_key_id
self.output_template_file = output_template_file
self.use_json = use_json
self.force_upload = force_upload
self.no_progressbar = no_progressbar
self.metadata = metadata
self.region = region
self.profile = profile
self.on_deploy = on_deploy
self.s3_uploader = None
self.code_signer = None
self.signing_profiles = signing_profiles
self.ecr_uploader = None
self.uploader = {}
def __enter__(self):
return self
def __exit__(self, *args):
pass
def run(self):
region_name = self.region if self.region else None
s3_client = boto3.client(
"s3",
config=get_boto_config_with_user_agent(signature_version="s3v4", region_name=region_name),
)
ecr_client = boto3.client("ecr", config=get_boto_config_with_user_agent(region_name=region_name))
docker_client = docker.from_env()
self.s3_uploader = S3Uploader(
s3_client, self.s3_bucket, self.s3_prefix, self.kms_key_id, self.force_upload, self.no_progressbar
)
# attach the given metadata to the artifacts to be uploaded
self.s3_uploader.artifact_metadata = self.metadata
self.ecr_uploader = ECRUploader(docker_client, ecr_client, self.image_repository)
code_signer_client = boto3.client("signer")
self.code_signer = CodeSigner(code_signer_client, self.signing_profiles)
# NOTE(srirammv): move this to its own class.
self.uploader = {"s3": self.s3_uploader, "ecr": self.ecr_uploader}
try:
exported_str = self._export(self.template_file, self.use_json)
self.write_output(self.output_template_file, exported_str)
if self.output_template_file and not self.on_deploy:
msg = self.MSG_PACKAGED_TEMPLATE_WRITTEN.format(
output_file_name=self.output_template_file,
output_file_path=os.path.abspath(self.output_template_file),
)
click.echo(msg)
except OSError as ex:
raise PackageFailedError(template_file=self.template_file, ex=str(ex)) from ex
def _export(self, template_path, use_json):
template = Template(template_path, os.getcwd(), self.uploader, self.code_signer)
exported_template = template.export()
if use_json:
exported_str = json.dumps(exported_template, indent=4, ensure_ascii=False)
else:
exported_str = yaml_dump(exported_template)
return exported_str
def write_output(self, output_file_name, data):
if output_file_name is None:
click.echo(data)
return
with open(output_file_name, "w") as fp:
fp.write(data)
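# Hedged usage sketch: how PackageContext might be driven directly; the bucket
# name, region and template paths below are placeholders, not values defined in
# this module, and real AWS credentials would be required.
# with PackageContext(
#     template_file="template.yaml",
#     s3_bucket="my-artifact-bucket",
#     image_repository=None,
#     s3_prefix=None,
#     kms_key_id=None,
#     output_template_file="packaged.yaml",
#     use_json=False,
#     force_upload=False,
#     no_progressbar=False,
#     metadata=None,
#     region="us-east-1",
#     profile=None,
# ) as package_context:
#     package_context.run()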
|
from model import common
import torch.nn as nn
import torch.nn.init as init
url = {
'r20f64': ''
}
def make_model(args, parent=False):
return VDSR(args)
class VDSR(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(VDSR, self).__init__()
n_resblocks = args.n_resblocks
n_feats = args.n_feats
kernel_size = 3
url_name = 'r{}f{}'.format(n_resblocks, n_feats)
if url_name in url:
self.url = url[url_name]
else:
self.url = None
self.sub_mean = common.MeanShift(args.rgb_range)
self.add_mean = common.MeanShift(args.rgb_range, sign=1)
def basic_block(in_channels, out_channels, act):
return common.BasicBlock(
conv, in_channels, out_channels, kernel_size,
bias=True, bn=False, act=act
)
# define body module
m_body = []
m_body.append(basic_block(args.n_colors, n_feats, nn.ReLU(True)))
for _ in range(n_resblocks - 2):
m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True)))
m_body.append(basic_block(n_feats, args.n_colors, None))
self.body = nn.Sequential(*m_body)
def forward(self, x):
x = self.sub_mean(x)
res = self.body(x)
res += x
x = self.add_mean(res)
return x
# cd ..(src), export PYTHONPATH=`pwd`
# if __name__ == '__main__':
# import torch
# import utility
# from option import args
# torch.manual_seed(args.seed)
# checkpoint = utility.checkpoint(args)
# print(args)
# model = VDSR(args)
# print(model)
|
import PIL.Image
import PIL.ImageColor
import PIL.ImageEnhance
import zeit.cms.repository.folder
import zeit.connector.interfaces
import zeit.content.image.image
import zeit.content.image.interfaces
import zope.app.appsetup.product
import zope.component
import zope.dublincore.interfaces
import zope.interface
import zope.security.proxy
class ImageTransform(object):
zope.interface.implements(zeit.content.image.interfaces.ITransform)
zope.component.adapts(zeit.content.image.interfaces.IImage)
MAXIMUM_IMAGE_SIZE = 5000
def __init__(self, context):
self.context = context
try:
self.image = PIL.Image.open(
zope.security.proxy.removeSecurityProxy(context.open()))
self.image.load()
except IOError:
raise zeit.content.image.interfaces.ImageProcessingError(
"Cannot transform image %s" % context.__name__)
def thumbnail(self, width, height, filter=PIL.Image.ANTIALIAS):
image = self.image.copy()
image.thumbnail((width, height), filter)
return self._construct_image(image)
def resize(self, width=None, height=None, filter=PIL.Image.ANTIALIAS):
if width is None and height is None:
raise TypeError('Need at least one of width and height.')
orig_width, orig_height = self.image.size
if width is None:
width = orig_width * height / orig_height
elif height is None:
height = orig_height * width / orig_width
image = self.image.resize((width, height), filter)
return self._construct_image(image)
def create_variant_image(
self, variant, size=None, fill_color=None, format=None):
"""Create variant image from source image.
Will crop the image according to the zoom, focus point and size. In
addition, the image is scaled down to size (if given) and image
enhancements, like brightness, are applied.
The default variant skips cropping, but still applies image
enhancements, so it can be used as a high quality preview of image
enhancements in the frontend.
"""
if not variant.is_default:
image = self._crop_variant_image(variant, size=size)
else:
# Alpha channel is usually activated when cropping,
# so we must do it by hand since we skipped cropping
image = self._enable_alpha_channel(self.image)
# Apply enhancements like brightness
if variant.brightness is not None:
image = PIL.ImageEnhance.Brightness(image).enhance(
variant.brightness)
if variant.contrast is not None:
image = PIL.ImageEnhance.Contrast(image).enhance(
variant.contrast)
if variant.saturation is not None:
image = PIL.ImageEnhance.Color(image).enhance(
variant.saturation)
if variant.sharpness is not None:
image = PIL.ImageEnhance.Sharpness(image).enhance(
variant.sharpness)
# Optionally fill the background of transparent images
if fill_color is not None and self._color_mode == 'RGBA':
fill_color = PIL.ImageColor.getrgb('#' + fill_color)
opaque = PIL.Image.new('RGB', image.size, fill_color)
opaque.paste(image, (0, 0), image)
image = opaque
return self._construct_image(image, format)
def _crop_variant_image(self, variant, size=None):
"""Crop variant image from source image.
Determines crop position using zoom, focus point and size constraint.
The result image will have the exact dimensions that are predefined by
the size argument, if provided. Otherwise it depends on the variant
ratio and zoom only, giving back the best image quality, i.e. will not
scale down.
"""
source_width, source_height = self.image.size
if (source_width == 0 or source_height == 0):
return self.image
zoomed_width = source_width
zoomed_height = source_height
if variant.zoom > 0:
zoomed_width = int(source_width * variant.zoom)
zoomed_height = int(source_height * variant.zoom)
target_ratio = variant.ratio
if target_ratio is None:
target_ratio = float(source_width) / float(source_height)
target_width, target_height = self._fit_ratio_to_image(
zoomed_width, zoomed_height, target_ratio)
if size:
w, h = size
override_ratio = float(w) / float(h)
target_width, target_height = self._fit_ratio_to_image(
target_width, target_height, override_ratio)
x, y = self._determine_crop_position(
variant, target_width, target_height)
image = self._crop(
self.image, x, y, x + target_width, y + target_height)
if size:
w, h = size
if w > self.MAXIMUM_IMAGE_SIZE:
w = self.MAXIMUM_IMAGE_SIZE
if h > self.MAXIMUM_IMAGE_SIZE:
h = self.MAXIMUM_IMAGE_SIZE
image = image.resize((w, h), PIL.Image.ANTIALIAS)
return image
def _fit_ratio_to_image(self, source_width, source_height, target_ratio):
"""Calculate the biggest (width, height) inside the source that adheres
to target ratio"""
original_ratio = float(source_width) / float(source_height)
if target_ratio > original_ratio:
width = source_width
height = int(source_width / target_ratio)
else:
width = int(source_height * target_ratio)
height = source_height
return width, height
def _determine_crop_position(self, variant, target_width, target_height):
width, height = self.image.size
x = int(width * variant.focus_x - target_width * variant.focus_x)
y = int(height * variant.focus_y - target_height * variant.focus_y)
return x, y
def _crop(self, pil_image, x1, y1, x2, y2):
pil_image = pil_image.crop((x1, y1, x2, y2))
pil_image = self._enable_alpha_channel(pil_image)
return pil_image
@property
def _color_mode(self):
# XXX This is a rather crude heuristic.
return 'RGBA' if self.context.format == 'PNG' else 'RGB'
def _enable_alpha_channel(self, pil_image):
"""Enable alpha channel for PNG images by converting to RGBA."""
if pil_image.mode != self._color_mode:
pil_image = pil_image.convert(self._color_mode)
return pil_image
def _construct_image(self, pil_image, format=None):
image = zeit.content.image.image.TemporaryImage()
if not format:
format = self.context.format
image.mimeType = self.context.mimeType
else:
image.mimeType = 'image/' + format.lower() # XXX crude heuristic.
# XXX Maybe encoder setting should be made configurable.
if format in ('JPG', 'JPEG'):
options = {'progressive': True, 'quality': 85, 'optimize': True}
elif format == 'PNG':
options = {'optimize': True}
elif format == 'WEBP':
options = {'quality': 85}
else:
options = {}
pil_image.save(image.open('w'), format, **options)
image.__parent__ = self.context
image_times = zope.dublincore.interfaces.IDCTimes(self.context, None)
if image_times and image_times.modified:
thumb_times = zope.dublincore.interfaces.IDCTimes(image)
thumb_times.modified = image_times.modified
return image
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IPersistentThumbnail)
def persistent_thumbnail_factory(context):
config = zope.app.appsetup.product.getProductConfiguration(
'zeit.content.image') or {}
method_name = config.get('thumbnail-method', 'thumbnail')
width = config.get('thumbnail-width', 50)
if width:
width = int(width)
else:
width = None
height = config.get('thumbnail-height', 50)
if height:
height = int(height)
else:
height = None
thumbnail_container = zeit.content.image.interfaces.IThumbnailFolder(
context)
image_name = context.__name__
if image_name not in thumbnail_container:
transform = zeit.content.image.interfaces.ITransform(context)
method = getattr(transform, method_name)
thumbnail = method(width, height)
thumbnail_properties = (
zeit.connector.interfaces.IWebDAVWriteProperties(thumbnail))
image_properties = zeit.connector.interfaces.IWebDAVReadProperties(
context)
for (name, namespace), value in image_properties.items():
if namespace != 'DAV:':
thumbnail_properties[(name, namespace)] = value
thumbnail_properties.pop(zeit.connector.interfaces.UUID_PROPERTY, None)
thumbnail_container[image_name] = thumbnail
return thumbnail_container[image_name]
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IThumbnailFolder)
def thumbnail_folder_factory(context):
name = u'thumbnails'
folder = context.__parent__
if name not in folder:
folder[name] = zeit.cms.repository.folder.Folder()
return folder[name]
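# Hedged usage sketch: the adapters above are normally obtained through the
# component architecture; `some_image` is a placeholder for an object providing
# zeit.content.image.interfaces.IImage.
# transform = zeit.content.image.interfaces.ITransform(some_image)
# thumbnail = transform.thumbnail(50, 50)
# resized = transform.resize(width=200)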
|
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (C) 2012-2013 Heiko Strathmann
#
from numpy import *
from pylab import *
from scipy import *
from modshogun import RealFeatures
from modshogun import MeanShiftDataGenerator
from modshogun import GaussianKernel, CombinedKernel
from modshogun import QuadraticTimeMMD, MMDKernelSelectionMax
from modshogun import PERMUTATION, MMD2_SPECTRUM, MMD2_GAMMA, BIASED, UNBIASED
from modshogun import EuclideanDistance
from modshogun import Statistics, Math
# for nice plotting that fits into our shogun tutorial
import latex_plot_inits
def quadratic_time_mmd_graphical():
# parameters, change to get different results
m=100
dim=2
# setting the difference of the first dimension smaller makes a harder test
difference=0.5
# number of samples taken from null and alternative distribution
num_null_samples=500
# streaming data generator for mean shift distributions
gen_p=MeanShiftDataGenerator(0, dim)
gen_q=MeanShiftDataGenerator(difference, dim)
# Stream examples and merge them in order to compute MMD on joint sample
# alternative is to call a different constructor of QuadraticTimeMMD
features=gen_p.get_streamed_features(m)
features=features.create_merged_copy(gen_q.get_streamed_features(m))
# use the median kernel selection
    # create combined kernel with Gaussian kernels inside (shoguns Gaussian kernel is parametrized differently, see the width computation below)
# compute median data distance in order to use for Gaussian kernel width
# 0.5*median_distance normally (factor two in Gaussian kernel)
# However, shoguns kernel width is different to usual parametrization
# Therefore 0.5*2*median_distance^2
# Use a subset of data for that, only 200 elements. Median is stable
sigmas=[2**x for x in range(-3,10)]
widths=[x*x*2 for x in sigmas]
print "kernel widths:", widths
combined=CombinedKernel()
for i in range(len(sigmas)):
combined.append_kernel(GaussianKernel(10, widths[i]))
# create MMD instance, use biased statistic
mmd=QuadraticTimeMMD(combined,features, m)
mmd.set_statistic_type(BIASED)
# kernel selection instance (this can easily replaced by the other methods for selecting
# single kernels
selection=MMDKernelSelectionMax(mmd)
# perform kernel selection
kernel=selection.select_kernel()
kernel=GaussianKernel.obtain_from_generic(kernel)
mmd.set_kernel(kernel);
print "selected kernel width:", kernel.get_width()
# sample alternative distribution (new data each trial)
alt_samples=zeros(num_null_samples)
for i in range(len(alt_samples)):
# Stream examples and merge them in order to replace in MMD
features=gen_p.get_streamed_features(m)
features=features.create_merged_copy(gen_q.get_streamed_features(m))
mmd.set_p_and_q(features)
alt_samples[i]=mmd.compute_statistic()
# sample from null distribution
# bootstrapping, biased statistic
mmd.set_null_approximation_method(PERMUTATION)
mmd.set_statistic_type(BIASED)
mmd.set_num_null_samples(num_null_samples)
null_samples_boot=mmd.sample_null()
# sample from null distribution
# spectrum, biased statistic
if "sample_null_spectrum" in dir(QuadraticTimeMMD):
mmd.set_null_approximation_method(MMD2_SPECTRUM)
mmd.set_statistic_type(BIASED)
null_samples_spectrum=mmd.sample_null_spectrum(num_null_samples, m-10)
# fit gamma distribution, biased statistic
mmd.set_null_approximation_method(MMD2_GAMMA)
mmd.set_statistic_type(BIASED)
gamma_params=mmd.fit_null_gamma()
# sample gamma with parameters
null_samples_gamma=array([gamma(gamma_params[0], gamma_params[1]) for _ in range(num_null_samples)])
# to plot data, sample a few examples from stream first
features=gen_p.get_streamed_features(m)
features=features.create_merged_copy(gen_q.get_streamed_features(m))
data=features.get_feature_matrix()
# plot
figure()
title('Quadratic Time MMD')
# plot data of p and q
subplot(2,3,1)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 4) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 4) ) # reduce number of x-ticks
plot(data[0][0:m], data[1][0:m], 'ro', label='$x$')
    plot(data[0][m+1:2*m], data[1][m+1:2*m], 'bo', label='$y$', alpha=0.5)
title('Data, shift in $x_1$='+str(difference)+'\nm='+str(m))
xlabel('$x_1, y_1$')
ylabel('$x_2, y_2$')
# histogram of first data dimension and pdf
subplot(2,3,2)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 3 )) # reduce number of x-ticks
hist(data[0], bins=50, alpha=0.5, facecolor='r', normed=True)
hist(data[1], bins=50, alpha=0.5, facecolor='b', normed=True)
xs=linspace(min(data[0])-1,max(data[0])+1, 50)
plot(xs,normpdf( xs, 0, 1), 'r', linewidth=3)
plot(xs,normpdf( xs, difference, 1), 'b', linewidth=3)
xlabel('$x_1, y_1$')
ylabel('$p(x_1), p(y_1)$')
title('Data PDF in $x_1, y_1$')
# compute threshold for test level
alpha=0.05
null_samples_boot.sort()
null_samples_spectrum.sort()
null_samples_gamma.sort()
thresh_boot=null_samples_boot[floor(len(null_samples_boot)*(1-alpha))];
thresh_spectrum=null_samples_spectrum[floor(len(null_samples_spectrum)*(1-alpha))];
thresh_gamma=null_samples_gamma[floor(len(null_samples_gamma)*(1-alpha))];
    type_one_error_boot=sum(null_samples_boot>thresh_boot)/float(num_null_samples)
    type_one_error_spectrum=sum(null_samples_spectrum>thresh_spectrum)/float(num_null_samples)
    type_one_error_gamma=sum(null_samples_gamma>thresh_gamma)/float(num_null_samples)
# plot alternative distribution with threshold
subplot(2,3,4)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
hist(alt_samples, 20, normed=True);
axvline(thresh_boot, 0, 1, linewidth=2, color='red')
type_two_error=sum(alt_samples<thresh_boot)/float(num_null_samples)
title('Alternative Dist.\n' + 'Type II error is ' + str(type_two_error))
# compute range for all null distribution histograms
hist_range=[min([min(null_samples_boot), min(null_samples_spectrum), min(null_samples_gamma)]), max([max(null_samples_boot), max(null_samples_spectrum), max(null_samples_gamma)])]
# plot null distribution with threshold
subplot(2,3,3)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 3 )) # reduce number of x-ticks
hist(null_samples_boot, 20, range=hist_range, normed=True);
axvline(thresh_boot, 0, 1, linewidth=2, color='red')
title('Sampled Null Dist.\n' + 'Type I error is ' + str(type_one_error_boot))
grid(True)
# plot null distribution spectrum
subplot(2,3,5)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
hist(null_samples_spectrum, 20, range=hist_range, normed=True);
axvline(thresh_spectrum, 0, 1, linewidth=2, color='red')
title('Null Dist. Spectrum\nType I error is ' + str(type_one_error_spectrum))
# plot null distribution gamma
subplot(2,3,6)
grid(True)
gca().xaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
gca().yaxis.set_major_locator( MaxNLocator(nbins = 3) ) # reduce number of x-ticks
hist(null_samples_gamma, 20, range=hist_range, normed=True);
axvline(thresh_gamma, 0, 1, linewidth=2, color='red')
title('Null Dist. Gamma\nType I error is ' + str(type_one_error_gamma))
# pull plots a bit apart
subplots_adjust(hspace=0.5)
subplots_adjust(wspace=0.5)
if __name__=='__main__':
quadratic_time_mmd_graphical()
show()
|
#
# Tests for current input functions
#
import pybamm
import numbers
import unittest
import numpy as np
class TestCurrentFunctions(unittest.TestCase):
def test_constant_current(self):
# test simplify
current = pybamm.electrical_parameters.current_with_time
parameter_values = pybamm.ParameterValues(
{
"Typical current [A]": 2,
"Typical timescale [s]": 1,
"Current function [A]": 2,
}
)
processed_current = parameter_values.process_symbol(current)
self.assertIsInstance(processed_current.simplify(), pybamm.Scalar)
def test_get_current_data(self):
# test process parameters
dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time
parameter_values = pybamm.ParameterValues(
{
"Typical current [A]": 2,
"Typical timescale [s]": 1,
"Current function [A]": "[current data]car_current",
}
)
dimensional_current_eval = parameter_values.process_symbol(dimensional_current)
def current(t):
return dimensional_current_eval.evaluate(t=t)
standard_tests = StandardCurrentFunctionTests([current], always_array=True)
standard_tests.test_all()
def test_user_current(self):
# create user-defined sin function
def my_fun(t, A, omega):
return A * pybamm.sin(2 * np.pi * omega * t)
# choose amplitude and frequency
A = pybamm.electrical_parameters.I_typ
omega = pybamm.Parameter("omega")
def current(t):
return my_fun(t, A, omega)
# set and process parameters
parameter_values = pybamm.ParameterValues(
{
"Typical current [A]": 2,
"Typical timescale [s]": 1,
"omega": 3,
"Current function [A]": current,
}
)
dimensional_current = pybamm.electrical_parameters.dimensional_current_with_time
dimensional_current_eval = parameter_values.process_symbol(dimensional_current)
def user_current(t):
return dimensional_current_eval.evaluate(t=t)
# check output types
standard_tests = StandardCurrentFunctionTests([user_current])
standard_tests.test_all()
# check output correct value
time = np.linspace(0, 3600, 600)
np.testing.assert_array_almost_equal(
user_current(time), 2 * np.sin(2 * np.pi * 3 * time)
)
class StandardCurrentFunctionTests(object):
def __init__(self, function_list, always_array=False):
self.function_list = function_list
self.always_array = always_array
def test_output_type(self):
for function in self.function_list:
if self.always_array is True:
assert isinstance(function(0), np.ndarray)
else:
assert isinstance(function(0), numbers.Number)
assert isinstance(function(np.zeros(3)), np.ndarray)
assert isinstance(function(np.zeros([3, 3])), np.ndarray)
def test_all(self):
self.test_output_type()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class SubDomainExist(object):
def __init__(self, domain=None, isExist=None):
"""
        :param domain: (Optional) Subdomain name
        :param isExist: (Optional) Existence status of the subdomain: 1 = exists, 2 = does not exist, 3 = the zone does not exist
"""
self.domain = domain
self.isExist = isExist
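# Hedged usage sketch: building the response model by hand with illustrative
# values only.
# record = SubDomainExist(domain="www.example.com", isExist=1)
# if record.isExist == 1:
#     print("subdomain exists")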
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetPublicIPAddressResult',
'AwaitableGetPublicIPAddressResult',
'get_public_ip_address',
]
@pulumi.output_type
class GetPublicIPAddressResult:
"""
Public IP address resource.
"""
def __init__(__self__, dns_settings=None, etag=None, id=None, idle_timeout_in_minutes=None, ip_address=None, ip_configuration=None, location=None, name=None, provisioning_state=None, public_ip_address_version=None, public_ip_allocation_method=None, resource_guid=None, tags=None, type=None):
if dns_settings and not isinstance(dns_settings, dict):
raise TypeError("Expected argument 'dns_settings' to be a dict")
pulumi.set(__self__, "dns_settings", dns_settings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes and not isinstance(idle_timeout_in_minutes, int):
raise TypeError("Expected argument 'idle_timeout_in_minutes' to be a int")
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
pulumi.set(__self__, "ip_address", ip_address)
if ip_configuration and not isinstance(ip_configuration, dict):
raise TypeError("Expected argument 'ip_configuration' to be a dict")
pulumi.set(__self__, "ip_configuration", ip_configuration)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address_version and not isinstance(public_ip_address_version, str):
raise TypeError("Expected argument 'public_ip_address_version' to be a str")
pulumi.set(__self__, "public_ip_address_version", public_ip_address_version)
if public_ip_allocation_method and not isinstance(public_ip_allocation_method, str):
raise TypeError("Expected argument 'public_ip_allocation_method' to be a str")
pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dnsSettings")
def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']:
"""
The FQDN of the DNS record associated with the public IP address.
"""
return pulumi.get(self, "dns_settings")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
The idle timeout of the public IP address.
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="ipConfiguration")
def ip_configuration(self) -> 'outputs.IPConfigurationResponse':
"""
IPConfiguration
"""
return pulumi.get(self, "ip_configuration")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddressVersion")
def public_ip_address_version(self) -> Optional[str]:
"""
The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
"""
return pulumi.get(self, "public_ip_address_version")
@property
@pulumi.getter(name="publicIPAllocationMethod")
def public_ip_allocation_method(self) -> Optional[str]:
"""
The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
"""
return pulumi.get(self, "public_ip_allocation_method")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the public IP resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetPublicIPAddressResult(GetPublicIPAddressResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPublicIPAddressResult(
dns_settings=self.dns_settings,
etag=self.etag,
id=self.id,
idle_timeout_in_minutes=self.idle_timeout_in_minutes,
ip_address=self.ip_address,
ip_configuration=self.ip_configuration,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
public_ip_address_version=self.public_ip_address_version,
public_ip_allocation_method=self.public_ip_allocation_method,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type)
def get_public_ip_address(expand: Optional[str] = None,
public_ip_address_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPublicIPAddressResult:
"""
Public IP address resource.
:param str expand: Expands referenced resources.
    :param str public_ip_address_name: The name of the public IP address.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['publicIpAddressName'] = public_ip_address_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20161201:getPublicIPAddress', __args__, opts=opts, typ=GetPublicIPAddressResult).value
return AwaitableGetPublicIPAddressResult(
dns_settings=__ret__.dns_settings,
etag=__ret__.etag,
id=__ret__.id,
idle_timeout_in_minutes=__ret__.idle_timeout_in_minutes,
ip_address=__ret__.ip_address,
ip_configuration=__ret__.ip_configuration,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
public_ip_address_version=__ret__.public_ip_address_version,
public_ip_allocation_method=__ret__.public_ip_allocation_method,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type)
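# Hedged usage sketch: the resource group and public IP name below are
# placeholders, not values defined in this module.
# result = get_public_ip_address(
#     public_ip_address_name="my-public-ip",
#     resource_group_name="my-resource-group",
# )
# pulumi.export("ipAddress", result.ip_address)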
|
from moler.cmd.unix.ps import Ps
from moler.observable_connection import ObservableConnection, get_connection
from moler.io.raw.terminal import ThreadedTerminal
# v.1 - combine all manually
# moler_conn = ObservableConnection()
# terminal = ThreadedTerminal(moler_connection=moler_conn)
# v.2 - let factory combine
terminal = get_connection(io_type='terminal', variant='threaded')
# v.3 - let factory select default variant
# terminal = get_connection(io_type='terminal')
with terminal.open():
ps_cmd = Ps(connection=terminal.moler_connection, options="-ef")
processes = ps_cmd()
for proc in processes:
if 'python' in proc['CMD']:
print("PID: {} CMD: {}".format(proc['PID'], proc['CMD']))
# result:
"""
PID: 1817 CMD: /usr/bin/python /usr/share/system-config-printer/applet.py
PID: 21825 CMD: /usr/bin/python /home/gl/moler/examples/command/unix_ps.py
"""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0xde71c936
# Compiled with Coconut version 2.0.0-a_dev33 [How Not to Be Seen]
# Coconut Header: -------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os as _coconut_os
_coconut_file_dir = _coconut_os.path.dirname(_coconut_os.path.abspath(__file__))
_coconut_cached_module = _coconut_sys.modules.get(str("__coconut__"))
if _coconut_cached_module is not None and _coconut_os.path.dirname(_coconut_cached_module.__file__) != _coconut_file_dir: # type: ignore
del _coconut_sys.modules[str("__coconut__")]
_coconut_sys.path.insert(0, _coconut_file_dir)
_coconut_module_name = _coconut_os.path.splitext(_coconut_os.path.basename(_coconut_file_dir))[0]
if _coconut_module_name and _coconut_module_name[0].isalpha() and all(c.isalpha() or c.isdigit() for c in _coconut_module_name) and "__init__.py" in _coconut_os.listdir(_coconut_file_dir):
_coconut_full_module_name = str(_coconut_module_name + ".__coconut__")
import __coconut__ as _coconut__coconut__
_coconut__coconut__.__name__ = _coconut_full_module_name
for _coconut_v in vars(_coconut__coconut__).values():
if getattr(_coconut_v, "__module__", None) == str("__coconut__"):
try:
_coconut_v.__module__ = _coconut_full_module_name
except AttributeError:
_coconut_v_type = type(_coconut_v)
if getattr(_coconut_v_type, "__module__", None) == str("__coconut__"):
_coconut_v_type.__module__ = _coconut_full_module_name
_coconut_sys.modules[_coconut_full_module_name] = _coconut__coconut__
from __coconut__ import *
from __coconut__ import _coconut_tail_call, _coconut_tco, _coconut_call_set_names, _coconut_handle_cls_kwargs, _coconut_handle_cls_stargs, _namedtuple_of, _coconut, _coconut_MatchError, _coconut_iter_getitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_forward_dubstar_compose, _coconut_back_dubstar_compose, _coconut_pipe, _coconut_star_pipe, _coconut_dubstar_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_back_dubstar_pipe, _coconut_none_pipe, _coconut_none_star_pipe, _coconut_none_dubstar_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial, _coconut_get_function_match_error, _coconut_base_pattern_func, _coconut_addpattern, _coconut_sentinel, _coconut_assert, _coconut_mark_as_match, _coconut_reiterable, _coconut_self_match_types, _coconut_dict_merge, _coconut_exec, _coconut_comma_op, _coconut_multi_dim_arr
_coconut_sys.path.pop(0)
# Compiled Coconut: -----------------------------------------------------------
from argparse import ArgumentParser
from collections import namedtuple
if _coconut_sys.version_info < (3, 3):
from collections import Iterable
else:
from collections.abc import Iterable
import hace
parser = ArgumentParser()
parser.add_argument("--host", type=str, default="localhost", help="Host address")
parser.add_argument("-p", "--port", type=int, default="6006", help="Server Port")
parser.add_argument("-e", "--env", type=str, default="op2", help="ACE Environment ID, see GACE doc for what's available")
parser.add_argument("-n", "--num", type=int, default=1, help="Number of Pooled Envs")
parser.add_argument("--pdk", type=str, default="xh035-3V3", help="ACE backend, see GACE doc for what's available")
@_coconut_tco
def isiterable(obj):
return _coconut_tail_call(isinstance, obj, Iterable)
def make_env(env_id, #type: str
backend, #type: str
num=1 #type: int
):
env = (hace.make_env(env_id, backend) if num == 1 else hace.make_same_env_pool(num, env_id, backend))
return env
def simulate_pool(envs, sizings #type: dict[int, dict[str, float]]
):
sizing = dict(((int(i)), (s)) for i, s in sizings.items())
perf = hace.evaluate_circuit_pool(envs, sizing)
return perf
def simulate_single(env, sizing #type: dict[str, float]
):
perf = hace.evaluate_circuit(env, sizing)
return perf
def simulate(env, sizing):
perf = (simulate_pool(env, sizing) if isiterable(env) else simulate_single(env, sizing))
return perf
def performance(env):
perf = ((hace.current_performance_pool if isiterable(env) else hace.current_performance))(env)
return perf
def sizing(env):
size = ((hace.current_sizing_pool if isiterable(env) else hace.current_sizing))(env)
return size
def performance_parameters(env):
pps = {"params": ((hace.performance_identifiers_pool if isiterable(env) else hace.performance_identifiers))(env)}
return pps
def sizing_parameters(env):
sps = {"params": ((hace.sizing_identifiers_pool if isiterable(env) else hace.sizing_identifiers))(env)}
return sps
def initial_sizing(env):
init = ((hace.initial_sizing_pool if isiterable(env) else hace.initial_sizing))(env)
return init
def random_sizing(env):
rng = ((hace.random_sizing_pool if isiterable(env) else hace.random_sizing))(env)
return rng
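# Hedged usage sketch: a single-environment round trip through the helpers
# above, using the module's default environment id and backend; it assumes a
# working hace installation.
# env = make_env("op2", "xh035-3V3")
# size = initial_sizing(env)
# perf = simulate(env, size)
# print(performance(env))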
|
import sys
import os
import copy
import json
import datetime
opt = dict()
opt['dataset'] = '../data/citeseer'
opt['hidden_dim'] = 16
opt['input_dropout'] = 0.5
opt['dropout'] = 0
opt['optimizer'] = 'adam'
opt['lr'] = 0.01
opt['decay'] = 5e-4
opt['self_link_weight'] = 1.0
opt['pre_epoch'] = 2000
opt['epoch'] = 100
opt['iter'] = 1
opt['use_gold'] = 1
opt['draw'] = 'smp'
opt['tau'] = 0.0
opt['save'] = 'exp_citeseer'
opt['mixup_alpha'] = 1.0
opt['partition_num'] = 0
opt['task_ratio'] = 0
### ict hyperparameters ###
opt['ema_decay'] = 0.999
opt['consistency_type'] = "mse"
opt['consistency_rampup_starts'] = 500
opt['consistency_rampup_ends'] = 1000
opt['mixup_consistency'] = 10.0
def generate_command(opt):
cmd = 'python3 train.py'
    for key, val in opt.items():
        cmd += ' --' + key + ' ' + str(val)
return cmd
def run(opt):
opt_ = copy.deepcopy(opt)
os.system(generate_command(opt_))
os.system('rm record.txt')
os.system('echo -n -> record.txt')
os.system('rm record_val.txt')
os.system('echo -n -> record_val.txt')
partition_num_list = [8,9,10,11,12,13,14,15,16]
task_ratio_list = [0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
for p in partition_num_list:
for t in task_ratio_list:
os.system('rm record.txt')
os.system('echo -n -> record.txt')
opt['partition_num'] = p
opt['task_ratio'] = t
for k in range(10):
seed = k + 1
opt['seed'] = seed
run(opt)
os.system('python result_cal.py')
with open('record_val.txt', 'a') as f:
f.write(str(p) + ' ' + str(t) + '\n')
|
"""
Copyright (c) 2016, Jose Dolz .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. Dec, 2016.
email: jose.dolz.upv@gmail.com
LIVIA Department, ETS, Montreal.
"""
import os
import numpy as np
from Modules.IO.sampling import getSamplesSubepoch
from Modules.General.Utils import dump_model_to_gzip_file
from Modules.General.Utils import getImagesSet
from Modules.General.Utils import load_model_from_gzip_file
from Modules.Parsers.parsersUtils import parserConfigIni
from startTesting import segmentVolume
def startTraining(networkModelName,configIniName):
print (" ************************************************ STARTING TRAINING **************************************************")
print (" ********************** Starting training model (Reading parameters) **********************")
myParserConfigIni = parserConfigIni()
myParserConfigIni.readConfigIniFile(configIniName,1)
# Image type (0: Nifti, 1: Matlab)
imageType = myParserConfigIni.imageTypesTrain
print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
print ("-------- Reading Images names used in training/validation -------------")
##-----##
# from sklearn.model_selection import KFold
# import numpy as np
# y1 = myParserConfigIni.indexesForTraining
# #x1 = myParserConfigIni.indexesForValidation
# kf = KFold(n_splits= 5)
#
# for train_index, test_index in kf.split(y1):
# print("TRAIN:", train_index, "TEST:", test_index)
# y, x = np.array(y1)[train_index], np.array(y1)[test_index]
##-----##
# from sklearn.model_selection import LeavePOut
# lpo = LeavePOut(p=5)
# y1 = myParserConfigIni.indexesForTraining
# for train, test in lpo.split(y1):
# y, x = np.array(y1)[train], np.array(y1)[test]
##-----train##
from sklearn.cross_validation import LeaveOneOut
loo = LeaveOneOut(4)
y1 = myParserConfigIni.indexesForTraining
x1 = myParserConfigIni.indexesForValidation
for train_index, test_index in loo:
print("TRAIN:", train_index, "TEST:", test_index)
y, x = np.array(y1)[train_index], np.array(y1)[test_index]
##------he
# from sklearn.model_selection import train_test_split
# X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
# -- Get list of images used for training -- #
(imageNames_Train, names_Train) = getImagesSet(myParserConfigIni.imagesFolder,y) # Images
(groundTruthNames_Train, gt_names_Train) = getImagesSet(myParserConfigIni.GroundTruthFolder,y) # Ground truth
(roiNames_Train, roi_names_Train) = getImagesSet(myParserConfigIni.ROIFolder,y) # ROI
# -- Get list of images used for validation -- #
(imageNames_Val, names_Val) = getImagesSet(myParserConfigIni.imagesFolder,x) # Images
(groundTruthNames_Val, gt_names_Val) = getImagesSet(myParserConfigIni.GroundTruthFolder,x) # Ground truth
(roiNames_Val, roi_names_Val) = getImagesSet(myParserConfigIni.ROIFolder,x) # ROI
# Print names
print (" ================== Images for training ================")
for i in range(0,len(names_Train)):
if len(roi_names_Train) > 0:
print(" Image({}): {} | GT: {} | ROI {} ".format(i,names_Train[i], gt_names_Train[i], roi_names_Train[i] ))
else:
print(" Image({}): {} | GT: {} ".format(i,names_Train[i], gt_names_Train[i] ))
print (" ================== Images for validation ================")
for i in range(0,len(names_Val)):
if len(roi_names_Train) > 0:
print(" Image({}): {} | GT: {} | ROI {} ".format(i,names_Val[i], gt_names_Val[i], roi_names_Val[i] ))
else:
print(" Image({}): {} | GT: {} ".format(i,names_Val[i], gt_names_Val[i]))
print (" ===============================================================")
# --------------- Load my LiviaNet3D object ---------------
print (" ... Loading model from {}".format(networkModelName))
myLiviaNet3D = load_model_from_gzip_file(networkModelName)
print (" ... Network architecture successfully loaded....")
# Asign parameters to loaded Net
myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
myLiviaNet3D.numberOfSamplesSupEpoch = myParserConfigIni.numberOfSamplesSupEpoch
myLiviaNet3D.firstEpochChangeLR = myParserConfigIni.firstEpochChangeLR
myLiviaNet3D.frequencyChangeLR = myParserConfigIni.frequencyChangeLR
numberOfEpochs = myLiviaNet3D.numberOfEpochs
numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
# --------------- -------------- ---------------
# --------------- Start TRAINING ---------------
# --------------- -------------- ---------------
# Get sample dimension values
receptiveField = myLiviaNet3D.receptiveField
sampleSize_Train = myLiviaNet3D.sampleSize_Train
trainingCost = []
if myParserConfigIni.applyPadding == 1:
applyPadding = True
else:
applyPadding = False
learningRateModifiedEpoch = 0
# Run over all the (remaining) epochs and subepochs
for e_i in xrange(numberOfEpochs):
# Recover last trained epoch
numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))
costsOfEpoch = []
for subE_i in xrange(numberOfSubEpochs):
epoch_nr = subE_i+1
print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))
# Get all the samples that will be used in this sub-epoch
[imagesSamplesAll,
gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
imageNames_Train,
groundTruthNames_Train,
roiNames_Train,
imageType,
sampleSize_Train,
receptiveField,
applyPadding
)
# Variable that will contain weights for the cost function
# --- In its current implementation, all the classes have the same weight
weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
numberBatches = len(imagesSamplesAll) / myLiviaNet3D.batch_Size
myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
costsOfBatches = []
evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
for b_i in xrange(numberBatches):
# TODO: Make a line that adds a point at each trained batch (Or percentage being updated)
costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
meanBatchCostError = costErrors[0]
costsOfBatches.append(meanBatchCostError)
myLiviaNet3D.updateLayersMatricesBatchNorm()
#======== Calculate and Report accuracy over subepoch
meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
# Release data
myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))
# Get mean cost epoch
costsOfEpoch.append(meanCostOfSubepoch)
meanCostOfEpoch = sum(costsOfEpoch) / float(numberOfSubEpochs)
# Include the epoch cost to the main training cost and update current mean
trainingCost.append(meanCostOfEpoch)
        currentMeanCost = sum(trainingCost) / float(e_i + 1)
print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
print(" -------------------------------------------------------- " )
# ------------- Update Learning Rate if required ----------------#
if e_i >= myLiviaNet3D.firstEpochChangeLR :
if learningRateModifiedEpoch == 0:
currentLR = myLiviaNet3D.learning_rate.get_value()
newLR = currentLR / 2.0
myLiviaNet3D.learning_rate.set_value(newLR)
print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
learningRateModifiedEpoch = e_i
else:
if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
currentLR = myLiviaNet3D.learning_rate.get_value()
newLR = currentLR / 2.0
myLiviaNet3D.learning_rate.set_value(newLR)
print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
learningRateModifiedEpoch = e_i
# ---------------------- Start validation ---------------------- #
numberImagesToSegment = len(imageNames_Val)
print(" ********************** Starting validation **********************")
# Run over the images to segment
for i_d in xrange(numberImagesToSegment) :
print("------------- Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
segmentVolume(myLiviaNet3D,
i_d,
imageNames_Val, # Full path
names_Val, # Only image name
groundTruthNames_Val,
roiNames_Val,
imageType,
applyPadding,
receptiveField,
sampleSize_Train,
strideValues,
myLiviaNet3D.batch_Size,
0 # Validation (0) or testing (1)
)
print(" ********************** Validation DONE ********************** ")
# ------ In this point the training is done at Epoch n ---------#
# Increase number of epochs trained
myLiviaNet3D.numberOfEpochsTrained += 1
# --------------- Save the model ---------------
BASE_DIR = os.getcwd()
path_Temp = os.path.join(BASE_DIR,'outputFiles')
netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
netFolderName = os.path.join(netFolderName,'Networks')
modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
strFinal = " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
print (strFinal)
print("................ The whole Training is done.....")
print(" ************************************************************************************ ")
|
from numbers import Number
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import math as m
from scipy.stats import norm
"""
Minigolf task.
References
----------
- Penner, A. R. "The physics of putting." Canadian Journal of Physics 80.2 (2002): 83-96.
"""
class MiniGolf(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self.min_pos = 0.0
self.max_pos = 20.0
self.min_action = 1e-5
self.max_action = 10.0
self.putter_length = 1.0 # [0.7:1.0]
self.friction = 0.131 # [0.065:0.196]
self.hole_size = 0.10 # [0.10:0.15]
self.sigma_noise = 0.3
self.ball_radius = 0.02135
self.min_variance = 1e-2 # Minimum variance for computing the densities
# gym attributes
self.viewer = None
low = np.array([self.min_pos])
high = np.array([self.max_pos])
self.action_space = spaces.Box(low=self.min_action,
high=self.max_action,
shape=(1,), dtype=float)
self.observation_space = spaces.Box(low=low, high=high, dtype=float)
# initialize state
self.seed()
self.reset()
def setParams(self, env_param):
self.putter_length = env_param[0]
self.friction = env_param[1]
self.hole_size = env_param[2]
self.sigma_noise = m.sqrt(env_param[-1])
def step(self, action, render=False):
action = np.clip(action, self.min_action, self.max_action / 2)
noise = 10
while abs(noise) > 1:
noise = self.np_random.randn() * self.sigma_noise
u = action * self.putter_length * (1 + noise)
deceleration = 5 / 7 * self.friction * 9.81
t = u / deceleration
xn = self.state - u * t + 0.5 * deceleration * t ** 2
reward = 0
done = True
if self.state > 0:
reward = -1
done = False
elif self.state < -4:
reward = -100
self.state = xn
return self.get_state(), float(reward), done, {'state': self.get_state(), 'action': action, 'danger': float(self.state) < -4}
# Custom param for transfer
def getEnvParam(self):
return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),
np.ravel(self.sigma_noise ** 2)])
def reset(self, state=None):
if state is None:
self.state = np.array([self.np_random.uniform(low=self.min_pos,
high=self.max_pos)])
else:
self.state = np.array(state)
return self.get_state()
def get_state(self):
return np.array(self.state)
def get_true_state(self):
"""For testing purposes"""
return np.array(self.state)
def clip_state(self, state):
return state
# return np.clip(state, self.min_pos, self.max_pos)
def clip_action(self, action):
return action
# return np.clip(action, self.min_action, self.max_action)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def getDensity_old(self, env_parameters, state, action, next_state):
if state < next_state:
return 0
action = np.clip(action, self.min_action, self.max_action / 2)
action = 1e-8 if action == 0 else action
putter_length = env_parameters[0]
friction = env_parameters[1]
sigma_noise = env_parameters[-1]
deceleration = 5 / 7 * friction * 9.81
u = np.sqrt(2 * deceleration * (state - next_state))
noise = (u / (action * putter_length) - 1) / sigma_noise
return norm.pdf(noise)
def density_old(self, env_parameters, state, action, next_state):
"""
:param env_parameters: list of env_params
:param state: NxTx1
:param action: NxT
:param next_state: NxTx1
:return: pdf NxTx1xn_param
"""
assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4
mask = state < next_state
action = np.clip(action, self.min_action, self.max_action / 2)
action[action == 0] = 1e-8
pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))
diff = np.abs(state - next_state) # take the abs for the sqrt, but mask negative values later
for i in range(env_parameters.shape[0]):
deceleration = 5 / 7 * env_parameters[i, 1] * 9.81
u = np.sqrt(2 * deceleration * diff[:, :, :, i])
noise = (u / (action[:, :, np.newaxis, i] * env_parameters[i, 0]) - 1) / env_parameters[i, -1]
pdf[:, :, :, i] = norm.pdf(noise) * (1 - mask[:, :, :, i]) # set to zero impossible transitions
return pdf[:, :, 0, :]
def densityCurrent_old(self, state, action, next_state):
"""
:param state: NxTx1
:param action: NxT
:param next_state: NxTx1
:return: pdf NxTx1xn_param
"""
assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3
mask = state < next_state
action = np.clip(action, self.min_action, self.max_action / 2)
action[action == 0] = 1e-8
diff = np.abs(state - next_state) # take the abs for the sqrt, but mask negative values later
deceleration = 5 / 7 * self.friction * 9.81
u = np.sqrt(2 * deceleration * diff)
noise = (u / (action[:, :, np.newaxis] * self.putter_length) - 1) / self.sigma_noise
pdf = norm.pdf(noise) * (1 - mask) # set to zero impossible transitions
return pdf[:, :, 0]
def stepDenoisedCurrent_old(self, state, action):
"""
Computes steps without noise.
"""
assert state.ndim == 3 and action.ndim == 2
action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]
u = action * self.putter_length
deceleration = 5 / 7 * self.friction * 9.81
t = u / deceleration
return state - u * t + 0.5 * deceleration * t ** 2
def stepDenoisedCurrent(self, state, action):
"""
Computes the mean transitions.
"""
assert state.ndim == 3 and action.ndim == 2
action = np.clip(action, self.min_action, self.max_action / 2)[:, :, np.newaxis]
u = action * self.putter_length
deceleration = 5 / 7 * self.friction * 9.81
return state - 0.5 * u ** 2 * (1 + self.sigma_noise ** 2) / deceleration
def variance(self, action):
"""
Next-state variance given the action
"""
assert action.ndim == 2
deceleration = 5 / 7 * self.friction * 9.81
action = np.clip(action, self.min_action, self.max_action / 2)
k = action ** 2 * self.putter_length ** 2 / (2 * deceleration)
return 2 * k ** 2 * self.sigma_noise ** 2 * (self.sigma_noise ** 2 + 2) + self.min_variance
def densityCurrent(self, state, action, next_state):
"""
:param state: NxTx1
:param action: NxT
:param next_state: NxTx1
:return: pdf NxTx1xn_param
"""
assert state.ndim == 3 and action.ndim == 2 and next_state.ndim == 3
mean_ns = self.stepDenoisedCurrent(state, action)
var_ns = self.variance(action)
return norm.pdf((next_state - mean_ns)[:, :, 0] / np.sqrt(var_ns))
def density(self, env_parameters, state, action, next_state):
"""
:param env_parameters: list of env_params
:param state: NxTx1
:param action: NxT
:param next_state: NxTx1
:return: pdf NxTx1xn_param
"""
assert state.ndim == 4 and action.ndim == 3 and next_state.ndim == 4
action = np.clip(action, self.min_action, self.max_action / 2)
pdf = np.zeros((state.shape[0], state.shape[1], 1, env_parameters.shape[0]))
for i in range(env_parameters.shape[0]):
deceleration = 5 / 7 * env_parameters[i, 1] * 9.81
k = action ** 2 * env_parameters[i, 0] ** 2 / (2 * deceleration)
# Compute mean next-state
mean_ns = state[:, :, :, i] - k[:, :, np.newaxis, i] * (1 + env_parameters[i, -1])
# Compute variance next-state
var_ns = 2 * k[:, :, np.newaxis, i] ** 2 * env_parameters[i, -1] * (
env_parameters[i, -1] + 2) + self.min_variance
pdf[:, :, :, i] = norm.pdf((next_state[:, :, :, i] - mean_ns) / np.sqrt(var_ns))
return pdf[:, :, 0, :]
class ComplexMiniGolf(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self.horizon = 20
self.gamma = 0.99
self.min_pos = 0.0
self.max_pos = 20.0
self.min_action = 1e-5
self.max_action = 10.0
self.putter_length = 1.0 # [0.7:1.0]
# self.friction = 0.131 # [0.065:0.196]
self.friction_low = 0.131
self.friction_high = 0.19 # 0.190
self.hole_size = 0.10 # [0.10:0.15]
self.sigma_noise = 0.3
self.ball_radius = 0.02135
self.min_variance = 1e-2 # Minimum variance for computing the densities
# gym attributes
self.viewer = None
low = np.array([self.min_pos])
high = np.array([self.max_pos])
self.action_space = spaces.Box(low=self.min_action,
high=self.max_action,
shape=(1,))
self.observation_space = spaces.Box(low=low, high=high)
# initialize state
self.seed()
self.reset()
def setParams(self, env_param):
self.putter_length = env_param[0]
self.friction = env_param[1]
self.hole_size = env_param[2]
self.sigma_noise = m.sqrt(env_param[-1])
def computeFriction(self, state):
# if state < (self.max_pos - self.min_pos) / 3:
# friction = self.friction_low
# elif state < (self.max_pos - self.min_pos) * 2 / 3:
# friction = self.friction_low
# else:
# friction = self.friction_high
# return friction
delta_f = self.friction_high - self.friction_low
delta_p = self.max_pos - self.min_pos
return self.friction_low + (delta_f / delta_p) * state
def step(self, action, render=False):
action = np.clip(action, self.min_action, self.max_action / 2)
noise = 10
while abs(noise) > 1:
noise = self.np_random.randn() * self.sigma_noise
u = action * self.putter_length * (1 + noise)
friction = self.computeFriction(self.state)
deceleration = 5 / 7 * friction * 9.81
t = u / deceleration
xn = self.state - u * t + 0.5 * deceleration * t ** 2
# reward = 0
# done = True
# if u < v_min:
# reward = -1
# done = False
# elif u > v_max:
# reward = -100
reward = 0
done = True
if self.state > 0:
reward = -1
done = False
elif self.state < -4:
reward = -100
state = self.state
self.state = xn
# TODO the last three values should not be used
return self.get_state(), float(reward), done, {"state": state, "next_state": self.state, "action": action}
# Custom param for transfer
def getEnvParam(self):
return np.asarray([np.ravel(self.putter_length), np.ravel(self.friction), np.ravel(self.hole_size),
np.ravel(self.sigma_noise ** 2)])
def reset(self, state=None):
# TODO change reset
if state is None:
self.state = np.array([self.np_random.uniform(low=self.min_pos,
high=self.max_pos)])
else:
self.state = np.array(state)
return self.get_state()
def get_state(self):
return np.array(self.state)
def get_true_state(self):
"""For testing purposes"""
return np.array(self.state)
def clip_state(self, state):
return state
# return np.clip(state, self.min_pos, self.max_pos)
def clip_action(self, action):
return action
# return np.clip(action, self.min_action, self.max_action)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reward(self, state, action, next_state):
# FIXME: two problems. (1,probably fixed) When the next_state is less than state. (2) reward of -100 is never returned
friction = self.computeFriction(state)
deceleration = 5 / 7 * friction * 9.81
u = np.sqrt(2 * deceleration * max((state - next_state), 0))
v_min = np.sqrt(10 / 7 * friction * 9.81 * state)
v_max = np.sqrt((2 * self.hole_size - self.ball_radius) ** 2 * (9.81 / (2 * self.ball_radius)) + v_min ** 2)
reward = 0
done = True
if u < v_min:
reward = -1
done = False
elif u > v_max:
reward = -100
return reward, done
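
# Minimal usage sketch (added for illustration; assumes this module's top-level
# imports such as gym/numpy/scipy are intact): roll out a few random actions in
# ComplexMiniGolf and print the resulting transitions.
if __name__ == '__main__':
    env = ComplexMiniGolf()
    obs = env.reset()
    for _ in range(5):
        a = env.action_space.sample()
        obs, rew, done, info = env.step(a)
        print(obs, rew, done)
        if done:
            obs = env.reset()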
|
class UnoInfo:
def __init__(self):
self.dataPins = 13
self.analogInPins = 5
self.GND = 3
self.pow = [3.3, 5]
self.TX = 1
self.RX = 0
def getMainInfo(self):
return {"0": self.dataPins, "1": self.GND, "2": self.pow}
def getDigitalPins(self):
return self.dataPins
def getAnalogPins(self):
return self.analogInPins
def getAmountGND(self):
return self.GND
def getPowOut(self):
return self.pow
def getTXSlot(self):
return self.TX
def getRXSlot(self):
return self.RX
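
# Usage sketch (added for illustration): query the simple board-description helpers above.
if __name__ == '__main__':
    uno = UnoInfo()
    print(uno.getMainInfo())  # {'0': 13, '1': 3, '2': [3.3, 5]}
    print(uno.getDigitalPins(), uno.getAnalogPins())
    print(uno.getTXSlot(), uno.getRXSlot())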
|
#!/usr/bin/env python3
import paho.mqtt.client as mqtt
import json
import random
import math
import time
import ssl
config_mqtt_broker_ip = "iot.fh-muenster.de"
config_mqtt_client_id = "dummy-receiver-" + str(random.randint(1000, 9999));
config_mqtt_topic = "sensor/60:01:94:4A:AF:7A"
ts_last_message = int(round(time.time() * 1000))
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
client.subscribe(config_mqtt_topic)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
mqtt_c = mqtt.Client(config_mqtt_client_id)
mqtt_c.on_connect = on_connect
mqtt_c.on_message = on_message
mqtt_c.tls_set(ca_certs="ca.pem")
#mqtt_c.tls_insecure_set(True)
mqtt_c.connect(config_mqtt_broker_ip, 8883, 60)
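# Sketch of a matching publisher (added for reference only; kept as comments because
# loop_forever() below blocks, and the broker/topic used here are this file's own config):
#   pub = mqtt.Client("dummy-sender")
#   pub.tls_set(ca_certs="ca.pem")
#   pub.connect(config_mqtt_broker_ip, 8883, 60)
#   pub.publish(config_mqtt_topic, json.dumps({"test": 1}))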
mqtt_c.loop_forever()
|
# Online References used :
# https://github.com/imadmali/movie-scraper/blob/master/MojoLinkExtract.py
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# https://nycdatascience.com/blog/student-works/scraping-box-office-mojo/
# https://www.youtube.com/watch?v=XQgXKtPSzUI
# https://www.youtube.com/watch?v=aIPqt-OdmS0
# https://www.youtube.com/watch?v=XQgXKtPSzUI
from bs4 import BeautifulSoup
import pandas as pd
import os
import requests
import glob
import re
def scrape_data_for_actors():
    # Save the output on the current user's Desktop, inside a folder named
    # 'BoxOfficeMojo2_virti_bipin'.
    file_path = os.path.join(os.environ['USERPROFILE'], 'Desktop')
    file_path = os.path.join(file_path, 'BoxOfficeMojo2_virti_bipin')
if not os.path.exists(str(file_path)):
os.mkdir(str(file_path)) # If path does not exist create the path
os.chdir(file_path) # Change the directory of the file path
    # glob finds all pathnames matching a pattern using Unix shell rules;
    # remove any files left over from a previous run.
    if len(glob.glob("*")) != 0:
        file_list = glob.glob("*")
for file in file_list:
os.remove(file)
# The url of the BoxOffice Mojo to be scraped
url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum=1&sort=sumgross&order=DESC&&p=.htm'
pages_data = [] # List to store the pages data
total_pages = []
response = requests.get(url) # Get the response of the url after passing the user input
    # Parse the HTML with BeautifulSoup so we can walk the pagination links.
    soup = BeautifulSoup(response.content, 'html.parser')
for page in soup.find_all('a', href=lambda href: href and "page" in href): # find the href in a tags
pages_data.append(page['href']) # append the data in the pages_data list
for page in pages_data:
if 'page' in page: # If "page" found in href
index = page.find('page') # Take the index of that page if found
# print("Index", index)
if page[index:index + 10] not in total_pages:
                # For extracting the total number of pages: collect each distinct
                # page marker (e.g. "page=2") so we can iterate over every page.
                total_pages.append(page[index:index + 10])
# print("Total Pages", total_pages)
average_gross_list = []
for num in range(1, len(total_pages) + 1, 1):
try:
url = 'https://www.boxofficemojo.com/people/?view=Actor&pagenum={}&sort=sumgross&order=DESC&&p=.htm'.format(num) # This one works well
# Get the Response
print("Page number {}".format(num))
response_from_url = requests.get(url)
html = response_from_url.text
            # lxml is a fast, C-backed parser for XML and HTML documents.
            soup = BeautifulSoup(html, 'lxml')
table = soup.find('table', {"cellspacing": "1"})
# Using dataframes
df = pd.read_html(str(table),skiprows=1)
df = df[0]
df = df.iloc[:, :6] # This is used to slice the dataframe to cut off the date sections.
df.columns = ['rank', 'person', 'total gross', 'number of movies', 'Average', 'number 1 picture']
df['id'] = ''
id_list = []
title_list = df['rank'].tolist()
new_index = [i for i in range(1,len(title_list)+1)]
df.index = new_index
            for link in soup.findAll('a', {'href': re.compile(r"\?id=")}):
id_list.append(link.get('href'))
id_list = [x.split('=')[1] for x in id_list]
id_list = [x.split('.')[0] for x in id_list]
id_list = id_list[1:]
id_dict = dict(zip(title_list, id_list))
for index in df.index:
df.loc[index, 'id'] = id_dict[df.loc[index, 'rank']]
df.to_csv("actors.csv", index=False, mode='a')
except Exception as e:
print(e)
continue
file_list = glob.glob("*.csv")
df_container = []
for file in file_list:
df = pd.read_csv(file)
df_container.append(df)
df_combined = pd.concat(df_container)
df_combined.to_csv("actors.txt", index=False, sep="\t")
df = pd.read_csv("actors.txt", sep="\t")
# Data Cleaning
df['Average'] = df['Average'].apply(lambda x: x.replace('$', '')) # replace dollar signs
df['Average'] = df['Average'].apply(lambda x: x.replace(',', '')) # replace commas
df['Average'] = pd.to_numeric(df['Average'], errors='coerce')
df = df.sort_values(by='Average', ascending=False)
actor_with_highest_average_earning = df.iloc[0]['person']
print("actor(s) with the highest average earnings per movie is {}".format(actor_with_highest_average_earning))
new_df = pd.read_csv("actors.txt", sep="\t")
new_df['number of movies'] = pd.to_numeric(new_df['number of movies'], errors='coerce')
actor_most_movies = new_df.loc[new_df['number of movies'].idxmax()].person
print("actor(s) with the maximum number of movies is {}".format(actor_most_movies))
if __name__ == '__main__':
scrape_data_for_actors()
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class PyPyqt5(SIPPackage):
"""PyQt is a set of Python v2 and v3 bindings for The Qt Company's Qt
application framework and runs on all platforms supported by Qt including
Windows, OS X, Linux, iOS and Android. PyQt5 supports Qt v5."""
homepage = "https://www.riverbankcomputing.com/software/pyqt/intro"
url = "https://www.riverbankcomputing.com/static/Downloads/PyQt5/5.13.0/PyQt5_gpl-5.13.0.tar.gz"
list_url = "https://www.riverbankcomputing.com/software/pyqt/download5"
sip_module = 'PyQt5.sip'
import_modules = [
'PyQt5', 'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtHelp',
'PyQt5.QtMultimedia', 'PyQt5.QtMultimediaWidgets', 'PyQt5.QtNetwork',
'PyQt5.QtOpenGL', 'PyQt5.QtPrintSupport', 'PyQt5.QtQml',
'PyQt5.QtQuick', 'PyQt5.QtSvg', 'PyQt5.QtTest', 'PyQt5.QtWebChannel',
'PyQt5.QtWebSockets', 'PyQt5.QtWidgets', 'PyQt5.QtXml',
'PyQt5.QtXmlPatterns'
]
version('5.13.0', sha256='0cdbffe5135926527b61cc3692dd301cd0328dd87eeaf1313e610787c46faff9')
version('5.12.3', sha256='0db0fa37debab147450f9e052286f7a530404e2aaddc438e97a7dcdf56292110')
variant('qsci', default=False, description='Build with QScintilla python bindings')
# Without opengl support, I got the following error:
# sip: QOpenGLFramebufferObject is undefined
depends_on('qt@5:+opengl')
depends_on('python@2.6:', type=('build', 'run'))
depends_on('py-enum34', type=('build', 'run'), when='^python@:3.3')
depends_on('qscintilla', when='+qsci')
# For building Qscintilla python bindings
resource(name='qscintilla',
url='https://www.riverbankcomputing.com/static/Downloads/QScintilla/2.10.2/QScintilla_gpl-2.10.2.tar.gz',
sha256='14b31d20717eed95ea9bea4cd16e5e1b72cee7ebac647cba878e0f6db6a65ed0',
destination='spack-resource-qscintilla',
when='^qscintilla@2.10.2'
)
# https://www.riverbankcomputing.com/static/Docs/PyQt5/installation.html
def configure_args(self):
args = [
'--pyuic5-interpreter', self.spec['python'].command.path,
'--sipdir', self.prefix.share.sip.PyQt5,
'--stubsdir', join_path(site_packages_dir, 'PyQt5'),
]
if '+qsci' in self.spec:
args.extend(['--qsci-api-destdir', self.prefix.share.qsci])
return args
@run_after('install')
def make_qsci(self):
if '+qsci' in self.spec:
rsrc_py_path = os.path.join(
self.stage.source_path,
'spack-resource-qscintilla/QScintilla_gpl-' +
str(self.spec['qscintilla'].version), 'Python')
with working_dir(rsrc_py_path):
pydir = join_path(site_packages_dir, 'PyQt5')
python = self.spec['python'].command
python('configure.py', '--pyqt=PyQt5',
'--sip=' + self.prefix.bin.sip,
'--qsci-incdir=' +
self.spec['qscintilla'].prefix.include,
'--qsci-libdir=' + self.spec['qscintilla'].prefix.lib,
'--qsci-sipdir=' + self.prefix.share.sip.PyQt5,
'--apidir=' + self.prefix.share.qsci,
'--destdir=' + pydir,
'--pyqt-sipdir=' + self.prefix.share.sip.PyQt5,
'--sip-incdir=' + python_include_dir,
'--stubsdir=' + pydir)
# Fix build errors
# "QAbstractScrollArea: No such file or directory"
# "qprinter.h: No such file or directory"
# ".../Qsci.so: undefined symbol: _ZTI10Qsci...."
qscipro = FileFilter('Qsci/Qsci.pro')
link_qscilibs = 'LIBS += -L' + self.prefix.lib +\
' -lqscintilla2_qt5'
qscipro.filter('TEMPLATE = lib',
'TEMPLATE = lib\nQT += widgets' +
'\nQT += printsupport\n' + link_qscilibs)
make()
# Fix installation prefixes
makefile = FileFilter('Makefile')
makefile.filter(r'\$\(INSTALL_ROOT\)', '')
makefile = FileFilter('Qsci/Makefile')
makefile.filter(r'\$\(INSTALL_ROOT\)', '')
make('install')
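
# Installation sketch (added note; the spec below is an example, not part of this package):
#   spack install py-pyqt5@5.13.0 +qsci ^qscintilla@2.10.2
# The +qsci variant triggers make_qsci() above to build the QScintilla Python bindings.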
|
AUTHPLUGIN_FIXTURE = '{"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1alpha1","spec":{},"status":{"token":"test"}}'
|
import sqlite3
from flask import Flask, jsonify, request
app = Flask(__name__)
app.debug = True
api_key = "RANDOM ACCESS KEY HERE"
def CheckAPIKey(key):
if key == api_key:
return True
else:
return False
@app.route('/')
def HomeDir():
return jsonify({'msg': "invalid_endpoint"})
@app.route('/api/v1/hwid')
def HwidDir():
db = sqlite3.connect('auth.db')
c = db.cursor()
opt = request.args.get('type')
hwid = request.args.get('hwid')
key = request.args.get('apikey')
if opt == 'add':
two_step = CheckAPIKey(key)
if two_step == True:
            # Use a parameterized query so the hwid value cannot inject SQL.
            c.execute('INSERT INTO hwids VALUES (?)', (hwid,))
db.commit()
return jsonify({'msg': "success"})
if two_step == False:
return jsonify({'msg': "invalid_apikey"})
if opt == 'check':
c.execute(f"SELECT * FROM hwids WHERE hwid='{hwid}'")
if hwid in str(c.fetchall()):
return jsonify({'msg': "success"})
else:
return jsonify({'msg': "invalid_hwid"})
else:
return jsonify({'msg': "invalid_type"})
if __name__ == "__main__":
app.run()
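
# Client sketch (added for illustration; host/port and the hwid value are assumptions,
# and app.run() above blocks, so this stays commented out):
#   import requests
#   requests.get("http://127.0.0.1:5000/api/v1/hwid",
#                params={"type": "check", "hwid": "ABC-123", "apikey": api_key})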
|
import os
import shutil
import subprocess
from pathlib import Path
from dotenv import dotenv_values
COMPOSAPY_ROOT_DIR = Path(__file__).parent
COMP_APP_PROD_DIR = COMPOSAPY_ROOT_DIR.parent.parent.joinpath("Product")
DATALAB_SERVICE_STATIC_DIR = COMP_APP_PROD_DIR.joinpath(
"CompAnalytics.DataLabService", "static"
)
TF_EXE_PATH = Path(dotenv_values(".local.env").get("TF_EXE_PATH"))
class CopyFileToSolutionException(Exception):
pass
class TfsException(Exception):
pass
def grant_permissions(path: Path) -> None:
subprocess.check_output(
["icacls", f"{path}", "/grant", "Everyone:F", "/t"],
stderr=subprocess.STDOUT,
)
def tfs_command(cwd: Path, *args) -> None:
run = subprocess.run([f"{TF_EXE_PATH}", *args], cwd=cwd, capture_output=True)
if run.returncode > 1:
raise TfsException(
f"Return code greater than 1, failed tf.exe with args: {args} and cwd: {cwd}."
f"Return Code: {run.returncode}\n"
f"StdOut: {run.stdout}\n"
f"StdErr: {run.stderr}\n"
)
def update_composapy_readme_artifacts(readme_artifacts: list[Path]) -> None:
for artifact in readme_artifacts:
destination_path = DATALAB_SERVICE_STATIC_DIR.joinpath(artifact.name)
shutil.copy(artifact, destination_path)
grant_permissions(destination_path)
def update_composapy_wheel(wheel: Path) -> None:
wheel_dest = DATALAB_SERVICE_STATIC_DIR.joinpath("wheels")
old_wheels = sorted(wheel_dest.glob("composapy-*.whl"))
# add new composapy wheel to local save_dir
try:
shutil.copy(wheel, wheel_dest)
grant_permissions(wheel_dest)
except Exception:
raise CopyFileToSolutionException(
f"Failed to copy wheel from {wheel} to {wheel_dest}."
)
# add new composapy wheel to tfs tracking
tfs_command(wheel_dest, "add", wheel.name)
if len(old_wheels) == 0:
return
# remove old composapy wheels from tfs tracking and local save_dir after new wheel was
# successfully loaded
# ...
# ...
# ... tfs is dumb
for old_wheel in old_wheels:
if old_wheel.name != wheel.name:
try:
tfs_command(wheel_dest, "delete", old_wheel.name)
except Exception:
pass
try:
tfs_command(wheel_dest, "undo", old_wheel.name)
except Exception:
pass
try:
os.remove(Path(old_wheel))
except Exception:
pass # if tfs did not fail to remove the file, this is expected
def update_static_wheel_deps() -> None:
tfs_command(DATALAB_SERVICE_STATIC_DIR, "add", "*", "/recursive")
def update_composapy_tests(tests: Path) -> None:
tests_dest = COMP_APP_PROD_DIR.joinpath("UnitTests", "TestData", "composapy")
# add/replace tests in local save_dir
try:
shutil.copytree(tests, tests_dest, dirs_exist_ok=True)
except Exception:
raise CopyFileToSolutionException(
f"Failed to copy tests from {tests} to {tests_dest}."
)
grant_permissions(tests_dest)
# add specific tfs test file dependencies here
tfs_command(tests_dest, "add", "test_*.py") # tests/test_*.py
tfs_command(tests_dest, "add", "conftest.py") # tests/conftest.py
tfs_command(tests_dest, "add", "__init__.py") # tests/__init__.py
tfs_command(tests_dest, "add", ".test.env") # tests/.test.env
tfs_command(tests_dest, "add", "TestFiles", "/recursive") # tests/TestFiles/*
## cleanup unwanted cache from previous command
tfs_command(tests_dest.joinpath("TestFiles"), "undo", ".pytest_cache", "/recursive")
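
# Example driver (sketch; the dist path and call order are assumptions about how this
# module is meant to be used, not something defined above):
# if __name__ == "__main__":
#     wheel = next(COMPOSAPY_ROOT_DIR.joinpath("dist").glob("composapy-*.whl"))
#     update_composapy_wheel(wheel)
#     update_static_wheel_deps()
#     update_composapy_tests(COMPOSAPY_ROOT_DIR.joinpath("tests"))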
|
"""
Ridge regression
----------------
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mglearn
from IPython.display import display
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
from linregress import lr
# More on theory: https://en.wikipedia.org/wiki/Tikhonov_regularization
X, y = mglearn.datasets.load_extended_boston()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
ridge = Ridge().fit(X_train, y_train)
print('Train score: {:.2f}'.format(ridge.score(X_train, y_train)))  # .89
print('Test score: {:.2f}'.format(ridge.score(X_test, y_test)))  # .75
# Ridge intentionally dampens the effect of each attribute on the
# prediction to avoid overfitting
ridge10 = Ridge(alpha=10).fit(X_train, y_train)
print('Train score (alpha=10): {:.2f}'.format(ridge10.score(X_train, y_train)))  # .79
print('Test score (alpha=10): {:.2f}'.format(ridge10.score(X_test, y_test)))  # .64
# You can add a regularization term (alpha) to fit to your particular model
# In this case alpha=10 doesn't help
ridge01 = Ridge(alpha=0.1).fit(X_train, y_train)
print('Train score (alpha=0.1): {:.2f}'.format(ridge01.score(X_train, y_train)))  # .93
print('Test score (alpha=0.1): {:.2f}'.format(ridge01.score(X_test, y_test)))  # .77
# It appears lowering the alpha to 0.1 was useful here
# plt.plot(ridge.coef_, 's', label='Ridge alpha=1')
# plt.plot(ridge10.coef_, '^', label='Ridge alpha=10')
# plt.plot(ridge01.coef_, 'v', label='Ridge alpha=0.1')
# plt.plot(lr.coef_, 'o', label='Linear regression')
# plt.xlabel('Coefficient index')
# plt.ylabel('Coefficient magnitude')
# plt.hlines(0, 0, len(lr.coef_))
# plt.ylim(-25, 25)
# plt.legend()
# plt.show()
# Plot shows how larger alphas restrict the coefficient
# for linear regression to a tighter bound than smaller
# alphas (lin regress -> alpha = 0)
# mglearn.plots.plot_ridge_n_samples()
# plt.show()
# Comparing test and training accuracy of linear regression to ridge regression
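
# Sketch (added for illustration): sweep a few alpha values to make the
# regularization trade-off discussed above explicit.
for alpha in [0.01, 0.1, 1, 10, 100]:
    model = Ridge(alpha=alpha).fit(X_train, y_train)
    print('alpha={:<6} train={:.2f} test={:.2f}'.format(
        alpha, model.score(X_train, y_train), model.score(X_test, y_test)))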
|
from sympy import Symbol, gamma, oo, nan, zoo, factorial, sqrt, Rational, log,\
polygamma, EulerGamma, pi, uppergamma, S, expand_func, loggamma, sin, cos, \
O, cancel
x = Symbol('x')
y = Symbol('y')
n = Symbol('n', integer=True)
def test_gamma():
assert gamma(nan) == nan
assert gamma(oo) == oo
assert gamma(-100) == zoo
assert gamma(0) == zoo
assert gamma(1) == 1
assert gamma(2) == 1
assert gamma(3) == 2
assert gamma(102) == factorial(101)
assert gamma(Rational(1,2)) == sqrt(pi)
assert gamma(Rational(3, 2)) == Rational(1, 2)*sqrt(pi)
assert gamma(Rational(5, 2)) == Rational(3, 4)*sqrt(pi)
assert gamma(Rational(7, 2)) == Rational(15, 8)*sqrt(pi)
assert gamma(Rational(-1, 2)) == -2*sqrt(pi)
assert gamma(Rational(-3, 2)) == Rational(4, 3)*sqrt(pi)
assert gamma(Rational(-5, 2)) == -Rational(8, 15)*sqrt(pi)
assert gamma(Rational(-15, 2)) == Rational(256, 2027025)*sqrt(pi)
assert gamma(x).diff(x) == gamma(x)*polygamma(0, x)
assert gamma(x - 1).expand(func=True) == gamma(x)/(x-1)
assert gamma(x + 2).expand(func=True, mul=False) == x*(x+1)*gamma(x)
assert expand_func(gamma(x + Rational(3, 2))) == \
(x + Rational(1, 2))*gamma(x + Rational(1, 2))
assert expand_func(gamma(x - Rational(1, 2))) == \
gamma(Rational(1, 2) + x)/(x - Rational(1, 2))
def test_gamma_series():
assert gamma(x + 1).series(x, 0, 3) == \
1 - x*EulerGamma + x**2*EulerGamma**2/2 + pi**2*x**2/12 + O(x**3)
def test_lowergamma():
pass
def test_uppergamma():
assert uppergamma(4, 0) == 6
def test_polygamma():
assert polygamma(n, nan) == nan
assert polygamma(0, oo) == oo
assert polygamma(1, oo) == 0
assert polygamma(5, oo) == 0
assert polygamma(0, -9) == zoo
assert polygamma(0, -9) == zoo
assert polygamma(0, -1) == zoo
assert polygamma(0, 0) == zoo
assert polygamma(0, 1) == -EulerGamma
assert polygamma(0, 7) == Rational(49, 20) - EulerGamma
assert polygamma(1, 1) == pi**2/6
assert polygamma(1, 2) == pi**2/6 - 1
assert polygamma(1, 3) == pi**2/6 - Rational(5, 4)
assert polygamma(3, 1) == pi**4 / 15
assert polygamma(3, 5) == 6*(Rational(-22369,20736) + pi**4/90)
assert polygamma(5, 1) == 8 * pi**6 / 63
assert polygamma(3, 7*x).diff(x) == 7*polygamma(4, 7*x)
def test_polygamma_expand_func():
assert polygamma(0, x).expand(func=True) == polygamma(0, x)
assert polygamma(0, 2*x).expand(func=True) == \
polygamma(0, x)/2 + polygamma(0, Rational(1, 2) + x)/2 + log(2)
assert polygamma(1, 2*x).expand(func=True) == \
polygamma(1, x)/4 + polygamma(1, Rational(1, 2) + x)/4
assert polygamma(2, x).expand(func=True) == \
polygamma(2, x)
assert polygamma(0, -1 + x).expand(func=True) == \
polygamma(0, x) - 1/(x - 1)
assert polygamma(0, 1 + x).expand(func=True) == \
1/x + polygamma(0, x )
assert polygamma(0, 2 + x).expand(func=True) == \
1/x + 1/(1 + x) + polygamma(0, x)
assert polygamma(0, 3 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x)
assert polygamma(0, 4 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x) + 1/(3 + x)
assert polygamma(1, 1 + x).expand(func=True) == \
polygamma(1, x) - 1/x**2
assert polygamma(1, 2 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2
assert polygamma(1, 3 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - 1/(2 + x)**2
assert polygamma(1, 4 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - \
1/(2 + x)**2 - 1/(3 + x)**2
assert polygamma(0, x + y).expand(func=True) == \
polygamma(0, x + y)
assert polygamma(1, x + y).expand(func=True) == \
polygamma(1, x + y)
assert polygamma(1, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(1, y + 4*x) - 1/(y + 4*x)**2 - \
1/(1 + y + 4*x)**2 - 1/(2 + y + 4*x)**2
assert polygamma(3, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4 - \
6/(1 + y + 4*x)**4 - 6/(2 + y + 4*x)**4
assert polygamma(3, 4*x + y + 1).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4
e = polygamma(3, 4*x + y + S(3)/2)
assert e.expand(func=True) == e
e = polygamma(3, x + y + S(3)/4)
assert e.expand(func = True, basic = False) == e
def test_loggamma():
s1 = loggamma(1/(x+sin(x))+cos(x)).nseries(x,n=4)
s2 = (-log(2*x)-1)/(2*x) - log(x/pi)/2 + (4-log(2*x))*x/24 + O(x**2)
assert cancel(s1 - s2).removeO() == 0
s1 = loggamma(1/x).series(x)
s2 = (1/x-S(1)/2)*log(1/x) - 1/x + log(2*pi)/2 + \
x/12 - x**3/360 + x**5/1260 + O(x**7)
assert cancel(s1 - s2).removeO() == 0
def tN(N, M):
assert loggamma(1/x)._eval_nseries(x,n=N,logx=None).getn() == M
tN(0, 0)
tN(1, 1)
tN(2, 3)
tN(3, 3)
tN(4, 5)
tN(5, 5)
def test_polygamma_expansion():
# A. & S., pa. 259 and 260
assert polygamma(0, 1/x).nseries(x, n=3) \
== -log(x) - x/2 - x**2/12 + O(x**4)
assert polygamma(1, 1/x).series(x, n=5) \
== x + x**2/2 + x**3/6 + O(x**5)
assert polygamma(3, 1/x).nseries(x, n=8) \
== 2*x**3 + 3*x**4 + 2*x**5 - x**7 + 4*x**9/3 + O(x**11)
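
def test_gamma_recurrence_added():
    # Added sketch: the recurrence Gamma(x + 1) == x*Gamma(x) that the expand_func
    # checks above rely on, stated directly.
    assert expand_func(gamma(x + 1)) == x*gamma(x)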
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict
import re
import string
from jina.hub.crafters.nlp.Sentencizer import Sentencizer
import pickle
# class Splitter(Sentencizer):
# count = 0
# separator = "|"
#
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
#
# def craft(self, text: str, *args, **kwargs) -> Dict:
# print('================== test2')
# return dict(text=text, meta_info=text[:5].encode("utf-8"))
class SentenceSplitter(Sentencizer):
count = 0
separator = "|"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def craft(self, text: str, *args, **kwargs) -> Dict:
results = []
ret = text
with open("tokenizer/eng_sentence_tokenizer.pkl", 'rb') as f:
sent_tokenizer = pickle.load(f)
for ci, (s, e) in enumerate(sent_tokenizer.span_tokenize(ret)):
f = ret[s:e]
f = f[:self.max_sent_len]
if len(f) > self.min_sent_len:
results.append(dict(
text=f,
offset=ci,
weight=1.0 if self.uniform_weight else len(f) / len(text),
location=[s, e],
meta_info='testID'.encode("utf-8")
))
return results
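
# Usage sketch (added note; assumes the pickled tokenizer referenced above exists and
# that the Sentencizer base class accepts these constructor arguments):
#   splitter = SentenceSplitter(min_sent_len=3, max_sent_len=128)
#   for chunk in splitter.craft("First sentence. Second sentence."):
#       print(chunk["offset"], chunk["location"], chunk["text"])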
|
import os
import os.path
import requests
import time
from pathlib import Path
from talon import ctrl, ui, Module, Context, actions, clip
import tempfile
# Courtesy of https://github.com/anonfunc/talon-user/blob/master/apps/jetbrains.py
extendCommands = []
# Each IDE gets its own port, as otherwise you wouldn't be able
# to run two at the same time and switch between them.
# Note that MPS and IntelliJ ultimate will conflict...
port_mapping = {
"com.google.android.studio": 8652,
"com.jetbrains.AppCode": 8655,
"com.jetbrains.CLion": 8657,
"com.jetbrains.datagrip": 8664,
"com.jetbrains.goland-EAP": 8659,
"com.jetbrains.goland": 8659,
"com.jetbrains.intellij-EAP": 8653,
"com.jetbrains.intellij.ce": 8654,
"com.jetbrains.intellij": 8653,
"com.jetbrains.PhpStorm": 8662,
"com.jetbrains.pycharm": 8658,
"com.jetbrains.rider": 8660,
"com.jetbrains.rubymine": 8661,
"com.jetbrains.rubymine-EAP": 8661,
"com.jetbrains.WebStorm": 8663,
"google-android-studio": 8652,
"idea64.exe": 8654,
"IntelliJ IDEA": 8654,
"jetbrains-appcode": 8655,
"jetbrains-clion": 8657,
"jetbrains-datagrip": 8664,
"jetbrains-goland-eap": 8659,
"jetbrains-goland": 8659,
"jetbrains-idea-ce": 8654,
"jetbrains-idea-eap": 8654,
"jetbrains-idea": 8654,
"jetbrains-phpstorm": 8662,
"jetbrains-pycharm-ce": 8658,
"jetbrains-pycharm": 8658,
"jetbrains-rider": 8660,
"JetBrains Rider": 8660,
"jetbrains-rubymine": 8661,
"jetbrains-rubymine-eap": 8661,
"jetbrains-studio": 8652,
"jetbrains-webstorm": 8663,
"RubyMine": 8661,
"RubyMine-EAP": 8661,
"PyCharm": 8658,
"pycharm64.exe": 8658,
"WebStorm": 8663,
"webstorm64.exe": 8663,
}
def _get_nonce(port, file_prefix):
file_name = file_prefix + str(port)
try:
with open(os.path.join(tempfile.gettempdir(), file_name), "r") as fh:
return fh.read()
except FileNotFoundError as e:
try:
home = str(Path.home())
with open(os.path.join(home, file_name), "r") as fh:
return fh.read()
except FileNotFoundError as eb:
print(f"Could not find {file_name} in tmp or home")
return None
except IOError as e:
print(e)
return None
def send_idea_command(cmd):
print("Sending {}".format(cmd))
active_app = ui.active_app()
bundle = active_app.bundle or active_app.name
port = port_mapping.get(bundle, None)
nonce = _get_nonce(port, ".vcidea_") or _get_nonce(port, "vcidea_")
proxies = {"http": None, "https": None}
print(f"sending {bundle} {port} {nonce}")
if port and nonce:
response = requests.get(
"http://localhost:{}/{}/{}".format(port, nonce, cmd),
proxies=proxies,
timeout=(0.05, 3.05),
)
response.raise_for_status()
return response.text
def get_idea_location():
return send_idea_command("location").split()
def idea_commands(commands):
command_list = commands.split(",")
print("executing jetbrains", commands)
global extendCommands
extendCommands = command_list
for cmd in command_list:
if cmd:
send_idea_command(cmd.strip())
time.sleep(0.1)
ctx = Context()
mod = Module()
mod.apps.jetbrains = "app.name: /jetbrains/"
mod.apps.jetbrains = "app.name: IntelliJ IDEA"
mod.apps.jetbrains = "app.name: PyCharm"
mod.apps.jetbrains = "app.name: RubyMine"
mod.apps.jetbrains = "app.name: RubyMine-EAP"
mod.apps.jetbrains = """
os: mac
and app.bundle: com.google.android.studio
"""
# windows
mod.apps.jetbrains = "app.name: idea64.exe"
mod.apps.jetbrains = "app.name: PyCharm64.exe"
mod.apps.jetbrains = "app.name: pycharm64.exe"
mod.apps.jetbrains = "app.name: webstorm64.exe"
mod.apps.jetbrains = """
os: mac
and app.bundle: com.jetbrains.pycharm
"""
mod.apps.jetbrains = """
os: windows
and app.name: JetBrains Rider
os: windows
and app.exe: rider64.exe
"""
@mod.action_class
class Actions:
def idea(commands: str):
"""Send a command to Jetbrains product"""
idea_commands(commands)
def idea_grab(times: int):
"""Copies specified number of words to the left"""
old_clip = clip.get()
try:
original_line, original_column = get_idea_location()
for _ in range(times):
send_idea_command("action EditorSelectWord")
send_idea_command("action EditorCopy")
send_idea_command("goto {} {}".format(original_line, original_column))
send_idea_command("action EditorPaste")
finally:
clip.set(old_clip)
global extendCommands
extendCommands = []
ctx.matches = r"""
app: jetbrains
"""
@ctx.action_class("app")
class AppActions:
def tab_next():
actions.user.idea("action NextTab")
def tab_previous():
actions.user.idea("action PreviousTab")
def tab_close():
actions.user.idea("action CloseContent")
def tab_reopen():
actions.user.idea("action ReopenClosedTab")
@ctx.action_class("code")
class CodeActions:
# talon code actions
def toggle_comment():
actions.user.idea("action CommentByLineComment")
@ctx.action_class("edit")
class EditActions:
# talon edit actions
def copy():
actions.user.idea("action EditorCopy")
def cut():
actions.user.idea("action EditorCut")
def delete():
actions.user.idea("action EditorBackSpace")
def paste():
actions.user.idea("action EditorPaste")
def find_next():
actions.user.idea("action FindNext")
def find_previous():
actions.user.idea("action FindPrevious")
def find(text: str = None):
actions.user.idea("action Find")
def line_clone():
actions.user.idea("action EditorDuplicate")
def line_swap_down():
actions.user.idea("action MoveLineDown")
def line_swap_up():
actions.user.idea("action MoveLineUp")
def indent_more():
actions.user.idea("action EditorIndentLineOrSelection")
def indent_less():
actions.user.idea("action EditorUnindentSelection")
def select_line(n: int = None):
actions.user.idea("action EditorSelectLine")
def select_word():
actions.user.idea("action EditorSelectWord")
def select_all():
actions.user.idea("action $SelectAll")
def file_start():
actions.user.idea("action EditorTextStart")
def file_end():
actions.user.idea("action EditorTextEnd")
def extend_file_start():
actions.user.idea("action EditorTextStartWithSelection")
def extend_file_end():
actions.user.idea("action EditorTextEndWithSelection")
def jump_line(n: int):
actions.user.idea("goto {} 0".format(n))
# move the cursor to the first nonwhite space character of the line
actions.user.idea("action EditorLineEnd")
actions.user.idea("action EditorLineStart")
@ctx.action_class("win")
class WinActions:
def filename():
title = actions.win.title()
result = title.split(" ")
# iterate over reversed result
# to support titles such as
# Class.Library2 – a.js
for word in reversed(result):
if "." in word:
return word
return ""
@ctx.action_class("user")
class UserActions:
def tab_jump(number: int):
# depends on plugin GoToTabs
if number < 10:
actions.user.idea("action GoToTab{}".format(number))
def extend_until_line(line: int):
actions.user.idea("extend {}".format(line))
def select_range(line_start: int, line_end: int):
        # if it's a single line, select the entire thing including the ending new-line
if line_start == line_end:
actions.user.idea("goto {} 0".format(line_start))
actions.user.idea("action EditorSelectLine"),
else:
actions.user.idea("range {} {}".format(line_start, line_end))
def extend_camel_left():
actions.user.idea("action EditorPreviousWordInDifferentHumpsModeWithSelection")
def extend_camel_right():
actions.user.idea("action EditorNextWordInDifferentHumpsModeWithSelection")
def camel_left():
actions.user.idea("action EditorPreviousWordInDifferentHumpsMode")
def camel_right():
actions.user.idea("action EditorNextWordInDifferentHumpsMode")
def line_clone(line: int):
actions.user.idea("clone {}".format(line))
|
"""
2D Distributions
================
Some plots visualize a transformation of the original data set. Use a
stat parameter to choose a common transformation to visualize.
Each stat creates additional variables to map aesthetics to. These
variables use a common ..name.. syntax.
Look at the examples of 2D distributions below.
"""
# sphinx_gallery_thumbnail_path = "gallery_py\_stats\_2d_distributions.png"
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
# %%
df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv')
# %%
w, h = 400, 300
p = ggplot(df, aes('cty', 'hwy')) + ggsize(w, h)
p11 = p + geom_bin2d() + ggtitle('geom="bin2d" + default stat')
p12 = p + geom_point(aes(color='..count..'), stat='bin2d', size=3, shape=15) + \
ggtitle('geom="point" + stat="bin2d"')
p21 = p + geom_density2d() + ggtitle('geom="density2d" + default stat')
p22 = p + geom_point(stat='density2d', size=.5) + ggtitle('geom="point" + stat="density2d"')
bunch = GGBunch()
bunch.add_plot(p11, 0, 0)
bunch.add_plot(p12, w, 0)
bunch.add_plot(p21, 0, h)
bunch.add_plot(p22, w, h)
bunch
|
from dataclasses import replace
from dataclass_abc import dataclass_abc
from rxbp.indexed.indexedflowable import IndexedFlowable
from rxbp.indexed.indexedsharedflowable import IndexedSharedFlowable
from rxbp.indexed.mixins.indexedflowablemixin import IndexedFlowableMixin
from rxbp.typing import ValueType
@dataclass_abc
class IndexedSharedFlowableImpl(IndexedSharedFlowable[ValueType]):
underlying: IndexedFlowableMixin
def _copy(
self,
is_shared: bool = None,
**kwargs,
):
return replace(self, **kwargs)
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#@title Input protein sequence(s), then hit `Runtime` -> `Run all`
#from google.colab import files
import os.path
import re
import hashlib
import random
def add_hash(x,y):
return x+"_"+hashlib.sha1(y.encode()).hexdigest()[:5]
with open("protein") as f:
query_sequence = f.read()
#query_sequence = '' #@param {type:"string"}
#@markdown - Use `:` to specify inter-protein chainbreaks for **modeling complexes** (supports homo- and hetro-oligomers). For example **PI...SK:PI...SK** for a mono-dimer
# remove whitespaces
query_sequence = "".join(query_sequence.split())
jobname = 'test' #@param {type:"string"}
# remove whitespaces
basejobname = "".join(jobname.split())
basejobname = re.sub(r'\W+', '', basejobname)
jobname = add_hash(basejobname, query_sequence)
while os.path.isfile(f"{jobname}.csv"):
jobname = add_hash(basejobname, ''.join(random.sample(query_sequence,len(query_sequence))))
with open(f"{jobname}.csv", "w") as text_file:
text_file.write(f"id,sequence\n{jobname},{query_sequence}")
queries_path=f"{jobname}.csv"
# number of models to use
use_amber = False #@param {type:"boolean"}
use_templates = False #@param {type:"boolean"}
#save_to_google_drive = False #@param {type:"boolean"}
#@markdown - if the save_to_google_drive option was selected, the result zip will be uploaded to your Google Drive
#@markdown ### Advanced settings
msa_mode = "MMseqs2 (UniRef+Environmental)" #@param ["MMseqs2 (UniRef+Environmental)", "MMseqs2 (UniRef only)","single_sequence","custom"]
model_type = "auto" #@param ["auto", "AlphaFold2-ptm", "AlphaFold2-multimer"]
#@markdown - "auto" = protein structure prediction using "AlphaFold2-ptm" and complex prediction "AlphaFold-multimer". For complexes "AlphaFold-multimer" and "AlphaFold-ptm" can be used.
pair_mode = "unpaired+paired" #@param ["unpaired+paired","paired","unpaired"] {type:"string"}
#@markdown - "unpaired+paired" = pair sequences from same species and add unpaired MSA, "unpaired" = generate seperate MSA for each chain, "paired" - only use sequences that were sucessfully paired.
num_recycles = 1 #@param [1,3,6,12,24,48] {type:"raw"}
#@markdown Don't forget to hit `Runtime` -> `Run all` after updating the form.
# decide which a3m to use
if msa_mode.startswith("MMseqs2"):
a3m_file = f"{jobname}.a3m"
elif msa_mode == "custom":
a3m_file = f"{jobname}.custom.a3m"
if not os.path.isfile(a3m_file):
custom_msa_dict = files.upload()
custom_msa = list(custom_msa_dict.keys())[0]
header = 0
import fileinput
for line in fileinput.FileInput(custom_msa,inplace=1):
if line.startswith(">"):
header = header + 1
if not line.rstrip():
continue
if line.startswith(">") == False and header == 1:
query_sequence = line.rstrip()
print(line, end='')
os.rename(custom_msa, a3m_file)
queries_path=a3m_file
print(f"moving {custom_msa} to {a3m_file}")
else:
a3m_file = f"{jobname}.single_sequence.a3m"
with open(a3m_file, "w") as text_file:
text_file.write(">1\n%s" % query_sequence)
# Removed
#if save_to_google_drive:
# from pydrive.drive import GoogleDrive
# from pydrive.auth import GoogleAuth
# from google.colab import auth
# from oauth2client.client import GoogleCredentials
# auth.authenticate_user()
# gauth = GoogleAuth()
# gauth.credentials = GoogleCredentials.get_application_default()
# drive = GoogleDrive(gauth)
# print("You are logged into Google Drive and are good to go!")
# In[ ]:
#@title Run Prediction
import sys
from colabfold.download import download_alphafold_params, default_data_dir
from colabfold.utils import setup_logging
from colabfold.batch import get_queries, run, set_model_type
from colabfold.colabfold import plot_protein
from pathlib import Path
import matplotlib.pyplot as plt
# For some reason we need that to get pdbfixer to import
if use_amber and '/usr/local/lib/python3.7/site-packages/' not in sys.path:
sys.path.insert(0, '/usr/local/lib/python3.7/site-packages/')
def prediction_callback(unrelaxed_protein, length, prediction_result, input_features):
fig = plot_protein(unrelaxed_protein, Ls=length, dpi=100)
plt.show()
plt.close()
result_dir="."
setup_logging(Path(".").joinpath("log.txt"))
queries, is_complex = get_queries(queries_path)
model_type = set_model_type(is_complex, model_type)
download_alphafold_params(model_type, Path("."))
run(
queries=queries,
result_dir=result_dir,
use_templates=use_templates,
use_amber=use_amber,
msa_mode=msa_mode,
model_type=model_type,
num_models=5,
num_recycles=num_recycles,
model_order=[1, 2, 3, 4, 5],
is_complex=is_complex,
data_dir=Path("."),
keep_existing_results=False,
recompile_padding=1.0,
rank_by="auto",
pair_mode=pair_mode,
stop_at_score=float(100),
prediction_callback=prediction_callback,
)
# In[ ]:
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import time
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.core.util import CLIError
from azure.cli.testsdk.base import execute
from azure.cli.testsdk.exceptions import CliTestError
from azure.cli.testsdk import (
JMESPathCheck,
JMESPathCheckExists,
JMESPathCheckGreaterThan,
NoneCheck,
ResourceGroupPreparer,
ScenarioTest,
StorageAccountPreparer,
TestCli,
LiveScenarioTest)
from azure.cli.testsdk.preparers import (
AbstractPreparer,
SingleValueReplacer)
from azure.cli.command_modules.sql.custom import (
ClientAuthenticationType,
ClientType)
from datetime import datetime, timedelta
from time import sleep
# Constants
server_name_prefix = 'clitestserver'
server_name_max_length = 63
class SqlServerPreparer(AbstractPreparer, SingleValueReplacer):
def __init__(self, name_prefix=server_name_prefix, parameter_name='server', location='westus',
admin_user='admin123', admin_password='SecretPassword123',
resource_group_parameter_name='resource_group', skip_delete=True):
super(SqlServerPreparer, self).__init__(name_prefix, server_name_max_length)
self.location = location
self.parameter_name = parameter_name
self.admin_user = admin_user
self.admin_password = admin_password
self.resource_group_parameter_name = resource_group_parameter_name
self.skip_delete = skip_delete
def create_resource(self, name, **kwargs):
group = self._get_resource_group(**kwargs)
template = 'az sql server create -l {} -g {} -n {} -u {} -p {}'
execute(TestCli(), template.format(self.location, group, name, self.admin_user, self.admin_password))
return {self.parameter_name: name}
def remove_resource(self, name, **kwargs):
if not self.skip_delete:
group = self._get_resource_group(**kwargs)
execute(TestCli(), 'az sql server delete -g {} -n {} --yes --no-wait'.format(group, name))
def _get_resource_group(self, **kwargs):
try:
            return kwargs[self.resource_group_parameter_name]
        except KeyError:
            template = 'To create a sql server, a resource group is required. Please add ' \
                       'decorator @{} in front of this sql server preparer (resource group ' \
                       'parameter: {}).'
            raise CliTestError(template.format(ResourceGroupPreparer.__name__,
                                               self.resource_group_parameter_name))
class SqlServerMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group_1')
@ResourceGroupPreparer(parameter_name='resource_group_2')
def test_sql_server_mgmt(self, resource_group_1, resource_group_2, resource_group_location):
server_name_1 = self.create_random_name(server_name_prefix, server_name_max_length)
server_name_2 = self.create_random_name(server_name_prefix, server_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
loc = 'westeurope'
user = admin_login
# test create sql server with minimal required parameters
server_1 = self.cmd('sql server create -g {} --name {} -l {} '
'--admin-user {} --admin-password {}'
.format(resource_group_1, server_name_1, loc, user, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity', None)]).get_output_in_json()
# test list sql server should be 1
self.cmd('sql server list -g {}'.format(resource_group_1), checks=[JMESPathCheck('length(@)', 1)])
# test update sql server
self.cmd('sql server update -g {} --name {} --admin-password {} -i'
.format(resource_group_1, server_name_1, admin_passwords[1]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test update without identity parameter, validate identity still exists
# also use --id instead of -g/-n
self.cmd('sql server update --id {} --admin-password {}'
.format(server_1['id'], admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test create another sql server, with identity this time
self.cmd('sql server create -g {} --name {} -l {} -i '
'--admin-user {} --admin-password {}'
.format(resource_group_2, server_name_2, loc, user, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_2),
JMESPathCheck('resourceGroup', resource_group_2),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test list sql server in that group should be 1
self.cmd('sql server list -g {}'.format(resource_group_2), checks=[JMESPathCheck('length(@)', 1)])
# test list sql server in the subscription should be at least 2
self.cmd('sql server list', checks=[JMESPathCheckGreaterThan('length(@)', 1)])
# test show sql server
self.cmd('sql server show -g {} --name {}'
.format(resource_group_1, server_name_1),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
self.cmd('sql server show --id {}'
.format(server_1['id']),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
self.cmd('sql server list-usages -g {} -n {}'
.format(resource_group_1, server_name_1),
checks=[JMESPathCheck('[0].resourceName', server_name_1)])
# test delete sql server
self.cmd('sql server delete --id {} --yes'
.format(server_1['id']), checks=NoneCheck())
self.cmd('sql server delete -g {} --name {} --yes'
.format(resource_group_2, server_name_2), checks=NoneCheck())
# test list sql server should be 0
self.cmd('sql server list -g {}'.format(resource_group_1), checks=[NoneCheck()])
class SqlServerFirewallMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_firewall_mgmt(self, resource_group, resource_group_location, server):
rg = resource_group
firewall_rule_1 = 'rule1'
start_ip_address_1 = '0.0.0.0'
end_ip_address_1 = '255.255.255.255'
firewall_rule_2 = 'rule2'
start_ip_address_2 = '123.123.123.123'
end_ip_address_2 = '123.123.123.124'
# allow_all_azure_ips_rule = 'AllowAllAzureIPs'
# allow_all_azure_ips_address = '0.0.0.0'
# test sql server firewall-rule create
fw_rule_1 = self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, rg, server,
start_ip_address_1, end_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)]).get_output_in_json()
# test sql server firewall-rule show by group/server/name
self.cmd('sql server firewall-rule show --name {} -g {} --server {}'
.format(firewall_rule_1, rg, server),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule show by id
self.cmd('sql server firewall-rule show --id {}'
.format(fw_rule_1['id']),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule update by group/server/name
self.cmd('sql server firewall-rule update --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, rg, server,
start_ip_address_2, end_ip_address_2),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_2),
JMESPathCheck('endIpAddress', end_ip_address_2)])
# test sql server firewall-rule update by id
self.cmd('sql server firewall-rule update --id {} '
'--start-ip-address {}'
.format(fw_rule_1['id'], start_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_2)])
self.cmd('sql server firewall-rule update --name {} -g {} --server {} '
'--end-ip-address {}'
.format(firewall_rule_1, rg, server,
end_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule create another rule
self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_2, rg, server,
start_ip_address_2, end_ip_address_2),
checks=[
JMESPathCheck('name', firewall_rule_2),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_2),
JMESPathCheck('endIpAddress', end_ip_address_2)])
# test sql server firewall-rule list
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(rg, server), checks=[JMESPathCheck('length(@)', 2)])
# # test sql server firewall-rule create azure ip rule
# self.cmd('sql server firewall-rule allow-all-azure-ips -g {} --server {} '
# .format(rg, server), checks=[
# JMESPathCheck('name', allow_all_azure_ips_rule),
# JMESPathCheck('resourceGroup', rg),
# JMESPathCheck('startIpAddress', allow_all_azure_ips_address),
# JMESPathCheck('endIpAddress', allow_all_azure_ips_address)])
# # test sql server firewall-rule list
# self.cmd('sql server firewall-rule list -g {} --server {}'
# .format(rg, server), checks=[JMESPathCheck('length(@)', 3)])
# test sql server firewall-rule delete
self.cmd('sql server firewall-rule delete --id {}'
.format(fw_rule_1['id']), checks=NoneCheck())
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(rg, server), checks=[JMESPathCheck('length(@)', 1)])
self.cmd('sql server firewall-rule delete --name {} -g {} --server {}'
.format(firewall_rule_2, rg, server), checks=NoneCheck())
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(rg, server), checks=[NoneCheck()])
class SqlServerDbMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_db_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
database_name_2 = "cliautomationdb02"
database_name_3 = "cliautomationdb03"
update_service_objective = 'S1'
update_storage = '10GB'
update_storage_bytes = str(10 * 1024 * 1024 * 1024)
rg = resource_group
loc_display = 'East US 2'
# test sql db commands
db1 = self.cmd('sql db create -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('zoneRedundant', False)]).get_output_in_json()
self.cmd('sql db list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('sort([].name)', sorted([database_name, 'master'])),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[1].resourceGroup', rg)])
self.cmd('sql db list-usages -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[JMESPathCheck('[0].resourceName', database_name)])
# Show by group/server/name
self.cmd('sql db show -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg)])
# Show by id
self.cmd('sql db show --id {}'
.format(db1['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg)])
# Update by group/server/name
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --max-size {}'
' --set tags.key1=value1'
.format(rg, server, database_name,
update_service_objective, update_storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Update by id
self.cmd('sql db update --id {} --set tags.key2=value2'
.format(db1['id']),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key2', 'value2')])
# Rename by group/server/name
db2 = self.cmd('sql db rename -g {} -s {} -n {} --new-name {}'
.format(rg, server, database_name, database_name_2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_2)]).get_output_in_json()
# Rename by id
db3 = self.cmd('sql db rename --id {} --new-name {}'
.format(db2['id'], database_name_3),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_3)]).get_output_in_json()
# Delete by group/server/name
self.cmd('sql db delete -g {} --server {} --name {} --yes'
.format(rg, server, database_name_3),
checks=[NoneCheck()])
# Delete by id
self.cmd('sql db delete --id {} --yes'
.format(db3['id']),
checks=[NoneCheck()])
class SqlServerDbOperationMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='southeastasia')
@SqlServerPreparer(location='southeastasia')
def test_sql_db_operation_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
update_service_objective = 'S1'
# Create db
self.cmd('sql db create -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')])
# Update DB with --no-wait
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --no-wait'
.format(resource_group, server, database_name, update_service_objective))
# List operations
ops = list(
self.cmd('sql db op list -g {} -s {} -d {}'
                     .format(resource_group, server, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[0].databaseName', database_name)
])
.get_output_in_json())
# Cancel operation
self.cmd('sql db op cancel -g {} -s {} -d {} -n {}'
.format(resource_group, server, database_name, ops[0]['name']))
class SqlServerConnectionPolicyScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_server_connection_policy(self, resource_group, resource_group_location, server):
# Show
self.cmd('sql server conn-policy show -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('connectionType', 'Default')])
# Update
for type in ('Proxy', 'Default', 'Redirect'):
self.cmd('sql server conn-policy update -g {} -s {} -t {}'
.format(resource_group, server, type),
checks=[JMESPathCheck('connectionType', type)])
class AzureActiveDirectoryAdministratorScenarioTest(LiveScenarioTest):
# convert to ScenarioTest and re-record when ISSUE #6011 is fixed
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_aad_admin(self, resource_group, server):
rg = resource_group
sn = server
oid = '5e90ef3b-9b42-4777-819b-25c36961ea4d'
oid2 = 'e4d43337-d52c-4a0c-b581-09055e0359a0'
user = 'DSEngAll'
user2 = 'TestUser'
self.cmd('sql server ad-admin create -s {} -g {} -i {} -u {}'
.format(sn, rg, oid, user),
checks=[JMESPathCheck('login', user),
JMESPathCheck('sid', oid)])
self.cmd('sql server ad-admin list -s {} -g {}'
.format(sn, rg),
checks=[JMESPathCheck('[0].login', user)])
self.cmd('sql server ad-admin update -s {} -g {} -u {} -i {}'
.format(sn, rg, user2, oid2),
checks=[JMESPathCheck('login', user2),
JMESPathCheck('sid', oid2)])
self.cmd('sql server ad-admin delete -s {} -g {}'
.format(sn, rg))
self.cmd('sql server ad-admin list -s {} -g {}'
.format(sn, rg),
checks=[JMESPathCheck('login', None)])
class SqlServerDbCopyScenarioTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group_1')
@ResourceGroupPreparer(parameter_name='resource_group_2')
@SqlServerPreparer(parameter_name='server1', resource_group_parameter_name='resource_group_1')
@SqlServerPreparer(parameter_name='server2', resource_group_parameter_name='resource_group_2')
def test_sql_db_copy(self, resource_group_1, resource_group_2,
resource_group_location,
server1, server2):
database_name = "cliautomationdb01"
database_copy_name = "cliautomationdb02"
service_objective = 'S1'
rg = resource_group_1
loc_display = 'West US'
# create database
self.cmd('sql db create -g {} --server {} --name {}'
.format(rg, server1, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online')])
# copy database to same server (min parameters)
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {}'
.format(rg, server1, database_name, database_copy_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_copy_name)
])
# copy database to other server (max parameters)
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {} --dest-resource-group {} --dest-server {} '
'--service-objective {}'
.format(rg, server1, database_name, database_copy_name,
resource_group_2, server2, service_objective),
checks=[
JMESPathCheck('resourceGroup', resource_group_2),
JMESPathCheck('name', database_copy_name),
JMESPathCheck('requestedServiceObjectiveName', service_objective)
])
def _get_earliest_restore_date(db):
return datetime.strptime(db['earliestRestoreDate'], "%Y-%m-%dT%H:%M:%S.%f+00:00")
def _get_deleted_date(deleted_db):
return datetime.strptime(deleted_db['deletionDate'], "%Y-%m-%dT%H:%M:%S.%f+00:00")
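# The two helpers above assume the service always returns timestamps with fractional
# seconds and a literal "+00:00" offset. A slightly more tolerant variant could try a
# short list of formats before giving up; this is only an illustrative sketch (the
# helper name and the fallback format are assumptions, not something the tests rely on).
def _parse_utc_timestamp(value):
    for fmt in ("%Y-%m-%dT%H:%M:%S.%f+00:00", "%Y-%m-%dT%H:%M:%S+00:00"):
        try:
            return datetime.strptime(value, fmt)
        except ValueError:
            pass
    raise ValueError("Unrecognized timestamp format: {}".format(value))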
def _create_db_wait_for_first_backup(test, rg, server, database_name):
# create db
db = test.cmd('sql db create -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')]).get_output_in_json()
# Wait until earliestRestoreDate is in the past. When run live, this will take at least
# 10 minutes. Unfortunately there's no way to speed this up.
earliest_restore_date = _get_earliest_restore_date(db)
while datetime.utcnow() <= earliest_restore_date:
sleep(10) # seconds
return db
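# Illustrative variant only: the loop above can spin indefinitely if the first backup
# never becomes available. The deleted-database test below bounds its own polling loop
# with a timeout, and the same pattern could be applied here. The helper name, the
# 30-minute bound, and the omission of the JMESPath checks are assumptions of this sketch.
def _create_db_wait_for_first_backup_with_timeout(test, rg, server, database_name,
                                                  timeout=timedelta(minutes=30)):
    db = test.cmd('sql db create -g {} --server {} --name {}'
                  .format(rg, server, database_name)).get_output_in_json()
    deadline = datetime.utcnow() + timeout
    while datetime.utcnow() <= _get_earliest_restore_date(db):
        if datetime.utcnow() > deadline:
            raise AssertionError('First backup not available before the timeout expired.')
        sleep(10)  # seconds
    return db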
class SqlServerDbRestoreScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_db_restore(self, resource_group, resource_group_location, server):
rg = resource_group
database_name = 'cliautomationdb01'
# Standalone db
restore_service_objective = 'S1'
restore_edition = 'Standard'
restore_standalone_database_name = 'cliautomationdb01restore1'
restore_pool_database_name = 'cliautomationdb01restore2'
elastic_pool = 'cliautomationpool1'
# create elastic pool
self.cmd('sql elastic-pool create -g {} -s {} -n {}'
.format(rg, server, elastic_pool))
# Create database and wait for first backup to exist
_create_db_wait_for_first_backup(self, rg, server, database_name)
# Restore to standalone db
self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
' --service-objective {} --edition {}'
.format(rg, server, database_name, datetime.utcnow().isoformat(),
restore_standalone_database_name, restore_service_objective,
restore_edition),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_standalone_database_name),
JMESPathCheck('requestedServiceObjectiveName',
restore_service_objective),
JMESPathCheck('status', 'Online')])
# Restore to db into pool
self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
' --elastic-pool {}'
.format(rg, server, database_name, datetime.utcnow().isoformat(),
restore_pool_database_name, elastic_pool),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_pool_database_name),
JMESPathCheck('elasticPoolName', elastic_pool),
JMESPathCheck('status', 'Online')])
class SqlServerDbRestoreDeletedScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_db_restore_deleted(self, resource_group, resource_group_location, server):
rg = resource_group
database_name = 'cliautomationdb01'
# Standalone db
restore_service_objective = 'S1'
restore_edition = 'Standard'
restore_database_name1 = 'cliautomationdb01restore1'
restore_database_name2 = 'cliautomationdb01restore2'
# Create database and wait for first backup to exist
_create_db_wait_for_first_backup(self, rg, server, database_name)
# Delete database
self.cmd('sql db delete -g {} -s {} -n {} --yes'.format(rg, server, database_name))
# Wait for deleted database to become visible. When run live, this will take around
# 5-10 minutes. Unfortunately there's no way to speed this up. Use a timeout to ensure the
# test doesn't loop forever if there's a bug.
start_time = datetime.now()
timeout = timedelta(0, 15 * 60) # 15 minutes timeout
while True:
deleted_dbs = list(self.cmd('sql db list-deleted -g {} -s {}'.format(rg, server)).get_output_in_json())
if deleted_dbs:
# Deleted db found, stop polling
break
# Deleted db not found, sleep (if running live) and then poll again.
if self.is_live:
self.assertTrue(datetime.now() < start_time + timeout, 'Deleted db not found before timeout expired.')
sleep(10) # seconds
deleted_db = deleted_dbs[0]
# Restore deleted to latest point in time
self.cmd('sql db restore -g {} -s {} -n {} --deleted-time {} --dest-name {}'
' --service-objective {} --edition {}'
.format(rg, server, database_name, _get_deleted_date(deleted_db).isoformat(),
restore_database_name1, restore_service_objective,
restore_edition),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_database_name1),
JMESPathCheck('requestedServiceObjectiveName',
restore_service_objective),
JMESPathCheck('status', 'Online')])
# Restore deleted to earlier point in time
self.cmd('sql db restore -g {} -s {} -n {} -t {} --deleted-time {} --dest-name {}'
.format(rg, server, database_name, _get_earliest_restore_date(deleted_db).isoformat(),
_get_deleted_date(deleted_db).isoformat(), restore_database_name2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_database_name2),
JMESPathCheck('status', 'Online')])
class SqlServerDbSecurityScenarioTest(ScenarioTest):
def _get_storage_endpoint(self, storage_account, resource_group):
return self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
def _get_storage_key(self, storage_account, resource_group):
return self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
@ResourceGroupPreparer()
@ResourceGroupPreparer(parameter_name='resource_group_2')
@SqlServerPreparer()
@StorageAccountPreparer()
@StorageAccountPreparer(parameter_name='storage_account_2',
resource_group_parameter_name='resource_group_2')
def test_sql_db_security_mgmt(self, resource_group, resource_group_2,
resource_group_location, server,
storage_account, storage_account_2):
database_name = "cliautomationdb01"
# get storage account endpoint and key
storage_endpoint = self._get_storage_endpoint(storage_account, resource_group)
key = self._get_storage_key(storage_account, resource_group)
# create db
self.cmd('sql db create -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')])
# get audit policy
self.cmd('sql db audit-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('resourceGroup', resource_group)])
# update audit policy - enable
state_enabled = 'Enabled'
retention_days = 30
audit_actions_input = 'DATABASE_LOGOUT_GROUP DATABASE_ROLE_MEMBER_CHANGE_GROUP'
audit_actions_expected = ['DATABASE_LOGOUT_GROUP',
'DATABASE_ROLE_MEMBER_CHANGE_GROUP']
self.cmd('sql db audit-policy update -g {} -s {} -n {}'
' --state {} --storage-key {} --storage-endpoint={}'
' --retention-days={} --actions {}'
.format(resource_group, server, database_name, state_enabled, key,
storage_endpoint, retention_days, audit_actions_input),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# update audit policy - specify storage account and resource group. use secondary key
storage_endpoint_2 = self._get_storage_endpoint(storage_account_2, resource_group_2)
self.cmd('sql db audit-policy update -g {} -s {} -n {} --storage-account {}'
.format(resource_group, server, database_name, storage_account_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# update audit policy - disable
state_disabled = 'Disabled'
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {}'
.format(resource_group, server, database_name, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_disabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get threat detection policy
self.cmd('sql db threat-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('resourceGroup', resource_group)])
# update threat detection policy - enable
disabled_alerts_input = 'Sql_Injection_Vulnerability Access_Anomaly'
disabled_alerts_expected = 'Sql_Injection_Vulnerability;Access_Anomaly'
email_addresses_input = 'test1@example.com test2@example.com'
email_addresses_expected = 'test1@example.com;test2@example.com'
email_account_admins = 'Enabled'
self.cmd('sql db threat-policy update -g {} -s {} -n {}'
' --state {} --storage-key {} --storage-endpoint {}'
' --retention-days {} --email-addresses {} --disabled-alerts {}'
' --email-account-admins {}'
.format(resource_group, server, database_name, state_enabled, key,
storage_endpoint, retention_days, email_addresses_input,
disabled_alerts_input, email_account_admins),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', key),
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
# update threat policy - specify storage account and resource group. use secondary key
key_2 = self._get_storage_key(storage_account_2, resource_group_2)
self.cmd('sql db threat-policy update -g {} -s {} -n {} --storage-account {}'
.format(resource_group, server, database_name, storage_account_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', key_2),
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
# update threat detection policy - disable
self.cmd('sql db threat-policy update -g {} -s {} -n {} --state {}'
.format(resource_group, server, database_name, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_disabled),
JMESPathCheck('storageAccountAccessKey', key_2),
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
class SqlServerDwMgmtScenarioTest(ScenarioTest):
# pylint: disable=too-many-instance-attributes
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_dw_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
update_service_objective = 'DW200'
update_storage = '20TB'
update_storage_bytes = str(20 * 1024 * 1024 * 1024 * 1024)
rg = resource_group
loc_display = 'West US'
# test sql dw commands
dw = self.cmd('sql dw create -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('edition', 'DataWarehouse'),
JMESPathCheck('status', 'Online')]).get_output_in_json()
# Sanity check that the default max size is not equal to the size that we will update to
# later. That way we know that update is actually updating the size.
self.assertNotEqual(dw['maxSizeBytes'], update_storage_bytes,
'Initial max size in bytes equals the value we want to update to later,'
' so we would not be able to verify that updating the max size actually changes it.')
# DataWarehouse is a little quirky: it is considered both a database and its own
# separate type of thing. (Why? Because it uses the same REST endpoint as a regular
# database, so it must be a database. However, it supports only a subset of operations,
# so to clarify which operations apply we group them under `sql dw`.) As a result, the
# dw shows up under both `db list` and `dw list`.
self.cmd('sql db list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('length(@)', 2), # includes dw and master
JMESPathCheck('sort([].name)', sorted([database_name, 'master'])),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[1].resourceGroup', rg)])
self.cmd('sql dw list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].resourceGroup', rg)])
self.cmd('sql db show -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg)])
# pause/resume
self.cmd('sql dw pause -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw show --id {}'
.format(dw['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('status', 'Paused')])
self.cmd('sql dw resume -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw show -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('status', 'Online')])
# Update DW storage
self.cmd('sql dw update -g {} -s {} -n {} --max-size {}'
' --set tags.key1=value1'
.format(rg, server, database_name, update_storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Update DW service objective
self.cmd('sql dw update --id {} --service-objective {}'
.format(dw['id'], update_service_objective),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Delete DW
self.cmd('sql dw delete -g {} --server {} --name {} --yes'
.format(rg, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw delete --id {} --yes'
.format(dw['id']),
checks=[NoneCheck()])
class SqlServerDnsAliasMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1")
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2")
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_3",
resource_group_parameter_name="resource_group_2")
def test_sql_server_dns_alias_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2, server_name_3):
# helper class so that it's clear which servers are in which groups
class ServerInfo(object): # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1)
s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2)
alias_name = 'alias1'
# verify setup
for s in (s1, s2, s3):
self.cmd('sql server show -g {} -n {}'
.format(s.group, s.name),
checks=[
JMESPathCheck('name', s.name),
JMESPathCheck('resourceGroup', s.group)])
# Create server dns alias
self.cmd('sql server dns-alias create -n {} -s {} -g {}'
.format(alias_name, s1.name, s1.group),
checks=[
JMESPathCheck('name', alias_name),
JMESPathCheck('resourceGroup', s1.group)
])
# Check that the alias was created on the right server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s1.name, s1.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to the server within the same resource group
self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
.format(alias_name, s1.name, s2.name, s2.group),
checks=[NoneCheck()])
# List the aliases on the old server to verify the alias no longer points there
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s1.name, s1.group),
checks=[
JMESPathCheck('length(@)', 0)
])
# Check if alias is pointing to new server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to the same server (to check that operation is idempotent)
self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
.format(alias_name, s1.name, s2.name, s2.group),
checks=[NoneCheck()])
# Check if alias is pointing to the right server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to a server in a different resource group
self.cmd('sql server dns-alias set -n {} --original-server {} --original-resource-group {} -s {} -g {}'
.format(alias_name, s2.name, s2.group, s3.name, s3.group),
checks=[NoneCheck()])
# List the aliases on the old server to verify the alias no longer points there
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 0)
])
# Check if alias is pointing to new server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s3.name, s3.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Drop alias
self.cmd('sql server dns-alias delete -n {} -s {} -g {}'
.format(alias_name, s3.name, s3.group),
checks=[NoneCheck()])
# Verify that alias got dropped correctly
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s3.name, s3.group),
checks=[
JMESPathCheck('length(@)', 0)
])
class SqlServerDbReplicaMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1")
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2")
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_3",
resource_group_parameter_name="resource_group_2")
def test_sql_db_replica_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2, server_name_3):
database_name = "cliautomationdb01"
service_objective = 'S1'
# helper class so that it's clear which servers are in which groups
class ServerInfo(object): # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1)
s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2)
# verify setup
for s in (s1, s2, s3):
self.cmd('sql server show -g {} -n {}'
.format(s.group, s.name),
checks=[
JMESPathCheck('name', s.name),
JMESPathCheck('resourceGroup', s.group)])
# create db in first server
self.cmd('sql db create -g {} -s {} -n {}'
.format(s1.group, s1.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s1.group)])
# create replica in second server with min params
# partner resource group unspecified because s1.group == s2.group
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
.format(s1.group, s1.name, database_name,
s2.name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group)])
# check that the replica was created in the correct server
self.cmd('sql db show -g {} -s {} -n {}'
.format(s2.group, s2.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group)])
# create replica in third server with max params
# --elastic-pool is untested
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
' --partner-resource-group {} --service-objective {}'
.format(s1.group, s1.name, database_name,
s3.name, s3.group, service_objective),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s3.group),
JMESPathCheck('requestedServiceObjectiveName', service_objective)])
# check that the replica was created in the correct server
self.cmd('sql db show -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s3.group)])
# list replica links on s1 - it should link to s2 and s3
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s1.group, s1.name, database_name),
checks=[JMESPathCheck('length(@)', 2)])
# list replica links on s3 - it should link only to s1
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Secondary'),
JMESPathCheck('[0].partnerRole', 'Primary')])
# Failover to s3.
self.cmd('sql db replica set-primary -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[NoneCheck()])
# list replica links on s3 - it should link to s1 and s2
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[JMESPathCheck('length(@)', 2)])
# Stop replication from s3 to s2 twice. Second time should be no-op.
for _ in range(2):
# Delete link
self.cmd('sql db replica delete-link -g {} -s {} -n {} --partner-resource-group {}'
' --partner-server {} --yes'
.format(s3.group, s3.name, database_name, s2.group, s2.name),
checks=[NoneCheck()])
# Verify link was deleted. s3 should still be the primary.
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Primary'),
JMESPathCheck('[0].partnerRole', 'Secondary')])
# Failover to s3 again (should be no-op, it's already primary)
self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss'
.format(s3.group, s3.name, database_name),
checks=[NoneCheck()])
# s3 should still be the primary.
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Primary'),
JMESPathCheck('[0].partnerRole', 'Secondary')])
# Force failover back to s1
self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss'
.format(s1.group, s1.name, database_name),
checks=[NoneCheck()])
class SqlElasticPoolsMgmtScenarioTest(ScenarioTest):
def __init__(self, method_name):
super(SqlElasticPoolsMgmtScenarioTest, self).__init__(method_name)
self.pool_name = "cliautomationpool01"
def verify_activities(self, activities, resource_group, server):
if not isinstance(activities, list):
raise AssertionError("Actual value '{}' expected to be a list."
.format(activities))
for activity in activities:
if not isinstance(activity, dict):
raise AssertionError("Actual value '{}' expected to be a dict."
.format(activity))
if activity['resourceGroup'] != resource_group:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['resourceGroup'], resource_group))
elif activity['serverName'] != server:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['serverName'], server))
elif activity['currentElasticPoolName'] != self.pool_name:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['currentElasticPoolName'], self.pool_name))
return True
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_elastic_pools_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb02"
pool_name2 = "cliautomationpool02"
edition = 'Standard'
dtu = 1200
db_dtu_min = 10
db_dtu_max = 50
storage = '1200GB'
storage_mb = 1228800
updated_dtu = 50
updated_db_dtu_min = 10
updated_db_dtu_max = 50
updated_storage = '50GB'
updated_storage_mb = 51200
db_service_objective = 'S1'
rg = resource_group
loc_display = 'East US 2'
# test sql elastic-pool commands
elastic_pool_1 = self.cmd('sql elastic-pool create -g {} --server {} --name {} '
'--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} '
'--storage {}'
.format(rg, server, self.pool_name, dtu,
edition, db_dtu_min, db_dtu_max, storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb)]).get_output_in_json()
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('zoneRedundant', False)])
self.cmd('sql elastic-pool show --id {}'
.format(elastic_pool_1['id']),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb)])
self.cmd('sql elastic-pool list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[0].name', self.pool_name),
JMESPathCheck('[0].state', 'Ready'),
JMESPathCheck('[0].databaseDtuMin', db_dtu_min),
JMESPathCheck('[0].databaseDtuMax', db_dtu_max),
JMESPathCheck('[0].edition', edition),
JMESPathCheck('[0].storageMb', storage_mb)])
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--dtu {} --storage {} --set tags.key1=value1'
.format(rg, server, self.pool_name,
updated_dtu, updated_storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', updated_dtu),
JMESPathCheck('edition', edition),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('storageMb', updated_storage_mb),
JMESPathCheck('tags.key1', 'value1')])
self.cmd('sql elastic-pool update --id {} '
'--dtu {} --db-dtu-min {} --db-dtu-max {} --storage {}'
.format(elastic_pool_1['id'], dtu,
updated_db_dtu_min, updated_db_dtu_max,
storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('databaseDtuMin', updated_db_dtu_min),
JMESPathCheck('databaseDtuMax', updated_db_dtu_max),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('tags.key1', 'value1')])
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--remove tags.key1'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('tags', {})])
# create a second pool with minimal params
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
.format(rg, server, pool_name2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name2),
JMESPathCheck('location', loc_display),
JMESPathCheck('state', 'Ready')])
self.cmd('sql elastic-pool list -g {} -s {}'.format(rg, server),
checks=[JMESPathCheck('length(@)', 2)])
# Create a database directly in an Azure sql elastic pool
self.cmd('sql db create -g {} --server {} --name {} '
'--elastic-pool {}'
.format(rg, server, database_name, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', self.pool_name),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
# Move database to second pool. Specify service objective just for fun
self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}'
' --service-objective ElasticPool'
.format(rg, server, database_name, pool_name2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', pool_name2),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
# Remove database from pool
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(rg, server, database_name, db_service_objective),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('requestedServiceObjectiveName', db_service_objective),
JMESPathCheck('status', 'Online')])
# Move database back into pool
self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}'
' --service-objective ElasticPool'
.format(rg, server, database_name, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', self.pool_name),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
# List databases in a pool
self.cmd('sql elastic-pool list-dbs -g {} -s {} -n {}'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].elasticPoolName', self.pool_name)])
# List databases in a pool - alternative command
self.cmd('sql db list -g {} -s {} --elastic-pool {}'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].elasticPoolName', self.pool_name)])
# self.cmd('sql elastic-pool db show-activity -g {} --server {} --elastic-pool {}'
# .format(rg, server, pool_name),
# checks=[
# JMESPathCheck('length(@)', 1),
# JMESPathCheck('[0].resourceGroup', rg),
# JMESPathCheck('[0].serverName', server),
# JMESPathCheck('[0].currentElasticPoolName', pool_name)])
# activities = self.cmd('sql elastic-pools db show-activity -g {} '
# '--server-name {} --elastic-pool-name {}'
# .format(rg, server, pool_name),
# checks=[JMESPathCheck('type(@)', 'array')])
# self.verify_activities(activities, resource_group)
# delete sql server database
self.cmd('sql db delete -g {} --server {} --name {} --yes'
.format(rg, server, database_name),
checks=[NoneCheck()])
# delete sql elastic pool
self.cmd('sql elastic-pool delete -g {} --server {} --name {}'
.format(rg, server, self.pool_name),
checks=[NoneCheck()])
# delete sql elastic pool by id
self.cmd('sql elastic-pool delete --id {}'
.format(elastic_pool_1['id']),
checks=[NoneCheck()])
class SqlElasticPoolOperationMgmtScenarioTest(ScenarioTest):
def __init__(self, method_name):
super(SqlElasticPoolOperationMgmtScenarioTest, self).__init__(method_name)
self.pool_name = "operationtestep1"
@ResourceGroupPreparer(location='southeastasia')
@SqlServerPreparer(location='southeastasia')
def test_sql_elastic_pool_operation_mgmt(self, resource_group, resource_group_location, server):
edition = 'Premium'
dtu = 125
db_dtu_min = 0
db_dtu_max = 50
storage = '50GB'
storage_mb = 51200
update_dtu = 250
update_db_dtu_min = 50
update_db_dtu_max = 250
# Create elastic pool
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
'--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} --storage {}'
.format(resource_group, server, self.pool_name, dtu, edition, db_dtu_min, db_dtu_max, storage),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('edition', edition),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('storageMb', storage_mb)])
# Update elastic pool
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--dtu {} --db-dtu-min {} --db-dtu-max {}'
.format(resource_group, server, self.pool_name, update_dtu, update_db_dtu_min, update_db_dtu_max))
# List operations on the elastic pool
ops = list(self.cmd('sql elastic-pool op list -g {} --server {} --elastic-pool {}'
.format(resource_group, server, self.pool_name)).get_output_in_json())
# Cancel operation
try:
self.cmd('sql elastic-pool op cancel -g {} --server {} --elastic-pool {} --name {}'
.format(resource_group, server, self.pool_name, ops[0]['name']))
except Exception as e:
# Cancellation can legitimately fail if the operation already completed; any other
# error is unexpected and should fail the test rather than being silently swallowed.
expected_message = "Cannot cancel management operation {} in current state.".format(ops[0]['name'])
if expected_message not in str(e):
raise
class SqlServerCapabilityScenarioTest(ScenarioTest):
@AllowLargeResponse()
def test_sql_capabilities(self):
location = 'westus'
# New capabilities are added quite frequently and the state of each capability depends
# on your subscription. So it's not a good idea to make strict checks against exactly
# which capabilities are returned. The idea is to just check the overall structure.
db_max_size_length_jmespath = 'length([].supportedServiceLevelObjectives[].supportedMaxSizes[])'
# Get all db capabilities
self.cmd('sql db list-editions -l {}'.format(location),
checks=[
# At least standard and premium edition exist
JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheckExists("[?name == 'Premium']"),
# At least s0 and p1 service objectives exist
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"),
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'P1']"),
# Max size data is omitted
JMESPathCheck(db_max_size_length_jmespath, 0)])
# Get all db capabilities with size data
self.cmd('sql db list-editions -l {} --show-details max-size'.format(location),
checks=[
# Max size data is included
JMESPathCheckGreaterThan(db_max_size_length_jmespath, 0)])
# Search for db edition - note that it's case insensitive
self.cmd('sql db list-editions -l {} --edition standard'.format(location),
checks=[
# Standard edition exists, other editions don't
JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheck("length([?name != 'Standard'])", 0)])
# Search for db service objective - note that it's case insensitive
# Checked items:
# * Standard edition exists, other editions don't
# * S0 service objective exists, others don't exist
self.cmd('sql db list-editions -l {} --edition standard --service-objective s0'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheck("length([?name != 'Standard'])", 0),
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"),
JMESPathCheck("length([].supportedServiceLevelObjectives[] | [?name != 'S0'])", 0)])
pool_max_size_length_jmespath = 'length([].supportedElasticPoolDtus[].supportedMaxSizes[])'
pool_db_max_dtu_length_jmespath = 'length([].supportedElasticPoolDtus[].supportedPerDatabaseMaxDtus[])'
pool_db_min_dtu_length_jmespath = ('length([].supportedElasticPoolDtus[].supportedPerDatabaseMaxDtus[]'
'.supportedPerDatabaseMinDtus[])')
pool_db_max_size_length_jmespath = 'length([].supportedElasticPoolDtus[].supportedPerDatabaseMaxSizes[])'
# Get all elastic pool capabilities
self.cmd('sql elastic-pool list-editions -l {}'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"), # At least standard and premium edition exist
JMESPathCheckExists("[?name == 'Premium']"),
JMESPathCheck(pool_max_size_length_jmespath, 0), # Optional details are omitted
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Search for elastic pool edition - note that it's case insensitive
self.cmd('sql elastic-pool list-editions -l {} --edition standard'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"), # Standard edition exists, other editions don't
JMESPathCheck("length([?name != 'Standard'])", 0)])
# Search for dtu limit
self.cmd('sql elastic-pool list-editions -l {} --dtu 100'.format(location),
checks=[
# All results have 100 dtu
JMESPathCheckGreaterThan('length([].supportedElasticPoolDtus[?limit == `100`][])', 0),
JMESPathCheck('length([].supportedElasticPoolDtus[?limit != `100`][])', 0)])
# Get all db capabilities with pool max size
self.cmd('sql elastic-pool list-editions -l {} --show-details max-size'.format(location),
checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0),
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db max size
self.cmd('sql elastic-pool list-editions -l {} --show-details db-max-size'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db max dtu
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-max-dtu'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db min dtu (which is nested under per db max dtu)
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with everything
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu db-max-dtu '
'db-max-size max-size'.format(location),
checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)])
class SqlServerImportExportMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
@StorageAccountPreparer()
def test_sql_db_import_export_mgmt(self, resource_group, resource_group_location, server, storage_account):
location_long_name = 'West US'
admin_login = 'admin123'
admin_password = 'SecretPassword123'
db_name = 'cliautomationdb01'
db_name2 = 'cliautomationdb02'
db_name3 = 'cliautomationdb03'
blob = 'testbacpac.bacpac'
blob2 = 'testbacpac2.bacpac'
container = 'bacpacs'
firewall_rule_1 = 'allowAllIps'
start_ip_address_1 = '0.0.0.0'
end_ip_address_1 = '0.0.0.0'
# create server firewall rule
self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, resource_group, server,
start_ip_address_1, end_ip_address_1),
checks=[JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# create dbs
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online')])
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name2),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name2),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online')])
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name3),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name3),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online')])
# get storage account endpoint
storage_endpoint = self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
bacpacUri = '{}{}/{}'.format(storage_endpoint, container, blob)
bacpacUri2 = '{}{}/{}'.format(storage_endpoint, container, blob2)
# get storage account key
storageKey = self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
# Set Expiry
expiryString = '9999-12-25T00:00:00Z'
# Get sas key
sasKey = self.cmd('storage blob generate-sas --account-name {} -c {} -n {} --permissions rw --expiry {}'.format(
storage_account, container, blob2, expiryString)).get_output_in_json()
# create storage account blob container
self.cmd('storage container create -n {} --account-name {} --account-key {} '
.format(container, storage_account, storageKey),
checks=[JMESPathCheck('created', True)])
# export database to blob container using both keys
self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type StorageAccessKey'
' --storage-uri {}'
.format(server, db_name, resource_group, admin_password, admin_login, storageKey, bacpacUri),
checks=[JMESPathCheck('blobUri', bacpacUri),
JMESPathCheck('databaseName', db_name),
JMESPathCheck('requestType', 'Export'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type SharedAccessKey'
' --storage-uri {}'
.format(server, db_name, resource_group, admin_password, admin_login, sasKey, bacpacUri2),
checks=[JMESPathCheck('blobUri', bacpacUri2),
JMESPathCheck('databaseName', db_name),
JMESPathCheck('requestType', 'Export'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
# import bacpac to second database using Storage Key
self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type StorageAccessKey'
' --storage-uri {}'
.format(server, db_name2, resource_group, admin_password, admin_login, storageKey, bacpacUri),
checks=[JMESPathCheck('blobUri', bacpacUri),
JMESPathCheck('databaseName', db_name2),
JMESPathCheck('name', 'import'),
JMESPathCheck('requestType', 'Import'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
# import bacpac to third database using SAS key
self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type SharedAccessKey'
' --storage-uri {}'
.format(server, db_name3, resource_group, admin_password, admin_login, sasKey, bacpacUri2),
checks=[JMESPathCheck('blobUri', bacpacUri2),
JMESPathCheck('databaseName', db_name3),
JMESPathCheck('name', 'import'),
JMESPathCheck('requestType', 'Import'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
class SqlServerConnectionStringScenarioTest(ScenarioTest):
def test_sql_db_conn_str(self):
# ADO.NET, username/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net').get_output_in_json()
self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;')
# ADO.NET, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Password"')
# ADO.NET, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Integrated"')
# SqlCmd, username/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -N -l 30')
# SqlCmd, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -G -N -l 30')
# SqlCmd, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -G -N -l 30')
# JDBC, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc').get_output_in_json()
self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>@myserver;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30')
# JDBC, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryPassword')
# JDBC, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryIntegrated')
# PHP PDO, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo').get_output_in_json()
self.assertEqual(conn_str, '$conn = new PDO("sqlsrv:server = tcp:myserver.database.windows.net,1433; Database = mydb; LoginTimeout = 30; Encrypt = 1; TrustServerCertificate = 0;", "<username>", "<password>");')
# PHP PDO, ADPassword
self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADPassword', expect_failure=True)
# PHP PDO, ADIntegrated
self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADIntegrated', expect_failure=True)
# PHP, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php').get_output_in_json()
self.assertEqual(conn_str, '$connectionOptions = array("UID"=>"<username>@myserver", "PWD"=>"<password>", "Database"=>mydb, "LoginTimeout" => 30, "Encrypt" => 1, "TrustServerCertificate" => 0); $serverName = "tcp:myserver.database.windows.net,1433"; $conn = sqlsrv_connect($serverName, $connectionOptions);')
# PHP, ADPassword
self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADPassword', expect_failure=True)
# PHP, ADIntegrated
self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADIntegrated', expect_failure=True)
# ODBC, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc').get_output_in_json()
self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;')
# ODBC, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryPassword')
# ODBC, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryIntegrated')
class SqlTransparentDataEncryptionScenarioTest(ScenarioTest):
def wait_for_encryption_scan(self, rg, sn, db_name):
active_scan = True
retry_attempts = 5
while active_scan:
tdeactivity = self.cmd('sql db tde list-activity -g {} -s {} -d {}'
.format(rg, sn, db_name)).get_output_in_json()
# if tdeactivity is an empty array, there is no ongoing encryption scan
active_scan = (len(tdeactivity) > 0)
time.sleep(10)
retry_attempts -= 1
if retry_attempts <= 0:
raise CliTestError("Encryption scan still ongoing: {}.".format(tdeactivity))
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_tde(self, resource_group, server):
rg = resource_group
sn = server
db_name = self.create_random_name("sqltdedb", 20)
# create database
self.cmd('sql db create -g {} --server {} --name {}'
.format(rg, sn, db_name))
# validate encryption is on by default
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Enabled')])
self.wait_for_encryption_scan(rg, sn, db_name)
# disable encryption
self.cmd('sql db tde set -g {} -s {} -d {} --status Disabled'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Disabled')])
self.wait_for_encryption_scan(rg, sn, db_name)
# validate encryption is disabled
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Disabled')])
# enable encryption
self.cmd('sql db tde set -g {} -s {} -d {} --status Enabled'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Enabled')])
self.wait_for_encryption_scan(rg, sn, db_name)
# validate encryption is enabled
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Enabled')])
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_tdebyok(self, resource_group, server):
resource_prefix = 'sqltdebyok'
# add identity to server
server_resp = self.cmd('sql server update -g {} -n {} -i'
.format(resource_group, server)).get_output_in_json()
server_identity = server_resp['identity']['principalId']
# create db
db_name = self.create_random_name(resource_prefix, 20)
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name))
# create vault and acl server identity
vault_name = self.create_random_name(resource_prefix, 24)
self.cmd('keyvault create -g {} -n {} --enable-soft-delete true'
.format(resource_group, vault_name))
self.cmd('keyvault set-policy -g {} -n {} --object-id {} --key-permissions wrapKey unwrapKey get list'
.format(resource_group, vault_name, server_identity))
# create key
key_name = self.create_random_name(resource_prefix, 32)
key_resp = self.cmd('keyvault key create -n {} -p software --vault-name {}'
.format(key_name, vault_name)).get_output_in_json()
kid = key_resp['key']['kid']
# add server key
server_key_resp = self.cmd('sql server key create -g {} -s {} -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('uri', kid),
JMESPathCheck('serverKeyType', 'AzureKeyVault')])
server_key_name = server_key_resp.get_output_in_json()['name']
# validate show key
self.cmd('sql server key show -g {} -s {} -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('uri', kid),
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('name', server_key_name)])
# validate list key (should return 2 items)
self.cmd('sql server key list -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 2)])
# validate encryption protector is service managed via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# update encryption protector to akv key
self.cmd('sql server tde-key set -g {} -s {} -t AzureKeyVault -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('serverKeyName', server_key_name),
JMESPathCheck('uri', kid)])
# validate encryption protector is akv via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('serverKeyName', server_key_name),
JMESPathCheck('uri', kid)])
# update encryption protector to service managed
self.cmd('sql server tde-key set -g {} -s {} -t ServiceManaged'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# validate encryption protector is service managed via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# delete server key
self.cmd('sql server key delete -g {} -s {} -k {}'
.format(resource_group, server, kid))
# wait for key to be deleted
time.sleep(10)
# validate deleted server key via list (should return 1 item)
self.cmd('sql server key list -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 1)])
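# Sketch of a more deterministic wait than the fixed time.sleep(10) used above: poll the
# server key list until the expected count is reached, bounded by a deadline. Purely
# illustrative; the helper name and the 5-minute bound are assumptions, and the test
# itself keeps the simple sleep.
def _wait_for_server_key_count(test, resource_group, server, expected_count,
                               timeout=timedelta(minutes=5)):
    deadline = datetime.utcnow() + timeout
    while True:
        keys = test.cmd('sql server key list -g {} -s {}'
                        .format(resource_group, server)).get_output_in_json()
        if len(keys) == expected_count:
            return keys
        if datetime.utcnow() > deadline:
            raise AssertionError('Expected {} server keys, found {}.'
                                 .format(expected_count, len(keys)))
        time.sleep(10)  # seconds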
class SqlServerVnetMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_vnet_mgmt(self, resource_group, resource_group_location, server):
rg = resource_group
vnet_rule_1 = 'rule1'
vnet_rule_2 = 'rule2'
# Create vnet's - vnet1 and vnet2
vnetName1 = 'vnet1'
vnetName2 = 'vnet2'
subnetName = 'subnet1'
addressPrefix = '10.0.1.0/24'
endpoint = 'Microsoft.Sql'
# Vnet 1 without service endpoints to test ignore-missing-vnet-service-endpoint feature
self.cmd('network vnet create -g {} -n {}'.format(rg, vnetName1))
self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {}'
.format(rg, vnetName1, subnetName, addressPrefix))
vnet1 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}'
.format(subnetName, vnetName1, rg)).get_output_in_json()
vnet_id_1 = vnet1['id']
# Vnet 2
self.cmd('network vnet create -g {} -n {}'.format(rg, vnetName2))
self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {} --service-endpoints {}'
.format(rg, vnetName2, subnetName, addressPrefix, endpoint),
checks=JMESPathCheck('serviceEndpoints[0].service', 'Microsoft.Sql'))
vnet2 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}'
.format(subnetName, vnetName2, rg)).get_output_in_json()
vnet_id_2 = vnet2['id']
# test sql server vnet-rule create using subnet name and vnet name and ignore-missing-vnet-service-endpoint flag
self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {} --vnet-name {} -i'
.format(vnet_rule_1, rg, server, subnetName, vnetName1))
# test sql server vnet-rule show rule 1
self.cmd('sql server vnet-rule show --name {} -g {} --server {}'
.format(vnet_rule_1, rg, server),
checks=[
JMESPathCheck('name', vnet_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)])
# test sql server vnet-rule create using subnet id
self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {}'
.format(vnet_rule_2, rg, server, vnet_id_2),
checks=[
JMESPathCheck('name', vnet_rule_2),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_2),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)])
# test sql server vnet-rule update rule 1 with vnet 2
self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {}'
.format(vnet_rule_1, rg, server, vnet_id_2),
checks=[
JMESPathCheck('name', vnet_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_2),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)])
# test sql server vnet-rule update rule 2 with vnet 1 and ignore-missing-vnet-service-endpoint flag
self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {} -i'
.format(vnet_rule_2, rg, server, vnet_id_1),
checks=[JMESPathCheck('name', vnet_rule_2),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_1),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)])
# test sql server vnet-rule list
self.cmd('sql server vnet-rule list -g {} --server {}'.format(rg, server),
checks=[JMESPathCheck('length(@)', 2)])
# test sql server vnet-rule delete rule 1
self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_1, rg, server),
checks=NoneCheck())
# test sql server vnet-rule delete rule 2
self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_2, rg, server),
checks=NoneCheck())
class SqlSubscriptionUsagesScenarioTest(ScenarioTest):
def test_sql_subscription_usages(self):
self.cmd('sql list-usages -l westus',
checks=[JMESPathCheckGreaterThan('length(@)', 2)])
self.cmd('sql show-usage -l westus -u SubscriptionFreeDatabaseDaysLeft',
checks=[
JMESPathCheck('name', 'SubscriptionFreeDatabaseDaysLeft'),
JMESPathCheckGreaterThan('limit', 0)])
class SqlZoneResilienceScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_zone_resilient_database(self, resource_group, resource_group_location, server):
database_name = "createUnzonedUpdateToZonedDb"
database_name_2 = "createZonedUpdateToUnzonedDb"
database_name_3 = "updateNoParamForUnzonedDb"
database_name_4 = "updateNoParamForZonedDb"
rg = resource_group
loc_display = "East US 2"
# Test creating database with zone resilience set to false. Expect regular database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant {}'
.format(rg, server, database_name, "Premium", False),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular database with zone resilience set to true. Expect zone resilience to update to true.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --zone-redundant'
.format(rg, server, database_name, 'P1'),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P1'),
JMESPathCheck('zoneRedundant', True)])
# Test creating database with zone resilience set to true. Expect zone resilient database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --z'
.format(rg, server, database_name_2, "Premium"),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_2),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned database with zone resilience set to false. Expect zone resilience to update to false
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --z {}'
.format(rg, server, database_name_2, 'P1', False),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_2),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P1'),
JMESPathCheck('zoneRedundant', False)])
# Create database with no zone resilience set. Expect regular database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {}'
.format(rg, server, database_name_3, "Premium"),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_3),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular database with no zone resilience set. Expect zone resilience to stay false.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(rg, server, database_name_3, 'P2'),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_3),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P2'),
JMESPathCheck('zoneRedundant', False)])
# Create database with zone resilience set. Expect zone resilient database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(rg, server, database_name_4, "Premium"),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_4),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned database with no zone resilience set. Expect zone resilience to stay true.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(rg, server, database_name_4, 'P2'),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_4),
JMESPathCheck('elasticPoolName', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P2'),
JMESPathCheck('zoneRedundant', True)])
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_zone_resilient_pool(self, resource_group, resource_group_location, server):
pool_name = "createUnzonedUpdateToZonedPool"
pool_name_2 = "createZonedUpdateToUnzonedPool"
pool_name_3 = "updateNoParamForUnzonedPool"
pool_name_4 = "updateNoParamForZonedPool"
rg = resource_group
# Test creating pool with zone resilience set to false. Expect regular pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --z {}'
.format(rg, server, pool_name, "Premium", False))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular pool with zone resilience set to true. Expect zone resilience to update to true
self.cmd('sql elastic-pool update -g {} -s {} -n {} --z'
.format(rg, server, pool_name))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name),
JMESPathCheck('zoneRedundant', True)])
# Test creating pool with zone resilience set to true. Expect zone resilient pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(rg, server, pool_name_2, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned pool with zone resilience set to false. Expect zone resilience to update to false
self.cmd('sql elastic-pool update -g {} -s {} -n {} --zone-redundant {}'
.format(rg, server, pool_name_2, False))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('zoneRedundant', False)])
# Create pool with no zone resilience set. Expect regular pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {}'
.format(rg, server, pool_name_3, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_3),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular pool with no zone resilience set. Expect zone resilience to stay false
self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
.format(rg, server, pool_name_3, 250))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_3),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('dtu', 250),
JMESPathCheck('zoneRedundant', False)])
# Create pool with zone resilience set. Expect zone resilient pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(rg, server, pool_name_4, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_4),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_4),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned pool with no zone resilience set. Expect zone resilience to stay true
self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
.format(rg, server, pool_name_4, 250))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_4),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_4),
JMESPathCheck('dtu', 250),
JMESPathCheck('zoneRedundant', True)])
|
default_app_config = "grandchallenge.evaluation.apps.EvaluationConfig"
|
class StoryPytestError(Exception):
"""Base error of all stories-pytest errors."""
pass
|
from datetime import datetime
import re
from urllib.parse import quote
from django.db import models
from django.utils.html import urlize
from django.utils.timezone import make_aware, utc
from django.utils.translation import ugettext_lazy as _
from requests_oauthlib import OAuth1
import requests
from mezzanine.conf import settings
from mezzanine.twitter import (
QUERY_TYPE_CHOICES,
QUERY_TYPE_USER,
QUERY_TYPE_LIST,
QUERY_TYPE_SEARCH,
)
from mezzanine.twitter import get_auth_settings
from mezzanine.twitter.managers import TweetManager
re_usernames = re.compile(r"(^|\W)@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile(r"#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_hashtags = '<a href="http://twitter.com/search?q=%23\\1">#\\1</a>'
replace_usernames = '\\1<a href="http://twitter.com/\\2">@\\2</a>'
class TwitterQueryException(Exception):
pass
class Query(models.Model):
type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES, max_length=10)
value = models.CharField(_("Value"), max_length=140)
interested = models.BooleanField("Interested", default=True)
class Meta:
verbose_name = _("Twitter query")
verbose_name_plural = _("Twitter queries")
ordering = ("-id",)
def __str__(self):
return "%s: %s" % (self.get_type_display(), self.value)
def run(self):
"""
Request new tweets from the Twitter API.
"""
try:
value = quote(self.value)
except KeyError:
value = self.value
urls = {
QUERY_TYPE_USER: (
"https://api.twitter.com/1.1/statuses/"
"user_timeline.json?screen_name=%s"
"&include_rts=true" % value.lstrip("@")
),
QUERY_TYPE_LIST: (
"https://api.twitter.com/1.1/lists/statuses.json"
"?list_id=%s&include_rts=true" % value
),
QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json"
"?q=%s" % value,
}
try:
url = urls[self.type]
except KeyError:
raise TwitterQueryException("Invalid query type: %s" % self.type)
auth_settings = get_auth_settings()
if not auth_settings:
from mezzanine.conf import registry
if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]:
# These are some read-only keys and secrets we use
# for the default query (eg nothing has been configured)
auth_settings = (
"KxZTRD3OBft4PP0iQW0aNQ",
"sXpQRSDUVJ2AVPZTfh6MrJjHfOGcdK4wRb1WTGQ",
"1368725588-ldWCsd54AJpG2xcB5nyTHyCeIC3RJcNVUAkB1OI",
"r9u7qS18t8ad4Hu9XVqmCGxlIpzoCN3e1vx6LOSVgyw3R",
)
else:
raise TwitterQueryException("Twitter OAuth settings missing")
try:
tweets = requests.get(url, auth=OAuth1(*auth_settings)).json()
except Exception as e:
raise TwitterQueryException("Error retrieving: %s" % e)
try:
raise TwitterQueryException(tweets["errors"][0]["message"])
except (IndexError, KeyError, TypeError):
pass
if self.type == "search":
tweets = tweets["statuses"]
for tweet_json in tweets:
remote_id = str(tweet_json["id"])
tweet, created = self.tweets.get_or_create(remote_id=remote_id)
if not created:
continue
if "retweeted_status" in tweet_json:
user = tweet_json["user"]
tweet.retweeter_user_name = user["screen_name"]
tweet.retweeter_full_name = user["name"]
tweet.retweeter_profile_image_url = user["profile_image_url"]
tweet_json = tweet_json["retweeted_status"]
if self.type == QUERY_TYPE_SEARCH:
tweet.user_name = tweet_json["user"]["screen_name"]
tweet.full_name = tweet_json["user"]["name"]
tweet.profile_image_url = tweet_json["user"]["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
else:
user = tweet_json["user"]
tweet.user_name = user["screen_name"]
tweet.full_name = user["name"]
tweet.profile_image_url = user["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
tweet.text = urlize(tweet_json["text"])
tweet.text = re_usernames.sub(replace_usernames, tweet.text)
tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
if getattr(settings, "TWITTER_STRIP_HIGH_MULTIBYTE", False):
chars = [ch for ch in tweet.text if ord(ch) < 0x800]
tweet.text = "".join(chars)
d = datetime.strptime(tweet_json["created_at"], date_format)
tweet.created_at = make_aware(d, utc)
try:
tweet.save()
except Warning:
pass
tweet.save()
self.interested = False
self.save()
class Tweet(models.Model):
remote_id = models.CharField(_("Twitter ID"), max_length=50)
created_at = models.DateTimeField(_("Date/time"), null=True)
text = models.TextField(_("Message"), null=True)
profile_image_url = models.URLField(_("Profile image URL"), null=True)
user_name = models.CharField(_("User name"), max_length=100, null=True)
full_name = models.CharField(_("Full name"), max_length=100, null=True)
retweeter_profile_image_url = models.URLField(
_("Profile image URL (Retweeted by)"), null=True
)
retweeter_user_name = models.CharField(
_("User name (Retweeted by)"), max_length=100, null=True
)
retweeter_full_name = models.CharField(
_("Full name (Retweeted by)"), max_length=100, null=True
)
query = models.ForeignKey("Query", on_delete=models.CASCADE, related_name="tweets")
objects = TweetManager()
class Meta:
verbose_name = _("Tweet")
verbose_name_plural = _("Tweets")
ordering = ("-created_at",)
def __str__(self):
return "%s: %s" % (self.user_name, self.text)
def is_retweet(self):
return self.retweeter_user_name is not None
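# Example usage (a minimal sketch; assumes the Twitter OAuth settings are configured
# and Django's ORM is set up -- names below are illustrative only):
#
#   query, _ = Query.objects.get_or_create(type=QUERY_TYPE_SEARCH, value="#django")
#   query.run()                  # fetches tweets and stores them via query.tweets
#   latest = query.tweets.all()  # related Tweet objects, newest first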
|
"""carfinder URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from finder import views
from finder import forms
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import include, url, handler404
# from django.views.generic.simple import direct_to_template
from django.views.generic.base import TemplateView
index = views.Index()
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.Index.as_view(), name='index'),
path('upload/', forms.upload, name='upload'),
path('upload/predictImage', index.predict_image, name="predictImage"),
path('list/',index.list_of_cars, name='listOfCars')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# cmd to run the handler: python manage.py collectstatic
handler404 = 'finder.views.error_404'
|
from rest_framework.views import APIView
from knox.auth import TokenAuthentication
from rest_framework.permissions import IsAdminUser, IsAuthenticated, AllowAny
from rest_framework.response import Response
from jaseci.utils.utils import logger
from jaseci.api.public_api import public_api
from jaseci.element.element import element
from jaseci_serv.base.orm_hook import orm_hook
from jaseci_serv.base.models import JaseciObject, GlobalVars
from time import time
class JResponse(Response):
def __init__(self, master, *args, **kwargs):
super().__init__(*args, **kwargs)
self.master = master
for i in self.master._h.save_obj_list:
self.master._h.commit_obj_to_redis(i)
self.master._h.skip_redis_update = True
def close(self):
super(JResponse, self).close()
# Commit db changes after response to user
self.master._h.commit()
class AbstractJacAPIView(APIView):
"""
The builder set of Jaseci APIs
"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def post(self, request):
"""
General post function that parses api signature to load parms
SuperSmart Post - can read signatures of master and process
bodies accordingly
"""
self.proc_request(request)
api_result = self.caller.general_interface_to_api(
self.cmd, type(self).__name__)
self.log_request_time()
return self.issue_response(api_result)
def log_request_time(self):
"""Api call preamble"""
TY = '\033[33m'
TG = '\033[32m'
EC = '\033[m' # noqa
tot_time = time()-self.start_time
save_count = 0
if(isinstance(self.caller, element)):
save_count = len(self.caller._h.save_obj_list)
logger.info(str(
f'API call to {TG}{type(self).__name__}{EC}'
f' completed in {TY}{tot_time:.3f} seconds{EC}'
f' saving {TY}{save_count}{EC} objects.'))
def proc_request(self, request):
"""Parse request to field set"""
pl_peek = str(dict(request.data))[:256]
logger.info(str(
f'Incoming call to {type(self).__name__} with {pl_peek}'))
self.start_time = time()
self.cmd = request.data
self.set_caller(request)
self.res = "Not valid interaction!"
def set_caller(self, request):
"""Assigns the calling api interface obj"""
self.caller = request.user.get_master()
def issue_response(self, api_result):
"""Issue response from call"""
# self.caller._h.commit()
# return Response(api_result)
# for i in self.caller._h.save_obj_list:
# self.caller._h.commit_obj_to_redis(i)
return JResponse(self.caller, api_result)
class AbstractAdminJacAPIView(AbstractJacAPIView):
"""
The abstract base for Jaseci Admin APIs
"""
permission_classes = (IsAuthenticated, IsAdminUser)
class AbstractPublicJacAPIView(AbstractJacAPIView):
"""
The abstract base for Jaseci Public APIs
"""
permission_classes = (AllowAny,)
def set_caller(self, request):
"""Assigns the calling api interface obj"""
self.caller = public_api(orm_hook(
objects=JaseciObject.objects,
globs=GlobalVars.objects
))
def issue_response(self, api_result):
"""Issue response from call"""
# If committer set, results should be saved back
if(self.caller.committer):
return JResponse(self.caller.committer, api_result)
else:
return Response(api_result)
|
from twisted.internet import reactor
from twisted.web import client, error, http
from twisted.web.resource import Resource
from hookah import queue
import urllib
import sys, os
from urllib import unquote
import cgi
import StringIO
import mimetools
import mimetypes
# TODO: Make these configurable
RETRIES = 3
DELAY_MULTIPLIER = 5
def decode_multipart_formdata(body_io, cgi_environ):
body_io.seek(0,0)
fs = cgi.FieldStorage(fp=body_io, environ=cgi_environ, keep_blank_values=True)
fields = {}
files = {}
for field in fs.list:
if field.filename:
files.setdefault(field.name, []).append((field.name, field.filename, field.value))
else:
fields.setdefault(field.name, []).append(field.value)
return fields, files
def encode_multipart_formdata(fields, files):
BOUNDARY = mimetools.choose_boundary()
CRLF = '\r\n'
L = []
for key in fields:
for value in fields[key]:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for key in files:
for key, filename, value in files[key]:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % (mimetypes.guess_type(filename)[0] or 'application/octet-stream'))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
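# Example (a minimal sketch): a round trip through the helpers above.
#
#   ctype, body = encode_multipart_formdata({'name': ['value']}, {})
#   # ctype -> 'multipart/form-data; boundary=...'; body holds one "name" part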
def cgi_environ_factory(request):
if request.prepath:
scriptName = '/' + '/'.join(request.prepath)
else:
scriptName = ''
if request.postpath:
pathInfo = '/' + '/'.join(request.postpath)
else:
pathInfo = ''
parts = request.uri.split('?', 1)
if len(parts) == 1:
queryString = ''
else:
queryString = unquote(parts[1])
environ = {
'REQUEST_METHOD': request.method,
'REMOTE_ADDR': request.getClientIP(),
'SCRIPT_NAME': scriptName,
'PATH_INFO': pathInfo,
'QUERY_STRING': queryString,
'CONTENT_TYPE': request.getHeader('content-type') or '',
'CONTENT_LENGTH': request.getHeader('content-length') or '',
'SERVER_NAME': request.getRequestHostname(),
'SERVER_PORT': str(request.getHost().port),
'SERVER_PROTOCOL': request.clientproto}
for name, values in request.requestHeaders.getAllRawHeaders():
name = 'HTTP_' + name.upper().replace('-', '_')
# It might be preferable for http.HTTPChannel to clear out
# newlines.
environ[name] = ','.join([
v.replace('\n', ' ') for v in values])
return environ
def post_and_retry(url, data, retry=0, content_type='application/x-www-form-urlencoded'):
if type(data) is dict:
print "Posting [%s] to %s with %s" % (retry, url, data)
data = urllib.urlencode(data)
else:
print "Posting [%s] to %s with %s bytes of postdata" % (retry, url, len(data))
headers = {
'Content-Type': content_type,
'Content-Length': str(len(data)),
}
client.getPage(url, method='POST' if len(data) else 'GET', headers=headers, postdata=data if len(data) else None).addCallbacks( \
if_success, lambda reason: if_fail(reason, url, data, retry, content_type))
def if_success(page): pass
def if_fail(reason, url, data, retry, content_type):
if reason.getErrorMessage()[0:3] in ['301', '302', '303']:
return # Not really a fail
print reason.getErrorMessage()
if retry < RETRIES:
retry += 1
reactor.callLater(retry * DELAY_MULTIPLIER, post_and_retry, url, data, retry, content_type)
class DispatchResource(Resource):
isLeaf = True
def render(self, request):
path = '/'.join(request.prepath[1:])
content_type = request.getHeader('content-type')
if content_type.startswith('application/x-www-form-urlencoded'):
content_type = 'urlencoded'
fields, files = request.args, {}
elif content_type.startswith('multipart/form-data'):
content_type = 'multipart'
fields, files = decode_multipart_formdata(request.content, cgi_environ_factory(request))
topic_param = fields.get('_topic', [None])[0]
if topic_param:
del fields['_topic']
if content_type == 'multipart':
out_type, data = encode_multipart_formdata(fields, files)
else:
out_type, data = 'application/x-www-form-urlencoded', urllib.urlencode(fields, doseq=True)
queue.put('dispatch', {
'topic' : topic_param,
'data' : data,
'content_type' : out_type,
})
request.setResponseCode(http.ACCEPTED)
return "202 Scheduled"
url = fields.get('_url', [None])[0]
if url:
del fields['_url']
if content_type == 'multipart':
out_type, data = encode_multipart_formdata(fields, files)
else:
out_type, data = 'application/x-www-form-urlencoded', urllib.urlencode(fields, doseq=True)
post_and_retry(url, data, content_type=out_type)
request.setResponseCode(http.ACCEPTED)
return "202 Scheduled"
else:
request.setResponseCode(http.BAD_REQUEST)
return "400 No destination URL"
|
# Generated by Django 3.2.7 on 2021-12-02 21:01
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('Author', '0001_initial'),
('Posts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='inbox',
name='iPosts',
field=models.ManyToManyField(blank=True, default=list, to='Posts.Post'),
),
migrations.AddField(
model_name='followers',
name='items',
field=models.ManyToManyField(blank=True, default=list, related_name='items', to=settings.AUTH_USER_MODEL),
),
]
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from datetime import datetime
from DateTime.DateTime import DateTime
import six
MAX32 = int(2 ** 31 - 1)
def safe_callable(ob):
# Works with ExtensionClasses and Acquisition.
try:
ob.__class__
try:
return bool(ob.__call__)
except AttributeError:
return isinstance(ob, six.class_types)
except AttributeError:
return callable(ob)
def datetime_to_minutes(value, precision=1,
max_value=MAX32, min_value=-MAX32):
if value is None:
return value
if isinstance(value, (str, datetime)):
value = DateTime(value)
if isinstance(value, DateTime):
value = value.millis() / 1000 / 60 # flatten to minutes
# flatten to precision
if precision > 1:
value = value - (value % precision)
value = int(value)
if value > max_value or value < min_value:
# value must be integer fitting in the range (default 32bit)
raise OverflowError(
'{0} is not within the range of dates allowed.'.format(value))
return value
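# Example (a minimal sketch): strings and datetimes are coerced to DateTime and
# flattened to whole minutes since the epoch, e.g.
#
#   datetime_to_minutes('2002/01/01 00:10:00 UTC')   # -> 16830730
#   datetime_to_minutes(None)                        # -> None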
|
# Given a list of pts text files, build a complete dataset from it.
import glob
import os
import PIL.Image
import cv2
import numpy as np
from time import time
from argparse import ArgumentParser
from scipy.spatial import cKDTree
import tensorflow as tf
import SaddlePoints
import errno
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if os.path.isdir(path):
pass
else:
raise
# Given chessboard corners, get all 7x7 = 49 internal x-corner positions.
def getXcorners(corners):
# Get Xcorners for image
ideal_corners = np.array([[0,1],[1,1],[1,0],[0,0]],dtype=np.float32)
M = cv2.getPerspectiveTransform(ideal_corners, corners) # From ideal to real.
# 7x7 internal grid of 49 x-corners/
xx,yy = np.meshgrid(np.arange(7, dtype=np.float32), np.arange(7, dtype=np.float32))
all_ideal_grid_pts = np.vstack([xx.flatten(), yy.flatten()]).T
all_ideal_grid_pts = (all_ideal_grid_pts + 1) / 8.0
chess_xcorners = cv2.perspectiveTransform(np.expand_dims(all_ideal_grid_pts,0), M)[0,:,:]
return chess_xcorners
def getPointsNearPoints(ptsA, ptsB, MIN_DIST_PX=3):
# Returns a mask for points in A that are close by MIN_DIST_PX to points in B
min_dists, min_dist_idx = cKDTree(ptsB).query(ptsA, 1)
mask = min_dists < MIN_DIST_PX
return mask
# Load image from path
def loadImage(img_filepath):
print ("Processing %s" % (img_filepath))
img = PIL.Image.open(img_filepath)
if (img.size[0] > 640):
img = img.resize((640, 480), PIL.Image.BICUBIC)
gray = np.array(img.convert('L'))
rgb = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
return rgb, gray
def getTiles(pts, img_gray, WINSIZE=10):
# NOTE : Assumes no point is within WINSIZE of an edge!
# Points Nx2, columns should be x and y, not r and c.
# WINSIZE = the number of pixels out from the point that a tile should be.
# Build tiles of size Nx(2*WINSIZE+1)x(2*WINSIZE+1)
img_shape = np.array([img_gray.shape[1], img_gray.shape[0]])
tiles = np.zeros([len(pts), WINSIZE*2+1, WINSIZE*2+1], dtype=img_gray.dtype)
for i, pt in enumerate(np.round(pts).astype(np.int64)):
tiles[i,:,:] = img_gray[pt[1]-WINSIZE:pt[1]+WINSIZE+1,
pt[0]-WINSIZE:pt[0]+WINSIZE+1]
return tiles
def getTilesColor(pts, img, WINSIZE=10):
# NOTE : Assumes no point is within WINSIZE of an edge!
# Points Nx2, columns should be x and y, not r and c.
# WINSIZE = the number of pixels out from the point that a tile should be.
# Build tiles of size Nx(2*WINSIZE+1)x(2*WINSIZE+1)
img_shape = np.array([img.shape[1], img.shape[0]])
tiles = np.zeros([len(pts), WINSIZE*2+1, WINSIZE*2+1, 3], dtype=img.dtype)
for i, pt in enumerate(np.round(pts).astype(np.int64)):
tiles[i,:,:,:] = img[pt[1]-WINSIZE:pt[1]+WINSIZE+1,
pt[0]-WINSIZE:pt[0]+WINSIZE+1, :]
return tiles
# View image with chessboard lines overlaid.
def addOverlay(idx, img, corners, good_xcorners, bad_pts):
for pt in np.round(bad_pts).astype(np.int64):
cv2.rectangle(img, tuple(pt-2),tuple(pt+2), (0,0,255), -1)
for pt in np.round(good_xcorners).astype(np.int64):
cv2.rectangle(img, tuple(pt-2),tuple(pt+2), (0,255,0), -1)
cv2.polylines(img,
[np.round(corners).astype(np.int32)],
isClosed=True, thickness=2, color=(255,0,255))
cv2.putText(img,
'Frame % 4d' % (idx),
(5,15), cv2.FONT_HERSHEY_PLAIN, 1.0,(255,255,255),0)
def visualizeTiles(tiles):
# Assumes no more than 49 tiles, only plots the first 49
N = len(tiles)
# assert N <= 49
assert tiles.shape[1] == tiles.shape[2] # square tiles
side = tiles.shape[1]
cols = 7#int(np.ceil(np.sqrt(N)))
rows = 7#int(np.ceil(N/(cols)))+1
tile_img = np.zeros([rows*side, cols*side, 3], dtype=tiles.dtype)
for i in range(min(N,49)):
r, c = side*(int(i/cols)), side*(i%cols)
tile_img[r:r+side, c:c+side,:] = tiles[i,:,:,:]
return tile_img
# Converting the values into features
# _int64 is used for numeric values
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# _bytes is used for string/char values
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def main(args):
for pointfile in args.pointfiles:
with open(pointfile, 'r') as f:
lines = f.readlines()
video_filepath = lines[0]
images_path = os.path.dirname(pointfile)
# Writing to TFrecord
video_filename = os.path.basename(video_filepath)[:-5]
folder_path = "%s/winsize_%s_color" % (args.tfrecords_path, args.winsize)
mkdir_p(folder_path)
tfrecord_path = "%s/%s_ws%d.tfrecords" % (folder_path, video_filename, args.winsize)
with tf.python_io.TFRecordWriter(tfrecord_path) as writer:
for line in lines[1:]:
tA = time()
parts = line.split(',')
idx = int(parts[0])
# if (idx < 260):
# continue
corners = np.array(parts[1:], dtype=np.float32).reshape([4,2])
xcorners = getXcorners(corners)
filename = "%s/frame_%03d.jpg" % (images_path, idx)
img, gray = loadImage(filename)
# Saddle points
spts, gx, gy = SaddlePoints.getFinalSaddlePoints(gray, WINSIZE=args.winsize)
good_spt_mask = getPointsNearPoints(spts, xcorners)
good_xcorners = spts[good_spt_mask]
bad_spts = spts[~good_spt_mask]
# Only keep the same # of bad points as good
# Shuffle bad points so we get a good smattering.
N = len(good_xcorners)
np.random.shuffle(bad_spts)
bad_spts = bad_spts[:N]
# good_xcorners, bad_xcorners, bad_spts, spts, keep_mask = getXcornersNearSaddlePts(gray, xcorners)
tiles = getTilesColor(good_xcorners, img, WINSIZE=args.winsize)
bad_tiles = getTilesColor(bad_spts, img, WINSIZE=args.winsize)
# Write tiles to tf-records
for tile in tiles:
feature = { 'label': _int64_feature(1),
'image': _bytes_feature(tf.compat.as_bytes(tile.tostring())) }
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
for tile in bad_tiles:
feature = { 'label': _int64_feature(0),
'image': _bytes_feature(tf.compat.as_bytes(tile.tostring())) }
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
if args.viztiles:
tile_img = visualizeTiles(tiles)
bad_tile_img = visualizeTiles(bad_tiles)
print('\t Took %.1f ms.' % ((time() - tA)*1000))
if args.vizoverlay:
overlay_img = img.copy()
addOverlay(idx, overlay_img, corners, good_xcorners, bad_spts)
cv2.imshow('frame',overlay_img)
if args.viztiles:
cv2.imshow('tiles', tile_img)
cv2.imshow('bad_tiles', bad_tile_img)
if (args.vizoverlay or args.viztiles):
if (cv2.waitKey(1) & 0xFF == ord('q')):
break
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("pointfiles", nargs='+',
help="All pts.txt points files containing filename and chessboard coordinates.")
parser.add_argument("-savetf",
action='store_true', help="Whether to save tfrecords")
parser.add_argument("-viztiles",
action='store_true', help="Whether to visualize tiles or not")
parser.add_argument("-vizoverlay",
action='store_true', help="Whether to visualize overlay")
parser.add_argument("--tfrecords_path", default='datasets/tfrecords',
help="Folder to store tfrecord output")
parser.add_argument("-ws", "--winsize", dest="winsize", default=10, type=int,
help="Half window size (full kernel = 2*winsize + 1)")
args = parser.parse_args()
print(args)
main(args)
|
# Question 1
# This function converts miles to kilometers (km).
# Complete the function to return the result of the conversion
# Call the function to convert the trip distance from miles to kilometers
# Fill in the blank to print the result of the conversion
# Calculate the round-trip in kilometers by doubling the result, and fill in the blank to print the result
# 1) Complete the function to return the result of the conversion
def convert_distance(miles):
km = miles * 1.6 # approximately 1.6 km in 1 mile
return km
my_trip_miles = 55
# 2) Convert my_trip_miles to kilometers by calling the function above
my_trip_km = convert_distance(my_trip_miles)
# 3) Fill in the blank to print the result of the conversion
print("The distance in kilometers is " + str(my_trip_km))
# 4) Calculate the round-trip in kilometers by doubling the result,
# and fill in the blank to print the result
print("The round-trip in kilometers is " + str(my_trip_km * 2))
|
#!/usr/bin/env python
import tyrell.spec as Stuff
from tyrell.interpreter import PostOrderInterpreter
from tyrell.enumerator import SmtEnumerator, RelaxedRandomEnumerator
from tyrell.decider import Example, ExampleConstraintDecider, SimpleSpiceDecider, ExampleConstraintPruningDecider
from tyrell.synthesizer import Synthesizer
from tyrell.logger import get_logger
from skidl import *
from skidl.pyspice import *
logger = get_logger('tyrell')
class Circuitsv():
def __init__(self, op):
self.circuit = Circuit()
self.vin = Net('VI')
self.vout = Net('VO')
self.ground = Net('GND')
self.sanity = Net('Sanity')
self.pow = []
self.op = op
self.circuit += self.vin, self.vout, self.ground, self.sanity
def reinit(self):
self.circuit.reset()
self.circuit = Circuit()
self.vin = Net('VI')
self.vout = Net('VO')
self.ground = Net('GND')
self.sanity = Net('Sanity')
self.pow = []
self.circuit += self.vin, self.vout, self.ground, self.sanity
class ToyInterpreter(PostOrderInterpreter):
def __init__(self, circuit):
self.circuit = circuit
def eval_get_ground(self, node, args):
return self.circuit.ground
def eval_get_Resistance(self, node, args):
return int(args[0])
def eval_get_outnet(self, node, args):
return self.circuit.vout
def eval_get_Supply(self, node, args):
if len(self.circuit.pow) == 0:
with self.circuit.circuit:
vdc = V(dc_value=5 @ u_V)
self.circuit.pow.append(vdc)
self.circuit.pow[0]['n']+=self.circuit.ground
return self.circuit.pow[0]
def eval_startCon(self, node, args):
if self.circuit.op == "mult":
return None
logger.info("startCon")
self.circuit.vin += args[0][1]
return self.circuit
def eval_startConPart(self, node, args):
logger.info("startConPart")
return self.circuit
def eval_toTransist(self, node, args):
logger.info("toTransist")
return args[0]
def eval_toResist(self, node, args):
return args[0]
def eval_Rout(self, node, args):
logger.info("Rout")
with self.circuit.circuit:
r = R(value = args[1])
r[2]+=args[0]
return r
def eval_Tout(self, node, args):
logger.info("Tout")
with self.circuit.circuit:
q = BJT(model='2n2222a')
q['c']+=args[0]
q['b']+=args[1]
q['e']+=args[2]
return q
def eval_NO(self, node, args):
with self.circuit.circuit:
self.circuit.vout+=args[0][1]
return self.circuit.vout
def eval_NI(self, node, args):
with self.circuit.circuit:
self.circuit.vin+=args[0][1]
return self.circuit.vin
def eval_Nout(self, node, args):
with self.circuit.circuit:
n = Net()
n += args[0][1]
return n
def eval_GR(self, node, args):
logger.info("GR")
self.circuit.ground += args[0][1]
return self.circuit.ground
def eval_Rpow(self, node, args):
with self.circuit.circuit:
r = R(value = args[1])
r[2]+=args[0]['p']
return r
#Abstract Interpreter
def apply_vin(self, val):
return val
def apply_vout(self, val):
return val
def apply_ground(self, val):
return val
def main():
logger.info('Parsing Spec...')
# TBD: parse the DSL definition file and store it to `spec`
spec = Stuff.parse_file('example/divmult3.tyrell')
logger.info('Parsing succeeded')
circ = Circuitsv("mult")
logger.info('Building synthesizer...')
synthesizer = Synthesizer(
enumerator=RelaxedRandomEnumerator(spec, max_depth=6, min_depth=4, seed=None),
decider=SimpleSpiceDecider(
spec=spec, # TBD: provide the spec here
interpreter=ToyInterpreter(circ),
examples=[Example(input=[circ],output=["mult", 16.5])] # TBD: provide the example here
)
)
logger.info('Synthesizing programs...')
prog = synthesizer.synthesize()
if prog is not None:
logger.info('Solution found: {}'.format(prog))
else:
logger.info('Solution not found!')
if __name__ == '__main__':
logger.setLevel('DEBUG')
main()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def cf_managementgroups(cli_ctx, **_):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azext_managementgroups.managementgroups import ManagementGroupsAPI
return get_mgmt_service_client(
cli_ctx,
ManagementGroupsAPI,
subscription_bound=False)
def management_groups_client_factory(cli_ctx, _):
return cf_managementgroups(cli_ctx).management_groups
def management_group_subscriptions_client_factory(cli_ctx, _):
return cf_managementgroups(cli_ctx).management_group_subscriptions
|
import scipy.stats as stats
import numpy as np
import warnings
from ecdfgof import adtest, kstest
warnings.filterwarnings("ignore")
_long = [
("alpha", stats.alpha),
("anglit", stats.anglit),
("arcsine", stats.arcsine),
("argus", stats.argus),
("beta", stats.beta),
("betaprime", stats.betaprime),
("bradford", stats.bradford),
("burr", stats.burr),
("burr12", stats.burr12),
("cauchy", stats.cauchy),
("chi", stats.chi),
("chi2", stats.chi2),
("cosine", stats.cosine),
("crystalball", stats.crystalball),
("dgamma", stats.dgamma),
("dweibull", stats.dweibull),
# ("erlang", stats.erlang),
("expon", stats.expon),
("exponnorm", stats.exponnorm),
("exponweib", stats.exponweib),
("exponpow", stats.exponpow),
("f", stats.f),
("fatiguelife", stats.fatiguelife),
("fisk", stats.fisk),
("foldcauchy", stats.foldcauchy),
("foldnorm", stats.foldnorm),
# ("frechet_r", stats.frechet_r),
# ("frechet_l", stats.frechet_l),
("genlogistic", stats.genlogistic),
("gennorm", stats.gennorm),
("genpareto", stats.genpareto),
("genexpon", stats.genexpon),
("genextreme", stats.genextreme),
("gausshyper", stats.gausshyper),
("gamma", stats.gamma),
("gengamma", stats.gengamma),
("genhalflogistic", stats.genhalflogistic),
("gilbrat", stats.gilbrat),
("gompertz", stats.gompertz),
("gumbel_r", stats.gumbel_r),
("gumbel_l", stats.gumbel_l),
("halfcauchy", stats.halfcauchy),
("halflogistic", stats.halflogistic),
("halfnorm", stats.halfnorm),
("halfgennorm", stats.halfgennorm),
("hypsecant", stats.hypsecant),
("invgamma", stats.invgamma),
("invgauss", stats.invgauss),
("invweibull", stats.invweibull),
("johnsonsb", stats.johnsonsb),
("johnsonsu", stats.johnsonsu),
("kappa4", stats.kappa4),
("kappa3", stats.kappa3),
("ksone", stats.ksone),
("kstwobign", stats.kstwobign),
("laplace", stats.laplace),
("levy", stats.levy),
("levy_l", stats.levy_l),
("levy_stable", stats.levy_stable),
("logistic", stats.logistic),
("loggamma", stats.loggamma),
("loglaplace", stats.loglaplace),
("lognorm", stats.lognorm),
("lomax", stats.lomax),
("maxwell", stats.maxwell),
("mielke", stats.mielke),
("moyal", stats.moyal),
("nakagami", stats.nakagami),
("ncx2", stats.ncx2),
("ncf", stats.ncf),
("nct", stats.nct),
("norm", stats.norm),
("norminvgauss", stats.norminvgauss),
("pareto", stats.pareto),
("pearson3", stats.pearson3),
("powerlaw", stats.powerlaw),
("powerlognorm", stats.powerlognorm),
("powernorm", stats.powernorm),
# ("rdist", stats.rdist),
# ("reciprocal", stats.reciprocal),
("rayleigh", stats.rayleigh),
("rice", stats.rice),
("recipinvgauss", stats.recipinvgauss),
("semicircular", stats.semicircular),
("skewnorm", stats.skewnorm),
("t", stats.t),
("trapz", stats.trapz),
("triang", stats.triang),
("truncexpon", stats.truncexpon),
# ("truncnorm", stats.truncnorm),
("tukeylambda", stats.tukeylambda),
("uniform", stats.uniform),
# ("vonmises", stats.vonmises),
("vonmises_line", stats.vonmises_line),
("wald", stats.wald),
("weibull_min", stats.weibull_min),
("weibull_max", stats.weibull_max),
# ("wrapcauchy", stats.wrapcauchy),
]
_short = [
("alpha", stats.alpha),
("beta", stats.beta),
("cauchy", stats.cauchy),
("chi2", stats.chi2),
# ("cosine", stats.cosine),
("expon", stats.expon),
("exponnorm", stats.exponnorm),
("f", stats.f),
("gamma", stats.gamma),
("laplace", stats.laplace),
("levy", stats.levy),
("levy_stable", stats.levy_stable),
("logistic", stats.logistic),
("loggamma", stats.loggamma),
("loglaplace", stats.loglaplace),
("lognorm", stats.lognorm),
("norm", stats.norm),
("pareto", stats.pareto),
("powerlaw", stats.powerlaw),
("t", stats.t),
("triang", stats.triang),
("uniform", stats.uniform),
("weibull_min", stats.weibull_min),
("weibull_max", stats.weibull_max),
]
def fit(data, scipydist, name=None):
# fit distribution using maximum likelihood
params = scipydist.fit(data)
# create a "frozen" distribution object
dist = scipydist(*params)
# calculate log likelihood function and info criteria
loglike = dist.logpdf(data).sum()
bic = np.log(len(data)) * len(params) - 2.0 * loglike # Schwarz
aic = 2.0 * len(params) - 2.0 * loglike # Akaike
# p-values for GOF tests
ad_pval = adtest(data, dist)[1] # Anderson-Darling
ks_pval = kstest(data, dist)[1] # Kolmogorov-Smirnov
return {"bic": bic, "aic": aic, "ad_pval": ad_pval,
"ks_pval": ks_pval, "dist": dist, "name": name}
def _fit_all(data, dist_list):
results = list(map(lambda x: fit(data, x[1], x[0]), dist_list))
return sorted(results, key=lambda r: r["bic"]) # lowest BIC to highest
def _fstr(value):
return ("%.3f" % value).rjust(8)
def _result_line(r, header=False):
if header is True:
return " distribution, BIC, AIC, KS p-val, AD p-val\n"
else:
return ("%s, %s, %s, %s, %s\n" %
(r["name"].rjust(15), _fstr(r["bic"]), _fstr(r["aic"]),
_fstr(r["ks_pval"]), _fstr(r["ad_pval"])))
def compare(data, long=False):
dist_list = _long if long is True else _short
results = _fit_all(data, dist_list)
lines = [_result_line(None, header=True)] + list(map(_result_line, results))
return "".join(lines)
|
import pytest
from brownie import interface, RewardsManager, Contract
from utils.voting import create_vote
from utils.config import (lido_dao_voting_address,
lido_dao_agent_address,
balancer_deployed_manager,
lido_dao_token_manager_address,
ldo_token_address)
from utils.evm_script import encode_call_script
def test_erc_20_recover_via_voting(ldo_holder, rewards_manager, helpers, accounts, dao_voting, ldo_token, stranger):
# manager_contract = Contract.from_abi('RewardsManager', balancer_deployed_manager, RewardsManager.abi)
agent_contract = interface.Agent(lido_dao_agent_address)
ldo_token.transfer(rewards_manager, 10**18, {"from": ldo_holder})
assert ldo_token.balanceOf(rewards_manager) == 10**18
encoded_recover_calldata = rewards_manager.recover_erc20.encode_input(ldo_token_address, 10**18, stranger)
recover_script = encode_call_script([(rewards_manager.address, encoded_recover_calldata)])
forwarded_script = encode_call_script([(lido_dao_agent_address, agent_contract.forward.encode_input(recover_script))])
(vote_id, _) = create_vote(
voting=interface.Voting(lido_dao_voting_address),
token_manager=interface.TokenManager(lido_dao_token_manager_address),
vote_desc='',
evm_script=forwarded_script,
tx_params={"from": ldo_holder})
helpers.execute_vote(vote_id=vote_id,
accounts=accounts,
dao_voting=dao_voting)
assert ldo_token.balanceOf(rewards_manager) == 0
assert ldo_token.balanceOf(stranger) == 10**18
|
import RPi.GPIO as GPIO
import time
HIN = 8
LIN = 10
freq = 500
class Motor:
def __init__(self, HIN=HIN, LIN=LIN, freq=freq):
GPIO.setmode(GPIO.BOARD)
GPIO.setup(HIN, GPIO.OUT)
GPIO.setup(LIN, GPIO.OUT)
self.high = GPIO.PWM(HIN, freq)
self.low = GPIO.PWM(LIN, freq)
self.low.start(0)
def setSpeed(self, speed):
if speed < 0:
self.high.start(0)
elif speed > 100:
self.high.start(100)
else:
self.high.start(speed)
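# Example usage (a minimal sketch; assumes the half-bridge driver's HIN/LIN pins
# are wired to board pins 8 and 10 as configured above):
#
#   motor = Motor()
#   motor.setSpeed(50)   # 50% PWM duty cycle on HIN
#   time.sleep(2)
#   GPIO.cleanup()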
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Get hardware info from Bpod
"""
from pybpodapi.protocol import Bpod
from confapp import conf
my_bpod = Bpod()
my_bpod.close()
print("Target Bpod firmware version: ", conf.TARGET_BPOD_FIRMWARE_VERSION)
print("Firmware version (read from device): ", my_bpod.hardware.firmware_version)
print("Machine type version (read from device): ", my_bpod.hardware.machine_type)
|
import boto3
class DynamoDB(object):
def __init__(self, table_name):
self.resource = self._resource()
self.client = self._client()
self.table = self.resource.Table(table_name)
self.table_name = table_name
def _resource(self):
return boto3.resource('dynamodb')
def _client(self):
return boto3.client('dynamodb')
def put_item(self, item):
return self.table.put_item(Item=item)
def get_item(self, key, value):
return self.table.get_item(Key={key: value})
def get_scan_paginator(self, attributes, page_size=100):
paginator = self.client.get_paginator('scan')
for page in paginator.paginate(
TableName=self.table_name,
AttributesToGet=[attributes],
PaginationConfig={'PageSize': page_size}):
yield page
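# Example usage (a minimal sketch; the table name and attribute are hypothetical,
# and AWS credentials/region are assumed to be configured for boto3):
#
#   db = DynamoDB('example-table')
#   db.put_item({'id': '123', 'payload': 'hello'})
#   item = db.get_item('id', '123')['Item']
#   for page in db.get_scan_paginator('id'):
#       print(page['Items'])   # low-level client pages, DynamoDB attribute-value format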
|
CCSettingsList = {'Simulink.SolverCC' : # Solver
{
'StartTime' : '0.0',
'StopTime' : 'inf',
'SolverMode' : 'SingleTasking',
'Solver' : 'FixedStepDiscrete',
'SolverName' : 'FixedStepDiscrete',
#Defined by Solver and SolverName 'SolverType' : 'Fixed-Step',
'AutoInsertRateTranBlk' : 'off'
},
'Simulink.DataIOCC' : # DataIO
{ 'SaveFormat' : 'StructureWithTime'},
'Simulink.OptimizationCC' : # Optimization
{
'BlockReduction' : 'off',
'BooleanDataType' : 'on',
'ConditionallyExecuteInputs' : 'off',
'UseSpecifiedMinMax' : 'off',
'ExpressionFolding' : 'off',
'RollThreshold' : 5,
'ZeroExternalMemoryAtStartup' : 'on',
'ZeroInternalMemoryAtStartup' : 'on',
'NoFixptDivByZeroProtection' : 'on',
'EfficientFloat2IntCast' : 'off',
'EfficientMapNaN2IntZero' : 'off',
'LifeSpan' : 'inf',
'InitFltsAndDblsToZero' : 'off'
},
'Simulink.DebuggingCC' : #Diag_Signal_Data
{
'RTPrefix' : 'error',
'ArrayBoundsChecking' : 'none',
'SignalInfNanChecking' : 'error',
'SignalRangeChecking' : 'error',
'CheckMatrixSingularityMsg' : 'error',
'IntegerOverflowMsg' : 'error',
'UnderSpecifiedDataTypeMsg' : 'error',
'UniqueDataStoreMsg' : 'error',
# 'Diag_Data_Stores' :
'ReadBeforeWriteMsg' : 'EnableAllAsError',
'WriteAfterWriteMsg' : 'EnableAllAsError',
'WriteAfterReadMsg' : 'EnableAllAsError',
'MultiTaskDSMMsg' : 'error',
# 'Diag_Solver' :
'AlgebraicLoopMsg' : 'error',
'ArtificialAlgebraicLoopMsg' : 'error',
'BlockPriorityViolationMsg' : 'error',
'SolverPrmCheckMsg' : 'error',
'UnknownTsInhSupMsg' : 'error',
'StateNameClashWarn' : 'warning',
# 'Diag_Saving' :
'SaveWithDisabledLinksMsg' : 'warning',
'SaveWithParameterizedLinksMsg' : 'warning',
# 'Diag_Init' :
'CheckSSInitialOutputMsg' : 'on',
'CheckExecutionContextPreStartOutputMsg' : 'on',
'CheckExecutionContextRuntimeOutputMsg' : 'on',
# 'Diag' :
'SignalResolutionControl' : 'UseLocalSettings',
# 'Diag_Sample_Time' :
'InheritedTsInSrcMsg' : 'warning',
'DiscreteInheritContinuousMsg' : 'error',
'MultiTaskCondExecSysMsg' : 'error',
'MultiTaskRateTransMsg' : 'error',
'SingleTaskRateTransMsg' : 'error',
'TasksWithSamePriorityMsg' : 'error',
'SigSpecEnsureSampleTimeMsg' : 'error',
# 'Diag_Data_Type' :
'Int32ToFloatConvMsg' : 'warning',
'UnnecessaryDatatypeConvMsg' : 'warning',
'VectorMatrixConversionMsg' : 'error',
# 'Diag_Parameter' :
'ParameterDowncastMsg' : 'error',
'ParameterOverflowMsg' : 'error',
'ParameterUnderflowMsg' : 'error',
'ParameterPrecisionLossMsg' : 'warning',
'ParameterTunabilityLossMsg' : 'error',
# 'Diag_Function_Call' :
'InvalidFcnCallConnMsg' : 'error',
'FcnCallInpInsideContextMsg' : 'Enable All',
# 'Diag_Sig_Connectivity' :
'SignalLabelMismatchMsg' : 'warning',
'UnconnectedInputMsg' : 'error',
'UnconnectedOutputMsg' : 'error',
'UnconnectedLineMsg' : 'error',
# 'Diag_Compatibility' :
'SFcnCompatibilityMsg' : 'error',
# 'Diag_Bus_Connectivity' :
'BusObjectLabelMismatch' : 'error',
'RootOutportRequireBusObject' : 'error',
'StrictBusMsg' : 'ErrorOnBusTreatedAsVector',
# 'Diag_Debug' :
'AssertControl' : 'DisableAll',
# 'Diag_Model_Referencing' :
'ModelReferenceIOMsg' : 'error',
'ModelReferenceVersionMismatchMessage' : 'none',
'ModelReferenceIOMismatchMessage' : 'error',
'ModelReferenceCSMismatchMessage' : 'warning',
'ModelReferenceDataLoggingMessage' : 'error'
},
'Simulink.HardwareCC' : #HW_Implementation
{
'ProdShiftRightIntArith' : 'on',
'ProdHWDeviceType' : 'Freescale->MPC82xx'
},
'Simulink.ModelReferenceCC' : #Model_Referencing
{
'UpdateModelReferenceTargets' : 'IfOutOfDate',
'ModelReferenceNumInstancesAllowed' : 'Single',
'ModelReferencePassRootInputsByReference' : 'on',
'ModelReferenceMinAlgLoopOccurrences' : 'off'
},
'Simulink.RTWCC' : # RTW
{
'IncludeHyperlinkInReport' : 'on',
'GenerateTraceInfo' : 'on',
'GenerateTraceReport' : 'on',
'GenerateTraceReportSl' : 'on',
'GenerateTraceReportSf' : 'on',
'GenerateTraceReportEml' : 'on',
'ObjectivePriorities' : ['Traceability','Safety precaution'],
'CheckMdlBeforeBuild' : 'Warning'
},
'Simulink.CodeAppCC' : # RTW_Code_Appearance
{
'ForceParamTrailComments' : 'on',
'GenerateComments' : 'on',
'MaxIdLength' : 31,
'ShowEliminatedStatement' : 'on',
'SimulinkDataObjDesc' : 'on',
'SFDataObjDesc' : 'on',
'MangleLength' : 4,
'CustomSymbolStrGlobalVar' : '$R$N$M',
'CustomSymbolStrType' : '$N$R$M',
'CustomSymbolStrField' : '$N$M',
'CustomSymbolStrFcn' : '$R$N$M$F',
'CustomSymbolStrFcnArg' : 'rt$I$N$M',
'CustomSymbolStrBlkIO' : 'rtb_$N$M',
'CustomSymbolStrTmpVar' : '$N$M',
'CustomSymbolStrMacro' : '$R$N$M_D',
'CustomCommentsFcn' : 'taxibot_comments_mptfun.m',
'DefineNamingRule' : 'None',
'ParamNamingRule' : 'None',
'SignalNamingRule' : 'None',
'InsertBlockDesc' : 'on',
'SimulinkBlockComments' : 'on',
'EnableCustomComments' : 'on',
'InlinedPrmAccess' : 'Literals',
'ReqsInCode' : 'on'
},
'Simulink.ERTTargetCC' : # RTW_ERT_Target
{
'TargetFunctionLibrary' : 'C89/90 (ANSI)',
'ERTMultiwordLength' : 256,
'GenerateSampleERTMain' : 'off',
'IncludeMdlTerminateFcn' : 'off',
'GeneratePreprocessorConditionals' : 'Enable all',
'CombineOutputUpdateFcns' : 'on',
'SuppressErrorStatus' : 'on',
'SupportAbsoluteTime' : 'off',
'MatFileLogging' : 'off',
'SupportNonFinite' : 'off',
'SupportComplex' : 'off',
'SupportContinuousTime' : 'off',
'SupportNonInlinedSFcns' : 'off',
'SupportVariableSizeSignals' : 'off',
'ParenthesesLevel' : 'Maximum',
'PortableWordSizes' : 'off',
'GenerateASAP2' : 'on',
'InlinedParameterPlacement' : 'Hierarchical',
'ERTSrcFileBannerTemplate' : 'taxibot_code_c_template.cgt',
'ERTHdrFileBannerTemplate' : 'taxibot_code_h_template.cgt',
'ERTDataSrcFileTemplate' : 'taxibot_data_c_template.cgt',
'ERTDataHdrFileTemplate' : 'taxibot_data_h_template.cgt',
'GRTInterface' : 'off',
'PreserveExpressionOrder' : 'on',
'PreserveIfCondition' : 'on',
'ConvertIfToSwitch' : 'off',
'EnableUserReplacementTypes' : 'on',
'UtilityFuncGeneration' : 'Shared location'
}}
DataStoreCC = { # Checks Rule HISL_0013 A
'HISL_0013 A': {'UniqueDataStoreMsg' : 'error',
'ReadBeforeWriteMsg' : 'EnableAllAsError',
'WriteAfterWriteMsg' : 'EnableAllAsError',
'WriteAfterReadMsg' : 'EnableAllAsError',
'MultiTaskDSMMsg' : 'error'},
# Checks Rule HISL_0005 C
'HISL_0005 C': {'CheckMatrixSingularityMsg' : 'error'}
}
AllowedOtherBlocks = {
'BusCreator' : [],
'BusSelector' : [],
'Concatenate' : [],
'Mux' : [],
'Demux' : [],
'From' : [],
'Goto' : [],
'GotoTagVisibility' : [],
'Merge' : [],
'Inport' : [],
'Outport' : [],
'Terminator' : [],
'Constant' : ['Value'],
'If' : [],
'SwitchCase' : [],
'RateTransition' : [],
'DataTypeConversion' : [],
'Lookup' : ['InputValues', 'Table'],
'Lookup2D' : ['RowIndex', 'ColumnIndex', 'Table'],
'Chart' : [],
'UnitDelay' : ['X0'],
'DiscreteIntegrator' : ['InitialCondition'],
'DiscreteTransferFcn' : ['Numerator', 'Denominator'],
'Sum' : [],
'Gain' : ['Gain'],
'Product' : [],
'Abs' : [],
'Math' : [],
'MinMax' : [],
'Trigonometry' : [],
'Sqrt' : [],
'Logic' : [],
'RelationalOperator' : [],
'Relay' : ['OnSwitchValue', 'OffSwitchValue', 'OnOutputValue', 'OffOutputValue'],
'Saturate' : ['UpperLimit', 'LowerLimit'],
'Switch' : ['Threshold'],
'ActionPort' : [],
'TriggerPort' : [],
'MultiPortSwitch' : [],
'Selector' : []
}
AllowedSubsystemBlocks = {
'ActionType': ['then', 'else', 'case', 'default','elseif'],
'TreatAsAtomicUnit': ['on'],
'RTWSystemCode': ['Auto', 'Reusable function', 'Function'],
'MaskType': ['CMBlock', 'Compare To Constant', 'DocBlock',
'Conversion', 'ReqId','Stateflow']
}
AllowedModelReferenceBlocks = {
'MaskType': ['Asymmetrical Debounce', 'Falling Edge', 'First order filter', 'Hysteresis',
'Latch', 'Periodic enable', 'Rate Limiter', 'Rising edge',
'Running average', 'SR Latch', 'Symmetrical Debounce']
}
AllowedReferenceBlocks = {
'SourceType' : ['Asymmetrical Debounce', 'CMBlock', 'Conversion',
'DocBlock', 'Falling Edge', 'Hysteresis',
'Latch', 'Lookup Table Dynamic', 'Rate Limiter',
'ReqId', 'Rising edge', 'Saturation Dynamic',
'SR Latch', 'SubSystem', 'Symmetrical Debounce',
'Function-Call Generator', 'Compare To Constant',
'First order filter','Periodic enable',
'Running average','Symmetrical Debounce'],
}
AttributesFormatString = {
'Lookup' : '<input=%<inputvalues>>\\\\n<output=%<outputvalues>>',
'UnitDelay' : '<initial=%<x0>>\\\\n<tsample=%<sampleTime>>',
'Switch' : '<threshold=%<threshold>>\\\\n<criteria=%<Criteria>>',
'DiscreteIntegrator' : '<initial=%<initialcondition>>\\\\n<tsample=%<sampleTime>>\\\\n<limits=%<UpperSaturationLimit>/%<LowerSaturationLimit>(%<LimitOutput>)>',
'DiscreteZeroPole' : '<tsample=%<sampleTime>>\\\\n<gain=%<gain>>',
'Outport' : '<tsample=%<SampleTime>>',
'Inport' : '<tsample=%<SampleTime>>',
'Lookup2D' : '<row=%<x>>\\\\n<column=%<y>>\\\\n<table=%<t>>',
'Saturate' : '<limits=%<upperlimit>\\%<lowerlimit>>',
'Backlash' : '<initial=%<initialoutput>,width=%<backlashwidth>>',
'DeadZone' : '<zone=%<lowervalue>/%<uppervalue>>',
'Relay' : '<low=(%<offswitchvalue>,%<offoutputvalue>)>\\\\n<high=(%<onswitchvalue>,%<onoutputvalue>)>',
'Merge' : '<initial=%<initialoutput>>',
'DiscreteTransferFcn' : '<tsample=%<sampleTime>>',
'Quantizer' : '<interval=%<quantizationinterval>>'
}
ReusableLibList = ['Asymmetrical Debounce', 'Falling Edge', 'First order filter',
'Hysteresis', 'Latch', 'Periodic enable', 'Rate Limiter', 'Rising edge',
'Running average', 'SR Latch', 'Symmetrical Debounce']
RuleDetails = {
'MISRA AC SLSF 002' : 'Data type conversion block used for signal data type conversion.',
'MISRA AC SLSF 003' : 'Fixed step discrete solver used for functional algorithm',
'MISRA AC SLSF 004' : 'Simulink diagnostic configuration.',
'MISRA AC SLSF 005 B' : 'Function and duplicate inport blocks must not be used',
'MISRA AC SLSF 005 C' : 'Data store memory usage must not be used to exchange data across subsystem.',
'MISRA AC SLSF 006 A' : 'Block parameters evaluation at runtime must not contain Expressions, Data type conversions and Selection of rows or columns.',
'MISRA AC SLSF 006 B' : 'Block parameters intended to be configured or calibrated must be entered as named constants.',
'MISRA AC SLSF 006 D' : 'named constants must be defined in an external file',
'MISRA AC SLSF 006 E' : 'Masked sub-systems must not be used to pass parameters',
'MISRA AC SLSF 007 A' : 'define explicitly the initialization value.',
'MISRA AC SLSF 008 A' : 'Saturation property should not be selected if configured to saturate on overflow',
'MISRA AC SLSF 008 B' : 'Configure rounding behaviour to zero',
'MISRA AC SLSF 009 B' : 'Block priority should be not used for block execution order',
'MISRA AC SLSF 009 C' : 'Execution order specified by function calls or data flows.',
'MISRA AC SLSF 009 D' : 'Sample time to be inherited.',
'MISRA AC SLSF 011 A' : 'Not more than one level of nested control flow.',
'MISRA AC SLSF 011 B' : 'Default case, a must in switch case',
'MISRA AC SLSF 012 A' : 'the control input must be a Boolean type.',
'MISRA AC SLSF 013 A' : 'at least two switched inputs',
'MISRA AC SLSF 013 C' : 'Control input must be greater than or equal to 1 and less than switched inputs.',
'MISRA AC SLSF 014 A' : 'S-functions must be used only under certain conditions.',
'MISRA AC SLSF 015 A' : 'Vector signal: created either by feeding individual named scalar signals into a mux-block, or by using a vector constant, or by a Stateflow block.',
'MISRA AC SLSF 015 B' : 'Matrix signal: created either by feeding individual vector signals into a matrix concatenation block, or a matrix constant, or by a Stateflow block.',
'MISRA AC SLSF 015 C' : 'contain signals with common functionality, data type, dimensions and units.',
'MISRA AC SLSF 016 A' : 'created by using a bus creator block.',
'MISRA AC SLSF 016 B' : 'must be named.',
'MISRA AC SLSF 016 C' : 'must not contain unnamed signals.',
'MISRA AC SLSF 016 D' : 'must only be operated on by bus capable Simulink blocks.',
'MISRA AC SLSF 016 E' : 'be split up using a bus-selector block and not a demux-block only.',
'MISRA AC SLSF 017 A' : 'no unconnected blocks.',
'MISRA AC SLSF 017 B' : 'no unconnected signal lines or busses.',
'MISRA AC SLSF 018 A' : 'Global and scoped blocks must not be used.',
'MISRA AC SLSF 018 B' : 'Tag must match corresponding signal or bus label.',
'MISRA AC SLSF 018 C' : 'tags must be unique.',
'MISRA AC SLSF 018 D' : '"goto" block must have one or more matching "from" block.',
'MISRA AC SLSF 018 E' : ' "from" block must have exactly one matching "goto" block.',
'MISRA AC SLSF 027 A' : 'that require a label must be labelled directly at source.',
'MISRA AC SLSF 027 B' : 'Propagated labels must be used to redisplay the name.',
'MISRA AC SLSF 027 C' : 'passing through an inport must be labelled.',
'MISRA AC SLSF 027 D' : 'passing through an outport must be labelled.',
'MISRA AC SLSF 027 E' : 'originate from inside a re-useable subsystem must not labelled.',
'MISRA AC SLSF 027 G' : 'connected to Bus Creator, Goto, Mux, Subsystem, Stateflow Chart must be labelled.',
'MISRA AC SLSF 027 I' : 'Signal labels or propagated labels must be applied to busses with some conditions.',
'MISRA AC SLSF 027 J' : 'non-propagated labels must be unique.',
'MISRA AC SLSF 032 A' : ' port names must still be visible.',
'MISRA AC SLSF 034 A' : '"C-like bitwise operators" (& and |) must be enabled for all charts.',
'MISRA AC SLSF 034 C' : '"use strong data typing with Simulink I/O" is selected.',
'MISRA AC SLSF 034 D' : '"Execute (enter) Chart at Initialization" must be disabled.',
'MISRA AC SLSF 035 A' : 'The choice of state-chart or flow-chart is driven by the nature of the behaviour being modelled.',
'MISRA AC SLSF 035 B' : 'Truth tables must not be used.',
'MISRA AC SLSF 036 A' : 'Bus inputs are not permitted.',
'MISRA AC SLSF 036 C' : 'name of a Stateflow input/output must be the same as the corresponding signal label.',
'MISRA AC SLSF 037 A' : 'Must be defined at the chart level or below in the object hierarchy and not at the model level.',
'MISRA AC SLSF 037 B' : 'local data item name must not be used in different scopes within one state machine.',
'MISRA AC SLSF 037 G' : 'no unused data items.',
'MISRA AC SLSF 037 H' : 'must not be set to "Inherit: Same as Simulink".',
'MISRA AC SLSF 038 C' : 'C library functions must not be used in a state machine. ',
'MISRA AC SLSF 039 A' : ' a state must have either zero or more than one sub-state.',
'MISRA AC SLSF 040 B' : 'must not be used as a grouping mechanism',
'MISRA AC SLSF 040 D' : 'the order of the critical states must be documented in a textbox at the top level of the state machine, wherever critical.',
'MISRA AC SLSF 041 A' : 'must contain text only.',
'MISRA AC SLSF 042 A' : 'Super state containing exclusive states must have one default transition.',
'MISRA AC SLSF 042 B' : 'no more than one default transition',
'MISRA AC SLSF 042 C' : 'Top level of the state machine must not contain more than one default transitions.',
'MISRA AC SLSF 042 D' : 'inside a state chart must have an unguarded path to a state.',
'MISRA AC SLSF 042 E' : 'must not cross state boundaries',
'MISRA AC SLSF 043 A' : 'condition action and transition action must not be used in the same machine.',
'MISRA AC SLSF 043 D' : 'semi-colon at the end of each action.',
'MISRA AC SLSF 043 F' : 'no more than one internal transition from any state',
'MISRA AC SLSF 043 I' : 'one conditional transition must begin at every junction.',
'MISRA AC SLSF 043 J' : 'temporal logic must not be used.',
'MISRA AC SLSF 044 A' : 'during state actions must not be used.',
'MISRA AC SLSF 044 C' : 'In flow charts state actions must not be used.',
'MISRA AC SLSF 046 A' : 'History junction must not be used.',
'MISRA AC SLSF 047 A' : 'local, directed, broadcasted Stateflow events, including all implicit events, must not be used.',
'MISRA AC SLSF 047 B' : 'output Stateflow events must be used only as outputs and not tested internally on transition conditions.',
'MISRA AC SLSF 048 A' : 'Matlab functions must not be called within state machine.',
'MISRA AC SLSF 048 B' : 'embedded MATLAB block must not be used.',
'MISRA AC SLSF 048 C' : 'C code within the custom code tab needs to be just pre-processor directives.',
'MISRA AC SLSF 048 D' : 'pointers to be used only to call external functions.',
'MISRA AC SLSF 048 E' : 'custom code types need to be converted to MathWorks types.',
'MISRA AC SLSF 048 F' : 'custom code must adhere to MISRA C',
'MISRA AC SLSF 048 G' : 'Numbers other than "0" and "1" must not appear on state machine.',
'MISRA AC SLSF 052 A' : 'must be unique within state machine.',
'MISRA AC SLSF 052 B' : 'must not be given the same name as data in the chart.',
'MISRA AC SLSF 053 A' : 'transitions must not be drawn one upon the other.',
'MISRA AC SLSF 053 J' : 'must contain only one terminating junction.',
'MISRA AC SLSF 054 A' : 'above horizontal transitions and to the right of vertical transitions.',
'MISRA AC SLSF 055 A' : 'The order should be entry:, during: and exit: only.',
'HISL_0002 B' : 'Protect the second input of rem function from going to zero.',
'HISL_0002 A' : 'Protect the input of reciprocal function from going to zero.',
'HISL_0003 C' : 'Protect the input from going negative.',
'HISL_0004 A' : 'Protect the input from going negative.',
'HISL_0004 B' : 'Protect the input from equalling zero.',
'HISL_0005 A' : 'In Element-wise(.*) mode, protect all divisor inputs from going to zero.',
'HISL_0005 B' : 'In Matrix(*) mode, protect all divisor inputs from becoming singular input matrices.',
'HISL_0005 C' : 'Set the model configuration parameter Diagnostics > Data Validity > Signals > Division by singular matrix to error if Matrix(*) mode selected.',
'HISL_0008 B' : 'use a block that has a constant value for Iteration limit source, when source is external.',
'HISL_0010 A' : 'In the block parameter dialog box, select Show else condition.',
'HISL_0010 B' : 'Connect the outports of the If block to If Action Subsystem blocks.',
'HISL_0011 B' : 'Connect the outports of the Switch Case block to an Action Subsystem block.',
'HISL_0011 C' : 'Use an integer data type for the inputs to Switch Case blocks.',
'HISL_0012 B' : 'avoid using sample time-dependent blocks if the subsystem is called asynchronously',
'HISL_0013 A' : 'Set the data store diagnostics to error in the Configuration Parameters dialog box.',
'HISL_0015 B' : 'Specify execution of the conditionally executed subsystems such that in all cases only one subsystem executes during a time step.',
'HISL_0015 C' : 'Clear the Merge block parameter Allow unequal port widths.',
'HISL_0021 A' : 'Use a consistent vector indexing method for all blocks. ',
'HISL_0022 A' : 'for index signals use integer or enum type.',
'HISL_0022 B' : 'type should cover the range of index.',
'HISL_0016 A' : 'Avoid comparisons using the == or ~= operator on floating-point data types.',
'HISL_0017 A' : 'Set the block Output data type parameter to Boolean.',
'HISL_0018 A' : 'Set the block Output data type parameter to Boolean.',
'HISL_0019 A' : 'Avoid signed integer data types as input to the block.',
'HISL_0019 B' : 'Choose an output data type that represents zero exactly.',
'HISF_0003 A' : 'Avoid signed integer data types as operands to the bitwise operations.',
'HISF_0010 A' : 'Avoid using these transitions.',
'HISF_0013 A' : 'Avoid creating transitions that cross from one parallel state to another.',
'HISF_0014 A' : 'Avoid transition paths that go into and out of a state without ending on a substate.',
'RP_0008' : 'Important Mask parameters of basic block should be displayed in their attribute format string.',
'RP_0012' : 'All signals entering and leaving a merge block should have matching names.',
'RP_0018' : 'input should not be boolean signals',
'RP_0021' : 'Width of signal inputs must be same.',
'RP_0028' : 'All events external to Stateflow should be a function call event.',
'RP_0036' : 'Transition from states must not depend on the implicit clockwise rule.',
'RP_0037' : 'Not permitted',
'RP_0046' : 'Not permitted',
'RP_0051' : 'Data types of signal inputs must be same.',
'RP_0054' : 'Allowed set of blocks are specified.',
'RP_0055' : 'Neither condition actions or transition actions should be used in transition between two states.',
'RP_0056' : 'Default shape and size should be used',
'RP_0057' : 'Name must be placed below',
'RP_0058' : 'must be named identically to the corresponding signal or bus name',
'RP_0059' : 'Shall be present at root level to detail revision history.',
'RP_0060' : 'Shall be present at root level to detail feature description.',
'RP_0061' : 'Look up method "Interpolation - Extrapolation" must not be used.',
'RP_0062' : 'All outputs from a feature must be displayed',
'RP_0063' : 'Global parameters shall not be defined via Model Parameter Configuration Method.',
'RP_0064' : 'All signals and busses propagating from Blocks must be labelled with propagated signals.'
}
RuleCheckerInput = {
#TODO : Make it block type rather than property. (See rule in the spreadsheet)
'MISRA AC SLSF 005 B' : {'ResultType' : 'NotExist'
},
'MISRA AC SLSF 005 C' : {'Property' : 'DataStoreMemory',
'Model' : 'SIMULINK_BLOCK'},
'MISRA AC SLSF 006 A' : {'srchKeys' : {'BlockType':['Constant','DiscreteTransferFcn','DiscreteIntegrator','Gain','Lookup2D','Lookup','Relay','Saturate','Switch','UnitDelay','Reference'],'Name':'','SourceType':'Compare To Constant'},
'RuleInfo' : ['MANUAL CHECK RULE:check the Block Parameter value in Block:','that should not contain Expressions,Data Type Conversions,Selection of Rows and Columns.'],
'matchType' : 'Dynamic'
},
'MISRA AC SLSF 007 A' : {'PropChkData' : {'X0': '[]'},
'PropChkData1' : {'InitialOutput': '[]'},
'PropChkData2' : {'InitialCondition': '[]'},
'PropChkData3' : {'InitialStates': '[]'},
'UniqueKey' : ['BlockType', 'Name']
},
'MISRA AC SLSF 008 A' : {'PropChkData' : {'SaturateOnIntegerOverflow': 'off'},
'UniqueKey' : ['BlockType', 'Name']
},
'MISRA AC SLSF 008 B' : {'PropChkData' : {'RndMeth': 'Zero'},
'UniqueKey' : ['BlockType', 'Name'],
'ExcludeBlockLst' : ['Rounding']
},
'MISRA AC SLSF 009 B' : {'Property' : 'Priority',
'Model' : 'SIMULINK_BLOCK'},
'MISRA AC SLSF 009 D' : {'PropChkData1' : {'SampleTime': '-1'},
'UniqueKey' : ['BlockType', 'Name'],
'ExcludeBlockLst' : ['RateTransition', 'UnitDelay',
'DiscreteIntegrator', 'DiscreteTransferFcn',
'TriggerPort', 'Outport', 'Inport'],
'PropChkData2' : {'SystemSampleTime': '-1'},
'ListType' : 'Block',
'BlockType1' : 'SubSystem',
'BlockType2' : 'Reference',
'ResultMatchType' : 'Exact'
},
'MISRA AC SLSF 011 A' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#'},
'DstInput' : {'BlockType' : 'If',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckItem' : 'BlockType',
'CheckValue' : 'If',
'CheckExp' : 'NOT EQUAL'}
},
'MISRA AC SLSF 011 B' : {'PropChkData' : {'ShowDefaultCase': 'on'},
'UniqueKey' : ['BlockType', 'Name']
},
'MISRA AC SLSF 012' : {
'UniqueKey' : ['BlockType', 'Name','Threshold'],
'PropChkData' : {
'SourceProp' : 'Criteria',
},
'ResultMatchType' : 'Match'
},
'MISRA AC SLSF 013 A' : {'ListType' : 'Block',
'BlockType' : 'MultiPortSwitch',
'PropChkData' : {'Inputs': 1},
'UniqueKey' : ['BlockType', 'Name'],
'ResultMatchType' : 'Greater'
},
'MISRA AC SLSF 013 C' : {'srchKeys' : {'BlockType':'MultiPortSwitch','Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the control input of MultiPortSwitch Block in:','that value should be greater than or equal to one and not exceed the number of switched inputs.'],
'matchType' : 'blockExist'
},
'MISRA AC SLSF 016 A' : {'matchType' :'Match'
},
'MISRA AC SLSF 016 B' : {'matchType' :'Exist'
},
'MISRA AC SLSF 016 C' : {'matchType' :'NameExist',
'UniqueKey' :{'BlockType':'BusCreator'}
},
'MISRA AC SLSF 016 E' : {'matchType' :'NotExist'
},
'MISRA AC SLSF 017 A' : {'ListType' : ['Block','Line',],
'AllowedBlock' :[['Inport','From','Ground','Constant'], #only output Blocks
['Goto','Outport','Terminator'], #only input Blocks
['BusCreator','BusSelector','Mux','Demux','Merge','If','SwitchCase',
'Concatenate','Reference','Sum','Product','MinMax','Trigonometry',
'Logic','RelationalOperator','Saturate','DiscreteTransferFcn',
'TriggerPort','Selector','Math','MultiPortSwitch'
], # 2D ports,which may vary.
['Lookup','Sqrt','Abs','Gain','UnitDelay','Relay','RateTransition','DataTypeConversion'], # 2D vector, fixed size.
{'Lookup2D':[2,1],
'Switch':[3,1]
},
['SubSystem'],
['DiscreteIntegrator']
]
},
'MISRA AC SLSF 017 B' : {'ListType' : 'Line',
'PropChkData' : {'SrcBlock': '',
'DstBlock':'',
},
'ResultMatchType' : 'Any'
},
'MISRA AC SLSF 018 A' : {'PropData' : {'BlockType': 'Goto'},
'CheckListData' : {'TagVisibility': 'local'},
'ListType' : 'Block',
'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {'SourceBlockType': 'From',
'SourceProp' : 'GotoTag',
'DestBlockType' : 'Goto',
'DestProp' : 'GotoTag'
},
'ResultMatchType' : 'Exact'
},
'MISRA AC SLSF 018 B' : {'PropData' : {'BlockType': 'Goto'},
'ListType' : ['Block','Line', 'Port'],
'UniqueKey' : ['BlockType', 'Name', 'PropagatedSignals'],
'PropChkData' : {'SourceBlockType': 'From',
'SourceProp' : 'GotoTag',
'DestBlockType' : 'Goto',
'DestProp' : 'GotoTag'
},
'ResultMatchType' : 'Exact'
},
'MISRA AC SLSF 018 C' : {'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {'SourceBlockType': 'Goto',
'SourceProp' : 'GotoTag'}
},
'MISRA AC SLSF 018 D' : {'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {'SourceBlockType': 'Goto',
'SourceProp' : 'GotoTag',
'DestBlockType' : 'From',
'DestProp' : 'GotoTag'
},
'ResultMatchType' : 'Exist'
},
'MISRA AC SLSF 018 E' : {'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {'SourceBlockType': 'From',
'SourceProp' : 'GotoTag',
'DestBlockType' : 'Goto',
'DestProp' : 'GotoTag'
},
'ResultMatchType' : 'Unique'
},
'MISRA AC SLSF 027 A' : {'ListType' : 'SrcBlock',
'AllowedBlock' :[['Inport','From','Ground','Constant',
'Lookup','Sqrt','Abs','Gain','UnitDelay','Relay','RateTransition','DataTypeConversion',
'Lookup2D','Switch'
],
['BusCreator','BusSelector','Mux','Demux','Merge','If','SwitchCase',
'Concatenate','Reference','Sum','Product','MinMax','Trigonometry',
'Logic','RelationalOperator','Saturate','DiscreteTransferFcn',
'TriggerPort','Selector','Math','MultiPortSwitch'
], # 2D ports,which may vary.
['DiscreteIntegrator']
]
},
'MISRA AC SLSF 027 C' : { 'ListType' : ['Line'],
'PropChkData' : {'BlockType':'Inport',
'SourceProp':'SrcBlock'},
'ResultMatchType' : 'Inport'
},
'MISRA AC SLSF 027 D' : { 'ListType' : ['Line'],
'PropChkData' : {'BlockType':'Outport',
'SourceProp':'DstBlock'},
'ResultMatchType' : 'Outport'
},
'MISRA AC SLSF 027 E' : {'srchKeys' : ['BlockType', 'SourceType','Name', 'MaskType'],
'PropChkData' : ['SrcBlock', 'Name']
},
'MISRA AC SLSF 027 G' : {
'matchType' :'NameExist',
'UniqueKey' :{'BlockType':'BusCreator'},
'UniqueKey2' : ['BlockType'],
'PropChkData' : {'SourceProp' : 'Name',
'BlockProp' : 'DstBlock'
},
'ResultMatchType' : 'Exist2',
'AllowedBlock' :['Mux','Goto','SubSystem']
},
'MISRA AC SLSF 027 I' : { 'ListType' : ['Line'],
'PropChkData' : {'BlockType':'BusCreator',
'SourceProp':'SrcBlock'},
'PropChkData1' : {'BlockType':'BusCreator',
'SourceProp':'DstBlock'},
'PropChkData2' : {'BlockType':'BusSelector',
'SourceProp':'SrcBlock'},
'PropChkData3' : {'BlockType':'BusSelector',
'SourceProp':'DstBlock'},
'ResultMatchType' : 'Exist'
},
'MISRA AC SLSF 027 J' : {'PropChkData' : {'SourceBlockType': 'Line',
'SourceProp' : 'Name'}
},
'MISRA AC SLSF 034 A' : {'ListType' : 'chart',
'PropChkData' : {'actionLanguage': 1},
'ResultMatchType' : 'Exact',
'ListFoundCheck' : 'FAIL',
'PropFoundCheck' : 'TRUE'
},
'MISRA AC SLSF 034 C' : {'ListType' : 'chart',
'PropChkData' : {'disableImplicitCasting': 1},
'ResultMatchType' : 'Exact',
'ListFoundCheck' : 'FAIL',
'PropFoundCheck' : 'TRUE'
},
'MISRA AC SLSF 034 D' : {'ListType' : 'chart',
'PropChkData' : {'executeAtInitialization': 0},
'ResultMatchType' : 'Exact'
},
'MISRA AC SLSF 035 B' : {'Property' : 'truthTable',
'Model' : 'STATEFLOW'
},
'MISRA AC SLSF 036 A' : {'srchKeys' :{'LineSrchKeys':['SrcBlock','Name'],
'BlckSrchKeys':['OutDataTypeStr','Name'],
'chartSrchKeys':['Name','Ports','MaskType','MaskDescription']
}
},
'MISRA AC SLSF 036 C' : {'srchKeys' :{'BlckSrchKeys':['BlockType','Name','Port'],
'chartSrchKeys':['Name','Ports','MaskType','MaskDescription']
}
},
'MISRA AC SLSF 037 G' : {'PropChkData' : {'SourceBlockType': 'data',
'SourceProp' : 'name',
'DestBlockType' : 'state',
'DestProp' : 'labelString',
'DestProp1' : 'labelString'
},
},
'MISRA AC SLSF 037 H' : {'ListType' : 'data',
'PropChkData' : {'dataType': 'Inherit: Same as Simulink'},
'ResultMatchType' : 'Opposite'
},
'MISRA AC SLSF 039 A' : {'ResultType' : 'Exist'
},
'MISRA AC SLSF 041 A' : {'ListType' : 'state',
'PropChkData' : {'type': 'GROUP_STATE'},
'ResultMatchType' : 'Text'
},
'MISRA AC SLSF 042 A' : {
'resultType' :'Exist'
},
'MISRA AC SLSF 042 B' : {
'resultType' :'Single'
},
'MISRA AC SLSF 042 C' : {
'resultType' :'DefaultAtTop'
},
'MISRA AC SLSF 042 D' : {
'resultType' :'Unguarded_Exist'
},
'MISRA AC SLSF 042 E' : {
'resultType' :'DefaultTx_Exist'
},
'MISRA AC SLSF 043 D' : {
'ChkData' : ';'
},
'MISRA AC SLSF 043 A' : {
'srchKeys' : ['labelString','chart']
},
'MISRA AC SLSF 043 I' : {
'resultType' :'Unguarded_Exist'
},
'MISRA AC SLSF 043 J' : {
'ChkData' : ['after', 'before', 'at', 'every', 'temporalCount']
},
'MISRA AC SLSF 044 A' : {'ListType' : 'state',
'PropChkData' : {'labelString': ['during:', 'du:']},
'ResultMatchType' : 'Contains'
},
'MISRA AC SLSF 044 C' : {
'ChkData' : ';'
},
'MISRA AC SLSF 046 A' : {'ListType' : 'junction',
'PropChkData' : {'type': 'HISTORY_JUNCTION'},
'ResultMatchType' : 'Opposite'
},
'MISRA AC SLSF 048 A' : {'Property' : 'MATLABFcn',
'Model' : 'SIMULINK_BLOCK'
},
'MISRA AC SLSF 048 B' : {'ResultType' : 'Exist',
},
'MISRA AC SLSF 048 C' : {'ResultType' : 'Exist',
'RuleInfo' :['MANUAL CHECK RULE:C code within the custom code tab must be limited to preprocessor statements'],
},
'MISRA AC SLSF 048 D' : {'ResultType' : 'Exist',
'RuleInfo' :['MANUAL CHECK RULE:Pointers must not be used except when they are required to call an external function'],
},
'MISRA AC SLSF 048 E' : {'ResultType' : 'Exist',
'RuleInfo' :['MANUAL CHECK RULE:Custom code variables must be restricted to fixed width word size datatypes 1)signed 8,16,32 integers(int8_T,int16_T,int32_T) 2)unsigned 8,16,32 integers(uint8_T,uint16_T,uint32_T) 3)32 and 64 bit floating point numbers(real32_T,real64_T) 4)Boolean(boolean_T)'],
},
'MISRA AC SLSF 048 F' : {'ResultType' : 'Exist',
'RuleInfo' :['MANUAL CHECK RULE:LDRA Tool checks the MISRA C standards for the used custom code, so check the LDRA tool reports'],
},
'MISRA AC SLSF 048 G' : {'ListType' : 'state',
'ListType1' : 'transition',
'PropChkData' : {'labelString': ['0', '1']},
'ResultMatchType' : 'Otherthan',
'ListFoundCheck' : 'PASS',
'PropFoundCheck' : 'FALSE'
},
'MISRA AC SLSF 052 A' : {'PropChkData' : {'SourceBlockType': 'state',
'SourceProp' : 'labelString'}
},
'MISRA AC SLSF 052 B' : {'PropChkData' : {'SourceBlockType': 'data',
'SourceProp' : 'name',
'DestBlockType' : 'state',
'DestProp' : 'labelString'
},
'CheckType' : 'Unique'
},
'MISRA AC SLSF 053 A' : {
'resultType' :'NotExist'
},
'HISL_0002 A' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#',
'OutMin' : '#ValueKey#'},
'DstInput' : {'BlockType' : 'Math',
'Operator': 'reciprocal',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckExp' : 'MANUAL'}
},
'HISL_0002 B' : {'srchKeys' : {'BlockType' : 'Math',
'Operator' : ['rem'],
'Name' : ''
},
'RuleInfo' :['MANUAL CHECK RULE:check the second input of the rem Block in:','If it is zero,then this rule will fail'],
'matchType' :'MathExist'
},
'HISL_0003 C' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#',
'OutMin' : '#ValueKey#'},
'DstInput' : {'BlockType' : 'Sqrt',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckItem' : 'OutMin',
'CheckValue' : 0,
'CheckExp' : 'GREATER/EQUAL'}
},
'HISL_0004 A' : {'srchKeys' : {'BlockType' : 'Math',
'Operator' : ['log','log10'],
'Name' : ''
},
'RuleInfo' :['MANUAL CHECK RULE:check the input of the logarithm Block in:','If it is negative,then this rule will fail'],
'matchType' :'MathExist'
},
'HISL_0004 B' : {'srchKeys' : {'BlockType' : 'Math',
'Operator' : ['log','log10'],
'Name' : ''
},
'RuleInfo' :['MANUAL CHECK RULE:check the input of the logarithm Block in:','If it is zero,then this rule will fail'],
'matchType' :'MathExist'
},
'HISL_0005 A' : {'srchKeys' : {'BlockType':'Product','Multiplication':'','Name':'','Inputs':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the input of divisor port in Product Block in:','If it is zero,then this rule will fail'],
'matchType' : 'ProductExist'
},
'HISL_0005 B' : {'srchKeys' :{'BlockType':'Product','Multiplication':'','Name':'','Inputs':''},
'RuleInfo' :['MANUAL CHECK RULE:check the input signal of the divisor port in Product Block in:','If it is singular input matrices,then this rule will fail'],
'matchType' :'ProductExist'
},
'HISL_0010 A' : {'PropChkData' : {'ShowElse': 'on'},
'UniqueKey' : ['BlockType', 'Name','ElseIfExpressions'],
'ListType' : 'Block',
'BlockType' : 'If',
'ResultMatchType' : 'Exact'
},
'HISL_0010 B' : {'PropChkData' : {'DstPort': 'ifaction'},
'blockType' : 'IfExist',
'UniqueKey' : ['BlockType', 'Name','Ports','ElseIfExpressions']
},
'HISL_0011 B' : {'PropChkData' : {'DstPort': 'ifaction'},
'blockType' : 'SwitchCaseExist',
'UniqueKey' : ['BlockType', 'Name','Ports']
},
'HISL_0011 C' : {'srchKeys' : {'BlockType':'SwitchCase','Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the input of SwitchCase Block in:','If it is not an integer datatype, then this rule will fail'],
'matchType' : 'blockExist'
},
'HISL_0015 B' : {'srchKeys' : {'BlockType':'Merge','Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:If two or more inputs of Merge Block in:',' are coming from conditionally executed subsystems, then such inputs must have mutual exclusion between the conditionally executed subsystems feeding a Merge block'],
'matchType' : 'blockExist'
},
'HISL_0015 C' : {'PropChkData' : {'AllowUnequalInputPortWidths': 'off'},
'UniqueKey' : ['BlockType', 'Name'],
'ListType' : 'Block',
'BlockType' : 'Merge',
'ResultMatchType' : 'Exact'
},
'HISL_0016 A' : {'srchKeys' : {'BlockType':'RelationalOperator','Operator':['==','~='],'Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the input signals of RelationalOperator Block in:','If input signals are float type,then this rule will fail'],
'matchType' : 'Exist'
},
'HISL_0017 A' : {
'UniqueKey' : {'BlockType':'RelationalOperator'},
'SrchKeys' : ['BlockType','OutDataTypeStr','Name']
},
'HISL_0018 A' : {
'UniqueKey' : {'BlockType':'Logic'},
'SrchKeys' : ['BlockType','OutDataTypeStr','Name']
},
'HISL_0019 A' : {'srchKeys' : {'BlockType':'Reference','SourceType':['Bitwise Operator'],'Name':''},
'RuleInfo' : ['MANUAL CHECK RULE:check the input signals of Bitwise Operator Block in:','If input signals are signed integer data type,then this rule will fail'],
'matchType' : 'Exist'
},
'HISL_0019 B' : {'PropData' : {'BlockType': 'Reference',
'SourceType': 'Bitwise Operator'},
'CheckListData' : {'BitMaskRealWorld' : 'Stored Integer'},
'ListType' : 'Block'
},
'HISF_0003 A' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#',
'OutDataTypeStr' : '#ValueKey#'},
'DstInput' : {'BlockType' : 'Reference',
'SourceType': 'Bitwise Operator',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckItem' : 'OutDataTypeStr',
'CheckValue' : ['uint8', 'uint16','uint32'],
'CheckExp' : 'WITHIN'}
},
'RP_0012' : {
'UniqueKey' : ['BlockType', 'Name'],
'PropChkData' : {
'SourceProp' : 'Ports',
},
'ResultMatchType' : 'Unique'
},
'RP_0018' : {'SrcInput' : {'BlockType' : '#ValueKey#',
'Name' : '#MatchKey#'},
'DstInput' : {'BlockType' : 'RelationalOperator',
'Name' : '#MatchKey#'},
'CheckList' : {'CheckItem' : 'OutDataTypeStr',
'CheckValue' : 'Boolean',
'CheckExp' : 'NOT EQUAL'}
},
'RP_0021' : {'PropChkData' : {'AllowDiffInputSizes': 'off'},
'UniqueKey' : ['BlockType', 'Name'],
'ListType' : 'Block',
'BlockType' : 'Switch',
'ResultMatchType' : 'Exact'
},
'RP_0028' : {'srchKeys_chart' :['id','name'],
'srchKeys_event' :['name','linkNode','scope','trigger']
},
'RP_0036' : {'ListType' : 'chart',
'PropChkData' : {'userSpecifiedStateTransitionExecutionOrder': 1},
'ResultMatchType' : 'Exact',
'ListFoundCheck' : 'FAIL',
'PropFoundCheck' : 'TRUE'
},
'RP_0037' : {
'ChkData' : ';'
},
'RP_0046' : {'ListType' : 'state',
'ListType1' : 'transition',
'PropChkData' : {'labelString': []},
'ResultMatchType' : 'DoesNotContain',
},
'RP_0051' : {'PropChkData' : {'InputSameDT': 'on'},
'UniqueKey' : ['BlockType', 'Name'],
'ListType' : 'Block',
'BlockType1' : 'Switch',
'BlockType2' : 'MultiPortSwitch',
'ResultMatchType' : 'Exact'
},
'RP_0055' : {'ListType' : 'transition',
'PropChkData' : {'labelString': [';']},
'ResultMatchType' : 'Contains',
'ListFoundCheck' : 'PASS',
'PropFoundCheck' : 'TRUE'
},
'RP_0057' : {'Property' : 'NamePlacement',
'Model' : 'SIMULINK_BLOCK'
},
'RP_0058' : { 'matchType' :'Exact',
'UniqueKey' :{'BlockType':'Inport'},
'UniqueKey2' :{'BlockType':'Outport'}
},
'RP_0059' : {'SrchKeys' : ['BlockType','SourceType','ECoderFlag'],
'PropChkData' : {'ECoderFlag': 'History'}
},
'RP_0060' : {'SrchKeys' : ['BlockType','SourceType','ECoderFlag'],
'PropChkData' : {'ECoderFlag': 'Description'}
},
'RP_0061' : {'PropChkData' : {'LookUpMeth': 'Interpolation-Extrapolation'},
'UniqueKey' : ['BlockType', 'Name']
},
'RP_0063' : {'Property' : 'TunableVars',
'Model' : 'SIMULINK_MODEL'
},
'RP_0064' : {
'UniqueKey' : ['BlockType'],
'PropChkData' : {'SourceProp' : 'Name',
'BlockProp' : 'SrcBlock'
},
'ResultMatchType' : 'Exist',
'AllowedBlock' : ['From','SubSystem','Demux','Selector']
}
}
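# Illustrative sketch only, not part of the original checker configuration:
# 'describe_rule' is a hypothetical helper showing how a report line could
# combine a rule's human-readable description from RuleDetails with whether
# an automated checker entry exists for it in RuleCheckerInput.
def describe_rule(rule_id):
    detail = RuleDetails.get(rule_id, 'No description available.')
    mode = 'automated' if rule_id in RuleCheckerInput else 'manual/other'
    return '%s [%s]: %s' % (rule_id, mode, detail)
# Example: describe_rule('HISL_0015 C') would report it as an automated check
# together with its Merge-block description.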
configReferenceFiles = {
'mdlref10ms_cs' : r'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_10ms_mdlref_config_set.m',
'context10ms_cs' : r'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_10ms_context_config_set.m',
'mdlref50ms_cs' : r'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_50ms_mdlref_config_set.m',
'context50ms_cs' : r'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_50ms_context_config_set.m',
'lib_cs' : r'Z:\IAI-TXB-HLC\Dynamic\Models\Simulink_Localisation\SimulinkCommon\mdl_config_sets/taxibot_libraryMdlRef_config_set.m'}
|
# Telegram bot v.002 - the bot creates a menu, sends a dog picture, and a joke
import telebot # pyTelegramBotAPI 4.3.1
from telebot import types
import requests
import bs4
bot = telebot.TeleBot('5105972662:AAG24fr382U1_hosO4Zrb-tv_BTakAV1MPk') # Create the bot instance
# -----------------------------------------------------------------------
# Function handling the /start command
@bot.message_handler(commands=["start"])
def start(message, res=False):
chat_id = message.chat.id
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("👋 Главное меню")
btn2 = types.KeyboardButton("❓ Помощь")
markup.add(btn1, btn2)
bot.send_message(chat_id,
text="Привет, {0.first_name}! Я тестовый бот для курса программирования на языке ПаЙтон".format(
message.from_user), reply_markup=markup)
# -----------------------------------------------------------------------
# Receiving messages from the user
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
chat_id = message.chat.id
ms_text = message.text
if ms_text == "Главное меню" or ms_text == "👋 Главное меню" or ms_text == "Вернуться в главное меню": # ..........
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("Развлечения")
btn2 = types.KeyboardButton("WEB-камера")
btn3 = types.KeyboardButton("Управление")
back = types.KeyboardButton("Помощь")
markup.add(btn1, btn2, btn3, back)
bot.send_message(chat_id, text="Вы в главном меню", reply_markup=markup)
elif ms_text == "Развлечения": # ..................................................................................
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
btn1 = types.KeyboardButton("Картиночки с котиками")
btn2 = types.KeyboardButton("Анекдоты")
btn3 = types.KeyboardButton("Картиночки с собачками")
btn4 = types.KeyboardButton("Играть в камень-ножницы-бумага")
back = types.KeyboardButton("Вернуться в главное меню")
markup.add(btn1, btn2, btn3, btn4, back)
bot.send_message(chat_id, text="Развлечения", reply_markup=markup)
# ..............................................................................
elif ms_text == "/cat" or ms_text == "Картиночки с котиками":
contents = requests.get('https://random.cat/meow.json').json()
urlCAT = contents['url']
bot.send_photo(chat_id, photo=urlCAT, caption="Держи котика!")
# ..............................................................................
elif ms_text == "Анекдоты":
bot.send_message(chat_id, text="еще не готово...")
# .............................................................................
elif ms_text == "/dog" or ms_text == "Картиночки с собачками":
contents = requests.get('https://random.dog/woof.json').json()
urlDOG = contents['url']
bot.send_photo(chat_id, photo=urlDOG, caption="Держи собатьку!")
#..............................................................................
elif ms_text == "Играть в камень-ножницы-бумага":
bot.send_message(chat_id, text="еще не готово...")
elif ms_text == "WEB-камера": # .............................................................................
bot.send_message(chat_id, text="еще не готово...")
elif ms_text == "Управление": # ...................................................................................
bot.send_message(chat_id, text="еще не готово...")
elif ms_text == "Помощь" or ms_text == "/help": # .................................................................
bot.send_message(chat_id, "Автор: Панасенко Софья, 1-МД-5")
key1 = types.InlineKeyboardMarkup()
btn1 = types.InlineKeyboardButton(text="Напишите автору", url="https://t.me/ave_satanas_bitch")
key1.add(btn1)
img = open('author.jpg', 'rb')
bot.send_photo(message.chat.id, img, reply_markup=key1)
else: # ...........................................................................................................
bot.send_message(chat_id, text="Я тебя слышу!!! Ваше сообщение: " + ms_text)
# -----------------------------------------------------------------------
bot.polling(none_stop=True, interval=0) # Start the bot
print()
|
import os, gzip, pickle, sys, datetime, struct
from glob import glob
import pandas as pd
import subprocess
import shutil
import numpy as np
from datetime import timedelta
from io import StringIO
from SWaN_accel import config
from SWaN_accel import utils
from SWaN_accel import feature_set
pd.options.mode.chained_assignment = None # default='warn'
# JAR = 'jar/readBinaryFile.jar'
# col = ["HEADER_TIME_STAMP","X","Y","Z"]
col = ["HEADER_TIME_STAMP","X_ACCELERATION_METERS_PER_SECOND_SQUARED",
"Y_ACCELERATION_METERS_PER_SECOND_SQUARED","Z_ACCELERATION_METERS_PER_SECOND_SQUARED"]
MHEALTH_TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S"
PROB_WEAR = 'PROB_WEAR'
PROB_SLEEP = 'PROB_SLEEP'
PROB_NWEAR = 'PROB_NWEAR'
ori_header = ['ORI_X_MEDIAN', 'ORI_Y_MEDIAN', 'ORI_Z_MEDIAN']
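# Label encoding used throughout this module (inferred from the probability
# column names PROB_WEAR / PROB_SLEEP / PROB_NWEAR): 0 = wear, 1 = sleep,
# 2 = non-wear.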
def mhealth_timestamp_parser(val):
return datetime.datetime.strptime(val, MHEALTH_TIMESTAMP_FORMAT)
def contigous_regions_usingOri(condition):
d = np.floor(np.absolute(np.diff(condition)))
idx, = d.nonzero()
idx += 1
idx = np.r_[0, idx - 1]
idx = np.r_[idx, condition.size - 1]
bout_lis = []
for i in range(len(idx) - 1):
if i == 0:
first = idx[i]
else:
first = idx[i] + 1
second = idx[i + 1]
bout_lis = bout_lis + [[first, second]]
this_ar = np.asarray(bout_lis)
return this_ar
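# Note: unlike contigous_regions below, this variant floors the absolute
# difference, so only orientation jumps of magnitude >= 1 between consecutive
# windows start a new bout; smaller fluctuations are kept in the same bout.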
def contigous_regions(condition):
d = np.diff(condition)
idx, = d.nonzero()
idx += 1
idx = np.r_[0, idx - 1]
idx = np.r_[idx, condition.size - 1]
bout_lis = []
for i in range(len(idx) - 1):
if i == 0:
first = idx[i]
else:
first = idx[i] + 1
second = idx[i + 1]
bout_lis = bout_lis + [[first, second]]
this_ar = np.asarray(bout_lis)
return this_ar
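# Worked example (illustration only): for condition = np.array([0, 0, 1, 1, 0]),
# np.diff gives [0, 1, 0, -1]; the non-zero positions mark bout boundaries, so
# the function returns [[0, 1], [2, 3], [4, 4]], i.e. inclusive [start, stop]
# index pairs, one per run of identical labels.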
def filterUsingZori(bout_array, fil_df, lab_str, ref_str, prob_wear, prob_sleep, prob_nwear):
fdf = fil_df.copy()
tmp_fdf = fil_df.copy()
for n in range(len(bout_array)):
ar_sub = fdf[bout_array[n][0]:bout_array[n][1] + 1]
ar_sub_pred = ar_sub[lab_str].values[0]
ar_sub_start = ar_sub.index[0]
ar_sub_ori = ar_sub[ref_str].values
bout_array_sub = contigous_regions_usingOri(ar_sub_ori)
bout_array_sub_final = bout_array_sub + ar_sub_start
for m in range(len(bout_array_sub_final)):
start = bout_array_sub_final[m][0]
end = bout_array_sub_final[m][1]
if ar_sub_pred == 0:
if start == end:
fdf.loc[start, 'PREDICTED_SMOOTH'] = 0
fdf.loc[start, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start][prob_wear]
fdf.loc[start, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start][prob_sleep]
fdf.loc[start, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start][prob_nwear]
else:
fdf.loc[start:end, 'PREDICTED_SMOOTH'] = 1
fdf.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_sleep]
fdf.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start:end][prob_wear]
fdf.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_nwear]
elif ar_sub_pred == 1:
if start == end:
fdf.loc[start, 'PREDICTED_SMOOTH'] = 0
fdf.loc[start, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start][prob_sleep]
fdf.loc[start, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start][prob_wear]
fdf.loc[start, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start][prob_nwear]
else:
fdf.loc[start:end, 'PREDICTED_SMOOTH'] = 1
fdf.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_wear]
fdf.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start:end][prob_sleep]
fdf.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_nwear]
elif ar_sub_pred == 2:
if start == end:
fdf.loc[start, 'PREDICTED_SMOOTH'] = 0
fdf.loc[start, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start][prob_sleep]
fdf.loc[start, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start][prob_wear]
fdf.loc[start, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start][prob_nwear]
else:
fdf.loc[start:end, 'PREDICTED_SMOOTH'] = 2
fdf.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_wear]
fdf.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start:end][prob_sleep]
fdf.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_nwear]
return fdf
def lookBeforeAfter(lo_df):
global new_lab
df = lo_df.copy()
tmp_df = lo_df.copy()
tmp_ar = tmp_df['PREDICTED_SMOOTH'].values
ff_obout_array = contigous_regions(tmp_ar)
bout_df = pd.DataFrame(ff_obout_array, columns=['START_IND', 'STOP_IND'])
bout_df['SIZE'] = bout_df['STOP_IND'] - bout_df['START_IND'] + 1
start_ind = bout_df.iloc[0]['START_IND']
stop_ind = bout_df.iloc[-1]['STOP_IND']
size = len(bout_df.index)
for bout_ind, bout_row in bout_df.iterrows():
start, end, this_size = bout_row['START_IND'], bout_row['STOP_IND'], bout_row['SIZE']
lab = tmp_df.loc[start]['PREDICTED_SMOOTH']
bout_df.loc[bout_ind, 'LABEL'] = lab
if lab == 1:
if (bout_ind == len(bout_df.index) - 1) or (this_size >= 480):
# if(this_size >= 480):
bout_df.loc[bout_ind, 'LABEL'] = 2
df.loc[start:end, 'PREDICTED_SMOOTH'] = 2
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
# print('Done nonwear first')
sleep_df = bout_df[bout_df.LABEL == 1]
# ref_df_short = sleep_df[(sleep_df.SIZE >= 30)]
ref_df_short = sleep_df[(sleep_df.SIZE >= 20)]
ref_ind_ar_short = ref_df_short.index
# nonwear related
nwear_df = bout_df[bout_df.LABEL == 2]
nwear_ref_ind_ar_short = None
if not nwear_df.empty:
nwear_ref_ind_ar_short = nwear_df.index
# also add nonwear vicinity
for bout_ind, bout_row in bout_df.iterrows():
start, end = bout_row['START_IND'], bout_row['STOP_IND']
lab = bout_row['LABEL']
size = bout_row['SIZE']
if lab == 1:
if (size < 480) and (size >= 60):
# min_distance = 60
min_distance = 20
nwear_min_distance = 10
up, down = ref_ind_ar_short[ref_ind_ar_short < bout_ind], ref_ind_ar_short[ref_ind_ar_short > bout_ind]
up_dist = None
down_dist = None
if len(up) != 0:
up_ind = up[-1]
sub_bout_df = bout_df.loc[(bout_df.index > up_ind) & (bout_df.index < bout_ind)]
up_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()
if len(down) != 0:
down_ind = down[0]
sub_bout_df = bout_df.loc[(bout_df.index > bout_ind) & (bout_df.index < down_ind)]
down_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()
# nonwear related
nwear_up_dist = None
nwear_down_dist = None
if not nwear_df.empty:
nwear_up = nwear_ref_ind_ar_short[nwear_ref_ind_ar_short < bout_ind]
nwear_down = nwear_ref_ind_ar_short[nwear_ref_ind_ar_short > bout_ind]
if len(nwear_up) != 0:
nwear_up_ind = nwear_up[-1]
sub_bout_df = bout_df.loc[(bout_df.index > nwear_up_ind) & (bout_df.index < bout_ind)]
nwear_up_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()
if len(nwear_down) != 0:
nwear_down_ind = nwear_down[0]
sub_bout_df = bout_df.loc[(bout_df.index > bout_ind) & (bout_df.index < nwear_down_ind)]
nwear_down_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()
# nonwear vicinity related
if nwear_down_dist:
if nwear_down_dist < nwear_min_distance:
# print('flip', start, end, nwear_up_dist, nwear_down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 2
df.loc[start:end, 'PREDICTED_SMOOTH'] = 2
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
continue
if nwear_up_dist:
if nwear_up_dist < nwear_min_distance:
# print('flip', start, end, nwear_up_dist, nwear_down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 2
df.loc[start:end, 'PREDICTED_SMOOTH'] = 2
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
continue
# sleep vicinity related
if (not up_dist) & (not down_dist):
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 2
df.loc[start:end, 'PREDICTED_SMOOTH'] = 2
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
continue
if up_dist and down_dist:
if (up_dist > min_distance) and (down_dist > min_distance):
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 2
df.loc[start:end, 'PREDICTED_SMOOTH'] = 2
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
continue
# print('untouched', start, end, up_dist, down_dist)
sleep_df = bout_df[bout_df.LABEL == 1]
ref_df_short = sleep_df[(sleep_df.SIZE >= 30)]
ref_ind_ar_short = ref_df_short.index
for bout_ind, bout_row in bout_df.iterrows():
start, end = bout_row['START_IND'], bout_row['STOP_IND']
lab = bout_row['LABEL']
size = bout_row['SIZE']
if lab == 1:
if (size < 60) and (size > 30):
min_distance = 30
up, down = ref_ind_ar_short[ref_ind_ar_short < bout_ind], ref_ind_ar_short[ref_ind_ar_short > bout_ind]
up_dist = None
down_dist = None
if len(up) != 0:
up_ind = up[-1]
sub_bout_df = bout_df.loc[(bout_df.index > up_ind) & (bout_df.index < bout_ind)]
up_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()
if len(down) != 0:
down_ind = down[0]
sub_bout_df = bout_df.loc[(bout_df.index > bout_ind) & (bout_df.index < down_ind)]
down_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()
if (not up_dist) & (not down_dist):
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 0
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
continue
if not up_dist:
if down_dist:
if down_dist > min_distance:
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 0
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
continue
if not down_dist:
if up_dist:
if up_dist > min_distance:
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 0
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
continue
if up_dist and down_dist:
if (up_dist > min_distance) and (down_dist > min_distance):
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 0
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
continue
# print('untouched', start, end, up_dist, down_dist)
sleep_df = bout_df[bout_df.LABEL == 1]
ref_df_short = sleep_df[(sleep_df.SIZE >= 30)]
ref_ind_ar_short = ref_df_short.index
for bout_ind, bout_row in bout_df.iterrows():
start, end = bout_row['START_IND'], bout_row['STOP_IND']
lab = bout_row['LABEL']
size = bout_row['SIZE']
if lab == 1:
if size <= 30:
min_distance = 30
up, down = ref_ind_ar_short[ref_ind_ar_short < bout_ind], ref_ind_ar_short[ref_ind_ar_short > bout_ind]
up_dist = None
down_dist = None
if len(up) != 0:
up_ind = up[-1]
sub_bout_df = bout_df.loc[(bout_df.index > up_ind) & (bout_df.index < bout_ind)]
up_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()
if len(down) != 0:
down_ind = down[0]
sub_bout_df = bout_df.loc[(bout_df.index > bout_ind) & (bout_df.index < down_ind)]
down_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()
if (not up_dist) & (not down_dist):
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 0
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
continue
if not up_dist:
if down_dist:
if down_dist > min_distance:
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 0
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
continue
if not down_dist:
if up_dist:
if up_dist > min_distance:
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 0
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
continue
if up_dist and down_dist:
if (up_dist > min_distance) or (down_dist > min_distance):
# print('flip', start, end, up_dist, down_dist)
bout_df.loc[bout_ind, 'LABEL'] = 0
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
continue
# print('untouched', start, end, up_dist, down_dist)
# smooth the wear between sleep period
tmp_ar = df['PREDICTED_SMOOTH'].values
ff_obout_array = contigous_regions(tmp_ar)
bout_df = pd.DataFrame(ff_obout_array, columns=['START_IND', 'STOP_IND'])
bout_df['SIZE'] = bout_df['STOP_IND'] - bout_df['START_IND'] + 1
tmp_df = df.copy()
for i in range(len(bout_df) - 1):
# print(i)
start, end, this_size = bout_df.loc[i, 'START_IND'], bout_df.loc[i, 'STOP_IND'], bout_df.loc[i, 'SIZE']
lab = df.loc[start]['PREDICTED_SMOOTH']
if this_size <= 20:
prev_start = None
next_start = None
if i != 0:
prev_start, prev_end, prev_size = bout_df.loc[i - 1, 'START_IND'], bout_df.loc[i - 1, 'STOP_IND'], \
bout_df.loc[i - 1, 'SIZE']
prev_lab = df.loc[prev_start]['PREDICTED_SMOOTH']
if i != len(bout_df):
next_start, next_end, next_size = bout_df.loc[i + 1, 'START_IND'], bout_df.loc[i + 1, 'STOP_IND'], \
bout_df.loc[i + 1, 'SIZE']
next_lab = df.loc[next_start]['PREDICTED_SMOOTH']
if prev_start and next_start:
if prev_size >= next_size:
new_lab = prev_lab
else:
new_lab = next_lab
elif prev_start:
new_lab = prev_lab
elif next_start:
new_lab = next_lab
if lab == 2:
# print(start,end,lab,new_lab)
if new_lab == 0:
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
if new_lab == 1:
df.loc[start:end, 'PREDICTED_SMOOTH'] = 1
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
# smooth the wear between sleep period
tmp_ar = df['PREDICTED_SMOOTH'].values
ff_obout_array = contigous_regions(tmp_ar)
bout_df = pd.DataFrame(ff_obout_array, columns=['START_IND', 'STOP_IND'])
bout_df['SIZE'] = bout_df['STOP_IND'] - bout_df['START_IND'] + 1
tmp_df = df.copy()
for i in range(len(bout_df) - 1):
# print(i,len(bout_df))
start, end, this_size = bout_df.loc[i, 'START_IND'], bout_df.loc[i, 'STOP_IND'], bout_df.loc[i, 'SIZE']
lab = df.loc[start]['PREDICTED_SMOOTH']
if this_size <= 20:
prev_start = None
next_start = None
if i != 0:
prev_start, prev_end, prev_size = bout_df.loc[i - 1, 'START_IND'], bout_df.loc[i - 1, 'STOP_IND'], \
bout_df.loc[i - 1, 'SIZE']
prev_lab = df.loc[prev_start]['PREDICTED_SMOOTH']
if i != len(bout_df):
next_start, next_end, next_size = bout_df.loc[i + 1, 'START_IND'], bout_df.loc[i + 1, 'STOP_IND'], \
bout_df.loc[i + 1, 'SIZE']
next_lab = df.loc[next_start]['PREDICTED_SMOOTH']
if prev_start and next_start:
if prev_size >= next_size:
new_lab = prev_lab
else:
new_lab = next_lab
elif prev_start:
new_lab = prev_lab
elif next_start:
new_lab = next_lab
if lab == 0:
# print(start,end,lab,new_lab)
if new_lab == 2:
df.loc[start:end, 'PREDICTED_SMOOTH'] = 2
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
if new_lab == 1:
df.loc[start:end, 'PREDICTED_SMOOTH'] = 1
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
# smooth the wear between sleep period
tmp_ar = df['PREDICTED_SMOOTH'].values
ff_obout_array = contigous_regions(tmp_ar)
bout_df = pd.DataFrame(ff_obout_array, columns=['START_IND', 'STOP_IND'])
bout_df['SIZE'] = bout_df['STOP_IND'] - bout_df['START_IND'] + 1
tmp_df = df.copy()
for i in range(len(bout_df) - 1):
start, end, this_size = bout_df.loc[i, 'START_IND'], bout_df.loc[i, 'STOP_IND'], bout_df.loc[i, 'SIZE']
lab = df.loc[start]['PREDICTED_SMOOTH']
if this_size <= 20:
prev_start = None
next_start = None
if i != 0:
prev_start, prev_end, prev_size = bout_df.loc[i - 1, 'START_IND'], bout_df.loc[
i - 1, 'STOP_IND'], \
bout_df.loc[i - 1, 'SIZE']
prev_lab = df.loc[prev_start]['PREDICTED_SMOOTH']
if i != len(bout_df):
next_start, next_end, next_size = bout_df.loc[i + 1, 'START_IND'], bout_df.loc[
i + 1, 'STOP_IND'], \
bout_df.loc[i + 1, 'SIZE']
next_lab = df.loc[next_start]['PREDICTED_SMOOTH']
if prev_start and next_start:
if prev_size >= next_size:
new_lab = prev_lab
else:
new_lab = next_lab
elif prev_start:
new_lab = prev_lab
elif next_start:
new_lab = next_lab
if lab == 1:
# print(start, end, lab, new_lab)
if new_lab == 0:
df.loc[start:end, 'PREDICTED_SMOOTH'] = 0
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
if new_lab == 2:
df.loc[start:end, 'PREDICTED_SMOOTH'] = 1
df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']
df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']
df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']
return df
def daterange(date1, date2):
for n in range(int((date2 - date1).days) + 1):
yield date1 + timedelta(n)
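# Example: daterange(date(2021, 1, 1), date(2021, 1, 3)) yields Jan 1, Jan 2
# and Jan 3; both endpoints are inclusive, hence the "+ 1" in the range.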
def correctPredictionsSingleDate(folder, dStr, sampling_rate=80):
dObj = datetime.datetime.strptime(dStr, "%Y-%m-%d")
prev = dObj - datetime.timedelta(days=1)
next = dObj + datetime.timedelta(days=1)
prevStr = prev.strftime("%Y-%m-%d")
nextStr = next.strftime("%Y-%m-%d")
oriDF = pd.DataFrame(data=None)
prevFolder = os.path.join(folder, 'data-watch', prevStr)
if os.path.isdir(prevFolder):
daily_feature_file = os.path.join(prevFolder,"SWaN_accel_" + prevStr+"_dailyfeatures.csv")
if(os.path.isfile(daily_feature_file)):
odf = pd.read_csv(daily_feature_file, header=0, skiprows=0, sep=',', compression="infer", quotechar='"',
parse_dates=['HEADER_TIME_STAMP','START_TIME','STOP_TIME'], date_parser=mhealth_timestamp_parser)
oriDF = pd.concat([oriDF, odf], ignore_index=True)
else:
odf = get_daywise_prediction_df(prevFolder, sampling_rate)
oriDF = pd.concat([oriDF, odf], ignore_index=True)
thisFolder = os.path.join(folder, 'data-watch', dStr)
if os.path.isdir(thisFolder):
daily_feature_file = os.path.join(thisFolder, "SWaN_accel_" + dStr + "_dailyfeatures.csv")
if (os.path.isfile(daily_feature_file)):
odf = pd.read_csv(daily_feature_file, header=0, skiprows=0, sep=',', compression="infer", quotechar='"',
parse_dates=['HEADER_TIME_STAMP','START_TIME','STOP_TIME'], date_parser=mhealth_timestamp_parser)
oriDF = pd.concat([oriDF, odf], ignore_index=True)
else:
odf = get_daywise_prediction_df(thisFolder, sampling_rate)
oriDF = pd.concat([oriDF, odf], ignore_index=True)
nextFolder = os.path.join(folder, 'data-watch', nextStr)
if os.path.isdir(nextFolder):
daily_feature_file = os.path.join(nextFolder, "SWaN_accel_" + nextStr + "_dailyfeatures.csv")
if (os.path.isfile(daily_feature_file)):
odf = pd.read_csv(daily_feature_file, header=0, skiprows=0, sep=',', compression="infer", quotechar='"',
parse_dates=['HEADER_TIME_STAMP','START_TIME','STOP_TIME'], date_parser=mhealth_timestamp_parser)
oriDF = pd.concat([oriDF, odf], ignore_index=True)
else:
odf = get_daywise_prediction_df(nextFolder, sampling_rate)
oriDF = pd.concat([oriDF, odf], ignore_index=True)
if oriDF.empty:
print("No data found for this day or previous and next day.")
return
oriDF.sort_values(by='HEADER_TIME_STAMP', inplace=True)
if oriDF.dropna().empty:
print('No prediction data in the folder: '+folder +' for data: ' + dStr)
return
outPath = os.path.join(folder, 'data-watch', dStr, 'SWaN_accel_' + dStr + '_final.csv')
oriDF.replace({'PREDICTED': {2: 1}}, inplace=True)
oriDF['PREDICTED_SMOOTH'] = None
oriDF['PROB_WEAR_SMOOTH'] = None
oriDF['PROB_SLEEP_SMOOTH'] = None
oriDF['PROB_NWEAR_SMOOTH'] = None
tmp_ar = oriDF['PREDICTED'].values
# compute contigous bouts based on window-level prediction
obout_array = contigous_regions(tmp_ar)
# in case only one type of bout present in the data
if (obout_array.shape[0] == 1) & (oriDF.iloc[0]['PREDICTED'] == 1):
oriDF['PREDICTED_SMOOTH'] = 2
oriDF['PROB_WEAR_SMOOTH'] = oriDF[PROB_WEAR]
oriDF['PROB_SLEEP_SMOOTH'] = oriDF[PROB_NWEAR]
oriDF['PROB_NWEAR_SMOOTH'] = oriDF[PROB_SLEEP]
# oriDF.to_csv(outPath, index=False, float_format='%.3f')
# return
elif (obout_array.shape[0] == 1) & (oriDF.iloc[0]['PREDICTED'] == 2):
oriDF['PREDICTED_SMOOTH'] = 2
oriDF['PROB_WEAR_SMOOTH'] = oriDF[PROB_WEAR]
oriDF['PROB_SLEEP_SMOOTH'] = oriDF[PROB_SLEEP]
oriDF['PROB_NWEAR_SMOOTH'] = oriDF[PROB_NWEAR]
# oriDF.to_csv(outPath, index=False, float_format='%.3f')
# return
elif (obout_array.shape[0] == 1) & (oriDF.iloc[0]['PREDICTED'] == 0):
oriDF['PREDICTED_SMOOTH'] = 0
oriDF['PROB_WEAR_SMOOTH'] = oriDF[PROB_WEAR]
oriDF['PROB_SLEEP_SMOOTH'] = oriDF[PROB_SLEEP]
oriDF['PROB_NWEAR_SMOOTH'] = oriDF[PROB_NWEAR]
# oriDF.to_csv(outPath, index=False, float_format='%.3f')
# return
else:
# use z orientation to filter
f_odf = filterUsingZori(obout_array, oriDF, 'PREDICTED', 'ORI_Z_MEDIAN', PROB_WEAR, PROB_SLEEP, PROB_NWEAR)
oriDF = lookBeforeAfter(f_odf)
# l_f_odf = lookBeforeAfter(f_odf)
# l_f_odf.to_csv(outPath, index=False, float_format='%.3f')
currDateObj = datetime.datetime.strptime(dStr, "%Y-%m-%d")
nextDateObj = currDateObj + datetime.timedelta(days=1)
mask = (oriDF['HEADER_TIME_STAMP'] > currDateObj) & (oriDF['HEADER_TIME_STAMP'] < nextDateObj)
final_df = oriDF.loc[mask][
['HEADER_TIME_STAMP', 'PREDICTED_SMOOTH', 'PROB_WEAR_SMOOTH', 'PROB_SLEEP_SMOOTH', 'PROB_NWEAR_SMOOTH']]
print(datetime.datetime.now().strftime("%H:%M:%S") + " Finished performing rule-based filtering.")
final_df.to_csv(outPath, index=False, float_format='%.3f')
def correctPredictions(folder, startdStr, stopdStr, sampling_rate=80):
startdObj = datetime.datetime.strptime(startdStr, "%Y-%m-%d")
stopdObj = datetime.datetime.strptime(stopdStr, "%Y-%m-%d")
# prev = startdObj - datetime.timedelta(days=1)
# next = stopdObj + datetime.timedelta(days=1)
prev = startdObj
next = stopdObj
pid = os.path.basename(folder)
for dt in daterange(prev, next):
dStr = dt.strftime("%Y-%m-%d")
refPath = os.path.join(folder, 'data-watch', dStr, 'SWaN_accel_' + dStr + '_final.csv')
if not os.path.exists(refPath):
print("Performing rule-based filtering for participant: " + pid + " for date: " + dStr)
correctPredictionsSingleDate(folder, dStr, sampling_rate=sampling_rate)
print("Done rule-based filtering for participant: " + pid + " for date: " + dStr)
else:
print("Final rule-based filtered file present for participant: " + pid + " for date " + dStr)
def readBinary(inFile):
tz = os.path.basename(inFile).split('.')[2].split('-')[-1]
hourdiff = int(tz[1:3])
minutediff = int(tz[3:])
if (tz[0] == 'M'):
hourdiff = -int(tz[1:3])
minutediff = -int(tz[3:])
file = open(inFile, "rb")
b = file.read(20)
diction = {}
i = 0
while len(b) >= 20:
t = int.from_bytes(b[0:8], byteorder='big')
x = struct.unpack('>f', b[8:12])[0]
y = struct.unpack('>f', b[12:16])[0]
z = struct.unpack('>f', b[16:20])[0]
diction[i] = {'time': t, 'x': x, 'y': y, 'z': z}
i = i + 1
b = file.read(20)
df = pd.DataFrame.from_dict(diction, "index")
df.columns = col
df['HEADER_TIME_STAMP'] = pd.to_datetime(df['HEADER_TIME_STAMP'], unit='ms') + \
datetime.timedelta(hours=hourdiff) + datetime.timedelta(minutes=minutediff)
return df
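# Sketch of the .baf layout that readBinary assumes, inferred from the parsing above
# rather than from a format spec: each record is 20 bytes -- an 8-byte big-endian
# millisecond timestamp followed by three big-endian float32 values (x, y, z) -- and
# the third dot-separated token of the filename ends in a UTC-offset code such as
# 'P0500' (ahead of UTC) or 'M0500' (behind UTC).
def _example_read_baf(path):
    """Illustrative only: load one .baf file and report its time span."""
    df = readBinary(path)
    print(df['HEADER_TIME_STAMP'].min(), df['HEADER_TIME_STAMP'].max())
    return df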
def get_daywise_prediction_df(inFolder, sampling_rate=80):
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
# trainedModel = pickle.load(open(config.modelPath, "rb"))
# standardScalar = pickle.load(open(config.scalePath, "rb"))
trainedModel = pickle.load(pkg_resources.open_binary(__package__,config.modelPath))
standardScalar = pickle.load(pkg_resources.open_binary(__package__,config.scalePath))
final_day_df = pd.DataFrame()
for file in sorted(
glob(os.path.join(inFolder, '*/AndroidWearWatch-AccelerationCalibrated-NA.*.sensor.baf'))):
outfilePath = os.path.join(os.path.dirname(file),
".".join(os.path.basename(file).split('.')[1:-2]) + ".prediction.csv")
if os.path.exists(outfilePath):
print(outfilePath + " present. Reading that file.")
odf = pd.read_csv(outfilePath, header=0, skiprows=0, sep=',', compression="infer", quotechar='"',
parse_dates=[0], date_parser=mhealth_timestamp_parser)
final_day_df = pd.concat([final_day_df, odf], ignore_index=True)
continue
print(datetime.datetime.now().strftime("%H:%M:%S") + ' Reading binary file :' + file)
try:
df = readBinary(file)
except:
print('Issue with converting baf file to a dataframe - ' + file)
continue
print(datetime.datetime.now().strftime("%H:%M:%S") + ' Computing feature set for :' + file)
time_grouper = pd.Grouper(key='HEADER_TIME_STAMP', freq='30s')
grouped_df = df.groupby(time_grouper)
feature_df = pd.DataFrame()
for name, group in grouped_df:
if len(group) > sampling_rate * 15:
op = get_feature_sleep(group, sampling_rate)
op['HEADER_TIME_STAMP'] = name
feature_df = pd.concat([feature_df, op], ignore_index=True)
final_feature_df = feature_df.dropna(how='any', axis=0, inplace=False)
if final_feature_df.empty:
print("No feature row computed or remaining after dropping zero rows. So not moving to prediction.")
continue
final_feature_df.rename(columns={'HEADER_TIME_STAMP': 'START_TIME'}, inplace=True)
final_feature_df['HEADER_TIME_STAMP'] = final_feature_df['START_TIME']
final_feature_df['STOP_TIME'] = final_feature_df['START_TIME'] + pd.Timedelta(seconds=30)
print(datetime.datetime.now().strftime("%H:%M:%S") + " Performing window-level classification for :" + file)
final_feature_df = final_feature_df.dropna()
subfdata = final_feature_df[config.feature_lis]
sfdata = standardScalar.transform(subfdata)
prediction_prob = trainedModel.predict_proba(sfdata)
prediction = np.argmax(prediction_prob, axis=1)
p = prediction.reshape((-1, 1))
final_feature_df["PREDICTED"] = p
final_feature_df['PROB_WEAR'] = prediction_prob[:, 0]
final_feature_df['PROB_SLEEP'] = prediction_prob[:, 1]
final_feature_df['PROB_NWEAR'] = prediction_prob[:, 2]
final_day_df = pd.concat([final_day_df, final_feature_df], ignore_index=True)
dateStr = os.path.basename(inFolder)
outPath = os.path.join(inFolder, "SWaN_accel_" + dateStr + "_dailyfeatures.csv")
final_day_df.to_csv(outPath, index=False, float_format="%.3f")
print("Created prediction file:" + outPath)
return final_day_df
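# Minimal sketch of the per-window classification done in the loop above: samples are
# grouped into 30-second windows, a window is used only if it holds more than
# sampling_rate * 15 samples, and the model's predict_proba output is reduced with
# argmax (column 0 = wear, 1 = sleep, 2 = non-wear). `model` and `scaler` stand in for
# the pickled objects loaded in get_daywise_prediction_df.
def _classify_window_example(window_df, model, scaler, sampling_rate=80):
    feats = get_feature_sleep(window_df, sampling_rate)
    probs = model.predict_proba(scaler.transform(feats[config.feature_lis]))
    return int(np.argmax(probs, axis=1)[0])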
def get_feature_sleep(tdf, sampling):
X_axes = utils.as_float64(tdf.values[:, 1:])
result_axes = feature_set.compute_extra_features(X_axes, sampling)
return result_axes
def main(sampling_rate=None,input_folder=None,file_path=None,startdateStr=None,stopdateStr=None):
# def main():
# sampling_rate = int(sys.argv[1])
# input_folder = sys.argv[2]
# file_path = sys.argv[3]
# startdateStr = sys.argv[4]
# stopdateStr = None
# len_args = len(sys.argv)
# if len_args < 4:
# print("Syntax error. It should be one of these formats:\n"
# "python SWaN_accelforTIME_final.py SAMPLING RATE INPUT_FOLDER PARTICIPATN_ID/FILE_PATH_WITH_PARTICIPANT_ID\n"
# "python SWaN_accelforTIME_final.py SAMPLING RATE INPUT_FOLDER PARTICIPANT_ID/FILE_PATH_WITH_PARTICIPANT_ID YYYY_MM_DD\n "
# "python SWaN_accelforTIME_final.py SAMPLING RATE INPUT_FOLDER PARTICIPANT_ID/FILE_PATH_WITH_PARTICIPANT_ID YYYY_MM_DD YYYY_MM_DD\n")
# return
if (startdateStr is None) and (stopdateStr is None):
print("doing for all dates")
# sampling_rate = int(sys.argv[1])
# input_folder = sys.argv[2]
# file_path = sys.argv[3]
if not (file_path.endswith('.txt')):
pid = file_path + "@timestudy_com"
sub_folder = os.path.join(input_folder, pid)
final_input_folder = os.path.join(input_folder, pid)
date_lis = [os.path.basename(x) for x in glob(os.path.join(final_input_folder, 'data-watch', '*'))]
for dateStr in date_lis:
final_input_folder = os.path.join(input_folder, pid, 'data-watch', dateStr)
if not os.path.isdir(final_input_folder):
print("Missing folder: " + final_input_folder)
continue
refPath = os.path.join(final_input_folder, 'SWaN_accel_' + dateStr + '_final.csv')
if not os.path.exists(refPath):
print("Performing rule-based filtering for participant: " + pid + " for date: " + dateStr)
correctPredictionsSingleDate(sub_folder, dateStr, sampling_rate=sampling_rate)
print("Done filtering predictions.")
else:
print("Final rule-based filtered file present.")
return
if not (os.path.isfile(file_path)):
print("File with participant ids does not exist")
return
with open(file_path) as f:
content = f.readlines()
pidLis = [x.strip() for x in content]
for pid in pidLis:
pid = pid + "@timestudy_com"
sub_folder = os.path.join(input_folder, pid)
final_input_folder = os.path.join(input_folder, pid)
date_lis = [os.path.basename(x) for x in glob(os.path.join(final_input_folder, 'data-watch', '*'))]
for dateStr in date_lis:
final_input_folder = os.path.join(input_folder, pid, 'data-watch', dateStr)
if not os.path.isdir(final_input_folder):
print("Missing folder: " + final_input_folder)
continue
refPath = os.path.join(final_input_folder, 'SWaN_accel_' + dateStr + '_final.csv')
if not os.path.exists(refPath):
print("Performing rule-based filtering for participant: " + pid + " for date: " + dateStr)
correctPredictionsSingleDate(sub_folder, dateStr, sampling_rate=sampling_rate)
print("Done filtering predictions.")
else:
print("Final rule-based filtered file present.")
return
if (startdateStr) and (stopdateStr is None):
dateStr = startdateStr
# print("doing for a specific date")
# sampling_rate = int(sys.argv[1])
# input_folder = sys.argv[2]
# file_path = sys.argv[3]
# dateStr = sys.argv[4]
if not (file_path.endswith('.txt')):
pid = file_path + "@timestudy_com"
sub_folder = os.path.join(input_folder, pid)
final_input_folder = os.path.join(input_folder, pid, 'data-watch', dateStr)
if not os.path.isdir(final_input_folder):
print("Missing folder: " + final_input_folder)
return
refPath = os.path.join(final_input_folder, 'SWaN_accel_' + dateStr + '_final.csv')
if not os.path.exists(refPath):
print(datetime.datetime.now().strftime("%H:%M:%S") + " Performing rule-based filtering for participant: " + pid + " for date: " + dateStr)
correctPredictionsSingleDate(sub_folder, dateStr, sampling_rate=sampling_rate)
print("Done filtering predictions.")
else:
print("Final rule-based filtered file present " + refPath)
return
if not (os.path.isfile(file_path)):
print("File with participant ids does not exist")
return
with open(file_path) as f:
content = f.readlines()
pidLis = [x.strip() for x in content]
for pid in pidLis:
pid = pid + "@timestudy_com"
sub_folder = os.path.join(input_folder, pid)
final_input_folder = os.path.join(input_folder, pid, 'data-watch', dateStr)
if not os.path.isdir(final_input_folder):
print("Missing folder: " + final_input_folder)
continue
refPath = os.path.join(final_input_folder, 'SWaN_accel_' + dateStr + '_final.csv')
if not os.path.exists(refPath):
print("Performing rule-based filtering for participant: " + pid + " for date: " + dateStr)
correctPredictionsSingleDate(sub_folder, dateStr, sampling_rate=sampling_rate)
print("Done filtering predictions.")
else:
print("Final rule-based filtered file present.")
return
if (startdateStr and stopdateStr):
print("doing for a date range")
# sampling_rate = int(sys.argv[1])
# input_folder = sys.argv[2]
# file_path = sys.argv[3]
# startdateStr = sys.argv[4]
# stopdateStr = sys.argv[5]
if not (file_path.endswith('.txt')):
pid = file_path + "@timestudy_com"
sub_folder = os.path.join(input_folder, pid)
first_input_folder = os.path.join(input_folder, pid, 'data-watch', startdateStr)
if not os.path.isdir(first_input_folder):
print("Missing folder: " + first_input_folder)
return
last_input_folder = os.path.join(input_folder, pid, 'data-watch', stopdateStr)
if not os.path.isdir(last_input_folder):
print("Missing folder: " + last_input_folder)
return
print(
"Performing rule-based filtering for participant: " + pid + " for date between: " + startdateStr + " and " + stopdateStr)
correctPredictions(sub_folder, startdateStr, stopdateStr, sampling_rate=sampling_rate)
print("Done filtering predictions.")
return
if not (os.path.isfile(file_path)):
print("File with participant ids does not exist")
return
with open(file_path) as f:
content = f.readlines()
pidLis = [x.strip() for x in content]
for pid in pidLis:
pid = pid + "@timestudy_com"
sub_folder = os.path.join(input_folder, pid)
first_input_folder = os.path.join(input_folder, pid, 'data-watch', startdateStr)
if not os.path.isdir(first_input_folder):
print("Missing folder: " + first_input_folder)
continue
last_input_folder = os.path.join(input_folder, pid, 'data-watch', stopdateStr)
if not os.path.isdir(last_input_folder):
print("Missing folder: " + last_input_folder)
continue
print(
"Performing rule-based filtering for participant: " + pid + " for date between: " + startdateStr + " and " + stopdateStr)
correctPredictions(sub_folder, startdateStr, stopdateStr, sampling_rate=sampling_rate)
print("Done filtering predictions.")
# if __name__ == "__main__":
# main()
|
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 28-Aug-2017
# @Email: valle.mrv@gmail.com
# @Filename: models.py
# @Last modified by: valle
# @Last modified time: 15-Feb-2018
# @License: Apache license version 2.0
from __future__ import unicode_literals
from django.db.models import Q
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from adminshop.models import (Clientes, Direcciones, Proveedores,
Productos, Presupuesto)
# Create your models here.
CHOICES_TIPO_PAGO = (
('EF', 'Efectivo'),
('TJ', 'Tarjeta'),
('TB', 'Transferencia bancaria'),
('PY', 'Paypal'),
('CR', 'Contrarembolso'),
)
CHOICES_TIPO_VENDEDOR = (
('CL', 'Cliente'),
('PV', 'Proveedor'),
('NO', 'No asignado')
)
CHOICES_TIPO_DOC = (
('CP', 'Compra'),
('FT', 'Factura'),
('RP', 'Reparacion'),
('AB', 'Abono'),
('OS', 'Testeo')
)
class DocumentSendPolice(models.Model):
fecha_creado = models.DateTimeField(auto_now_add=True)
enviado = models.BooleanField(default=False)
intervalo = models.CharField(max_length=25)
class Meta:
ordering = ["-fecha_creado"]
class DocumentSendGestoria(models.Model):
fecha_creado = models.DateTimeField(auto_now_add=True)
enviado = models.BooleanField(default=False)
intervalo = models.CharField(max_length=25)
class Meta:
ordering = ["-fecha_creado"]
class DocumentoTesteo(models.Model):
cliente = models.ForeignKey("clientes", on_delete=models.CASCADE )
producto = models.ForeignKey("Productos", on_delete=models.CASCADE )
empleado = models.ForeignKey(User, on_delete=models.CASCADE )
firma = models.FileField(upload_to='firmas', null=True)
frimado = models.BooleanField(default=False)
fecha = models.DateTimeField(auto_now=True)
def __unicode__(self):
return str(self.cliente)
class Meta:
ordering = ["-id"]
class ConfigSite(models.Model):
ISP = models.IntegerField(blank=True, default=21)
email_policia = models.EmailField(max_length=100, blank=True)
email_gestoria = models.EmailField(max_length=100, blank=True)
codigo_compra = models.IntegerField("Inicio contador", default=3023)
firma_tienda = models.FileField(upload_to='config', blank=True)
logo_tienda = models.FileField(upload_to='config', blank=True)
class Compras(models.Model):
vendedor_id = models.IntegerField(null=True)
producto = models.ForeignKey("Productos", on_delete=models.CASCADE)
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
fecha_entrada = models.DateTimeField(auto_now_add=True)
codigo_compra = models.CharField(max_length=150, null=True)
firma = models.FileField(upload_to='firmas', null=True)
tipo_compra = models.CharField(max_length=4, default="REBU", choices=[("REBU","REBU"), ("ISP","ISP")])
doc_proveedor = models.FileField(upload_to='doc_proveedor', null=True, default=None, max_length=500)
enviar_policia = models.BooleanField("Enviar a la policia", blank=True, default=True)
tipo_vendedor = models.CharField(
max_length=2,
choices=CHOICES_TIPO_VENDEDOR,
default="NO",
)
def set_vendedor(self, vendedor):
if vendedor != None:
self.vendedor_id = vendedor.id
if type(vendedor) == Clientes:
self.tipo_vendedor = "CL"
else:
self.tipo_vendedor = "PV"
else:
self.tipo_vendedor = "NO"
def get_vendedor(self):
if self.tipo_vendedor == "CL":
clientes = Clientes.objects.filter(Q(pk=self.vendedor_id))
if len(clientes) > 0:
cliente = clientes[0]
vendedor = {}
vendedor["DNI"] = cliente.DNI
vendedor["nombre"] = cliente.nombre_completo
direcciones = Direcciones.objects.filter(cliente_id=self.vendedor_id)
if len(direcciones) > 0:
direccion = direcciones[0]
else:
direccion = ""
vendedor["direccion"] = direccion
vendedor["telefono"] = cliente.telefono
vendedor["email"] = cliente.email
vendedor["id"] = cliente.id
return vendedor
else:
return {"DNI":"", "nombre":"", 'direccion':"", 'telefono':'', "email": "", "id":-1}
elif self.tipo_vendedor == "PV":
ps = Proveedores.objects.filter(Q(pk=self.vendedor_id))
if len(ps) > 0:
p = ps[0]
vendedor = {}
vendedor["DNI"] = p.CIF
vendedor["nombre"] = p.razon_social
vendedor["direccion"] = p.direccion
vendedor["telefono"] = p.telefono
vendedor["email"] = p.email
vendedor["id"] = p.id
return vendedor
else:
return {"DNI":"", "nombre":"", 'direccion':"", 'telefono':'', "email": "", "id":-1}
else:
return {"DNI":"", "nombre":"", 'direccion':"", 'telefono':'', "email": "", "id":-1}
def save(self, *args, **kwargs):
super(Compras, self).save()
if self.codigo_compra == None:
self.codigo_compra = ConfigSite.objects.all()[0].codigo_compra+self.pk
super(Compras, self).save()
class Meta:
ordering= ["-id"]
class Ventas(models.Model):
cliente = models.ForeignKey("Clientes", on_delete=models.SET_NULL, null=True)
empleado = models.CharField(max_length=150)
empleado_id = models.IntegerField(default=-1)
fecha_salida= models.DateTimeField(auto_now_add=True)
firma = models.FileField(upload_to='firmas', null=True)
entrega = models.DecimalField(max_digits=10, decimal_places=2, default=0)
forma_pago = models.CharField(
max_length=2,
choices=CHOICES_TIPO_PAGO,
default="EF",
)
def get_user(self):
empleados = User.objects.filter(pk=self.empleado_id)
if len(empleados) > 0:
return empleados[0]
else:
return User()
class Meta:
ordering = ['-fecha_salida']
class LineasVentas(models.Model):
venta = models.ForeignKey("Ventas", on_delete=models.CASCADE)
detalle = models.CharField(max_length=150)
codigo_compra = models.CharField(max_length=150)
ns_imei = models.CharField(max_length=150)
descuento = models.DecimalField(max_digits=6, decimal_places=2)
can = models.IntegerField()
p_unidad = models.DecimalField(max_digits=10, decimal_places=2)
class Abonos(models.Model):
factura = models.ForeignKey("Ventas", on_delete=models.CASCADE)
cliente = models.ForeignKey("Clientes", on_delete=models.SET_NULL, null=True)
empleado = models.CharField(max_length=150)
empleado_id = models.IntegerField(default=-1)
fecha_salida= models.DateTimeField(auto_now_add=True)
firma = models.FileField(upload_to='firmas', null=True)
forma_pago = models.CharField(
max_length=2,
choices=CHOICES_TIPO_PAGO,
default="EF",
)
def get_user(self):
empleados = User.objects.filter(pk=self.empleado_id)
if len(empleados) > 0:
return empleados[0]
else:
return User()
class Meta:
ordering = ['-fecha_salida']
class LineasAbonos(models.Model):
abono = models.ForeignKey("Abonos", on_delete=models.CASCADE)
detalle = models.CharField(max_length=150)
codigo_compra = models.CharField(max_length=150)
ns_imei = models.CharField(max_length=150)
descuento = models.DecimalField(max_digits=5, decimal_places=2)
can = models.IntegerField()
p_unidad = models.DecimalField(max_digits=10, decimal_places=2)
class Historial(models.Model):
cliente = models.ForeignKey("Clientes", on_delete=models.CASCADE)
producto = models.ForeignKey("Productos", on_delete=models.CASCADE)
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
fecha = models.DateTimeField(auto_now_add=True)
detalle = models.CharField(max_length=150)
def __unicode__(self):
return self.detalle
class Meta:
ordering = ["-id"]
class Firmas(models.Model):
tipo_documento = models.CharField(
max_length=2,
choices=CHOICES_TIPO_DOC,
default="CP",
)
empleado_id = models.IntegerField()
documento_id = models.IntegerField()
fecha = models.DateTimeField(auto_now=True)
firmado = models.BooleanField(default=False)
def get_user(self):
empleados = User.objects.filter(pk=self.empleado_id)
if len(empleados) > 0:
return empleados[0]
else:
return User()
def get_nombre_cliente(self):
if self.tipo_documento == "CP":
try:
compra = Compras.objects.get(pk=self.documento_id)
vendedor = compra.get_vendedor()
except:
vendedor = { "nombre": "Documento borrado"}
return vendedor["nombre"]
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
cliente = p.cliente.nombre_completo
except:
cliente = "Documento borrado"
return cliente
elif self.tipo_documento == "OS":
p = DocumentoTesteo.objects.get(pk=self.documento_id)
cliente = p.cliente
return cliente.nombre_completo
def get_ns_imei(self):
if self.tipo_documento == "CP":
try:
compra = Compras.objects.get(pk=self.documento_id)
return compra.producto.ns_imei
except:
return "Documento borrado"
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
ns_imei = p.producto.ns_imei
except:
ns_imei = "Documento borrado"
return ns_imei
elif self.tipo_documento == "OS":
p = DocumentoTesteo.objects.get(pk=self.documento_id)
return p.producto.ns_imei
def get_producto_pk(self):
if self.tipo_documento == "CP":
try:
compra = Compras.objects.get(pk=self.documento_id)
return compra.producto.id
except:
return 0
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
ns_imei = p.producto.id
except:
ns_imei = 0
return ns_imei
elif self.tipo_documento == "OS":
p = DocumentoTesteo.objects.get(pk=self.documento_id)
return p.producto.pk
def get_documento(self):
if self.tipo_documento == "CP":
compra = Compras.objects.get(pk=self.documento_id)
vendedor = compra.get_vendedor()
datos_send= {
"pk": compra.pk,
"id_producto": compra.producto.pk,
'nombre': vendedor["nombre"],
"DNI": vendedor["DNI"],
"ns_imei": compra.producto.ns_imei,
"precio_compra": str(compra.producto.precio_compra),
}
return "tienda/sign/sign_compras.html", datos_send
elif self.tipo_documento == "RP":
try:
p = Presupuesto.objects.get(pk=self.documento_id)
cliente = p.cliente
datos_send= {
"pk": p.pk,
"id_producto": p.producto.pk,
'nombre': cliente.nombre_completo,
"DNI": cliente.DNI,
"ns_imei": p.producto.ns_imei,
}
return "tienda/sign/sign_reparacion.html", datos_send
except:
self.delete()
return None, None
elif self.tipo_documento == "OS":
try:
p = DocumentoTesteo.objects.get(pk=self.documento_id)
cliente = p.cliente
datos_send= {
"pk": p.pk,
"id_producto": p.producto.pk,
'nombre': cliente.nombre_completo,
"DNI": cliente.DNI,
"ns_imei": p.producto.ns_imei,
}
return "tienda/sign/sign_testeo.html", datos_send
except:
self.delete()
return None, None
class Meta:
ordering = ["-fecha"]
|
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs42_detached_award_financial_assistance_1'
def test_column_headers(database):
expected_subset = {'row_number', 'place_of_performance_forei', 'place_of_perform_country_c', 'record_type',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test PrimaryPlaceOfPerformanceForeignLocationDescription is required for foreign places of performance
(i.e., when PrimaryPlaceOfPerformanceCountryCode does not equal USA) for record type 2. This test shouldn't
care about content when country_code is USA (that is for another validation).
"""
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='description',
place_of_perform_country_c='UK', record_type=2,
correction_delete_indicatr='')
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='description',
place_of_perform_country_c='USA', record_type=2,
correction_delete_indicatr=None)
det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=None,
place_of_perform_country_c='USA', record_type=2,
correction_delete_indicatr='c')
det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='',
place_of_perform_country_c='UsA', record_type=2,
correction_delete_indicatr='C')
det_award_5 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='',
place_of_perform_country_c='UK', record_type=1,
correction_delete_indicatr='')
det_award_6 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=None,
place_of_perform_country_c='UK', record_type=1,
correction_delete_indicatr='')
# Ignore correction delete indicator of D
det_award_7 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='',
place_of_perform_country_c='UK', record_type=2,
correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4,
det_award_5, det_award_6, det_award_7])
assert errors == 0
def test_failure(database):
""" Test failure PrimaryPlaceOfPerformanceForeignLocationDescription is required for foreign places of performance
(i.e., when PrimaryPlaceOfPerformanceCountryCode does not equal USA) for record type 2.
"""
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei='',
place_of_perform_country_c='UK', record_type=2,
correction_delete_indicatr='')
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=None,
place_of_perform_country_c='UK', record_type=2,
correction_delete_indicatr='c')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2])
assert errors == 2
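# Plain-Python sketch of the rule exercised above (the actual check is the SQL rule file
# named in _FILE; this just mirrors the docstrings): rows whose correction delete
# indicator is 'D'/'d' are ignored, and record type 2 rows with a non-USA country code
# must carry a foreign place of performance description.
def _fails_fabs42_1(row):
    if (row.get('correction_delete_indicatr') or '').upper() == 'D':
        return False
    foreign = (row.get('place_of_perform_country_c') or '').upper() != 'USA'
    missing_desc = not row.get('place_of_performance_forei')
    return row.get('record_type') == 2 and foreign and missing_desc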
|
#!/opt/local/bin/python
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import datetime
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.cm as cm
import sys
from mpl_toolkits.basemap import Basemap
import os
import calendar
#assim_out="assim_out_E2O_womc"
#assim_out="assim_out_E2O_wmc"
#assim_out="assim_out_biased_womc"
#assim_out="assim_out_ECMWF_womc_baised_if_fixed1.10"
#sys.path.append('../'+assim_out+'/')
os.system("ln -sf ../gosh/params.py params.py")
import params as pm
experiment="E2O_wmc_06"
#assim_out=pm.DA_dir()+"/out/"+pm.experiment()+"/assim_out"
assim_out=pm.DA_dir()+"/out/"+experiment+"/assim_out"
print(assim_out)
#----
def mk_dir(sdir):
try:
os.makedirs(sdir)
except:
pass
#----
mk_dir(assim_out+"/fig")
mk_dir(assim_out+"/fig/AI")
#----
#argvs = sys.argv
year=2004
month=1
date=1
start_dt=datetime.date(year,month,date)
size=60
south= -90
north= 90
west= -180
east= 180
land="#FFFFFF"
water="#C0C0C0"
londiff=(east-west)*4
latdiff=(north-south)*4
npix=(90-north)*4
spix=(90-south)*4
wpix=(180+west)*4
epix=(180+east)*4
#lastday=int(argvs[1])
lastday=365 #int(argvs[1])
if calendar.isleap(year):
lastday=366
else:
lastday=365
N=lastday
#--
# run calc_stat.py to create the statistical maps
ratio=np.fromfile(assim_out+"/stat/annualmeanAI.bin",np.float32).reshape(720,1440)
###ratio=np.zeros((spix-npix)*(epix-wpix)).reshape([spix-npix,epix-wpix])
###count=np.zeros((spix-npix)*(epix-wpix)).reshape([spix-npix,epix-wpix])
#*******************************************************
###if os.path.exists("../"+assim_out+"/img/AImap/annualmeanAI.bin"):
### ratio=np.fromfile("../"+assim_out+"/img/AImap/annualmeanAI.bin",np.float32).reshape(720,1440)
###else:
### for day in np.arange(0,lastday):
### #for day in np.arange(100,110):
### # analyse date
### target_dt=start_dt+datetime.timedelta(days=day)
### yyyy='%04d' % (target_dt.year)
### mm='%02d' % (target_dt.month)
### dd='%02d' % (target_dt.day)
### print yyyy,mm,dd
###
### # next day name
### next_dt=start_dt+datetime.timedelta(days=day+1)
### nxt_yyyy='%04d' % (next_dt.year)
### nxt_mm='%02d' % (next_dt.month)
### nxt_dd='%02d' % (next_dt.day)
###
### # True Discharge
### fname="../assim_out/rivout/true/rivout"+yyyy+mm+dd+".bin"
### org=np.fromfile(fname,np.float32).reshape([720,1440])
###
### # open loop
### opn=[]
### for num in np.arange(1,pm.ens_mem()+1):
### numch = "%03d" % num
### fname = "../assim_out/rivout/open/rivout"+yyyy+mm+dd+"_"+numch+".bin"
### opn.append(np.fromfile(fname,np.float32).reshape([720,1440]))
### opn = np.array(opn)
### opn_mean=np.mean(opn,axis=0)
###
### # assimilated
### asm=[]
### for num in np.arange(1,pm.ens_mem()+1):
### numch = "%03d" % num
### fname = "../assim_out/rivout/assim/rivout"+yyyy+mm+dd+"_"+numch+".bin"
### asm.append(np.fromfile(fname,np.float32).reshape([720,1440]))
### asm = np.array(asm)
### asm_mean=np.mean(asm,axis=0)
###
### # compute the assimilation index
### #ai=1-abs((asm_mean[npix:spix,wpix:epix]-opn_mean[npix:spix,wpix:epix])/(org[npix:spix,wpix:epix]-opn_mean[npix:spix,wpix:epix]+1e-20)-1)
### ai=1.- np.absolute((asm_mean[npix:spix,wpix:epix]-opn_mean[npix:spix,wpix:epix])/((org[npix:spix,wpix:epix]-opn_mean[npix:spix,wpix:epix])+1e-20)-1.)
### # read restart file for making ocean mask
### #fname = "../CaMa_in/restart/true/restart" + nxt_yyyy + nxt_mm + nxt_dd + "T.bin"
### fname = pm.CaMa_dir()+"/map/global_15min/rivout.bin"
### trueforo = np.fromfile(fname,np.float32).reshape([2,720,1440])
### # ocean [ 0:ocean, 1:not ocean ]
### #ocean = (trueforo[0,npix:spix,wpix:epix]<1e18) * 1
###
### # river [ 0:not river, 1:river ]
### river = (trueforo[0,npix:spix,wpix:epix]>500.) * 1
###
### # error < 10%
### error=((np.absolute(org[npix:spix,wpix:epix]-opn_mean[npix:spix,wpix:epix])/(org[npix:spix,wpix:epix]+1e-20))>0.1)*(1)
### error=np.nan_to_num(error)
### #--
### river = river*error
###
### # ratio
### ratio_n = ai * river #* ocean
### ratio_n = np.ma.fix_invalid(ratio_n,fill_value=0.0)
### ratio = ratio + (ratio_n<0)*0+(ratio_n>1)*1+(ratio_n>=0)*(ratio_n<=1)*ratio_n
### count = count + river #* ocean
###
### ratio = ratio / (count.astype(np.float32)+1.0e-20)
### river = (trueforo[0,npix:spix,wpix:epix]>500.) * 1
### ratio = ratio * river
###
### ai = ai * river
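# For reference, the assimilation index computed in the commented-out block above is
#   AI = 1 - | (assim_mean - open_mean) / (true - open_mean) - 1 |
# so AI approaches 1 where assimilation pulls the open-loop ensemble onto the truth and
# 0 where it does not. A sketch of that formula as a function (the small epsilon avoids
# division by zero, as in the original):
def assimilation_index(asm_mean, opn_mean, org):
    return 1.0 - np.absolute((asm_mean - opn_mean) / ((org - opn_mean) + 1e-20) - 1.0)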
# assim/true
plt.close()
cmap=cm.viridis_r
cmap.set_under("w",alpha=0)
resol=1
plt.figure(figsize=(7*resol,3*resol))
m = Basemap(projection='cyl',llcrnrlat=south,urcrnrlat=north,llcrnrlon=west,urcrnrlon=east, lat_ts=0,resolution='c')
#m.drawcoastlines( linewidth=0.3, color='k' )
m.fillcontinents(color=land,lake_color=water)
m.drawmapboundary(fill_color=water)
m.drawparallels(np.arange(south,north+0.1,20), labels = [1,0,0,0], fontsize=10,linewidth=0.1)
m.drawmeridians(np.arange(west,east+0.1,40), labels = [0,0,0,1], fontsize=10,linewidth=0.1)
fname = pm.CaMa_dir()+"/map/glb_15min/outclm.bin"
trueforo = np.fromfile(fname,np.float32).reshape([2,720,1440])[0]
river=(trueforo>100.)*1.0
ratio=np.ma.fix_invalid(ratio).data
ratio=ratio*river
data=ratio[npix:spix,wpix:epix]
im = m.imshow(np.flipud(data),vmin=1e-20, vmax=1,interpolation="nearest",cmap=cmap,zorder=100)
#im = m.imshow(np.flipud(ai),vmin=1e-20, vmax=1,interpolation="nearest",cmap=cmap,zorder=100)
cbar=m.colorbar(im,"right",size="2%")
cbar.set_label("annual mean AI")
plt.title("annual mean Assimilation Index ")#+yyyy+"-"+mm+"-"+dd)
plt.savefig(assim_out+"/fig/AI/AImap.png",dpi=300,bbox_inches="tight", pad_inches=0.05)
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import ClassVar, Optional
import warnings
@dataclass(init=False, repr=False, frozen=True)
class Channel:
"""Base class of a hardware channel.
Not to be initialized itself, but rather through a child class and the
``Local`` or ``Global`` classmethods.
Attributes:
name: The name of channel.
basis: The addressed basis name.
addressing: "Local" or "Global".
max_abs_detuning: Maximum possible detuning (in rad/µs), in absolute
value.
max_amp: Maximum pulse amplitude (in rad/µs).
retarget_time: Maximum time to change the target (in ns).
max_targets: How many qubits can be addressed at once by the same beam.
clock_period: The duration of a clock cycle (in ns). The duration of a
pulse or delay instruction is enforced to be a multiple of the
clock cycle.
min_duration: The shortest duration an instruction can take.
max_duration: The longest duration an instruction can take.
Example:
To create a channel targeting the 'ground-rydberg' transition globally,
call ``Rydberg.Global(...)``.
"""
name: ClassVar[str]
basis: ClassVar[str]
addressing: str
max_abs_detuning: float
max_amp: float
    retarget_time: Optional[int] = None
max_targets: int = 1
clock_period: int = 4 # ns
min_duration: int = 16 # ns
max_duration: int = 67108864 # ns
@classmethod
def Local(cls, max_abs_detuning, max_amp, retarget_time=220, **kwargs):
"""Initializes the channel with local addressing.
Args:
max_abs_detuning (float): Maximum possible detuning (in rad/µs), in
absolute value.
max_amp(float): Maximum pulse amplitude (in rad/µs).
retarget_time (int): Maximum time to change the target (in ns).
"""
return cls('Local', max_abs_detuning, max_amp,
retarget_time=retarget_time, **kwargs)
@classmethod
def Global(cls, max_abs_detuning, max_amp, **kwargs):
"""Initializes the channel with global addressing.
Args:
max_abs_detuning (float): Maximum possible detuning (in rad/µs), in
absolute value.
max_amp(float): Maximum pulse amplitude (in rad/µs).
"""
return cls('Global', max_abs_detuning, max_amp, **kwargs)
def validate_duration(self, duration):
"""Validates and adapts the duration of an instruction on this channel.
Args:
duration (int): The duration to validate.
"""
try:
_duration = int(duration)
except (TypeError, ValueError):
raise TypeError("duration needs to be castable to an int but "
"type %s was provided" % type(duration))
        if _duration < self.min_duration:
raise ValueError("duration has to be at least "
+ f"{self.min_duration} ns.")
        if _duration > self.max_duration:
raise ValueError("duration can be at most "
+ f"{self.max_duration} ns.")
if duration % self.clock_period != 0:
_duration += self.clock_period - _duration % self.clock_period
warnings.warn(f"A duration of {duration} ns is not a multiple of "
f"the channel's clock period ({self.clock_period} "
f"ns). It was rounded up to {_duration} ns.")
return _duration
def __repr__(self):
s = ".{}(Max Absolute Detuning: {} rad/µs, Max Amplitude: {} rad/µs"
config = s.format(self.addressing, self.max_abs_detuning, self.max_amp)
if self.addressing == 'Local':
config += f", Target time: {self.retarget_time} ns"
if self.max_targets > 1:
config += f", Max targets: {self.max_targets}"
config += f", Basis: '{self.basis}'"
return self.name + config + ")"
@dataclass(init=True, repr=False, frozen=True)
class Raman(Channel):
"""Raman beam channel.
Channel targeting the transition between the hyperfine ground states, in
which the 'digital' basis is encoded. See base class.
"""
name: ClassVar[str] = 'Raman'
basis: ClassVar[str] = 'digital'
@dataclass(init=True, repr=False, frozen=True)
class Rydberg(Channel):
"""Rydberg beam channel.
Channel targeting the transition between the ground and rydberg states,
    thus encoding the 'ground-rydberg' basis. See base class.
"""
name: ClassVar[str] = 'Rydberg'
basis: ClassVar[str] = 'ground-rydberg'
@dataclass(init=True, repr=False, frozen=True)
class Microwave(Channel):
"""Microwave adressing channel.
Channel targeting the transition between two rydberg states, thus encoding
the 'XY' basis. See base class.
"""
name: ClassVar[str] = 'Microwave'
basis: ClassVar[str] = 'XY'
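# Usage sketch (illustrative values, not validated hardware specifications):
if __name__ == "__main__":
    ryd = Rydberg.Global(max_abs_detuning=70, max_amp=15)
    print(ryd)                         # repr assembled by Channel.__repr__
    print(ryd.validate_duration(102))  # rounded up to 104 ns (4 ns clock period)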
|
"""Views for the wordrelaygame app."""
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.views.generic import View, DetailView, ListView
from .forms import WordForm
from .models import Story
class HomeView(DetailView):
"""Main view that displays the current story & information about the game.
Shows the current story and information about the game. If there is a user
    logged in and it is their turn, it shows a form to add a word to the story.
"""
context_object_name = 'latest_story'
model = Story
template_name = 'wordrelaygame/home.html'
def get_object(self, queryset=None):
try:
latest_story = Story.objects.latest('date_created')
except Story.DoesNotExist:
return None
else:
return latest_story
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Only pass the form to the context if the current user is different
# to the user that wrote the last word of the story.
try:
latest_word_auth_id = (self.object.words.order_by('-id')[0].
author.id)
except (AttributeError, IndexError):
latest_word_auth_id = None
if(kwargs.get('current_user_id') != latest_word_auth_id or
latest_word_auth_id is None):
context['form'] = WordForm()
return context
def get(self, request, *args, **kwargs):
self.object = self.get_object() # pylint: disable=locally-disabled, W0201
if request.user.is_authenticated:
current_user_id = request.user.id
else:
current_user_id = None
context = self.get_context_data(object=self.object,
current_user_id=current_user_id)
return self.render_to_response(context)
class StoryListView(ListView):
"""Show an archive of past stories."""
model = Story
paginate_by = 5
def get_queryset(self):
queryset = super().get_queryset()
return queryset.order_by('-date_created')
class AddWordView(LoginRequiredMixin, View):
"""Add a word to the latest story."""
http_method_names = ['post']
def post(self, request):
"""Handles the POST request to add a word to the latest story."""
try:
latest_story = Story.objects.latest('date_created')
except Story.DoesNotExist:
messages.error(request, 'You need to create a story to add a word.')
return redirect('wordrelaygame:home')
# Check the author of the previous word is different to the current
# logged in user.
try:
latest_word_auth_id = (latest_story.words.order_by('-id')[0].
author.id)
except IndexError:
latest_word_auth_id = None
if latest_word_auth_id == self.request.user.id:
messages.error(request, 'You added the last word. ' +
'Someone else needs to add a word next.')
return redirect('wordrelaygame:home')
# If the form is valid, save the new word
form = WordForm(request.POST)
if form.is_valid():
word = form.save(commit=False)
word.story = latest_story
word.author = self.request.user
word.save()
            messages.success(request, 'Your word has been added. Thanks!')
return redirect('wordrelaygame:home')
return render(request, 'wordrelaygame/home.html',
{'form': form, 'latest_story': latest_story})
class AddStoryView(LoginRequiredMixin, View):
"""Create a new story.
    Only allow the creation of a new story if there are no stories yet or if the
    latest story contains more than 64 words.
"""
http_method_names = ['post']
def post(self, request):
"""Handles the POST request to add a new story."""
add_story_allowed = False
try:
latest_story = Story.objects.latest('date_created')
except Story.DoesNotExist:
add_story_allowed = True
else:
if latest_story.words.count() > 64:
add_story_allowed = True
if add_story_allowed:
new_story = Story()
new_story.save()
messages.success(
request,
'A new story has been created. Now add the first word.'
)
else:
messages.error(
request,
('Failed to create new story. Add more '
'words to the current story instead.')
)
return redirect('wordrelaygame:home')
|
from CreateTimeGraphs import *
def create_diag(dc):
"""Time spent on TeamSpeak per user"""
globalTime = timedelta()
for u in dc.users:
# Time in seconds
u.time = timedelta()
for con in u.connections:
# Increase connected time
u.time += con.duration()
us = sorted(dc.users, key = lambda u: -u.time)
for u in us:
globalTime += u.time
us = us[:maxUsers]
# Create users graph
with openTempfile("usertime") as f:
for u in us:
# Time in days
f.write('"{0}"\t{1}\n'.format(gnuplotEscape(u.name), u.time / timedelta(days = 1)))
# Create the diagram
diag = Diagram("usertime", "Time spent on TeamSpeak", 1920, 800)
diag.xlabel = "User"
diag.ylabel = "Connection time (in days)"
diag.legend = "right"
diag.appendText = """\
set timefmt "%H:%M:%S"
set format x "%H:%M:%S"
set yrange [0:]
set xtics rotate by -90
set style histogram clustered gap 4
set boxwidth 0.8 relative
"""
diag.plots.append("using 0:2:xticlabels(1) title 'Time' with boxes")
diag.subtitle = "Sum of all time spent on this server: {0}".format(timeToString(globalTime))
diag.render(dc.diagramTemplate)
dc.generalTab.addDiagram(diag)
|