import sys
if (sys.version_info >= (2,7)):
    import unittest
else:
    import unittest2 as unittest
from pydevtest_common import assertiCmd, assertiCmdFail, getiCmdOutput, create_local_testfile, get_hostname, RUN_IN_TOPOLOGY, get_irods_config_dir, get_irods_top_level_dir
import pydevtest_sessions as s
from resource_suite import ResourceSuite, ShortAndSuite
from test_chunkydevtest import ChunkyDevTest
import socket
import os
import commands
import shutil
import subprocess
import re
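# In a topology run the three storage resources are placed on separate resource
# servers; otherwise every vault is created on the local host.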
if RUN_IN_TOPOLOGY == True:
    hostname1 = "resource1.example.org"
    hostname2 = "resource2.example.org"
    hostname3 = "resource3.example.org"
else:
    hostname = socket.gethostname()
    hostname1 = hostname
    hostname2 = hostname
    hostname3 = hostname
class Test_Random_within_Replication_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
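# Resource tree assembled by the "setup" commands below:
#   demoResc (replication)
#     +-- rrResc (random)
#     |     +-- unixB1 (unix file system, hostname2)
#     |     +-- unixB2 (unix file system, hostname3)
#     +-- unixA (unix file system, hostname1)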
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc replication",
"iadmin mkresc rrResc random",
"iadmin mkresc unixA 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/unixAVault",
"iadmin mkresc unixB1 'unix file system' "+hostname2+":" + get_irods_top_level_dir() +"/unixB1Vault",
"iadmin mkresc unixB2 'unix file system' "+hostname3+":" + get_irods_top_level_dir() +"/unixB2Vault",
"iadmin addchildtoresc demoResc rrResc",
"iadmin addchildtoresc demoResc unixA",
"iadmin addchildtoresc rrResc unixB1",
"iadmin addchildtoresc rrResc unixB2",
],
"teardown" : [
"iadmin rmchildfromresc rrResc unixB2",
"iadmin rmchildfromresc rrResc unixB1",
"iadmin rmchildfromresc demoResc unixA",
"iadmin rmchildfromresc demoResc rrResc",
"iadmin rmresc unixB1",
"iadmin rmresc unixB2",
"iadmin rmresc unixA",
"iadmin rmresc rrResc",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/unixB2Vault",
"rm -rf " + get_irods_top_level_dir() + "/unixB1Vault",
"rm -rf " + get_irods_top_level_dir() + "/unixAVault",
],
}
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
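# Note: the --purgec tests below expect the trimmed copy (replica 0) to drop out
# of the catalog, leaving only replica 1 listed once the put/get/repl completes.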
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput --purgec "+filename) # put file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once - replica 1
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"iget -f --purgec "+filename) # get file and purge 'cached' replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed twice - 2 of 3
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # default resource repl 0 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," & "+filename]) # default resource repl 0 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # default resource repl 1 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," & "+filename]) # default resource 1 should have double clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate overtop third resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 4
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = create_local_testfile(filename)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate to default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, "LIST", "Creating") # create fourth resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl -R fourthresc "+filename) # replicate to fourth resource
assertiCmd(s.adminsession,"iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"irepl -U "+filename) # update last replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irepl -aU "+filename) # update all replicas
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
assertiCmd(s.adminsession,"iadmin rmresc fourthresc") # remove fourth resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irm_specific_replica(self):
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+self.testfile) # creates replica
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed twice
assertiCmd(s.adminsession,"irm -n 0 "+self.testfile) # remove original from cacheResc only
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["2 "+self.testresc,self.testfile]) # replica 2 should still be there
assertiCmdFail(s.adminsession,"ils -L "+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica 0 should be gone
trashpath = "/"+s.adminsession.getZoneName()+"/trash/home/"+s.adminsession.getUserName()+"/"+s.adminsession.sessionId
assertiCmdFail(s.adminsession,"ils -L "+trashpath+"/"+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica should not be in trash
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) #
assertiCmd(s.adminsession,"iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+filename]) # default resource repl 0 should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+filename]) # default resource repl 1 should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," "+filename]) # default resource repl 0 should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," "+filename]) # default resource repl 1 should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
class Test_RoundRobin_within_Replication_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
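# Same tree as Test_Random_within_Replication_Resource, with the random child
# replaced by a round robin resource:
#   demoResc (replication)
#     +-- rrResc (roundrobin)
#     |     +-- unixB1 (unix file system, hostname2)
#     |     +-- unixB2 (unix file system, hostname3)
#     +-- unixA (unix file system, hostname1)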
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc replication",
"iadmin mkresc rrResc roundrobin",
"iadmin mkresc unixA 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/unixAVault",
"iadmin mkresc unixB1 'unix file system' "+hostname2+":" + get_irods_top_level_dir() +"/unixB1Vault",
"iadmin mkresc unixB2 'unix file system' "+hostname3+":" + get_irods_top_level_dir() +"/unixB2Vault",
"iadmin addchildtoresc demoResc rrResc",
"iadmin addchildtoresc demoResc unixA",
"iadmin addchildtoresc rrResc unixB1",
"iadmin addchildtoresc rrResc unixB2",
],
"teardown" : [
"iadmin rmchildfromresc rrResc unixB2",
"iadmin rmchildfromresc rrResc unixB1",
"iadmin rmchildfromresc demoResc unixA",
"iadmin rmchildfromresc demoResc rrResc",
"iadmin rmresc unixB1",
"iadmin rmresc unixB2",
"iadmin rmresc unixA",
"iadmin rmresc rrResc",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/unixB2Vault",
"rm -rf " + get_irods_top_level_dir() + "/unixB1Vault",
"rm -rf " + get_irods_top_level_dir() + "/unixAVault",
],
}
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput --purgec "+filename) # put file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once - replica 1
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"iget -f --purgec "+filename) # get file and purge 'cached' replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed twice - 2 of 3
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # default resource repl 0 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," & "+filename]) # default resource repl 0 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # default resource repl 1 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," & "+filename]) # default resource 1 should have double clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate overtop third resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 4
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = create_local_testfile(filename)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate to default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, "LIST", "Creating") # create fourth resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl -R fourthresc "+filename) # replicate to fourth resource
assertiCmd(s.adminsession,"iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"irepl -U "+filename) # update last replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irepl -aU "+filename) # update all replicas
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
assertiCmd(s.adminsession,"iadmin rmresc fourthresc") # remove fourth resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irm_specific_replica(self):
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+self.testfile) # creates replica
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed twice
assertiCmd(s.adminsession,"irm -n 0 "+self.testfile) # remove original from cacheResc only
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["2 "+self.testresc,self.testfile]) # replica 2 should still be there
assertiCmdFail(s.adminsession,"ils -L "+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica 0 should be gone
trashpath = "/"+s.adminsession.getZoneName()+"/trash/home/"+s.adminsession.getUserName()+"/"+s.adminsession.sessionId
assertiCmdFail(s.adminsession,"ils -L "+trashpath+"/"+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica should not be in trash
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) #
assertiCmd(s.adminsession,"iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+filename]) # default resource repl 0 should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+filename]) # default resource repl 1 should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," "+filename]) # default resource repl 0 should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," "+filename]) # default resource repl 1 should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
class Test_UnixFileSystem_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
    hostname = socket.gethostname()
    my_test_resource = {
        "setup" : [
            "iadmin modresc demoResc name origResc",
            "iadmin mkresc demoResc 'unix file system' "+hostname+":" + get_irods_top_level_dir() + "/demoRescVault",
        ],
        "teardown" : [
            "iadmin rmresc demoResc",
            "iadmin modresc origResc name demoResc",
            "rm -rf " + get_irods_top_level_dir() + "/demoRescVault",
        ],
    }

    def setUp(self):
        ResourceSuite.__init__(self)
        s.twousers_up()
        self.run_resource_setup()

    def tearDown(self):
        self.run_resource_teardown()
        s.twousers_down()
class Test_Passthru_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
    hostname = socket.gethostname()
    my_test_resource = {
        "setup" : [
            "iadmin modresc demoResc name origResc",
            "iadmin mkresc demoResc passthru",
            "iadmin mkresc unix1Resc 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/unix1RescVault",
            "iadmin addchildtoresc demoResc unix1Resc",
        ],
        "teardown" : [
            "iadmin rmchildfromresc demoResc unix1Resc",
            "iadmin rmresc unix1Resc",
            "iadmin rmresc demoResc",
            "iadmin modresc origResc name demoResc",
            "rm -rf " + get_irods_top_level_dir() + "/unix1RescVault",
        ],
    }

    def setUp(self):
        ResourceSuite.__init__(self)
        s.twousers_up()
        self.run_resource_setup()

    def tearDown(self):
        self.run_resource_teardown()
        s.twousers_down()

    @unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
    def test_ireg_as_rodsuser_in_vault(self):
        pass
class Test_Deferred_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
    hostname = socket.gethostname()
    my_test_resource = {
        "setup" : [
            "iadmin modresc demoResc name origResc",
            "iadmin mkresc demoResc deferred",
            "iadmin mkresc unix1Resc 'unixfilesystem' "+hostname1+":" + get_irods_top_level_dir() + "/unix1RescVault",
            "iadmin addchildtoresc demoResc unix1Resc",
        ],
        "teardown" : [
            "iadmin rmchildfromresc demoResc unix1Resc",
            "iadmin rmresc unix1Resc",
            "iadmin rmresc demoResc",
            "iadmin modresc origResc name demoResc",
            "rm -rf " + get_irods_top_level_dir() + "/unix1RescVault",
        ],
    }

    def setUp(self):
        ResourceSuite.__init__(self)
        s.twousers_up()
        self.run_resource_setup()

    def tearDown(self):
        self.run_resource_teardown()
        s.twousers_down()

    @unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
    def test_ireg_as_rodsuser_in_vault(self):
        pass
class Test_Random_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
    hostname = socket.gethostname()
    my_test_resource = {
        "setup" : [
            "iadmin modresc demoResc name origResc",
            "iadmin mkresc demoResc random",
            "iadmin mkresc unix1Resc 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/unix1RescVault",
            "iadmin mkresc unix2Resc 'unix file system' "+hostname2+":" + get_irods_top_level_dir() + "/unix2RescVault",
            "iadmin mkresc unix3Resc 'unix file system' "+hostname3+":" + get_irods_top_level_dir() + "/unix3RescVault",
            "iadmin addchildtoresc demoResc unix1Resc",
            "iadmin addchildtoresc demoResc unix2Resc",
            "iadmin addchildtoresc demoResc unix3Resc",
        ],
        "teardown" : [
            "iadmin rmchildfromresc demoResc unix3Resc",
            "iadmin rmchildfromresc demoResc unix2Resc",
            "iadmin rmchildfromresc demoResc unix1Resc",
            "iadmin rmresc unix3Resc",
            "iadmin rmresc unix2Resc",
            "iadmin rmresc unix1Resc",
            "iadmin rmresc demoResc",
            "iadmin modresc origResc name demoResc",
            "rm -rf " + get_irods_top_level_dir() + "/unix1RescVault",
            "rm -rf " + get_irods_top_level_dir() + "/unix2RescVault",
            "rm -rf " + get_irods_top_level_dir() + "/unix3RescVault",
        ],
    }

    def setUp(self):
        ResourceSuite.__init__(self)
        s.twousers_up()
        self.run_resource_setup()

    def tearDown(self):
        self.run_resource_teardown()
        s.twousers_down()

    @unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
    def test_ireg_as_rodsuser_in_vault(self):
        pass
class Test_NonBlocking_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
    hostname = socket.gethostname()
    my_test_resource = {
        "setup" : [
            "iadmin modresc demoResc name origResc",
            "iadmin mkresc demoResc nonblocking "+hostname1+":" + get_irods_top_level_dir() + "/nbVault",
        ],
        "teardown" : [
            "iadmin rmresc demoResc",
            "iadmin modresc origResc name demoResc",
        ],
    }

    def setUp(self):
        ResourceSuite.__init__(self)
        s.twousers_up()
        self.run_resource_setup()

    def tearDown(self):
        self.run_resource_teardown()
        s.twousers_down()
class Test_Compound_with_MockArchive_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
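# Compound hierarchy assembled by the "setup" commands below:
#   demoResc (compound)
#     +-- cacheResc (unix file system, child context "cache")
#     +-- archiveResc (mockarchive via univMSSInterface.sh, child context "archive")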
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc compound",
"iadmin mkresc cacheResc 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/cacheRescVault",
"iadmin mkresc archiveResc mockarchive "+hostname1+":" + get_irods_top_level_dir() + "/archiveRescVault univMSSInterface.sh",
"iadmin addchildtoresc demoResc cacheResc cache",
"iadmin addchildtoresc demoResc archiveResc archive",
],
"teardown" : [
"iadmin rmchildfromresc demoResc archiveResc",
"iadmin rmchildfromresc demoResc cacheResc",
"iadmin rmresc archiveResc",
"iadmin rmresc cacheResc",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/archiveRescVault",
"rm -rf " + get_irods_top_level_dir() + "/cacheRescVault",
],
}
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
def test_irm_specific_replica(self):
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+self.testfile) # creates replica
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed twice
assertiCmd(s.adminsession,"irm -n 0 "+self.testfile) # remove original from cacheResc only
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["2 "+self.testresc,self.testfile]) # replica 2 should still be there
assertiCmdFail(s.adminsession,"ils -L "+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica 0 should be gone
trashpath = "/"+s.adminsession.getZoneName()+"/trash/home/"+s.adminsession.getUserName()+"/"+s.adminsession.sessionId
assertiCmdFail(s.adminsession,"ils -L "+trashpath+"/"+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica should not be in trash
@unittest.skip("--wlock has possible race condition due to Compound/Replication PDMO")
def test_local_iput_collision_with_wlock(self):
pass
@unittest.skip("NOTSURE / FIXME ... -K not supported, perhaps")
def test_local_iput_checksum(self):
pass
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) #
assertiCmd(s.adminsession,"iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+filename]) # default resource cache should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+filename]) # default resource archive should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," "+filename]) # default resource cache should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," "+filename]) # default resource archive should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
###################
# irepl
###################
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, "LIST", "Creating") # create fourth resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl -R fourthresc "+filename) # replicate to fourth resource
assertiCmd(s.adminsession,"iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"irepl -U "+filename) # update last replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irepl -aU "+filename) # update all replicas
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
assertiCmd(s.adminsession,"iadmin rmresc fourthresc") # remove fourth resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = create_local_testfile(filename)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate to default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate overtop third resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 4
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # default resource cache should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," & "+filename]) # default resource cache should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # default resource archive should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," & "+filename]) # default resource archive should have new double clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput --purgec "+filename) # put file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once - replica 1
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed only once
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"iget -f --purgec "+filename) # get file and purge 'cached' replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should not be listed
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed twice - 2 of 3
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed twice - 1 of 3
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
class Test_Compound_with_UniversalMSS_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
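# Same compound layout as Test_Compound_with_MockArchive_Resource, but the archive
# child is a univmss resource driven by univMSSInterface.sh:
#   demoResc (compound)
#     +-- cacheResc (unix file system, child context "cache")
#     +-- archiveResc (univmss, child context "archive")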
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc compound",
"iadmin mkresc cacheResc 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/cacheRescVault",
"iadmin mkresc archiveResc univmss "+hostname1+":" + get_irods_top_level_dir() + "/archiveRescVault univMSSInterface.sh",
"iadmin addchildtoresc demoResc cacheResc cache",
"iadmin addchildtoresc demoResc archiveResc archive",
],
"teardown" : [
"iadmin rmchildfromresc demoResc archiveResc",
"iadmin rmchildfromresc demoResc cacheResc",
"iadmin rmresc archiveResc",
"iadmin rmresc cacheResc",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/archiveRescVault",
"rm -rf " + get_irods_top_level_dir() + "/cacheRescVault",
],
}
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
def test_irm_specific_replica(self):
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+self.testfile) # creates replica
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed twice
assertiCmd(s.adminsession,"irm -n 0 "+self.testfile) # remove original from cacheResc only
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["2 "+self.testresc,self.testfile]) # replica 2 should still be there
assertiCmdFail(s.adminsession,"ils -L "+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica 0 should be gone
trashpath = "/"+s.adminsession.getZoneName()+"/trash/home/"+s.adminsession.getUserName()+"/"+s.adminsession.sessionId
assertiCmdFail(s.adminsession,"ils -L "+trashpath+"/"+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica should not be in trash
@unittest.skip("--wlock has possible race condition due to Compound/Replication PDMO")
def test_local_iput_collision_with_wlock(self):
pass
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) #
assertiCmd(s.adminsession,"iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+filename]) # default resource cache should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+filename]) # default resource archive should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," "+filename]) # default resource cache should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," "+filename]) # default resource archive should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
###################
# irepl
###################
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, "LIST", "Creating") # create fourth resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl -R fourthresc "+filename) # replicate to fourth resource
assertiCmd(s.adminsession,"iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"irepl -U "+filename) # update last replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irepl -aU "+filename) # update all replicas
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
assertiCmd(s.adminsession,"iadmin rmresc fourthresc") # remove fourth resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = create_local_testfile(filename)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate to default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate overtop third resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 4
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # default resource cache should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," & "+filename]) # default resource cache should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # default resource archive should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," & "+filename]) # default resource archive should have new double clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput --purgec "+filename) # put file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once - replica 1
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed only once
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"iget -f --purgec "+filename) # get file and purge 'cached' replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should not be listed
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed twice - 2 of 3
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed twice - 1 of 3
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
class Test_Compound_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc compound",
"iadmin mkresc cacheResc 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/cacheRescVault",
"iadmin mkresc archiveResc 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/archiveRescVault",
"iadmin addchildtoresc demoResc cacheResc cache",
"iadmin addchildtoresc demoResc archiveResc archive",
],
"teardown" : [
"iadmin rmchildfromresc demoResc archiveResc",
"iadmin rmchildfromresc demoResc cacheResc",
"iadmin rmresc archiveResc",
"iadmin rmresc cacheResc",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/archiveRescVault",
"rm -rf " + get_irods_top_level_dir() + "/cacheRescVault",
],
}
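# Hierarchy built by the setup commands above (sketch):
#
#   demoResc (compound)
#   +-- cacheResc   (cache)   unix file system on hostname1
#   +-- archiveResc (archive) unix file system on hostname1
#
# A plain iput therefore creates two replicas, a cache copy (replica 0) and
# an archive copy (replica 1), which is what the assertions below count on.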
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
def test_irm_specific_replica(self):
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+self.testfile) # creates replica
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed twice
assertiCmd(s.adminsession,"irm -n 0 "+self.testfile) # remove original from cacheResc only
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["2 "+self.testresc,self.testfile]) # replica 2 should still be there
assertiCmdFail(s.adminsession,"ils -L "+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica 0 should be gone
trashpath = "/"+s.adminsession.getZoneName()+"/trash/home/"+s.adminsession.getUserName()+"/"+s.adminsession.sessionId
assertiCmdFail(s.adminsession,"ils -L "+trashpath+"/"+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica should not be in trash
@unittest.skip("--wlock has possible race condition due to Compound/Replication PDMO")
def test_local_iput_collision_with_wlock(self):
pass
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
@unittest.skip("TEMPORARY")
def test_iget_prefer_from_archive__ticket_1660(self):
# define core.re filepath
corefile = get_irods_config_dir() + "/core.re"
backupcorefile = corefile+"--"+self._testMethodName
# new file to put and get
filename = "archivepolicyfile.txt"
filepath = create_local_testfile(filename)
# manipulate core.re (leave as 'when_necessary' - default)
# put the file
assertiCmd(s.adminsession,"iput "+filename) # put file
# manually update the replica in archive vault
output = getiCmdOutput(s.adminsession,"ils -L "+filename)
archivereplicaphypath = output[0].split()[-1] # split into tokens, get the last one
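# NOTE: this parsing assumes the archive replica is listed last by 'ils -L'
# and that its physical path is the final whitespace-separated token of the
# output; if the listing layout changes, this index needs updating.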
myfile = open(archivereplicaphypath, "w")
myfile.write('MANUALLY UPDATED ON ARCHIVE\n')
myfile.close()
# get file
retrievedfile = "retrieved.txt"
os.system("rm -f %s" % retrievedfile)
assertiCmd(s.adminsession,"iget -f %s %s" % (filename, retrievedfile)) # get file from cache
# confirm retrieved file is same as original
assert 0 == os.system("diff %s %s" % (filepath, retrievedfile))
# manipulate the core.re to add the new policy
shutil.copy(corefile,backupcorefile)
myfile = open(corefile, "a")
myfile.write('pep_resource_resolve_hierarchy_pre(*OUT){*OUT="compound_resource_cache_refresh_policy=always";}\n')
myfile.close()
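# The appended rule pins compound_resource_cache_refresh_policy to "always",
# so the compound resource is expected to refresh its cache replica from the
# archive on every access; the iget below should therefore return the
# manually edited archive content instead of the stale cache copy.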
# restart the server to reread the new core.re
os.system(get_irods_top_level_dir() + "/iRODS/irodsctl stop")
os.system(get_irods_top_level_dir() + "/tests/zombiereaper.sh")
os.system(get_irods_top_level_dir() + "/iRODS/irodsctl start")
# manually update the replica in archive vault
output = getiCmdOutput(s.adminsession,"ils -L "+filename)
archivereplicaphypath = output[0].split()[-1] # split into tokens, get the last one
myfile = open(archivereplicaphypath, "w")
myfile.write('MANUALLY UPDATED ON ARCHIVE **AGAIN**\n')
myfile.close()
# get the file
assertiCmd(s.adminsession,"iget -f %s %s" % (filename, retrievedfile)) # get file from archive
# confirm this is the new archive file
matchfound = False
for line in open(retrievedfile):
if "**AGAIN**" in line:
matchfound = True
assert matchfound
# restore the original core.re
shutil.copy(backupcorefile,corefile)
os.remove(backupcorefile)
# local cleanup
os.remove(filepath)
os.remove(retrievedfile)
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # debugging
assertiCmd(s.adminsession,"iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+filename]) # default resource cache should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+filename]) # default resource archive should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," "+filename]) # default resource cache should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," "+filename]) # default resource archive should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
###################
# irepl
###################
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, "LIST", "Creating") # create fourth resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl -R fourthresc "+filename) # replicate to fourth resource
assertiCmd(s.adminsession,"iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"irepl -U "+filename) # update last replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irepl -aU "+filename) # update all replicas
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
assertiCmd(s.adminsession,"iadmin rmresc fourthresc") # remove third resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = create_local_testfile(filename)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate to default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate overtop third resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 4
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # default resource cache should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," & "+filename]) # default resource cache should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # default resource archive should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," & "+filename]) # default resource archive should have new double clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput --purgec "+filename) # put file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once - replica 1
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed only once
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # should be listed
assertiCmd(s.adminsession,"iget -f --purgec "+filename) # get file and purge 'cached' replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed once
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should not be listed
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed twice - 2 of 3
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed twice - 1 of 3
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
class Test_Replication_within_Replication_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc replication",
"iadmin mkresc replResc replication",
"iadmin mkresc unixA 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/unixAVault",
"iadmin mkresc unixB1 'unix file system' "+hostname2+":" + get_irods_top_level_dir() + "/unixB1Vault",
"iadmin mkresc unixB2 'unix file system' "+hostname3+":" + get_irods_top_level_dir() + "/unixB2Vault",
"iadmin addchildtoresc demoResc replResc",
"iadmin addchildtoresc demoResc unixA",
"iadmin addchildtoresc replResc unixB1",
"iadmin addchildtoresc replResc unixB2",
],
"teardown" : [
"iadmin rmchildfromresc replResc unixB2",
"iadmin rmchildfromresc replResc unixB1",
"iadmin rmchildfromresc demoResc unixA",
"iadmin rmchildfromresc demoResc replResc",
"iadmin rmresc unixB1",
"iadmin rmresc unixB2",
"iadmin rmresc unixA",
"iadmin rmresc replResc",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/unixB2Vault",
"rm -rf " + get_irods_top_level_dir() + "/unixB1Vault",
"rm -rf " + get_irods_top_level_dir() + "/unixAVault",
],
}
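# Hierarchy built by the setup commands above (sketch):
#
#   demoResc (replication)
#   +-- replResc (replication)
#   |   +-- unixB1 (unix file system) on hostname2
#   |   +-- unixB2 (unix file system) on hostname3
#   +-- unixA (unix file system) on hostname1
#
# A plain iput therefore creates three replicas (0, 1, 2), which is why the
# purgec tests below still expect replicas 1 and 2 after the cached copy is
# purged.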
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"iget -f --purgec "+filename) # get file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # replica 0 should be trimmed
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # replica 1 should be listed
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # replica 2 should be listed
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput --purgec "+filename) # put file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # replica 0 should be trimmed
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # replica 1 should be listed
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # replica 2 should be listed
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # replica 0 should be trimmed
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # replica 1 should be listed
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # replica 2 should be listed
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 ",filename]) # replica 2 should be listed
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
@unittest.skip("--wlock has possible race condition due to Compound/Replication PDMO")
def test_local_iput_collision_with_wlock(self):
pass
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # default resource should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," & "+filename]) # default resource should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # default resource should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," & "+filename]) # default resource should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # default resource should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" "," & "+filename]) # default resource should have new double clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 ",filename]) # should not have a replica 4
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = create_local_testfile(filename)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate to default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 4
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 4
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate overtop third resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, "LIST", "Creating") # create fourth resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl -R fourthresc "+filename) # replicate to fourth resource
assertiCmd(s.adminsession,"iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"irepl -U "+filename) # update last replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irepl -aU "+filename) # update all replicas
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
assertiCmd(s.adminsession,"iadmin rmresc fourthresc") # remove third resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irm_specific_replica(self):
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+self.testfile) # creates replica
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed twice
assertiCmd(s.adminsession,"irm -n 0 "+self.testfile) # remove original from grid
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["1 ",self.testfile]) # replica 1 should be there
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["2 ",self.testfile]) # replica 2 should be there
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["3 "+self.testresc,self.testfile]) # replica 3 should be there
assertiCmdFail(s.adminsession,"ils -L "+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica 0 should be gone
trashpath = "/"+s.adminsession.getZoneName()+"/trash/home/"+s.adminsession.getUserName()+"/"+s.adminsession.sessionId
assertiCmdFail(s.adminsession,"ils -L "+trashpath+"/"+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica should not be in trash
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename)
assertiCmd(s.adminsession,"iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+filename]) # default resource should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," "+filename]) # default resource should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+filename]) # default resource should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," "+filename]) # default resource should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+filename]) # default resource should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" "," "+filename]) # default resource should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
class Test_Replication_to_two_Compound_Resources(unittest.TestCase, ResourceSuite, ChunkyDevTest):
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc replication",
"iadmin mkresc compResc1 compound",
"iadmin mkresc compResc2 compound",
"iadmin mkresc cacheResc1 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/cacheResc1Vault",
"iadmin mkresc archiveResc1 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/archiveResc1Vault",
"iadmin mkresc cacheResc2 'unix file system' "+hostname2+":" + get_irods_top_level_dir() + "/cacheResc2Vault",
"iadmin mkresc archiveResc2 'unix file system' "+hostname2+":" + get_irods_top_level_dir() + "/archiveResc2Vault",
"iadmin addchildtoresc demoResc compResc1",
"iadmin addchildtoresc demoResc compResc2",
"iadmin addchildtoresc compResc1 cacheResc1 cache",
"iadmin addchildtoresc compResc1 archiveResc1 archive",
"iadmin addchildtoresc compResc2 cacheResc2 cache",
"iadmin addchildtoresc compResc2 archiveResc2 archive",
],
"teardown" : [
"iadmin rmchildfromresc compResc2 archiveResc2",
"iadmin rmchildfromresc compResc2 cacheResc2",
"iadmin rmchildfromresc compResc1 archiveResc1",
"iadmin rmchildfromresc compResc1 cacheResc1",
"iadmin rmchildfromresc demoResc compResc2",
"iadmin rmchildfromresc demoResc compResc1",
"iadmin rmresc archiveResc2",
"iadmin rmresc cacheResc2",
"iadmin rmresc archiveResc1",
"iadmin rmresc cacheResc1",
"iadmin rmresc compResc2",
"iadmin rmresc compResc1",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/archiveResc1Vault",
"rm -rf " + get_irods_top_level_dir() + "/cacheResc1Vault",
"rm -rf " + get_irods_top_level_dir() + "/archiveResc2Vault",
"rm -rf " + get_irods_top_level_dir() + "/cacheResc2Vault",
],
}
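# Hierarchy built by the setup commands above (sketch):
#
#   demoResc (replication)
#   +-- compResc1 (compound)
#   |   +-- cacheResc1   (cache)   on hostname1
#   |   +-- archiveResc1 (archive) on hostname1
#   +-- compResc2 (compound)
#       +-- cacheResc2   (cache)   on hostname2
#       +-- archiveResc2 (archive) on hostname2
#
# A plain iput therefore creates four replicas (cache 1, archive 1, cache 2,
# archive 2), referenced below as replicas 0 through 3.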
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
def test_irm_specific_replica(self):
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+self.testfile) # creates replica
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed twice
assertiCmd(s.adminsession,"irm -n 0 "+self.testfile) # remove original from cacheResc only
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["4 "+self.testresc,self.testfile]) # replica 2 should still be there
assertiCmdFail(s.adminsession,"ils -L "+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica 0 should be gone
trashpath = "/"+s.adminsession.getZoneName()+"/trash/home/"+s.adminsession.getUserName()+"/"+s.adminsession.sessionId
assertiCmdFail(s.adminsession,"ils -L "+trashpath+"/"+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica should not be in trash
@unittest.skip("--wlock has possible race condition due to Compound/Replication PDMO")
def test_local_iput_collision_with_wlock(self):
pass
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
@unittest.skipIf( RUN_IN_TOPOLOGY==True, "Skip for Topology Testing")
def test_iget_prefer_from_archive__ticket_1660(self):
# define core.re filepath
corefile = get_irods_config_dir() + "/core.re"
backupcorefile = corefile+"--"+self._testMethodName
# new file to put and get
filename = "archivepolicyfile.txt"
filepath = create_local_testfile(filename)
# manipulate core.re (leave as 'when_necessary' - default)
# put the file
assertiCmd(s.adminsession,"iput "+filename) # put file
# manually update the replicas in archive vaults
output = getiCmdOutput(s.adminsession,"ils -L "+filename)
print output[0]
archive1replicaphypath = output[0].split()[-21] # split into tokens, get the 21st from the end
archive2replicaphypath = output[0].split()[-1] # split into tokens, get the last one
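# NOTE: these fixed offsets are tied to the 'ils -L' output layout; the last
# token is assumed to be archive 2's physical path and the 21st token from
# the end archive 1's. If the listing format changes, both indices break.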
print archive1replicaphypath
print archive2replicaphypath
myfile = open(archive1replicaphypath, "w")
myfile.write('MANUALLY UPDATED ON ARCHIVE 1\n')
myfile.close()
myfile = open(archive2replicaphypath, "w")
myfile.write('MANUALLY UPDATED ON ARCHIVE 2\n')
myfile.close()
# get file
retrievedfile = "retrieved.txt"
os.system("rm -f %s" % retrievedfile)
assertiCmd(s.adminsession,"iget -f %s %s" % (filename, retrievedfile)) # get file from cache
# confirm retrieved file is same as original
assert 0 == os.system("diff %s %s" % (filepath, retrievedfile))
print "original file diff confirmed"
# manipulate the core.re to add the new policy
shutil.copy(corefile,backupcorefile)
myfile = open(corefile, "a")
myfile.write('pep_resource_resolve_hierarchy_pre(*OUT){*OUT="compound_resource_cache_refresh_policy=always";}\n')
myfile.close()
# restart the server to reread the new core.re
os.system(get_irods_top_level_dir() + "/iRODS/irodsctl restart")
# manually update the replicas in archive vaults
output = getiCmdOutput(s.adminsession,"ils -L "+filename)
archivereplica1phypath = output[0].split()[-21] # split into tokens, get the 21st from the end
archivereplica2phypath = output[0].split()[-1] # split into tokens, get the last one
print archivereplica1phypath
print archivereplica2phypath
myfile = open(archivereplica1phypath, "w")
myfile.write('MANUALLY UPDATED ON ARCHIVE 1 **AGAIN**\n')
myfile.close()
myfile = open(archivereplica2phypath, "w")
myfile.write('MANUALLY UPDATED ON ARCHIVE 2 **AGAIN**\n')
myfile.close()
# confirm the new content is on disk
for line in open(archivereplica1phypath):
print line
for line in open(archivereplica2phypath):
print line
# confirm the core file has new policy
print "----- confirm core has new policy ----"
for line in open(corefile):
if "pep_" in line:
print line
else:
print ".",
print "----- confirmation done ----"
# get the file
assertiCmd(s.adminsession,"iget -V -f %s %s" % (filename, retrievedfile), "LIST", "NOTICE") # get file from archive
# confirm this is the new archive file
matchfound = False
for line in open(retrievedfile):
print line
if "AGAIN" in line:
matchfound = True
assert matchfound
# restore the original core.re
shutil.copy(backupcorefile,corefile)
os.remove(backupcorefile)
# local cleanup
os.remove(filepath)
# os.remove(retrievedfile)
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # debugging
assertiCmd(s.adminsession,"iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+filename]) # default resource cache 1 should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+filename]) # default resource archive 1 should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+filename]) # default resource cache 2 should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," "+filename]) # default resource archive 2 should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," "+filename]) # default resource cache 1 should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," "+filename]) # default resource archive 1 should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" "," "+filename]) # default resource cache 2 should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," "+doublesize+" "," "+filename]) # default resource archive 2 should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
###################
# irepl
###################
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, "LIST", "Creating") # create fourth resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl -R fourthresc "+filename) # replicate to fourth resource
assertiCmd(s.adminsession,"iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 6 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"irepl -U "+filename) # update last replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 6 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irepl -aU "+filename) # update all replicas
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 6 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
assertiCmd(s.adminsession,"iadmin rmresc fourthresc") # remove third resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = create_local_testfile(filename)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate to default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate overtop third resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 6 "," & "+filename]) # should not have a replica 6
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 7 "," & "+filename]) # should not have a replica 7
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # default resource cache 1 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," & "+filename]) # default resource cache 1 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # default resource archive 1 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," & "+filename]) # default resource archive 1 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # default resource cache 2 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" "," & "+filename]) # default resource cache 2 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # default resource archive 2 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," "+doublesize+" "," & "+filename]) # default resource archive 2 should have new double clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput --purgec "+filename) # put file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed 3x - replica 1
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed 3x - replica 2
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 ",filename]) # should be listed 3x - replica 3
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 ",filename]) # should not have any extra replicas
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"iget -f --purgec "+filename) # get file and purge 'cached' replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed 3x - replica 1
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed 3x - replica 2
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 ",filename]) # should be listed 3x - replica 3
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 ",filename]) # should not have any extra replicas
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed 4x - replica 1
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed 4x - replica 2
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 ",filename]) # should be listed 4x - replica 3
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 ",filename]) # should be listed 4x - replica 4
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 ",filename]) # should not have any extra replicas
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
class Test_Replication_to_two_Compound_Resources_with_Prefer_Archive(unittest.TestCase, ResourceSuite, ChunkyDevTest):
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc replication",
"iadmin mkresc compResc1 compound",
"iadmin mkresc compResc2 compound",
"iadmin mkresc cacheResc1 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/cacheResc1Vault",
"iadmin mkresc archiveResc1 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/archiveResc1Vault",
"iadmin mkresc cacheResc2 'unix file system' "+hostname2+":" + get_irods_top_level_dir() + "/cacheResc2Vault",
"iadmin mkresc archiveResc2 'unix file system' "+hostname2+":" + get_irods_top_level_dir() + "/archiveResc2Vault",
"iadmin addchildtoresc demoResc compResc1",
"iadmin addchildtoresc demoResc compResc2",
"iadmin addchildtoresc compResc1 cacheResc1 cache",
"iadmin addchildtoresc compResc1 archiveResc1 archive",
"iadmin addchildtoresc compResc2 cacheResc2 cache",
"iadmin addchildtoresc compResc2 archiveResc2 archive",
],
"teardown" : [
"iadmin rmchildfromresc compResc2 archiveResc2",
"iadmin rmchildfromresc compResc2 cacheResc2",
"iadmin rmchildfromresc compResc1 archiveResc1",
"iadmin rmchildfromresc compResc1 cacheResc1",
"iadmin rmchildfromresc demoResc compResc2",
"iadmin rmchildfromresc demoResc compResc1",
"iadmin rmresc archiveResc2",
"iadmin rmresc cacheResc2",
"iadmin rmresc archiveResc1",
"iadmin rmresc cacheResc1",
"iadmin rmresc compResc2",
"iadmin rmresc compResc1",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/archiveResc1Vault",
"rm -rf " + get_irods_top_level_dir() + "/cacheResc1Vault",
"rm -rf " + get_irods_top_level_dir() + "/archiveResc2Vault",
"rm -rf " + get_irods_top_level_dir() + "/cacheResc2Vault",
],
}
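# Resource hierarchy built by the setup above (illustrative comment derived from the iadmin commands, not part of the original test):
#   demoResc (replication)
#     compResc1 (compound) -> cacheResc1 (cache), archiveResc1 (archive)
#     compResc2 (compound) -> cacheResc2 (cache), archiveResc2 (archive)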
def setUp(self):
ResourceSuite.__init__(self)
# back up core file
corefile = get_irods_config_dir() + "/core.re"
backupcorefile = corefile+"--"+self._testMethodName
shutil.copy(corefile,backupcorefile)
# manipulate the core.re to add the new policy
myfile = open(corefile, "a")
myfile.write('pep_resource_resolve_hierarchy_pre(*OUT){*OUT="compound_resource_cache_refresh_policy=always";}\n')
myfile.close()
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
# restore the original core.re
corefile = get_irods_config_dir() + "/core.re"
backupcorefile = corefile+"--"+self._testMethodName
shutil.copy(backupcorefile,corefile)
os.remove(backupcorefile)
def test_irm_specific_replica(self):
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+self.testfile) # creates replica
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",self.testfile) # should be listed twice
assertiCmd(s.adminsession,"irm -n 0 "+self.testfile) # remove original from cacheResc only
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",["4 "+self.testresc,self.testfile]) # replica 2 should still be there
assertiCmdFail(s.adminsession,"ils -L "+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica 0 should be gone
trashpath = "/"+s.adminsession.getZoneName()+"/trash/home/"+s.adminsession.getUserName()+"/"+s.adminsession.sessionId
assertiCmdFail(s.adminsession,"ils -L "+trashpath+"/"+self.testfile,"LIST",["0 "+s.adminsession.getDefResource(),self.testfile]) # replica should not be in trash
@unittest.skip("--wlock has possible race condition due to Compound/Replication PDMO")
def test_local_iput_collision_with_wlock(self):
pass
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
@unittest.skip("this is tested elsewhere")
def test_iget_prefer_from_archive__ticket_1660(self):
pass
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # debugging
assertiCmd(s.adminsession,"iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+filename]) # default resource cache 1 should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+filename]) # default resource archive 1 should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+filename]) # default resource cache 2 should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," "+filename]) # default resource archive 2 should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," "+filename]) # default resource cache 1 should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," "+filename]) # default resource archive 1 should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" "," "+filename]) # default resource cache 2 should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," "+doublesize+" "," "+filename]) # default resource archive 2 should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
###################
# irepl
###################
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, "LIST", "Creating") # create fourth resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl -R fourthresc "+filename) # replicate to fourth resource
assertiCmd(s.adminsession,"iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 6 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"irepl -U "+filename) # update last replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 6 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irepl -aU "+filename) # update all replicas
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 6 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
assertiCmd(s.adminsession,"iadmin rmresc fourthresc") # remove third resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = create_local_testfile(filename)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate to default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate overtop third resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 6 "," & "+filename]) # should not have a replica 6
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 7 "," & "+filename]) # should not have a replica 7
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # default resource cache 1 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," & "+filename]) # default resource cache 1 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # default resource archive 1 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," & "+filename]) # default resource archive 1 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # default resource cache 2 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" "," & "+filename]) # default resource cache 2 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # default resource archive 2 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," "+doublesize+" "," & "+filename]) # default resource archive 2 should have new double clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 5
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput --purgec "+filename) # put file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed 3x - replica 1
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed 3x - replica 2
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 ",filename]) # should be listed 3x - replica 3
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 ",filename]) # should not have any extra replicas
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"iget -f --purgec "+filename) # get file and purge 'cached' replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed 3x - replica 1
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed 3x - replica 2
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 ",filename]) # should be listed 3x - replica 3
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 ",filename]) # should not have any extra replicas
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed 4x - replica 1
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed 4x - replica 2
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 ",filename]) # should be listed 4x - replica 3
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 ",filename]) # should be listed 4x - replica 4
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 ",filename]) # should not have any extra replicas
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
class Test_RoundRobin_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc roundrobin",
"iadmin mkresc unix1Resc 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/unix1RescVault",
"iadmin mkresc unix2Resc 'unix file system' "+hostname2+":" + get_irods_top_level_dir() + "/unix2RescVault",
"iadmin addchildtoresc demoResc unix1Resc",
"iadmin addchildtoresc demoResc unix2Resc",
],
"teardown" : [
"iadmin rmchildfromresc demoResc unix2Resc",
"iadmin rmchildfromresc demoResc unix1Resc",
"iadmin rmresc unix2Resc",
"iadmin rmresc unix1Resc",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/unix1RescVault",
"rm -rf " + get_irods_top_level_dir() + "/unix2RescVault",
],
}
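# Resource hierarchy built by the setup above (illustrative comment derived from the iadmin commands, not part of the original test):
#   demoResc (roundrobin)
#     unix1Resc, unix2Resc (unix file systems)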
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
def test_round_robin_mechanism(self):
# local setup
filename = "rrfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
assertiCmd( s.sessions[1],"iput "+filename+" file0.txt" );
assertiCmd( s.sessions[1],"iput "+filename+" file1.txt" );
assertiCmd( s.sessions[1],"ils -l", "LIST", "unix1Resc" );
assertiCmd( s.sessions[1],"ils -l", "LIST", "unix2Resc" );
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
class Test_Replication_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc replication",
"iadmin mkresc unix1Resc 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/unix1RescVault",
"iadmin mkresc unix2Resc 'unix file system' "+hostname2+":" + get_irods_top_level_dir() + "/unix2RescVault",
"iadmin mkresc unix3Resc 'unix file system' "+hostname3+":" + get_irods_top_level_dir() + "/unix3RescVault",
"iadmin addchildtoresc demoResc unix1Resc",
"iadmin addchildtoresc demoResc unix2Resc",
"iadmin addchildtoresc demoResc unix3Resc",
],
"teardown" : [
"iadmin rmchildfromresc demoResc unix3Resc",
"iadmin rmchildfromresc demoResc unix2Resc",
"iadmin rmchildfromresc demoResc unix1Resc",
"iadmin rmresc unix3Resc",
"iadmin rmresc unix2Resc",
"iadmin rmresc unix1Resc",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/unix1RescVault",
"rm -rf " + get_irods_top_level_dir() + "/unix2RescVault",
"rm -rf " + get_irods_top_level_dir() + "/unix3RescVault",
],
}
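# Resource hierarchy built by the setup above (illustrative comment derived from the iadmin commands, not part of the original test):
#   demoResc (replication)
#     unix1Resc, unix2Resc, unix3Resc (unix file systems)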
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
def test_irm_specific_replica(self):
# not allowed here - this is a managed replication resource
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",[" 0 "," & "+self.testfile]) # should be listed 3x
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",[" 1 "," & "+self.testfile]) # should be listed 3x
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",[" 2 "," & "+self.testfile]) # should be listed 3x
assertiCmd(s.adminsession,"irm -n 1 "+self.testfile) # try to remove one of the managed replicas
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",[" 0 "," & "+self.testfile]) # should be listed 2x
assertiCmdFail(s.adminsession,"ils -L "+self.testfile,"LIST",[" 1 "," & "+self.testfile]) # should not be listed
assertiCmd(s.adminsession,"ils -L "+self.testfile,"LIST",[" 2 "," & "+self.testfile]) # should be listed 2x
@unittest.skip("--wlock has possible race condition due to Compound/Replication PDMO")
def test_local_iput_collision_with_wlock(self):
pass
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename)
assertiCmd(s.adminsession,"iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+filename]) # default resource should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+filename]) # default resource should have dirty copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+filename]) # default resource should have dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," "+filename]) # default resource should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," "+filename]) # default resource should not have doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" "," "+filename]) # default resource should not have doublesize file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, "LIST", "Creating") # create fourth resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl -R fourthresc "+filename) # replicate to fourth resource
assertiCmd(s.adminsession,"iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession, "ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession,"irepl -U "+filename) # update last replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a dirty copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession, "ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a dirty copy
assertiCmd(s.adminsession, "ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irepl -aU "+filename) # update all replicas
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should have a clean copy
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
assertiCmd(s.adminsession,"iadmin rmresc fourthresc") # remove third resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = create_local_testfile(filename)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate to default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 3
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = create_local_testfile(filename)
hostname = get_hostname()
# assertions
assertiCmd(s.adminsession,"iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, "LIST", "Creating") # create third resource
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate to third resource
assertiCmd(s.adminsession,"irepl "+filename) # replicate overtop default resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R thirdresc "+filename) # replicate overtop third resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 5 "," & "+filename]) # should not have a replica 4
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 6 "," & "+filename]) # should not have a replica 5
assertiCmd(s.adminsession,"irm -f "+filename) # cleanup file
assertiCmd(s.adminsession,"iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
assertiCmd(s.adminsession,"ils -L "+filename,"ERROR","does not exist") # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate to test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",filename) # for debugging
assertiCmd(s.adminsession,"iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," & "+filename]) # default resource 1 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 0 "," "+doublesize+" "," & "+filename]) # default resource 1 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," & "+filename]) # default resource 2 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 "," "+doublesize+" "," & "+filename]) # default resource 2 should have new double clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," & "+filename]) # default resource 3 should have clean copy
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 "," "+doublesize+" "," & "+filename]) # default resource 3 should have new double clean copy
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 3 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 4 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput --purgec "+filename) # put file, but trim 'cache' copy (purgec) (backwards compatibility)
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed first replica)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed twice - replica 2 of 3
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed twice - replica 3 of 3
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"iget -f --purgec "+filename) # get file and purge 'cached' replica
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed twice - 2 of 3
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed twice - 2 of 3
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",filename) # should not be listed
assertiCmd(s.adminsession,"iput "+filename) # put file
assertiCmd(s.adminsession,"irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
assertiCmdFail(s.adminsession,"ils -L "+filename,"LIST",[" 0 ",filename]) # should not be listed (trimmed)
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 1 ",filename]) # should be listed 3x - 1 of 3
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 2 ",filename]) # should be listed 3x - 3 of 3
assertiCmd(s.adminsession,"ils -L "+filename,"LIST",[" 3 ",filename]) # should be listed 3x - 3 of 3
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
class Test_MultiLayered_Resource(unittest.TestCase, ResourceSuite, ChunkyDevTest):
hostname = socket.gethostname()
my_test_resource = {
"setup" : [
"iadmin modresc demoResc name origResc",
"iadmin mkresc demoResc passthru",
"iadmin mkresc pass2Resc passthru",
"iadmin mkresc rrResc roundrobin",
"iadmin mkresc unix1Resc 'unix file system' "+hostname1+":" + get_irods_top_level_dir() + "/unix1RescVault",
"iadmin mkresc unix2Resc 'unix file system' "+hostname2+":" + get_irods_top_level_dir() + "/unix2RescVault",
"iadmin mkresc unix3Resc 'unix file system' "+hostname3+":" + get_irods_top_level_dir() + "/unix3RescVault",
"iadmin addchildtoresc demoResc pass2Resc",
"iadmin addchildtoresc pass2Resc rrResc",
"iadmin addchildtoresc rrResc unix1Resc",
"iadmin addchildtoresc rrResc unix2Resc",
"iadmin addchildtoresc rrResc unix3Resc",
],
"teardown" : [
"iadmin rmchildfromresc rrResc unix3Resc",
"iadmin rmchildfromresc rrResc unix2Resc",
"iadmin rmchildfromresc rrResc unix1Resc",
"iadmin rmchildfromresc pass2Resc rrResc",
"iadmin rmchildfromresc demoResc pass2Resc",
"iadmin rmresc unix3Resc",
"iadmin rmresc unix2Resc",
"iadmin rmresc unix1Resc",
"iadmin rmresc rrResc",
"iadmin rmresc pass2Resc",
"iadmin rmresc demoResc",
"iadmin modresc origResc name demoResc",
"rm -rf " + get_irods_top_level_dir() + "/unix1RescVault",
"rm -rf " + get_irods_top_level_dir() + "/unix2RescVault",
"rm -rf " + get_irods_top_level_dir() + "/unix3RescVault",
],
}
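# Resource hierarchy built by the setup above (illustrative comment derived from the iadmin commands, not part of the original test):
#   demoResc (passthru)
#     pass2Resc (passthru)
#       rrResc (roundrobin)
#         unix1Resc, unix2Resc, unix3Resc (unix file systems)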
def setUp(self):
ResourceSuite.__init__(self)
s.twousers_up()
self.run_resource_setup()
def tearDown(self):
self.run_resource_teardown()
s.twousers_down()
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
|
from datamart.joiners.join_feature.feature_classes import *
from functools import reduce
import numpy as np
import pandas as pd
class FeatureFactory:
subclasses = {
(DistributeType.CATEGORICAL, DataType.NUMBER): CategoricalNumberFeature,
(DistributeType.CATEGORICAL, DataType.STRING): CategoricalStringFeature,
(DistributeType.TOKEN_CATEGORICAL, DataType.STRING): CategoricalTokenFeature,
(DistributeType.NON_CATEGORICAL, DataType.NUMBER): NonCategoricalNumberFeature,
(DistributeType.NON_CATEGORICAL, DataType.STRING): NonCategoricalStringFeature
}
@classmethod
def create(cls, df: pd.DataFrame, indexes, df_metadata):
"""
TODO: dynamically generate subclass of FeatureBase, by profiled info, datatype etc.
"""
# set default values:
metadata = cls._get_feature_metadata(df_metadata, indexes) or {}
data_type = None
distribute_type = DistributeType.NON_CATEGORICAL
if len(indexes) > 1:
distribute_type = DistributeType.TOKEN_CATEGORICAL
if cls._try_pd_to_datetime(df, indexes):
data_type = DataType.DATETIME
else:
# single column, not datetime
idx = indexes[0]
profiles = metadata.get('dsbox_profiled', {})
if len(df.iloc[:, idx]) // len(df.iloc[:, idx].unique()) >= 1.5:
distribute_type = DistributeType.CATEGORICAL
elif profiles:
most_common_tokens = profiles.get('most_common_tokens')
if most_common_tokens and cls._get_greater_than(most_common_tokens) >= len(most_common_tokens)//2:
distribute_type = DistributeType.TOKEN_CATEGORICAL
dtype = df.iloc[:, idx].dtype
if dtype == np.int64 or dtype == np.float64:
data_type = DataType.NUMBER
else:
semantic_types = metadata.get('semantic_type')
profiles = metadata.get('dsbox_profiled', {})
data_type = cls._get_data_type_by_semantic_type(semantic_types) \
or cls._get_data_type_by_profile(profiles)
if not data_type and cls._try_pd_to_datetime(df, indexes):
data_type = DataType.DATETIME
return cls.get_instance(df, indexes, metadata, data_type or DataType.STRING, distribute_type)
@classmethod
def get_instance(cls, df, indices, metadata, data_type, distribute_type):
constructor = cls.get_constructor(data_type, distribute_type)
return constructor(df, indices, metadata, distribute_type, data_type)
@classmethod
def get_constructor(cls, data_type, distribute_type=None):
if data_type == DataType.DATETIME:
return DatetimeFeature
return cls.subclasses.get((distribute_type, data_type))
@staticmethod
def _get_feature_metadata(metadata, indices):
if metadata.get('variables') and indices and indices[0] < len(metadata.get('variables')):
return metadata['variables'][indices[0]]
@staticmethod
def _get_avg(list_of_dict, key='count'):
if len(list_of_dict):
return sum([_.get(key) for _ in list_of_dict])/len(list_of_dict)
@staticmethod
def _get_greater_than(list_of_dict, key='count', threshold=2, inclusive=True):
if inclusive:
return reduce(lambda x, y: x + 1 if float(y[key]) >= threshold else x, list_of_dict, 0)
return reduce(lambda x, y: x + 1 if float(y[key]) > threshold else x, list_of_dict, 0)
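# Illustrative example, not part of the original module: with the defaults above,
# _get_greater_than([{'count': 3}, {'count': 1}, {'count': 2}]) counts the entries
# whose 'count' is >= 2 and therefore returns 2.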
@staticmethod
def _get_data_type_by_semantic_type(semantic_types: list):
# TODO: it would be better if we had a closed set of known semantic_type values
# and mapped them to either STRING, NUMBER or DATETIME
if semantic_types and len(semantic_types):
unique_types = set(t.rsplit('/', 1)[-1].lower() for t in semantic_types)
if 'time' in unique_types or 'date' in unique_types or 'datetime' in unique_types:
return DataType.DATETIME
if 'float' in unique_types or 'int' in unique_types or 'number' in unique_types:
return DataType.NUMBER
@staticmethod
def _get_data_type_by_profile(profiles):
numeric_ratio = profiles.get('ratio_of_numeric_values')
if numeric_ratio and numeric_ratio >= 0.99:
return DataType.NUMBER
@staticmethod
def _try_pd_to_datetime(df, indices):
try:
if len(indices) == 1:
_ = pd.to_datetime(df.iloc[[0, len(df) - 1], indices[0]])
else:
_ = pd.to_datetime(df.iloc[[0, len(df)-1], indices])
return True
except ValueError:
return False
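# Minimal usage sketch (an illustrative assumption, not part of the original module): build a toy
# DataFrame with one numeric column and let the dtype-based branch above classify it as a
# non-categorical number feature. The empty 'variables' metadata entry is hypothetical.
if __name__ == '__main__':
    toy_df = pd.DataFrame({'price': [1.0, 2.0, 3.0, 4.0]})
    toy_feature = FeatureFactory.create(toy_df, indexes=[0], df_metadata={'variables': [{}]})
    print(type(toy_feature).__name__)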
|
import logging
import os
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from nodeeditor.node_edge import Edge, EDGE_TYPE_BEZIER
from nodeeditor.node_graphics_view import QDMGraphicsView
from nodeeditor.node_node import Node
from nodeeditor.node_scene import Scene, InvalidFile
class NodeEditorWidget(QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.filename = None
self.initUI()
def initUI(self):
self.layout = QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
# create graphics scene
self.scene = Scene()
# create graphics view
self.view = QDMGraphicsView(self.scene.grScene, self)
self.layout.addWidget(self.view)
def isModified(self):
return self.scene.isModified()
def isFilenameSet(self):
return self.filename is not None
def getSelectedItems(self):
return self.scene.getSelectedItems()
def hasSelectedItems(self):
return self.getSelectedItems() != []
def canUndo(self):
return self.scene.history.canUndo()
def canRedo(self):
return self.scene.history.canRedo()
def getUserFriendlyFilename(self):
name = os.path.basename(self.filename) if self.isFilenameSet() else "New Graph"
return name + ("*" if self.isModified() else "")
def fileNew(self):
self.scene.clear()
self.filename = None
self.scene.history.clear()
self.scene.history.storeInitialHistoryStamp()
def fileLoad(self, filename):
QApplication.setOverrideCursor(Qt.WaitCursor)
try:
self.scene.loadFromFile(filename)
self.filename = filename
self.scene.history.clear()
self.scene.history.storeInitialHistoryStamp()
return True
except InvalidFile as e:
logging.error(e)
QApplication.restoreOverrideCursor()
QMessageBox.warning(self, "Error loading %s" % os.path.basename(filename), str(e))
return False
finally:
QApplication.restoreOverrideCursor()
def fileSave(self, filename=None):
# when called without a filename, keep the currently stored filename
if filename is not None: self.filename = filename
QApplication.setOverrideCursor(Qt.WaitCursor)
self.scene.saveToFile(self.filename)
QApplication.restoreOverrideCursor()
return True
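# Minimal embedding sketch (an illustrative assumption, not part of the original module): host the
# widget inside a bare QMainWindow; it assumes a working nodeeditor install and a display server.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    window = QMainWindow()
    window.setCentralWidget(NodeEditorWidget())
    window.show()
    sys.exit(app.exec_())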
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1alpha1_data_volume_source_pvc import V1alpha1DataVolumeSourcePVC
class TestV1alpha1DataVolumeSourcePVC(unittest.TestCase):
""" V1alpha1DataVolumeSourcePVC unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1DataVolumeSourcePVC(self):
"""
Test V1alpha1DataVolumeSourcePVC
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1alpha1_data_volume_source_pvc.V1alpha1DataVolumeSourcePVC()
pass
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python
# Copyright (c) YugaByte, Inc.
# This script generates a kubeconfig for the given service account
# by fetching the cluster information and adding the service account
# token for authentication.
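# Example invocation (the script filename and service account name are hypothetical,
# shown only for illustration):
#   python generate_kubeconfig.py --service_account yb-platform-sa --namespace kube-system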
import argparse
from subprocess import check_output
import json
import base64
import tempfile
def run_command(command_args, namespace=None, as_json=True):
command = ['kubectl']
if namespace:
command.extend(['--namespace', namespace])
command.extend(command_args)
if as_json:
command.extend(['-o', 'json'])
return json.loads(check_output(command))
else:
return check_output(command).decode('utf8')
parser = argparse.ArgumentParser(description='Generate KubeConfig with Token')
parser.add_argument('-s', '--service_account', help='Service Account name', required=True)
parser.add_argument('-n', '--namespace', help='Kubernetes namespace', default='kube-system')
parser.add_argument('-c', '--context', help='kubectl context')
args = vars(parser.parse_args())
# if the context is not provided we use the current-context
context = args['context']
if context is None:
context = run_command(['config', 'current-context'],
args['namespace'], as_json=False)
cluster_attrs = run_command(['config', 'get-contexts', context.strip(),
'--no-headers'], args['namespace'], as_json=False)
cluster_name = cluster_attrs.strip().split()[2]
endpoint = run_command(['config', 'view', '-o',
'jsonpath="{.clusters[?(@.name =="' +
cluster_name + '")].cluster.server}"'],
args['namespace'], as_json=False)
service_account_info = run_command(['get', 'sa', args['service_account']],
args['namespace'])
sa_secret = service_account_info['secrets'][0]['name']
secret_data = run_command(['get', 'secret', sa_secret], args['namespace'])
context_name = '{}-{}'.format(args['service_account'], cluster_name)
kube_config = '/tmp/{}.conf'.format(args['service_account'])
with tempfile.NamedTemporaryFile() as ca_crt_file:
ca_crt = base64.b64decode(secret_data['data']['ca.crt'])
ca_crt_file.write(ca_crt)
ca_crt_file.flush()
# create kubeconfig entry
set_cluster_cmd = ['config', 'set-cluster', cluster_name,
'--kubeconfig={}'.format(kube_config),
'--server={}'.format(endpoint.strip('"')),
'--embed-certs=true',
'--certificate-authority={}'.format(ca_crt_file.name)]
run_command(set_cluster_cmd, as_json=False)
user_token = base64.b64decode(secret_data['data']['token']).decode('utf-8')
set_credentials_cmd = ['config', 'set-credentials', context_name,
'--token={}'.format(user_token),
'--kubeconfig={}'.format(kube_config)]
run_command(set_credentials_cmd, as_json=False)
set_context_cmd = ['config', 'set-context', context_name,
'--cluster={}'.format(cluster_name),
'--user={}'.format(context_name),
'--kubeconfig={}'.format(kube_config)]
run_command(set_context_cmd, as_json=False)
use_context_cmd = ['config', 'use-context', context_name,
'--kubeconfig={}'.format(kube_config)]
run_command(use_context_cmd, as_json=False)
print("Generated the kubeconfig file: {}".format(kube_config))
|
# encoding: utf-8
"""
protocol.py
Created by Thomas Mangin on 2009-08-25.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import os
import traceback
# ================================================================ Registration
#
from exabgp.reactor.network.outgoing import Outgoing
# from exabgp.reactor.network.error import NotifyError
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message import Message
from exabgp.bgp.message import NOP
from exabgp.bgp.message import _NOP
from exabgp.bgp.message import Open
from exabgp.bgp.message.open import Version
from exabgp.bgp.message.open.capability import Capabilities
from exabgp.bgp.message.open.capability import Negotiated
from exabgp.bgp.message import Update
from exabgp.bgp.message import EOR
from exabgp.bgp.message import KeepAlive
from exabgp.bgp.message import Notification
from exabgp.bgp.message import Notify
from exabgp.bgp.message import Operational
from exabgp.reactor.api.processes import ProcessError
from exabgp.logger import Logger
from exabgp.logger import FakeLogger
# This is the number of chunked messages we are willing to buffer, not the number of routes
MAX_BACKLOG = 15000
_UPDATE = Update([],'')
_OPERATIONAL = Operational(0x00)
class Protocol (object):
decode = True
def __init__ (self, peer):
try:
self.logger = Logger()
except RuntimeError:
self.logger = FakeLogger()
self.peer = peer
self.neighbor = peer.neighbor
self.negotiated = Negotiated(self.neighbor)
self.connection = None
if self.neighbor.connect:
self.port = self.neighbor.connect
elif os.environ.get('exabgp.tcp.port','').isdigit():
self.port = int(os.environ.get('exabgp.tcp.port'))
elif os.environ.get('exabgp_tcp_port','').isdigit():
self.port = int(os.environ.get('exabgp_tcp_port'))
else:
self.port = 179
# XXX: FIXME: check that the -19 is correct (but it is harmless)
# The message size is the whole BGP message _without_ headers
self.message_size = Message.MAX_LEN-Message.HEADER_LEN
from exabgp.configuration.environment import environment
self.log_routes = environment.settings().log.routes
# XXX: we use self.peer.neighbor.peer_address when we could use self.neighbor.peer_address
def __del__ (self):
self.close('automatic protocol cleanup')
def me (self, message):
return "Peer %15s ASN %-7s %s" % (self.peer.neighbor.peer_address,self.peer.neighbor.peer_as,message)
def accept (self, incoming):
self.connection = incoming
if self.peer.neighbor.api['neighbor-changes']:
self.peer.reactor.processes.connected(self.peer.neighbor)
# very important - as we use this function on __init__
return self
def connect (self):
# allows testing the protocol code using a modified StringIO with an extra 'pending' function
if not self.connection:
local = self.neighbor.md5_ip
peer = self.neighbor.peer_address
md5 = self.neighbor.md5_password
ttl_out = self.neighbor.ttl_out
self.connection = Outgoing(peer.afi,peer.top(),local.top(),self.port,md5,ttl_out)
try:
generator = self.connection.establish()
while True:
connected = generator.next()
if not connected:
yield False
continue
if self.peer.neighbor.api['neighbor-changes']:
self.peer.reactor.processes.connected(self.peer.neighbor)
yield True
return
except StopIteration:
# close called by the caller
# self.close('could not connect to remote end')
yield False
return
def close (self, reason='protocol closed, reason unspecified'):
if self.connection:
self.logger.network(self.me(reason))
# must be first, otherwise we could have a loop caused by the raise below
self.connection.close()
self.connection = None
try:
if self.peer.neighbor.api['neighbor-changes']:
self.peer.reactor.processes.down(self.peer.neighbor,reason)
except ProcessError:
self.logger.message(self.me('could not send notification of neighbor close to API'))
def _to_api (self,direction,message,raw):
packets = self.neighbor.api['%s-packets' % direction]
parsed = self.neighbor.api['%s-parsed' % direction]
consolidate = self.neighbor.api['%s-consolidate' % direction]
if consolidate:
if packets:
self.peer.reactor.processes.message(self.peer.neighbor,direction,message,raw[:19],raw[19:])
else:
self.peer.reactor.processes.message(self.peer.neighbor,direction,message,'','')
else:
if packets:
self.peer.reactor.processes.packets(self.peer.neighbor,direction,int(message.ID),raw[:19],raw[19:])
if parsed:
self.peer.reactor.processes.message(message.ID,self.peer.neighbor,direction,message,'','')
def write (self, message, negotiated=None):
raw = message.message(negotiated)
if self.neighbor.api.get('send-%s' % Message.CODE.short(message.ID),False):
self._to_api('send',message,raw)
for boolean in self.connection.writer(raw):
yield boolean
def send (self,raw):
if self.neighbor.api.get('send-%s' % Message.CODE.short(ord(raw[19])),False):
message = Update.unpack_message(raw[19:],self.negotiated)
self._to_api('send',message,raw)
for boolean in self.connection.writer(raw):
yield boolean
# Read from network .......................................................
def read_message (self):
# This will always be defined by the loop, but the scope leaking upsets scrutinizer/pylint
msg_id = None
packets = self.neighbor.api['receive-packets']
consolidate = self.neighbor.api['receive-consolidate']
parsed = self.neighbor.api['receive-parsed']
body,header = '','' # just because pylint/pylama are getting more clever
for length,msg_id,header,body,notify in self.connection.reader():
if notify:
if self.neighbor.api['receive-%s' % Message.CODE.NOTIFICATION.SHORT]:
if packets and not consolidate:
self.peer.reactor.processes.packets(self.peer.neighbor,'receive',msg_id,header,body)
if not packets or consolidate:
header = ''
body = ''
self.peer.reactor.processes.notification(self.peer.neighbor,'receive',notify.code,notify.subcode,str(notify),header,body)
# XXX: is notify not already Notify class ?
raise Notify(notify.code,notify.subcode,str(notify))
if not length:
yield _NOP
if packets and not consolidate:
self.peer.reactor.processes.packets(self.peer.neighbor,'receive',msg_id,header,body)
if msg_id == Message.CODE.UPDATE:
if not parsed and not self.log_routes:
yield _UPDATE
return
self.logger.message(self.me('<< %s' % Message.CODE.name(msg_id)))
try:
message = Message.unpack(msg_id,body,self.negotiated)
except (KeyboardInterrupt,SystemExit,Notify):
raise
except Exception,exc:
self.logger.message(self.me('Could not decode message "%d"' % msg_id))
self.logger.message(self.me('%s' % str(exc)))
self.logger.message(traceback.format_exc())
raise Notify(1,0,'can not decode update message of type "%d"' % msg_id)
# raise Notify(5,0,'unknown message received')
if self.neighbor.api.get('receive-%s' % Message.CODE.short(msg_id),False):
if parsed:
if not consolidate or not packets:
header = ''
body = ''
self.peer.reactor.processes.message(msg_id,self.neighbor,'receive',message,header,body)
if message.TYPE == Notification.TYPE:
raise message
yield message
# elif msg == Message.CODE.ROUTE_REFRESH:
# if self.negotiated.refresh != REFRESH.ABSENT:
# self.logger.message(self.me('<< ROUTE-REFRESH'))
# refresh = RouteRefresh.unpack_message(body,self.negotiated)
# if self.neighbor.api.receive_refresh:
# if refresh.reserved in (RouteRefresh.start,RouteRefresh.end):
# if self.neighbor.api.consolidate:
# self.peer.reactor.process.refresh(self.peer,refresh,header,body)
# else:
# self.peer.reactor.processes.refresh(self.peer,refresh,'','')
# else:
# # XXX: FIXME: really should raise, we are too nice
# self.logger.message(self.me('<< NOP (un-negotiated type %d)' % msg))
# refresh = UnknownMessage.unpack_message(body,self.negotiated)
# yield refresh
def validate_open (self):
error = self.negotiated.validate(self.neighbor)
if error is not None:
raise Notify(*error)
def read_open (self, ip):
for received_open in self.read_message():
if received_open.TYPE == NOP.TYPE:
yield received_open
else:
break
if received_open.TYPE != Open.TYPE:
raise Notify(5,1,'The first packet received is not an open message (%s)' % received_open)
self.logger.message(self.me('<< %s' % received_open))
yield received_open
def read_keepalive (self):
for message in self.read_message():
if message.TYPE == NOP.TYPE:
yield message
else:
break
if message.TYPE != KeepAlive.TYPE:
raise Notify(5,2)
yield message
#
# Sending message to peer
#
def new_open (self, restarted):
sent_open = Open(
Version(4),
self.neighbor.local_as,
self.neighbor.hold_time,
self.neighbor.router_id,
Capabilities().new(self.neighbor,restarted)
)
# we do not buffer the open message on purpose
for _ in self.write(sent_open):
yield _NOP
self.logger.message(self.me('>> %s' % sent_open))
yield sent_open
def new_keepalive (self, comment=''):
keepalive = KeepAlive()
for _ in self.write(keepalive):
yield _NOP
self.logger.message(self.me('>> KEEPALIVE%s' % (' (%s)' % comment if comment else '')))
yield keepalive
def new_notification (self, notification):
for _ in self.write(notification):
yield _NOP
self.logger.message(self.me('>> NOTIFICATION (%d,%d,"%s")' % (notification.code,notification.subcode,notification.data)))
yield notification
def new_update (self):
updates = self.neighbor.rib.outgoing.updates(self.neighbor.group_updates)
number = 0
for update in updates:
for message in update.messages(self.negotiated):
number += 1
for boolean in self.send(message):
# boolean is a transient network error we already announced
yield _NOP
if number:
self.logger.message(self.me('>> %d UPDATE(s)' % number))
yield _UPDATE
def new_eor (self, afi, safi):
eor = EOR(afi,safi)
for _ in self.write(eor):
yield _NOP
self.logger.message(self.me('>> EOR %s %s' % (afi,safi)))
yield eor
def new_eors (self, afi=AFI.undefined,safi=SAFI.undefined):
# Send EOR to let our peer know he can perform a RIB update
if self.negotiated.families:
families = self.negotiated.families if (afi,safi) == (AFI.undefined,SAFI.undefined) else [(afi,safi),]
for eor_afi,eor_safi in families:
for _ in self.new_eor(eor_afi,eor_safi):
yield _
else:
# If we are not sending an EOR, send a keepalive as soon as we have finished
# so the other routers know that we have no (more) routes to send ...
# (is that behaviour documented somewhere ??)
for eor in self.new_keepalive('EOR'):
yield _NOP
yield _UPDATE
def new_operational (self, operational, negotiated):
for _ in self.write(operational,negotiated):
yield _NOP
self.logger.message(self.me('>> OPERATIONAL %s' % str(operational)))
yield operational
def new_refresh (self, refresh):
for _ in self.write(refresh,None):
yield _NOP
self.logger.message(self.me('>> REFRESH %s' % str(refresh)))
yield refresh
|
# Generated by Django 3.0.8 on 2020-10-28 17:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('docs', '0002_article_version'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'verbose_name': '插件内容', 'verbose_name_plural': '插件内容'},
),
]
|
# Copyright 2021 Jacob Baumbach
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic exception for a misconfigured object."""
class ConfigurationError(Exception):
"""The exception raised by any object when it's misconfigured.
(e.g. missing properties, invalid properties, unknown properties).
.. note::
This ``Exception`` should typically be avoided, and instead an
exception that subclasses
:class:`~adorn.exception.type_check_error.TypeCheckError`
or a new custom ``Exception`` should be used. These alternate
``Exception`` objects contain more information, and are therefore
more useful for the caller.
"""
def __init__(self, message: str):
super().__init__()
self.message = message
def __str__(self):
return self.message
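# Minimal usage sketch (the property name "learning_rate" below is only an
# illustration, not something this package defines):
if __name__ == "__main__":
    try:
        raise ConfigurationError("missing required property: learning_rate")
    except ConfigurationError as error:
        print(error)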
|
import glob
import os
import sys
import random
import time
import numpy as np
import cv2
import math
from collections import deque
from keras.applications.xception import Xception
from keras.layers import Dense, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.models import Model
import tensorflow as tf  # needed for tf.get_default_graph() below (Keras on the TF1 backend)
'''
Search for the .egg file that the Carla package distribution provides
'''
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
SHOW_PREVIEW = False
IM_WIDTH = 640
IM_HEIGHT = 480
SECONDS_PER_EPISODE = 10
REPLAY_MEMORY_SIZE = 5_000
MIN_REPLAY_MEMORY_SIZE = 1_000
MINIBATCH_SIZE = 16
PREDICTION_BATCH_SIZE = 1
TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4
UPDATE_TARGET_EVERY = 5
MODEL_NAME = "Xception"
MEMORY_FRACTION = 0.8
MIN_REWARD = -200
EPISODES = 100
DISCOUNT = 0.99
epsilon = 1
EPSILON_DECAY = 0.95
MIN_EPSILON = 0.001
AGGREGATE_STATS_EVERY = 10
'''
Environment class setup
'''
class CarEnv:
SHOW_CAM = SHOW_PREVIEW # whether to show the camera preview
STEER_AMT = 1.0
im_width = IM_WIDTH
im_height = IM_HEIGHT
front_camera = None
actor_list = []
collision_hist = [] # list of recorded collision events
def __init__(self):
self.client = carla.Client("localhost", 2000)
self.client.set_timeout(2.0)
# If the client is running, the world can be retrieved.
self.world = self.client.get_world()
# The world exposes the blueprint library used to add new actors to the simulation.
self.blueprint_library = self.world.get_blueprint_library()
# Pick the vehicle model
self.model_3 = self.blueprint_library.filter("model3")[0]
def reset(self):
self.collision_hist = []
self.actor_list = []
# Spawn the vehicle at a random spawn point and add it to the actor list
self.transform = random.choice(self.world.get_map().get_spawn_points())
self.vehicle = self.world.spawn_actor(self.model_3, self.transform)
self.actor_list.append(self.vehicle)
# Get the blueprint of the RGB camera sensor
self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb')
# Set the size of the image captured by the RGB camera sensor
self.rgb_cam.set_attribute("image_size_x", f"{self.im_width}")
self.rgb_cam.set_attribute("image_size_y", f"{self.im_height}")
self.rgb_cam.set_attribute("fov", f"110")
# Position the sensor relative to the vehicle
transform = carla.Transform(carla.Location(x=2.5, z=0.7))
# Spawn the sensor and add it to the actor list.
self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to=self.vehicle)
self.actor_list.append(self.sensor)
# Use a lambda to process the data streamed by the sensor
self.sensor.listen(lambda data: self.process_img(data))
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
'''
Spawning the car can cause a collision if it drops onto the ground,
and the sensors may take some time to initialize and return values,
so wait about 4 seconds before continuing.
'''
time.sleep(4)
# Get the blueprint of the collision sensor
colsensor = self.blueprint_library.find("sensor.other.collision")
# Spawn the sensor and add it to the actor list
self.colsensor = self.world.spawn_actor(colsensor, transform, attach_to=self.vehicle)
self.actor_list.append(self.colsensor)
# Use a lambda to process the data streamed by the sensor
self.colsensor.listen(lambda event: self.collision_data(event))
while self.front_camera is None:
time.sleep(0.01)
'''
Record the actual start time of the episode,
make sure neither throttle nor brake is applied,
and return the first observation.
'''
self.episode_start = time.time()
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
return self.front_camera
# Handle collision data
def collision_data(self, event):
self.collision_hist.append(event)
# Handle image data
def process_img(self, image):
i = np.array(image.raw_data)
#print(i.shape)
i2 = i.reshape((self.im_height, self.im_width, 4))
i3 = i2[:, :, :3]
if self.SHOW_CAM:
cv2.imshow("", i3)
cv2.waitKey(1)
self.front_camera = i3
# Manage action, reward, done, any_extra_info
def step(self, action):
if action == 0:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1*self.STEER_AMT))
elif action == 1:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer= 0))
elif action == 2:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1*self.STEER_AMT))
v = self.vehicle.get_velocity()
kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))
if len(self.collision_hist) != 0:
done = True
reward = -200
elif kmh < 50:
done = False
reward = -1
else:
done = False
reward = 1
if self.episode_start + SECONDS_PER_EPISODE < time.time():
done = True
return self.front_camera, reward, done, None
# Reinforcement learning agent
class DQNAgent:
def __init__(self):
self.model = self.create_model()
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
self.tensorboard = ModifiedTensorBoard(log_dir=f"logs/{MODEL_NAME}-{int(time.time())}")  # ModifiedTensorBoard: a custom per-episode TensorBoard callback assumed to be defined elsewhere
self.target_update_counter = 0
self.graph = tf.get_default_graph()
self.terminate = False
self.last_logged_episode = 0
self.training_initialized = False
# Build the model
def create_model(self):
base_model = Xception(weights=None, include_top=False, input_shape=(IM_HEIGHT, IM_WIDTH,3))
x = base_model.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(3, activation="linear")(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=["accuracy"])
return model
def update_replay_memory(self, transition):
# transition = (current_state, action, reward, new_state, done)
self.replay_memory.append(transition)
def train(self):
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
current_states = np.array([transition[0] for transition in minibatch])/255
with self.graph.as_default():
current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE)
new_current_states = np.array([transition[3] for transition in minibatch])/255
with self.graph.as_default():
future_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE)
# x = input / y = output
X = []
y = []
for index, (current_state, action, reward, new_state, done) in enumerate(minibatch):
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
current_qs = current_qs_list[index]
current_qs[action] = new_q
X.append(current_state)
y.append(current_qs)
'''
Log per episode rather than per training step;
write to TensorBoard only when log_this_step is True.
'''
log_this_step = False
if self.tensorboard.step > self.last_logged_episode:
log_this_step = True
self.last_logged_episode = self.tensorboard.step
with self.graph.as_default():
self.model.fit(np.array(X)/255, np.array(y), batch_size=TRAINING_BATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if log_this_step else None)
if log_this_step:
self.target_update_counter += 1
# Check whether the target model should be updated
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
def get_qs(self, state):
return self.model.predict(np.array(state).reshape(-1, *state.shape) / 255)[0]
# Training loop
def train_in_loop(self):
X = np.random.uniform(size=(1, IM_HEIGHT, IM_WIDTH, 3)).astype(np.float32)
y = np.random.uniform(size=(1, 3)).astype(np.float32)
with self.graph.as_default():
self.model.fit(X,y, verbose=False, batch_size=1)
self.training_initialized = True
while True:
if self.terminate:
return
self.train()
time.sleep(0.01)
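'''
Minimal interaction sketch (assumptions: a CARLA server is listening on localhost:2000
and the constants above are used as intended). It exercises CarEnv with a random policy
and epsilon decay only; DQNAgent is not constructed here because ModifiedTensorBoard is
defined outside this file.
'''
if __name__ == "__main__":
    env = CarEnv()
    for episode in range(1, EPISODES + 1):
        current_state = env.reset()
        done = False
        episode_reward = 0
        while not done:
            if np.random.random() > epsilon:
                action = 1  # placeholder for np.argmax(agent.get_qs(current_state))
            else:
                action = np.random.randint(0, 3)
            current_state, reward, done, _ = env.step(action)
            episode_reward += reward
        # destroy the actors spawned during this episode before the next reset
        for actor in env.actor_list:
            actor.destroy()
        if epsilon > MIN_EPSILON:
            epsilon = max(MIN_EPSILON, epsilon * EPSILON_DECAY)
        print(f"episode {episode}: reward {episode_reward}, epsilon {epsilon:.3f}")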
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""email package exception classes."""
class MessageError(Exception):
"""Base class for errors in the email package."""
class MessageParseError(MessageError):
"""Base class for message parsing errors."""
class HeaderParseError(MessageParseError):
"""Error while parsing headers."""
class BoundaryError(MessageParseError):
"""Couldn't find terminating boundary."""
# Pyston change: we don't support multiple inheritance yet, so this error class is tricky.
# We could make it so that it only inherits one of the base classes, but I'd rather that
# anyone who tries to use this error gets a loud error message rather than different behavior.
# class MultipartConversionError(MessageError, TypeError):
# """Conversion to a multipart is prohibited."""
class CharsetError(MessageError):
"""An illegal charset was given."""
# These are parsing defects which the parser was able to work around.
class MessageDefect:
"""Base class for a message defect."""
def __init__(self, line=None):
self.line = line
class NoBoundaryInMultipartDefect(MessageDefect):
"""A message claimed to be a multipart but had no boundary parameter."""
class StartBoundaryNotFoundDefect(MessageDefect):
"""The claimed start boundary was never found."""
class FirstHeaderLineIsContinuationDefect(MessageDefect):
"""A message had a continuation line as its first header line."""
class MisplacedEnvelopeHeaderDefect(MessageDefect):
"""A 'Unix-from' header was found in the middle of a header block."""
class MalformedHeaderDefect(MessageDefect):
"""Found a header that was missing a colon, or was otherwise malformed."""
class MultipartInvariantViolationDefect(MessageDefect):
"""A message claimed to be a multipart but no subparts were found."""
|
'''
Configure:
python setup.py install
pyVideoDatasets
Colin Lea
2013
'''
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
ext_modules = []
for e in ext_modules:
e.pyrex_directives = {
"boundscheck": False,
"wraparound": False,
"infer_types": True
}
e.extra_compile_args = ["-w"]
print(ext_modules)
setup(
author = 'Colin Lea',
author_email = 'colincsl@gmail.com',
description = '',
license = "FreeBSD",
version= "0.1",
name = 'pyVideoDatasets',
cmdclass = {'build_ext': build_ext},
include_dirs = [np.get_include()],
packages= [ "pyVideoDatasets",
"pyKinectTools.dataset_readers",
],
package_data={'':['*.xml', '*.png', '*.yml', '*.txt']},
ext_modules = ext_modules
)
|
from pytest import raises
from ..quoted_or_list import quoted_or_list
def test_does_not_accept_an_empty_list():
with raises(StopIteration):
quoted_or_list([])
def test_returns_single_quoted_item():
assert quoted_or_list(["A"]) == '"A"'
def test_returns_two_item_list():
assert quoted_or_list(["A", "B"]) == '"A" or "B"'
def test_returns_comma_separated_many_item_list():
assert quoted_or_list(["A", "B", "C"]) == '"A", "B" or "C"'
def test_limits_to_five_items():
assert quoted_or_list(["A", "B", "C", "D", "E", "F"]) == '"A", "B", "C", "D" or "E"'
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloViewSet(viewsets.ViewSet):
"""Test ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self,request):
"""teste"""
a_viewset = [
'Use esta metodo para (list,recuperar,atualizar,atualizar um campo',
'Automaticamente mapeia as urls usando Roters',
'Proporciona mais funcionalidades com menos codigo',
]
return(Response({'message':'Hello','a_viewset':a_viewset}))
def create(self,request):
"""Cria uma nova menssagem"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
messagem=f'Hello {name}'
return Response({'messagem':messagem})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def retrieve(self,request, pk = None):
"""Retorna um objeto pela ID"""
return Response({'http_method':'PUT'})
def update(self, request, pk = None):
"""Atualiza um objeto"""
return Response({'http_method':'PUT'})
def partial_update(self,request,pk = None):
"""Atualiza parte de um objeto"""
return Response({'http_method':'PATCH'})
def destroy(self, request, pk = None):
"""Remove um objeto"""
return Response({'http_method':'DELETE'})
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self,request,format=None):
"""Returna uma lista de funções da APIView"""
an_apiview = [
'Usando Http metodos (get,post,put,delete,patch)',
'É similar a uma tradiciona view do django',
'te da o controle da logica da aplicação',
'e mapea manualmente as urls',
]
return Response({'message':'hello','an_apiview':an_apiview})
def post(self,request):
"""cria uma messagem de vem vindo com o nome"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message':message})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def put(self,request,pk = None):
"""Atualizando um objeto"""
return Response({'metodo':'put'})
def patch(self,request, pk = None):
"""Atualizando um campo de um objeto"""
return Response({'metodo':'Patch'})
def delete(self, request, pk = None):
"""Deletando um objeto"""
return Response({'methodo':'Delete'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Cria e atualiza um usuario """
serializer_class = serializers.UserProfileSerizalizer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
"""Cria um token autenticado para o usuario"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Registra e atualiza feed de usuario autenticado"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
def perform_create(self, serializer):
"""seta o usuario do perfil para o usuario logado"""
serializer.save(user_profile=self.request.user)
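# Sketch of how these views would typically be wired up in a urls.py module
# (a hypothetical illustration; the URL prefixes and basenames are assumptions,
# not taken from this project):
#
#   from django.urls import path, include
#   from rest_framework.routers import DefaultRouter
#   from profiles_api import views
#
#   router = DefaultRouter()
#   router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
#   router.register('profile', views.UserProfileViewSet)
#   router.register('feed', views.UserProfileFeedViewSet)
#
#   urlpatterns = [
#       path('hello-view/', views.HelloApiView.as_view()),
#       path('login/', views.UserLoginApiView.as_view()),
#       path('', include(router.urls)),
#   ]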
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = 'Test TSEmergency API'
Test.ContinueOnFail = True
# Define default ATS
ts = Test.MakeATSProcess('ts')
Test.testName = 'Emergency Shutdown Test'
ts.Disk.records_config.update({
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.exec_thread.autoconfig.scale': 1.5,
'proxy.config.exec_thread.limit': 16,
'proxy.config.accept_threads': 1,
'proxy.config.task_threads': 2,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'TSEmergency_test'
})
# Load plugin
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'emergency_shutdown.so'), ts)
# www.example.com Host
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'printf "Emergency Shutdown Test"'
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
ts.ReturnCode = 33
ts.Ready = 0 # Need this to be 0 because we are testing shutdown, this is to make autest not think ats went away for a bad reason.
ts.Streams.All = Testers.ExcludesExpression('failed to shutdown', 'should NOT contain "failed to shutdown"')
ts.Disk.diags_log.Content = Testers.IncludesExpression('testing emergency shutdown', 'should contain "testing emergency shutdown"')
|
#!/usr/bin/env python3
#------------------------------------------------------------------------------#
# fortnet-python: Python Tools for the Fortnet Software Package #
# Copyright (C) 2021 - 2022 T. W. van der Heide #
# #
# See the LICENSE file for terms of usage and distribution. #
#------------------------------------------------------------------------------#
'''
Regression tests covering the Fnetout class of Fortformat.
'''
import os
import pytest
import numpy as np
from common import compare_fnetout_references
REFPATH = os.path.join(os.getcwd(), 'test', 'references', 'Fnetout')
def test_predict_atomic():
'''Test extraction capabilities for a prediction run
with a network that was trained on atomic targets.
'''
fname = 'predict_atomic.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 0
ref['natomictargets'] = 2
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['globalpredictions'] = None
ref['globalpredictions_atomic'] = None
ref['atomicpredictions'] = [
np.array([[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global():
'''Test extraction capabilities for a prediction run
with a network that was trained on global targets.
'''
fname = 'predict_global.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['globalpredictions_atomic'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float) / 2.0,
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float) / 3.0,
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float) / 4.0,
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float) / 5.0]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([-4.585193773117663341e+02], dtype=float),
np.array([-2.290754290677185736e+02], dtype=float),
np.array([-6.877477714671086915e+02], dtype=float),
np.array([-5.349057545062817098e+02], dtype=float)]
ref['atomicpredictions'] = None
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global_singleforces():
'''Test extraction capabilities for a prediction run with a network
that was trained on global targets and calculates atomic forces.
'''
fname = 'predict_global_singleforces.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 2
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['atomicpredictions'] = None
ref['tforces'] = True
ref['forces'] = []
ref['forces'].append([])
ref['forces'].append([])
ref['forces'][0].append(np.array([
[-1.129280561189105470e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.129280561189105470e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.464270111301352983e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.464270111301352983e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['globalpredictions_atomic'] = [
np.array([[-4.301790810131604914e-01],
[-4.301790810131604914e-01]], dtype=float) / 2.0,
np.array([[-5.025593389423121948e-01],
[-5.025593389423121948e-01]], dtype=float) / 2.0]
ref['globalpredictions'] = [
np.array([-4.301790810131604914e-01], dtype=float),
np.array([-5.025593389423121948e-01], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global_multiforces():
'''Test extraction capabilities for a prediction run with a network
that was trained on global targets and calculates atomic forces.
'''
fname = 'predict_global_multiforces.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 2
ref['nglobaltargets'] = 3
ref['natomictargets'] = 0
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['atomicpredictions'] = None
ref['tforces'] = True
ref['forces'] = []
ref['forces'].append([])
ref['forces'].append([])
ref['forces'][0].append(np.array([
[-1.113504383113195217e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.113504383113195217e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][0].append(np.array([
[-1.117387033151562292e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.117387033151562292e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][0].append(np.array([
[-1.110108965167277972e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.110108965167277972e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.450938994823964379e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.450938994823964379e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.465140042623886529e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.465140042623886529e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.438788427604926312e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.438788427604926312e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['globalpredictions_atomic'] = [
np.array([[-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01],
[-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01]], dtype=float) / 2.0,
np.array([[-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01],
[-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01]], dtype=float) / 2.0]
ref['globalpredictions'] = [
np.array([-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01], dtype=float),
np.array([-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_atomic():
'''Test extraction capabilities for a validation run
with a network that was trained on atomic targets.
'''
fname = 'validate_atomic.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 0
ref['natomictargets'] = 2
ref['globaltargets'] = None
ref['globalpredictions'] = None
ref['globalpredictions_atomic'] = None
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = [
np.array([
[1.540549993515014648e-01, 8.459450006484985352e-01],
[1.883080005645751953e-01, 8.116919994354248047e-01],
[1.595949977636337280e-01, 8.404050022363662720e-01],
[1.432220041751861572e-01, 8.567779958248138428e-01],
[1.232710033655166626e-01, 8.767289966344833374e-01],
[1.735100001096725464e-01, 8.264899998903274536e-01],
[1.588409990072250366e-01, 8.411590009927749634e-01],
[1.403059959411621094e-01, 8.596940040588378906e-01],
[-2.634609937667846680e-01, 6.263460993766784668e+00],
[-3.214380145072937012e-01, 6.321438014507293701e+00],
[-3.043099939823150635e-01, 6.304309993982315063e+00],
[-3.519429862499237061e-01, 6.351942986249923706e+00]],
dtype=float),
np.array([
[1.272429972887039185e-01, 8.727570027112960815e-01],
[1.549790054559707642e-01, 8.450209945440292358e-01],
[1.774729937314987183e-01, 8.225270062685012817e-01],
[1.796700060367584229e-01, 8.203299939632415771e-01],
[-3.525030016899108887e-01, 6.352503001689910889e+00],
[-2.868520021438598633e-01, 6.286852002143859863e+00]],
dtype=float),
np.array([
[1.852180063724517822e-01, 8.147819936275482178e-01],
[1.311800032854080200e-01, 8.688199967145919800e-01],
[1.232030019164085388e-01, 8.767969980835914612e-01],
[1.774370074272155762e-01, 8.225629925727844238e-01],
[1.587480008602142334e-01, 8.412519991397857666e-01],
[1.444180011749267578e-01, 8.555819988250732422e-01],
[1.365029960870742798e-01, 8.634970039129257202e-01],
[1.802569925785064697e-01, 8.197430074214935303e-01],
[-2.689329981803894043e-01, 6.268932998180389404e+00],
[-3.368290066719055176e-01, 6.336829006671905518e+00],
[-3.142969906330108643e-01, 6.314296990633010864e+00],
[-3.169249892234802246e-01, 6.316924989223480225e+00]],
dtype=float),
np.array([
[1.770180016756057739e-01, 8.229819983243942261e-01],
[1.812230050563812256e-01, 8.187769949436187744e-01],
[1.482979953289031982e-01, 8.517020046710968018e-01],
[9.460300207138061523e-02, 9.053969979286193848e-01],
[-2.429430037736892700e-01, 6.242943003773689270e+00],
[-3.581880033016204834e-01, 6.358188003301620483e+00]],
dtype=float),
np.array([
[1.596090048551559448e-01, 8.403909951448440552e-01],
[1.659840047359466553e-01, 8.340159952640533447e-01],
[1.713179945945739746e-01, 8.286820054054260254e-01],
[1.658540070056915283e-01, 8.341459929943084717e-01],
[-3.264440000057220459e-01, 6.326444000005722046e+00],
[-3.363139927387237549e-01, 6.336313992738723755e+00]],
dtype=float)]
ref['atomicpredictions'] = [
np.array([
[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([
[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([
[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([
[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([
[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_global():
'''Test extraction capabilities for a validation run
with a network that was trained on global targets.
'''
fname = 'validate_global.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['atomicpredictions'] = None
ref['globaltargets'] = [
np.array([-1.527736989418316114e+02], dtype=float),
np.array([-4.584216715420000128e+02], dtype=float),
np.array([-2.291870019319999869e+02], dtype=float),
np.array([-6.876760346160000381e+02], dtype=float),
np.array([-5.348338707069999600e+02], dtype=float)]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([-4.585193773117663341e+02], dtype=float),
np.array([-2.290754290677185736e+02], dtype=float),
np.array([-6.877477714671086915e+02], dtype=float),
np.array([-5.349057545062817098e+02], dtype=float)]
ref['globalpredictions_atomic'] = [
np.array([[-1.526436789762218496e+02]], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float) / 2.0,
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float) / 3.0,
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float) / 4.0,
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float) / 5.0]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_atomic_global():
'''Test extraction capabilities for a validation run with a
network that was trained on both, atomic and global targets.
'''
fname = 'validate_atomic_global.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 2
ref['targets'] = True
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = [
np.array([
[1.540549993515014648e-01, 8.459450006484985352e-01],
[1.883080005645751953e-01, 8.116919994354248047e-01],
[1.595949977636337280e-01, 8.404050022363662720e-01],
[1.432220041751861572e-01, 8.567779958248138428e-01],
[1.232710033655166626e-01, 8.767289966344833374e-01],
[1.735100001096725464e-01, 8.264899998903274536e-01],
[1.588409990072250366e-01, 8.411590009927749634e-01],
[1.403059959411621094e-01, 8.596940040588378906e-01],
[-2.634609937667846680e-01, 6.263460993766784668e+00],
[-3.214380145072937012e-01, 6.321438014507293701e+00],
[-3.043099939823150635e-01, 6.304309993982315063e+00],
[-3.519429862499237061e-01, 6.351942986249923706e+00]],
dtype=float),
np.array([
[1.272429972887039185e-01, 8.727570027112960815e-01],
[1.549790054559707642e-01, 8.450209945440292358e-01],
[1.774729937314987183e-01, 8.225270062685012817e-01],
[1.796700060367584229e-01, 8.203299939632415771e-01],
[-3.525030016899108887e-01, 6.352503001689910889e+00],
[-2.868520021438598633e-01, 6.286852002143859863e+00]],
dtype=float),
np.array([
[1.852180063724517822e-01, 8.147819936275482178e-01],
[1.311800032854080200e-01, 8.688199967145919800e-01],
[1.232030019164085388e-01, 8.767969980835914612e-01],
[1.774370074272155762e-01, 8.225629925727844238e-01],
[1.587480008602142334e-01, 8.412519991397857666e-01],
[1.444180011749267578e-01, 8.555819988250732422e-01],
[1.365029960870742798e-01, 8.634970039129257202e-01],
[1.802569925785064697e-01, 8.197430074214935303e-01],
[-2.689329981803894043e-01, 6.268932998180389404e+00],
[-3.368290066719055176e-01, 6.336829006671905518e+00],
[-3.142969906330108643e-01, 6.314296990633010864e+00],
[-3.169249892234802246e-01, 6.316924989223480225e+00]],
dtype=float),
np.array([
[1.770180016756057739e-01, 8.229819983243942261e-01],
[1.812230050563812256e-01, 8.187769949436187744e-01],
[1.482979953289031982e-01, 8.517020046710968018e-01],
[9.460300207138061523e-02, 9.053969979286193848e-01],
[-2.429430037736892700e-01, 6.242943003773689270e+00],
[-3.581880033016204834e-01, 6.358188003301620483e+00]],
dtype=float),
np.array([
[1.596090048551559448e-01, 8.403909951448440552e-01],
[1.659840047359466553e-01, 8.340159952640533447e-01],
[1.713179945945739746e-01, 8.286820054054260254e-01],
[1.658540070056915283e-01, 8.341459929943084717e-01],
[-3.264440000057220459e-01, 6.326444000005722046e+00],
[-3.363139927387237549e-01, 6.336313992738723755e+00]],
dtype=float)]
ref['atomicpredictions'] = [
np.array([
[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([
[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([
[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([
[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([
[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
ref['globaltargets'] = [
np.array([-1.527736989418316114e+02], dtype=float),
np.array([-4.584216715420000128e+02], dtype=float),
np.array([-2.291870019319999869e+02], dtype=float),
np.array([-6.876760346160000381e+02], dtype=float),
np.array([-5.348338707069999600e+02], dtype=float)]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float) * 12.0,
np.array([-4.585193773117663341e+02], dtype=float) * 6.0,
np.array([-2.290754290677185736e+02], dtype=float) * 12.0,
np.array([-6.877477714671086915e+02], dtype=float) * 6.0,
np.array([-5.349057545062817098e+02], dtype=float) * 6.0]
ref['globalpredictions_atomic'] = [
np.array([[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02]], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float),
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float),
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float),
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
if __name__ == '__main__':
pytest.main()
|
from rest_framework import serializers
from bumblebee.buzzes.api.serializers.interaction_serializers import (
BuzzInteractionsSerializer,
)
from bumblebee.buzzes.models import Buzz, BuzzImage
from bumblebee.core.exceptions import UnknownModelFieldsError
from .user_serializers import BuzzUserSerializer
######################################
## RETRIEVE
######################################
class BuzzImageSerializer(serializers.ModelSerializer):
""" """
image = serializers.ImageField(required=False, use_url=True)
class Meta:
model = BuzzImage
fields = ["image"]
class ListBuzzImageSerializer(serializers.ModelSerializer):
""" """
image = serializers.ImageField(required=False, use_url=True)
class Meta:
model = BuzzImage
fields = ["image"]
class BuzzDetailSerializer(serializers.ModelSerializer):
""" """
buzzid = serializers.IntegerField(source="id")
created_date = serializers.DateTimeField()
edited_date = serializers.DateTimeField()
edited = serializers.BooleanField()
privacy = serializers.ChoiceField(choices=Buzz.PrivacyChoices.choices)
content = serializers.CharField(help_text="Something in your mind? Post a buzz")
location = serializers.CharField()
flair = serializers.ListField(child=serializers.CharField())
author = BuzzUserSerializer(many=False)
images = ListBuzzImageSerializer(source="buzz_image", many=True, read_only=True)
interaction = BuzzInteractionsSerializer(source="buzz_interaction", read_only=True)
sentiment_value = serializers.FloatField()
textblob_value = serializers.FloatField()
class Meta:
model = Buzz
fields = [
"buzzid",
"created_date",
"edited_date",
"edited",
"privacy",
"content",
"location",
"flair",
"author",
"images",
"interaction",
"sentiment_value",
"textblob_value",
]
######################################
## CREATE
######################################
class CreateBuzzSerializer(serializers.ModelSerializer):
""" """
privacy = serializers.ChoiceField(
required=False, choices=Buzz.PrivacyChoices.choices
)
content = serializers.CharField(
required=False, help_text="Something in your mind? Post a buzz"
)
flair = serializers.ListField(child=serializers.CharField(), required=False)
location = serializers.CharField(required=False)
class Meta:
model = Buzz
fields = ["privacy", "content", "location", "flair"]
class EditBuzzSerializer(serializers.ModelSerializer):
""" """
privacy = serializers.ChoiceField(
required=False, choices=Buzz.PrivacyChoices.choices
)
content = serializers.CharField(
required=False, help_text="Something in your mind? Post a buzz"
)
location = serializers.CharField(required=False)
flair = serializers.ListField(child=serializers.CharField(), required=False)
class Meta:
model = Buzz
fields = ["privacy", "content", "location", "flair"]
def update_buzz(self, buzz_instance, **validated_data):
""" """
try:
for key, value in validated_data.items():
if buzz_instance.__dict__.__contains__(key):
if buzz_instance.__getattribute__(key) != value:
buzz_instance.__setattr__(key, value)
else:
raise UnknownModelFieldsError(
key,
f"'{buzz_instance.__class__.__name__}' object has no model field called {key}",
)
if buzz_instance.__getattribute__("edited") != True:
buzz_instance.__setattr__("edited", True)
buzz_instance.save()
except UnknownModelFieldsError as error:
print(error)
raise error
except Exception as error:
print("ERROR @update_buzz\n", error)
raise error
class BuzzListSerializer(serializers.Serializer):
""" """
buzzid = serializers.IntegerField(source="id")
author = BuzzUserSerializer()
created_date = serializers.DateTimeField()
edited_date = serializers.DateTimeField()
edited = serializers.BooleanField()
privacy = serializers.CharField()
content = serializers.CharField()
location = serializers.CharField()
flair = serializers.ListField()
images = ListBuzzImageSerializer(source="buzz_image", many=True, read_only=True)
interaction = BuzzInteractionsSerializer(source="buzz_interaction", read_only=True)
sentiment_value = serializers.FloatField()
textblob_value = serializers.FloatField()
class Meta:
""" """
model = Buzz
fields = [
"buzzid",
"author",
"created_date",
"edited_date",
"privacy",
"content",
"location",
"flair",
"images",
"interaction",
"sentiment_value",
"textblob_value",
]
# depth = 1
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test suite verifies device_power capability."""
from typing import Type
from gazoo_device.tests.functional_tests.utils import gdm_test_base
import retry
class DevicePowerTestSuite(gdm_test_base.GDMTestBase):
"""Test suite for the device_power capability."""
@classmethod
def is_applicable_to(cls, device_type: str,
device_class: Type[gdm_test_base.DeviceType],
device_name: str) -> bool:
"""Determine if this test suite can run on the given device."""
if not device_class.has_capabilities(["device_power"]):
return False
props = ["device_power.hub_name", "device_power.port_number"]
return cls.check_properties_set(device_name, props)
@classmethod
def requires_pairing(cls) -> bool:
"""Returns True if the device must be paired to run this test suite."""
return False
@retry.retry(tries=2, delay=30)
def test_device_power_on_off(self):
"""Verifies on() and off() methods work."""
original_mode = self.device.device_power.port_mode
try:
self.device.device_power.off()
self.assertEqual(
self.device.device_power.port_mode, "off",
f"{self.device.name} port {self.device.device_power.port_number} "
"should have been set to off")
self.device.device_power.on()
on_modes = ["on", "charge", "sync"]
self.assertIn(
self.device.device_power.port_mode, on_modes,
f"{self.device.name} port {self.device.device_power.port_number} "
f"should have been set to one of {on_modes}")
finally:
if original_mode == "off":
self.logger.info(
"Restoring device power back to its original mode 'off'")
self.device.device_power.off()
if __name__ == "__main__":
gdm_test_base.main()
|
import numpy as np
from prml.nn.optimizer.optimizer import Optimizer
class AdaDelta(Optimizer):
"""
AdaDelta optimizer
"""
def __init__(self, parameter, rho=0.95, epsilon=1e-8):
super().__init__(parameter, None)
self.rho = rho
self.epsilon = epsilon
self.mean_squared_deriv = []
self.mean_squared_update = []
for p in self.parameter:
self.mean_squared_deriv.append(np.zeros(p.shape))
self.mean_squared_update.append(np.zeros(p.shape))
def update(self):
self.increment_iteration()
for p, msd, msu in zip(self.parameter, self.mean_squared_deriv, self.mean_squared_update):
if p.grad is None:
continue
grad = p.grad
msd *= self.rho
msd += (1 - self.rho) * grad ** 2
delta = np.sqrt((msu + self.epsilon) / (msd + self.epsilon)) * grad
msu *= self.rho
msu += (1 - self.rho) * delta ** 2
p.value += delta
|
# coding: utf-8
"""
barcodeapi
Barcode APIs let you generate barcode images, and recognize values from images of barcodes. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
_default = None
def __init__(self):
"""Constructor"""
if self._default:
for key in self._default.__dict__.keys():
self.__dict__[key] = copy.copy(self._default.__dict__[key])
return
# Default Base url
self.host = "https://api.cloudmersive.com"
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# function to refresh API key if expired
self.refresh_api_key_hook = None
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("cloudmersive_barcode_api_client")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API
# from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
# Set this to True/False to enable/disable SSL hostname verification.
self.assert_hostname = None
# urllib3 connection pool's maximum number of connections saved
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@classmethod
def set_default(cls, default):
cls._default = default
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
'Apikey':
{
'type': 'api_key',
'in': 'header',
'key': 'Apikey',
'value': self.get_api_key_with_prefix('Apikey')
},
}
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: v1\n"\
"SDK Package Version: 3.0.2".\
format(env=sys.platform, pyversion=sys.version)
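if __name__ == "__main__":
    # Minimal usage sketch: configure an API key and inspect the derived settings.
    # 'YOUR_API_KEY' is a placeholder, not a real credential.
    config = Configuration()
    config.api_key['Apikey'] = 'YOUR_API_KEY'
    print(config.auth_settings())
    print(config.to_debug_report())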
|
from ...utils import DataDump, logger
from ..node import Node
class GenerationNode(Node):
def __init__(self, inputs, trigger_time, idx, dag, parent=None):
"""
Node for data generation jobs
Parameters:
-----------
inputs: bilby_pipe.main.MainInput
The user-defined inputs
trigger_time: float
The trigger time to use in generating analysis data
idx: int
The index of the data-generation job, used to label data products
dag: bilby_pipe.dag.Dag
The dag structure
parent: bilby_pipe.job_creation.node.Node (optional)
Any job to set as the parent to this job - used to enforce
dependencies
"""
super().__init__(inputs)
self.inputs = inputs
self.trigger_time = trigger_time
self.idx = idx
self.dag = dag
self.request_cpus = 1
self.setup_arguments()
self.arguments.add("label", self.label)
self.arguments.add("idx", self.idx)
self.arguments.add("trigger-time", self.trigger_time)
if self.inputs.injection_file is not None:
self.arguments.add("injection-file", self.inputs.injection_file)
if self.inputs.timeslide_file is not None:
self.arguments.add("timeslide-file", self.inputs.timeslide_file)
self.process_node()
if parent:
self.job.add_parent(parent.job)
@property
def executable(self):
return self._get_executable_path("bilby_pipe_generation")
@property
def request_memory(self):
return self.inputs.request_memory_generation
@property
def log_directory(self):
return self.inputs.data_generation_log_directory
@property
def universe(self):
if self.inputs.local_generation:
logger.debug(
"Data generation done locally: please do not use this when "
"submitting a large number of jobs"
)
universe = "local"
else:
logger.debug(f"All data will be grabbed in the {self._universe} universe")
universe = self._universe
return universe
@property
def job_name(self):
job_name = "{}_data{}_{}_generation".format(
self.inputs.label, str(self.idx), self.trigger_time
)
job_name = job_name.replace(".", "-")
return job_name
@property
def label(self):
return self.job_name
@property
def data_dump_file(self):
return DataDump.get_filename(self.inputs.data_directory, self.label)
|
from array import array
from random import randint
import sys
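# Note: the @profile decorator below is not defined in this script; it is injected at
# runtime by a profiling tool (e.g. line_profiler's kernprof -l, or memory_profiler).
# Running the script directly without such a tool would raise a NameError.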
@profile
def create_data():
return array('i', [randint(1, 10000000) for i in range(100000)])
def proc():
cnt = 0
data = create_data()
for i in range(100000):
if randint(1, 10000000) in data:
cnt += 1
if __name__ == '__main__':
print(sys.argv[0])
# print(sys.version_info)
# import timeit
# print(timeit.timeit("proc()", setup="from __main__ import proc", number=3))
# [proc() for i in range(3)]
create_data()
|
# Copyright (c) 2003-2015 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
unit tests for the visitors.diadefs module
"""
import os
import unittest
import astroid
from astroid import nodes
from astroid import bases
from astroid import manager
from astroid import test_utils
from pylint.pyreverse import inspector
from unittest_pyreverse_writer import get_project
MANAGER = manager.AstroidManager()
def astroid_wrapper(func, modname):
return func(modname)
class LinkerTest(unittest.TestCase):
def setUp(self):
super(LinkerTest, self).setUp()
self.project = get_project('data', 'data')
self.linker = inspector.Linker(self.project)
self.linker.visit(self.project)
def test_class_implements(self):
klass = self.project.get_module('data.clientmodule_test')['Ancestor']
self.assertTrue(hasattr(klass, 'implements'))
self.assertEqual(len(klass.implements), 1)
self.assertTrue(isinstance(klass.implements[0], nodes.ClassDef))
self.assertEqual(klass.implements[0].name, "Interface")
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'implements'))
self.assertEqual(len(klass.implements), 0)
def test_locals_assignment_resolution(self):
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'locals_type'))
type_dict = klass.locals_type
self.assertEqual(len(type_dict), 2)
keys = sorted(type_dict.keys())
self.assertEqual(keys, ['TYPE', 'top'])
self.assertEqual(len(type_dict['TYPE']), 1)
self.assertEqual(type_dict['TYPE'][0].value, 'final class')
self.assertEqual(len(type_dict['top']), 1)
self.assertEqual(type_dict['top'][0].value, 'class')
def test_instance_attrs_resolution(self):
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'instance_attrs_type'))
type_dict = klass.instance_attrs_type
self.assertEqual(len(type_dict), 2)
keys = sorted(type_dict.keys())
self.assertEqual(keys, ['_id', 'relation'])
self.assertTrue(isinstance(type_dict['relation'][0], bases.Instance),
type_dict['relation'])
self.assertEqual(type_dict['relation'][0].name, 'DoNothing')
self.assertIs(type_dict['_id'][0], astroid.YES)
def test_concat_interfaces(self):
cls = test_utils.extract_node('''
class IMachin: pass
class Correct2:
"""docstring"""
__implements__ = (IMachin,)
class BadArgument:
"""docstring"""
__implements__ = (IMachin,)
class InterfaceCanNowBeFound: #@
"""docstring"""
__implements__ = BadArgument.__implements__ + Correct2.__implements__
''')
interfaces = inspector.interfaces(cls)
self.assertEqual([i.name for i in interfaces], ['IMachin'])
def test_interfaces(self):
module = astroid.parse('''
class Interface(object): pass
class MyIFace(Interface): pass
class AnotherIFace(Interface): pass
class Concrete0(object):
__implements__ = MyIFace
class Concrete1:
__implements__ = (MyIFace, AnotherIFace)
class Concrete2:
__implements__ = (MyIFace, AnotherIFace)
class Concrete23(Concrete1): pass
''')
for klass, interfaces in (('Concrete0', ['MyIFace']),
('Concrete1', ['MyIFace', 'AnotherIFace']),
('Concrete2', ['MyIFace', 'AnotherIFace']),
('Concrete23', ['MyIFace', 'AnotherIFace'])):
klass = module[klass]
self.assertEqual([i.name for i in inspector.interfaces(klass)],
interfaces)
def test_from_directory(self):
expected = os.path.join('pylint', 'test', 'data', '__init__.py')
self.assertEqual(self.project.name, 'data')
self.assertTrue(self.project.path.endswith(expected), self.project.path)
def test_project_node(self):
expected = [
'data', 'data.clientmodule_test',
'data.suppliermodule_test',
]
self.assertListEqual(sorted(self.project.keys()), expected)
if __name__ == '__main__':
unittest.main()
|
## @file
# This file is used to define common string related functions used in parsing
# process
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
String
'''
##
# Import Modules
#
import re
import os.path
from string import strip
import Logger.Log as Logger
import Library.DataType as DataType
from Logger.ToolError import FORMAT_INVALID
from Logger.ToolError import PARSER_ERROR
from Logger import StringTable as ST
#
# Regular expression for matching macro used in DSC/DEC/INF file inclusion
#
gMACRO_PATTERN = re.compile("\$\(([_A-Z][_A-Z0-9]*)\)", re.UNICODE)
## GetSplitValueList
#
# Get a value list from a string with multiple values split by SplitTag
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String: The input string to be split
# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit=-1):
return map(lambda l: l.strip(), String.split(SplitTag, MaxSplit))
## MergeArches
#
# Find all existing arches for a key in the dict and append the new arch to the list
# If the key does not exist yet, set the arch directly
#
# @param Dict: The input value for Dict
# @param Key: The input value for Key
# @param Arch: The Arch to be added or merged
#
def MergeArches(Dict, Key, Arch):
if Key in Dict.keys():
Dict[Key].append(Arch)
else:
Dict[Key] = Arch.split()
## GenDefines
#
# Parse a string with format "DEFINE <VarName> = <PATH>"
# Generate a map entry Defines[(VarName, Arch)] = PATH
# Return 0 on success, -1 if the format is invalid, 1 if no DEFINE statement is found
#
# @param String: String with DEFINE statement
# @param Arch: Supported Arch
# @param Defines: DEFINE statement to be parsed
#
def GenDefines(String, Arch, Defines):
if String.find(DataType.TAB_DEFINE + ' ') > -1:
List = String.replace(DataType.TAB_DEFINE + ' ', '').\
split(DataType.TAB_EQUAL_SPLIT)
if len(List) == 2:
Defines[(CleanString(List[0]), Arch)] = CleanString(List[1])
return 0
else:
return -1
return 1
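# A minimal usage sketch (assuming DataType.TAB_DEFINE == 'DEFINE' and
# DataType.TAB_EQUAL_SPLIT == '='; the variable name and path are illustrative):
#   Defines = {}
#   GenDefines('DEFINE MY_PATH = /tmp/pkg', 'COMMON', Defines)   # returns 0
#   # Defines is now {('MY_PATH', 'COMMON'): '/tmp/pkg'}
#   GenDefines('DEFINE MY_PATH /tmp/pkg', 'COMMON', Defines)     # returns -1 (missing '=')
#   GenDefines('INCLUDE Some/Other.dec', 'COMMON', Defines)      # returns 1 (no DEFINE statement)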
## GetLibraryClassesWithModuleType
#
# Get Library Class definition when no module type defined
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
NewKey = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), NewKey[1]])
return True
## GetDynamics
#
# Get Dynamic Pcds
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
#
# Get SkuId Name List
#
SkuIdNameList = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), SkuIdNameList[1]])
return True
## SplitModuleType
#
# Split ModuleType out of the section definition to get the key
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [
# 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
#
def SplitModuleType(Key):
KeyList = Key.split(DataType.TAB_SPLIT)
#
# Fill in for arch
#
KeyList.append('')
#
# Fill in for moduletype
#
KeyList.append('')
ReturnValue = []
KeyValue = KeyList[0]
if KeyList[1] != '':
KeyValue = KeyValue + DataType.TAB_SPLIT + KeyList[1]
ReturnValue.append(KeyValue)
ReturnValue.append(GetSplitValueList(KeyList[2]))
return ReturnValue
## Replace macro in string
#
# This method replace macros used in given string. The macros are given in a
# dictionary.
#
# @param String String to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
# @param SelfReplacement: Whether to replace undefined macros with ''
# @param Line: The content contain line string and line number
# @param FileName: The meta-file file name
#
def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line = None, FileName = None, Flag = False):
LastString = String
if MacroDefinitions == None:
MacroDefinitions = {}
while MacroDefinitions:
QuotedStringList = []
HaveQuotedMacroFlag = False
if not Flag:
MacroUsed = gMACRO_PATTERN.findall(String)
else:
ReQuotedString = re.compile('\"')
QuotedStringList = ReQuotedString.split(String)
if len(QuotedStringList) >= 3:
HaveQuotedMacroFlag = True
Count = 0
MacroString = ""
for QuotedStringItem in QuotedStringList:
Count += 1
if Count % 2 != 0:
MacroString += QuotedStringItem
if Count == len(QuotedStringList) and Count%2 == 0:
MacroString += QuotedStringItem
MacroUsed = gMACRO_PATTERN.findall(MacroString)
#
# no macro found in String, stop replacing
#
if len(MacroUsed) == 0:
break
for Macro in MacroUsed:
if Macro not in MacroDefinitions:
if SelfReplacement:
String = String.replace("$(%s)" % Macro, '')
Logger.Debug(5, "Delete undefined MACROs in file %s line %d: %s!" %(FileName, Line[1], Line[0]))
continue
if not HaveQuotedMacroFlag:
String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
else:
Count = 0
for QuotedStringItem in QuotedStringList:
Count += 1
if Count % 2 != 0:
QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
elif Count == len(QuotedStringList) and Count%2 == 0:
QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
RetString = ''
if HaveQuotedMacroFlag:
Count = 0
for QuotedStringItem in QuotedStringList:
Count += 1
if Count != len(QuotedStringList):
RetString += QuotedStringList[Count-1] + "\""
else:
RetString += QuotedStringList[Count-1]
String = RetString
#
# in case there's macro not defined
#
if String == LastString:
break
LastString = String
return String
## NormPath
#
# Create a normalized path
# And replace DEFINE macros in the path
#
# @param Path: The input value for Path to be converted
# @param Defines: A set for DEFINE statement
#
def NormPath(Path, Defines = None):
IsRelativePath = False
if Defines == None:
Defines = {}
if Path:
if Path[0] == '.':
IsRelativePath = True
#
# Replace with Define
#
if Defines:
Path = ReplaceMacro(Path, Defines)
#
# To local path format
#
Path = os.path.normpath(Path)
if IsRelativePath and Path[0] != '.':
Path = os.path.join('.', Path)
return Path
## CleanString
#
# Remove comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
#
# remove comments, but we should escape comment character in string
#
InString = False
for Index in range(0, len(Line)):
if Line[Index] == '"':
InString = not InString
elif Line[Index] == CommentCharacter and not InString:
Line = Line[0: Index]
break
#
# remove whitespace again
#
Line = Line.strip()
return Line
## CleanString2
#
# Split comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
#
# separate comments and statements
#
LineParts = Line.split(CommentCharacter, 1)
#
# remove whitespace again
#
Line = LineParts[0].strip()
if len(LineParts) > 1:
Comment = LineParts[1].strip()
#
# Remove prefixed and trailing comment characters
#
Start = 0
End = len(Comment)
while Start < End and Comment.startswith(CommentCharacter, Start, End):
Start += 1
while End >= 0 and Comment.endswith(CommentCharacter, Start, End):
End -= 1
Comment = Comment[Start:End]
Comment = Comment.strip()
else:
Comment = ''
return Line, Comment
## GetMultipleValuesOfKeyFromLines
#
# Parse multiple strings to clean comment and spaces
# The result is saved to KeyValues
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
if Key:
pass
if KeyValues:
pass
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.split('\n')
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues += [Line]
return True
## GetDefineValue
#
# Parse a DEFINE statement to get defined value
# DEFINE Key Value
#
# @param String: The content to be parsed
# @param Key: The key of DEFINE statement
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDefineValue(String, Key, CommentCharacter):
if CommentCharacter:
pass
String = CleanString(String)
return String[String.find(Key + ' ') + len(Key + ' ') : ]
## GetSingleValueOfKeyFromLines
#
# Parse multiple strings as below to get value of each definition line
# Key1 = Value1
# Key2 = Value2
# The result is saved to Dictionary
#
# @param Lines: The content to be parsed
# @param Dictionary: To store data after parsing
# @param CommentCharacter: Comment char, be used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value.
# Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, be used to decide if has
# multiple values
# @param ValueSplitCharacter: Value split char, be used to split multiple
# values. Key1 = Value1|Value2, '|' is the value
# split char
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, \
ValueSplitFlag, ValueSplitCharacter):
Lines = Lines.split('\n')
Keys = []
Value = ''
DefineValues = ['']
SpecValues = ['']
for Line in Lines:
#
# Handle DEFINE and SPEC
#
if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
if '' in DefineValues:
DefineValues.remove('')
DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
continue
if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
if '' in SpecValues:
SpecValues.remove('')
SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
continue
#
# Handle Others
#
LineList = Line.split(KeySplitCharacter, 1)
if len(LineList) >= 2:
Key = LineList[0].split()
if len(Key) == 1 and Key[0][0] != CommentCharacter:
#
# Remove comments and white spaces
#
LineList[1] = CleanString(LineList[1], CommentCharacter)
if ValueSplitFlag:
Value = map(strip, LineList[1].split(ValueSplitCharacter))
else:
Value = CleanString(LineList[1], CommentCharacter).splitlines()
if Key[0] in Dictionary:
if Key[0] not in Keys:
Dictionary[Key[0]] = Value
Keys.append(Key[0])
else:
Dictionary[Key[0]].extend(Value)
else:
Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]
if DefineValues == []:
DefineValues = ['']
if SpecValues == []:
SpecValues = ['']
Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues
return True
## PreCheck
#
# Do pre-check for a file before it is parsed
# Check $()
# Check []
#
# @param FileName: Used for error report
# @param FileContent: File content to be parsed
# @param SupSectionTag: Used for error report
#
def PreCheck(FileName, FileContent, SupSectionTag):
if SupSectionTag:
pass
LineNo = 0
IsFailed = False
NewFileContent = ''
for Line in FileContent.splitlines():
LineNo = LineNo + 1
#
# Clean current line
#
Line = CleanString(Line)
#
# Remove commented line
#
if Line.find(DataType.TAB_COMMA_SPLIT) == 0:
Line = ''
#
# Check $()
#
if Line.find('$') > -1:
if Line.find('$(') < 0 or Line.find(')') < 0:
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
#
# Check []
#
if Line.find('[') > -1 or Line.find(']') > -1:
#
# Only get one '[' or one ']'
#
if not (Line.find('[') > -1 and Line.find(']') > -1):
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
#
# Regenerate FileContent
#
NewFileContent = NewFileContent + Line + '\r\n'
if IsFailed:
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
return NewFileContent
## CheckFileType
#
# Check if the Filename has the extension ExtName
# Return True if it does
# Raise an error message if it does not
#
# @param CheckFilename: Name of the file to be checked
# @param ExtName: Ext name of the file to be checked
# @param ContainerFilename: The container file which describes the file to be
# checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file
# to be checked
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo=-1):
if CheckFilename != '' and CheckFilename != None:
(Root, Ext) = os.path.splitext(CheckFilename)
if Ext.upper() != ExtName.upper() and Root:
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = ST.ERR_SECTIONNAME_INVALID % (SectionName, CheckFilename, ExtName)
Logger.Error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo, \
File=ContainerFilename, RaiseError=Logger.IS_RAISE_ERROR)
return True
## CheckFileExist
#
# Check if the file exists
# Return the full file path if it exists
# Raise an error message if it does not exist
#
# @param CheckFilename: Name of the file to be checked
# @param WorkspaceDir: Current workspace dir
# @param ContainerFilename: The container file which describes the file to
# be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the
# file to be checked
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo=-1):
CheckFile = ''
if CheckFilename != '' and CheckFilename != None:
CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
if not os.path.isfile(CheckFile):
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = ST.ERR_CHECKFILE_NOTFOUND % (CheckFile, SectionName)
Logger.Error("Parser", PARSER_ERROR, ErrorMsg,
File=ContainerFilename, Line = LineNo, RaiseError=Logger.IS_RAISE_ERROR)
return CheckFile
## GetLineNo
#
# Find the index of a line in a file
#
# @param FileContent: Search scope
# @param Line: Search key
#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
LineList = FileContent.splitlines()
for Index in range(len(LineList)):
if LineList[Index].find(Line) > -1:
#
# Ignore statement in comment
#
if IsIgnoreComment:
if LineList[Index].strip()[0] == DataType.TAB_COMMENT_SPLIT:
continue
return Index + 1
return -1
## RaiseParserError
#
# Raise a parser error
#
# @param Line: String which has error
# @param Section: Used for error report
# @param File: File which has the string
# @param Format: Correct format
#
def RaiseParserError(Line, Section, File, Format='', LineNo=-1):
if LineNo == -1:
LineNo = GetLineNo(open(os.path.normpath(File), 'r').read(), Line)
ErrorMsg = ST.ERR_INVALID_NOTFOUND % (Line, Section)
if Format != '':
Format = "Correct format is " + Format
Logger.Error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, \
ExtraData=Format, RaiseError=Logger.IS_RAISE_ERROR)
## WorkspaceFile
#
# Return a full path with workspace dir
#
# @param WorkspaceDir: Workspace dir
# @param Filename: Relative file name
#
def WorkspaceFile(WorkspaceDir, Filename):
return os.path.join(NormPath(WorkspaceDir), NormPath(Filename))
## Split string
#
# Remove a '"' that the string starts and/or ends with
#
# @param String: The string to be stripped of quotes
#
def SplitString(String):
if String.startswith('\"'):
String = String[1:]
if String.endswith('\"'):
String = String[:-1]
return String
## Convert To Sql String
#
# Replace "'" with "''" in each item of StringList
#
# @param StringList: A list for strings to be converted
#
def ConvertToSqlString(StringList):
return map(lambda s: s.replace("'", "''") , StringList)
## Convert To Sql String
#
# Replace "'" with "''" in the String
#
# @param String: A String to be converted
#
def ConvertToSqlString2(String):
return String.replace("'", "''")
## GetStringOfList
#
# Get String of a List
#
# @param List: string list
# @param Split: split character
#
def GetStringOfList(List, Split = ' '):
if type(List) != type([]):
return List
Str = ''
for Item in List:
Str = Str + Item + Split
return Str.strip()
## Get HelpTextList
#
# Get HelpTextList from HelpTextClassList
#
# @param HelpTextClassList: Help Text Class List
#
def GetHelpTextList(HelpTextClassList):
List = []
if HelpTextClassList:
for HelpText in HelpTextClassList:
if HelpText.String.endswith('\n'):
HelpText.String = HelpText.String[0: len(HelpText.String) - len('\n')]
List.extend(HelpText.String.split('\n'))
return List
## Get String Array Length
#
# Get String Array Length
#
# @param String: the source string
#
def StringArrayLength(String):
if isinstance(String, unicode):
return (len(String) + 1) * 2 + 1
elif String.startswith('L"'):
return (len(String) - 3 + 1) * 2
elif String.startswith('"'):
return (len(String) - 2 + 1)
else:
return len(String.split()) + 1
## RemoveDupOption
#
# Remove Dup Option
#
# @param OptionString: the option string
# @param Which: Which flag
# @param Against: Against flag
#
def RemoveDupOption(OptionString, Which="/I", Against=None):
OptionList = OptionString.split()
ValueList = []
if Against:
ValueList += Against
for Index in range(len(OptionList)):
Opt = OptionList[Index]
if not Opt.startswith(Which):
continue
if len(Opt) > len(Which):
Val = Opt[len(Which):]
else:
Val = ""
if Val in ValueList:
OptionList[Index] = ""
else:
ValueList.append(Val)
return " ".join(OptionList)
## Check if the string is HexDigit
#
# Return true if all characters in the string are digits and there is at
# least one character, or if the string is a valid hex number (starting with
# 0x, followed by hex digits); false otherwise.
# @param string: input string
#
def IsHexDigit(Str):
try:
int(Str, 10)
return True
except ValueError:
if len(Str) > 2 and Str.upper().startswith('0X'):
try:
int(Str, 16)
return True
except ValueError:
return False
return False
## Check if the string is HexDigit and its integer value is within the limit of UINT32
#
# Return true if all characters in the string are digits and there is at
# least one character, or if the string is a valid hex number (starting with
# 0x, followed by hex digits), and the value fits in a UINT32; false otherwise.
# @param string: input string
#
def IsHexDigitUINT32(Str):
try:
Value = int(Str, 10)
if (Value <= 0xFFFFFFFF) and (Value >= 0):
return True
except ValueError:
if len(Str) > 2 and Str.upper().startswith('0X'):
try:
Value = int(Str, 16)
if (Value <= 0xFFFFFFFF) and (Value >= 0):
return True
except ValueError:
return False
return False
## CleanSpecialChar
#
# The ASCII text files of type INF, DEC, INI are edited by developers,
# and may contain characters that cannot be directly translated to strings that
# are conformant with the UDP XML Schema. Any characters in this category
# (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x80-0xFF)
# must be converted to a space character[0x20] as part of the parsing process.
#
def ConvertSpecialChar(Lines):
    # Compile the pattern once instead of once per line
    ReMatchSpecialChar = re.compile(r"[\x00-\x08]|\x09|\x0b|\x0c|[\x0e-\x1f]|[\x7f-\xff]")
    RetLines = []
    for line in Lines:
        RetLines.append(ReMatchSpecialChar.sub(' ', line))
    return RetLines
## __GetTokenList
#
# Assume Str is a valid feature flag expression.
# Return a list of tokens: alphanumeric tokens and other (operator) tokens
# Whitespace is not stripped
#
def __GetTokenList(Str):
InQuote = False
Token = ''
TokenOP = ''
PreChar = ''
List = []
for Char in Str:
if InQuote:
Token += Char
if Char == '"' and PreChar != '\\':
InQuote = not InQuote
List.append(Token)
Token = ''
continue
if Char == '"':
if Token and Token != 'L':
List.append(Token)
Token = ''
if TokenOP:
List.append(TokenOP)
TokenOP = ''
InQuote = not InQuote
Token += Char
continue
if not (Char.isalnum() or Char in '_'):
TokenOP += Char
if Token:
List.append(Token)
Token = ''
else:
Token += Char
if TokenOP:
List.append(TokenOP)
TokenOP = ''
if PreChar == '\\' and Char == '\\':
PreChar = ''
else:
PreChar = Char
if Token:
List.append(Token)
if TokenOP:
List.append(TokenOP)
return List
## ConvertNEToNOTEQ
#
# Convert NE operator to NOT EQ
# For example: 1 NE 2 -> 1 NOT EQ 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNEToNOTEQ(Expr):
List = __GetTokenList(Expr)
for Index in range(len(List)):
if List[Index] == 'NE':
List[Index] = 'NOT EQ'
return ''.join(List)
## ConvertNOTEQToNE
#
# Convert NOT EQ operator to NE
# For example: 1 NOT EQ 2 -> 1 NE 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNOTEQToNE(Expr):
List = __GetTokenList(Expr)
HasNOT = False
RetList = []
for Token in List:
if HasNOT and Token == 'EQ':
# At least, 'NOT' is in the list
while not RetList[-1].strip():
RetList.pop()
RetList[-1] = 'NE'
HasNOT = False
continue
if Token == 'NOT':
HasNOT = True
elif Token.strip():
HasNOT = False
RetList.append(Token)
return ''.join(RetList)
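# A minimal usage sketch for the two converters above (the macro name is illustrative):
#   ConvertNEToNOTEQ('$(FOO) NE 1')       # -> '$(FOO) NOT EQ 1'
#   ConvertNOTEQToNE('$(FOO) NOT EQ 1')   # -> '$(FOO) NE 1'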
## SplitPcdEntry
#
# Split a PCD entry string into Token.CName, PCD value, and FFE.
# NOTE: The PCD value and FFE can contain "|" in their expressions. The INF specification gives the rule below:
# When using the characters "|" or "||" in an expression, the expression must be encapsulated in
# open "(" and close ")" parenthesis.
#
# @param String A PCD entry string to be split.
#
# @return List [PcdTokenCName, Value, FFE]
#
def SplitPcdEntry(String):
if not String:
return ['', '',''], False
PcdTokenCName = ''
PcdValue = ''
PcdFeatureFlagExp = ''
ValueList = GetSplitValueList(String, "|", 1)
#
# Only contain TokenCName
#
if len(ValueList) == 1:
return [ValueList[0]], True
NewValueList = []
if len(ValueList) == 2:
PcdTokenCName = ValueList[0]
ValueList = GetSplitValueList(ValueList[1], "|")
RemainCount = 0
for Item in ValueList:
ParenthesisCount = 0
for Char in Item:
if Char == "(":
ParenthesisCount += 1
if Char == ")":
ParenthesisCount -= 1
#
# An individual item
#
if RemainCount == 0 and ParenthesisCount >= 0:
NewValueList.append(Item)
RemainCount = ParenthesisCount
elif RemainCount > 0 and RemainCount + ParenthesisCount >= 0:
NewValueList[-1] = NewValueList[-1] + '|' + Item
RemainCount = RemainCount + ParenthesisCount
elif RemainCount > 0 and RemainCount + ParenthesisCount < 0:
#
# ERROR, return
#
return ['', '', ''], False
if len(NewValueList) == 1:
PcdValue = NewValueList[0]
return [PcdTokenCName, PcdValue], True
elif len(NewValueList) == 2:
PcdValue = NewValueList[0]
PcdFeatureFlagExp = NewValueList[1]
return [PcdTokenCName, PcdValue, PcdFeatureFlagExp], True
else:
return ['', '', ''], False
return ['', '', ''], False
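# A minimal usage sketch (the PCD names and values are hypothetical):
#   SplitPcdEntry('TokenSpace.PcdExample')               # -> (['TokenSpace.PcdExample'], True)
#   SplitPcdEntry('TokenSpace.PcdExample|0x10')          # -> (['TokenSpace.PcdExample', '0x10'], True)
#   SplitPcdEntry('TokenSpace.PcdExample|(1|2)|TRUE')    # -> (['TokenSpace.PcdExample', '(1|2)', 'TRUE'], True)
# The parenthesized value in the last call keeps its inner '|', per the rule noted above.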
## Check if two arches match
#
# @param Arch1
# @param Arch2
#
def IsMatchArch(Arch1, Arch2):
if 'COMMON' in Arch1 or 'COMMON' in Arch2:
return True
if isinstance(Arch1, basestring) and isinstance(Arch2, basestring):
if Arch1 == Arch2:
return True
if isinstance(Arch1, basestring) and isinstance(Arch2, list):
return Arch1 in Arch2
if isinstance(Arch2, basestring) and isinstance(Arch1, list):
return Arch2 in Arch1
if isinstance(Arch1, list) and isinstance(Arch2, list):
for Item1 in Arch1:
for Item2 in Arch2:
if Item1 == Item2:
return True
return False
|
from msdsl.expr.expr import ModelExpr
from msdsl.expr.signals import Signal, DigitalSignal, AnalogSignal
from msdsl.expr.format import RealFormat
from msdsl.expr.table import Table
class Assignment:
def __init__(self, signal: Signal, expr: ModelExpr, check_format=True):
self.signal = signal
self.expr = expr
self.check_format = check_format
class BindingAssignment(Assignment):
pass
class ThisCycleAssignment(Assignment):
pass
class NextCycleAssignment(Assignment):
def __init__(self, *args, clk=None, rst=None, ce=None, **kwargs):
self.clk = clk
self.rst = rst
self.ce = ce
super().__init__(*args, **kwargs)
class SyncRomAssignment(Assignment):
def __init__(self, signal: Signal, table: Table, addr: ModelExpr,
clk=None, ce=None, should_bind=False):
self.table = table
self.clk = clk
self.ce = ce
self.should_bind = should_bind
super().__init__(signal=signal, expr=addr)
class SyncRamAssignment(Assignment):
def __init__(self, signal: AnalogSignal, format_: RealFormat, addr: ModelExpr,
clk: Signal=None, ce: Signal=None, we: Signal=None,
din: Signal=None, should_bind=False):
self.format_ = format_
self.clk = clk
self.ce = ce
self.we = we
self.din = din
self.should_bind = should_bind
super().__init__(signal=signal, expr=addr)
|
import warnings
import numpy as np
import cmath as math
import scipy as scp
import scipy.optimize as opt
from types import FunctionType
def parse_multidatasets(V,K,weights,precondition=False):
#===============================================================================
# Identify if the signals have already been processed by this function
if type(V) is not list:
if V.size == np.atleast_1d(weights).size:
# If so, just return without doing anything
if precondition:
return V,K,weights,[np.arange(0,len(V))],[1]
else:
return V,K,weights,[np.arange(0,len(V))]
# If multiple signals are specified as a list...
if type(V) is list and all([type(Vs) is np.ndarray for Vs in V]):
nSignals = len(V)
prescales = np.zeros(nSignals)
Vlist = []
# Pre-scale the signals, important for fitregmodel when using global fits with arbitrary scales
for i in range(nSignals):
if precondition:
prescales[i] = max(V[i])
Vlist.append(V[i]/prescales[i])
else:
Vlist.append(V[i])
V = np.concatenate(Vlist, axis=0) # ...concatenate them along the list
elif type(V) is np.ndarray:
nSignals = 1
prescales = [1]
Vlist = [V]
else:
raise TypeError('The input signal(s) must be numpy array or a list of numpy arrays.')
def prepareKernel(K,nSignals):
# If multiple kernels are specified as a list...
if type(K) is tuple:
K = [Ks for Ks in K]
if type(K) is list and all([type(Ks) is np.ndarray for Ks in K]):
nKernels = len(K)
K = np.concatenate(K, axis=0) # ...concatenate them along the list
elif type(K) is np.ndarray:
nKernels = 1
else:
raise TypeError('The input kernel(s) must be numpy array or a list of numpy arrays.')
# Check that the same number of signals and kernels have been passed
if nSignals!=nKernels:
raise KeyError('The same number of kernels and signals must be specified as lists.')
return K
if type(K) is FunctionType:
Kmulti = lambda p: prepareKernel(K(p),nSignals)
else:
Kmulti = prepareKernel(K,nSignals)
# If multiple weights are specified as a list...
if type(weights) is list or not hasattr(weights, "__len__"):
weights = np.atleast_1d(weights)
if len(weights)==1:
weights = np.repeat(weights,nSignals)
weights = weights/sum(weights)
if len(weights)!=nSignals:
raise KeyError('If multiple signals are passed, the same number of weights are required.')
weights_ = []
for i in range(len(weights)):
weights_ = np.concatenate((weights_,weights[i]*np.ones(len(Vlist[i]))))
weights = weights_
else:
raise TypeError('The input weights(s) must be numpy array or a list of numpy arrays.')
# Get the indices to extract the subsets again
Ns = [len(V) for V in Vlist]
subset = [None]*nSignals
for i in range(nSignals):
if i==0:
prev = 0
else:
prev = subset[i-1][-1]+1
subset[i] = np.arange(prev,prev+Ns[i])
if precondition:
return V,Kmulti,weights,subset,prescales
else:
return V,Kmulti,weights,subset
#===============================================================================
def hccm(J,*args):
"""
Heteroscedasticity Consistent Covariance Matrix (HCCM)
======================================================
Computes the heteroscedasticity consistent covariance matrix (HCCM) of
an LSQ problem defined by the Jacobian matrix (J) and the covariance
matrix of the data (V). If the residual (res) is specified, the
covariance matrix is estimated using one of the methods specified in
(mode). The HCCM is valid for both heteroscedastic and
homoscedastic residual vectors.
Usage:
------
C = hccm(J,V)
C = hccm(J,res,mode)
Arguments:
----------
J (NxM-element array)
Jacobian matrix of the residual vector
res (N-element array)
Vector of residuals
mode (string)
HCCM estimator, options are:
'HC0' - White, H. (1980)
'HC1' - MacKinnon and White, (1985)
'HC2' - MacKinnon and White, (1985)
'HC3' - Davidson and MacKinnon, (1993)
'HC4' - Cribari-Neto, (2004)
'HC5' - Cribari-Neto, (2007)
Returns:
--------
C (MxM-element array)
Heteroscedasticity consistent covariance matrix
References:
------------
[1]
White, H. (1980). A heteroskedasticity-consistent covariance matrix
estimator and a direct test for heteroskedasticity. Econometrica, 48(4), 817-838
DOI: 10.2307/1912934
[2]
MacKinnon and White, (1985). Some heteroskedasticity-consistent covariance
matrix estimators with improved finite sample properties. Journal of Econometrics, 29 (1985),
pp. 305-325. DOI: 10.1016/0304-4076(85)90158-7
[3]
Davidson and MacKinnon, (1993). Estimation and Inference in Econometrics
Oxford University Press, New York.
[4]
Cribari-Neto, F. (2004). Asymptotic inference under heteroskedasticity of
unknown form. Computational Statistics & Data Analysis, 45(1), 215-233
DOI: 10.1016/s0167-9473(02)00366-3
[5]
Cribari-Neto, F., Souza, T. C., & Vasconcellos, K. L. P. (2007). Inference
under heteroskedasticity and leveraged data. Communications in Statistics –
Theory and Methods, 36(10), 1877-1888. DOI: 10.1080/03610920601126589
"""
# Unpack inputs
if len(args)==2:
res,mode = args
V = []
elif len(args)==1:
V = args[0]
# Hat matrix
H = J@np.linalg.pinv(J.T@J)@J.T
# Get leverage
h = np.diag(H)
# Number of parameters (k) & Number of variables (n)
n,k = np.shape(J)
if isempty(V):
# Select estimation method using established nomenclature
if mode.upper() == 'HC0': # White,(1980),[1]
# Estimate the data covariance matrix
V = np.diag(res**2)
elif mode.upper() == 'HC1': # MacKinnon and White,(1985),[2]
# Estimate the data covariance matrix
V = n/(n-k)*np.diag(res**2)
elif mode.upper() == 'HC2': # MacKinnon and White,(1985),[2]
# Estimate the data covariance matrix
V = np.diag(res**2/(1-h))
elif mode.upper() == 'HC3': # Davidson and MacKinnon,(1993),[3]
# Estimate the data covariance matrix
V = np.diag(res/(1-h))**2
elif mode.upper() == 'HC4': # Cribari-Neto,(2004),[4]
# Compute discount factor
delta = np.minimum(4,n*h/k)
# Estimate the data covariance matrix
V = np.diag(res**2./((1 - h)**delta))
elif mode.upper() == 'HC5': # Cribari-Neto,(2007),[5]
# Compute inflation factor
k = 0.7
alpha = np.minimum(np.maximum(4,k*max(h)/np.mean(h)),h/np.mean(h))
# Estimate the data covariance matrix
V = np.diag(res**2./(np.sqrt((1 - h)**alpha)))
else:
raise KeyError('HCCM estimation mode not found.')
# Heteroscedasticity Consistent Covariance Matrix (HCCM) estimator
C = np.linalg.pinv(J.T@J)@J.T@V@J@np.linalg.pinv(J.T@J)
return C
#===============================================================================
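# A minimal usage sketch for hccm() (all names are hypothetical placeholders: `model` maps
# parameters to the modelled signal, `y` is the measured data, and `pfit`, `lb`, `ub` are the
# fitted parameters and their box constraints):
#   J   = Jacobian(model, pfit, lb, ub)   # NxM Jacobian of the model at the fitted parameters
#   res = y - model(pfit)                 # N-element residual vector
#   C   = hccm(J, res, 'HC1')             # MxM parameter covariance matrix estimate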
# =================================================================
def metadata(**kwargs):
"""
Decorator: Set model metadata as function attributes
"""
attributes = list(kwargs.keys())
metadata = list(kwargs.values())
def _setmetadata(func):
for attribute,data in zip(attributes,metadata):
setattr(func,attribute,data)
return func
return _setmetadata
# =================================================================
def gsvd(A,B):
#===============================================================================
m,p = A.shape
n = B.shape[0]
# Economy-sized.
useQA = m > p
useQB = n > p
if useQA:
QA,A = scp.linalg.qr(A)
A = A[0:p,0:p]
QA = QA[:,0:p]
m = p
if useQB:
QB,B = scp.linalg.qr(B)
B = B[0:p,0:p]
QB = QB[:,0:p]
n = p
Q,_ = np.linalg.qr(np.vstack((A,B)), mode='reduced')
Q1 = Q[0:m,0:p]
Q2 = Q[m:m+n,0:p]
C,S = csd(Q1,Q2)
# Vector of generalized singular values.
q = min(m+n,p)
# Suppress divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
U = np.vstack((np.zeros((q-m,1),'double'), np.diag(C,max(0,q-m)).reshape(len(np.diag(C,max(0,q-m))),1)))/np.vstack((np.diag(S,0).reshape(len(np.diag(S,0)),1), np.zeros((q-n,1),'double') ))
return U
#===============================================================================
def csd(Q1,Q2):
#===============================================================================
"""
Cosine-Sine Decomposition
-------------------------
Given Q1 and Q2 such that Q1'* Q1 + Q2'* Q2 = I, the
C-S Decomposition is a joint factorization of the form
Q1 = U1*C*V' and Q2=U2*S*V'
where U1,U2,V are orthogonal matrices and C and S are diagonal
matrices (not necessarily square) satisfying
C'* C + S'* S = I
The diagonal entries of C and S are nonnegative and the
diagonal elements of C are in nondecreasing order.
The matrix Q1 cannot have more columns than rows.
Based on the Octave code by Artiste (submitted by S.J.Leon):
http://www.ar-tiste.com/m-fun/m-fun-index.html
"""
m,n = Q1.shape
p,_ = Q2.shape
if m < p:
s,c = csd(Q2,Q1)
j = np.flip(np.arange(n))
c = c[:,j]
s = s[:,j]
m = np.minimum(m,p)
i = np.flip(np.arange(m))
c[np.arange(m),:] = c[i,:]
n = np.minimum(n,p)
i = np.flip(np.arange(n))
s[np.arange(n),:] = s[i,:]
return c,s
_,sdiag,v = np.linalg.svd(Q1)
c = np.zeros((m, n))
np.fill_diagonal(c, sdiag)
v = v.T.conj()
z = np.eye(n,n)
z = scp.linalg.hankel(z[:,n-1])
c[0:n,:] = z@c[0:n,:]@z
v = v@z
Q2 = Q2@v
k=0
for j in range(1,n):
if c[j,j] <= 1/np.sqrt(2): k=j
b = Q2[:,0:k]
u2,r = np.linalg.qr(b,mode='complete')
s = u2.T@Q2
t = np.minimum(p,n)
tt = np.minimum(m,p)
if k<t:
r2 = s[np.ix_(range(k,p),range(k,t))]
_,sdiag,vt = np.linalg.svd(r2)
ss= np.zeros(r2.shape)
np.fill_diagonal(ss, sdiag)
vt = vt.T.conj()
s[k:p,k:t] = ss
c[:,k:t] = c[:,k:t]@vt
w = c[k:tt,k:t]
z,r = np.linalg.qr(w,mode='complete')
c[k:tt,k:t] = r
for j in range(n):
if c[j,j]<0:
c[j,j] = -c[j,j]
for j in range(t):
if s[j,j]<0:
s[j,j] = -s[j,j]
return c,s
#===============================================================================
#===============================================================================
def diagf(X):
"""
Diagonal force
X = diagf(X) zeros all the elements off the main diagonal of X.
"""
X = np.triu(np.tril(X))
return X
#===============================================================================
#===============================================================================
def diagp(Y,X,k):
"""
DIAGP Diagonal positive.
Y,X = diagp(Y,X,k) scales the columns of Y and the rows of X by
unimodular factors to make the k-th diagonal of X real and positive.
"""
D = np.diag(X,k)
j = np.where((D.real < 0) | (D.imag != 0))
D = np.diag(np.conj(D[j])/abs(D[j]))
Y[:,j] = Y[:,j]@D.T
X[j,:] = D@X[j,:]
X = X+0 # use "+0" to set possible -0 elements to 0
return Y,X
#===============================================================================
#===============================================================================
def Jacobian(fcn, x0, lb, ub):
"""
Finite difference Jacobian estimation
Estimates the Jacobian matrix of a vector-valued function ``fcn`` at the
point ``x0`` taking into consideration box-constraints defined by the lower
and upper bounds ``lb`` and ``ub``.
This is a wrapper around the ``scipy.optimize._numdiff.approx_derivative`` function.
"""
J = opt._numdiff.approx_derivative(fcn,x0,method='2-point',bounds=(lb,ub))
J = np.atleast_2d(J)
return J
#===============================================================================
#===============================================================================
def movmean(x, N):
"""
Moving mean
===========
Returns an array of local N-point mean values, where each mean is calculated over a sliding window of length k across neighboring elements of x.
Usage:
------
xfilt = movmean(x,N)
Arguments:
----------
x (array)
Array to be filtered
N (scalar)
Window size
Returns:
--------
xfilt (array)
Filtered array
"""
xfilt = np.convolve(x, np.ones(N)/N, mode='same')
return xfilt
#===============================================================================
#===============================================================================
def ovl(A,B):
"""
Overlap metric
==============
Returns the overlap between two vectors A and B.
Usage:
------
metric = ovl(A,B)
Arguments:
----------
A (N-element array)
First vector
B (N-element array)
Second vector
Returns:
--------
metric (array)
Overlap metric
"""
A /= np.sum(A)
B /= np.sum(B)
metric = np.sum(np.minimum(A,B))
return metric
#===============================================================================
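# A minimal usage sketch for ovl() (hypothetical distributions; note that A and B are
# normalized in place by the function):
#   P = np.array([0.2, 0.5, 0.3])
#   Q = np.array([0.3, 0.4, 0.3])
#   ovl(P, Q)   # -> 0.9, i.e. the sum of the elementwise minima 0.2 + 0.4 + 0.3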
def isempty(A):
#===============================================================================
A = np.atleast_1d(A)
boolean = np.size(A)==0
return boolean
#===============================================================================
def multistarts(n,x0,lb,ub):
#===============================================================================
if n<0:
raise ValueError('The number of requested starting points must be n>0.')
if len(x0) != len(lb) or len(x0) != len(ub):
raise ValueError('The lower/upper bound size(s) are not compatible with the initial guess vector x0.')
# Generate n-1 new starting points within the bounds
if n>1:
x0 = np.linspace(lb,ub,n-1)
else:
x0 = [x0]
return x0
#===============================================================================
|
import contextlib
import errno
import socket
import unittest
import sys
from .. import support
HOST = "localhost"
HOSTv4 = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
OSError will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
with socket.socket(family, socktype) as tempsock:
port = bind_port(tempsock)
del tempsock
return port
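# A minimal usage sketch (hypothetical test code, following the guidance in the docstring above):
#
#   port = find_unused_port()
#   # hand `port` to an external program on its command line, e.g. an openssl s_server process
#
#   # for sockets created inside the test itself, prefer bind_port() defined below:
#   with socket.socket() as srv:
#       port = bind_port(srv)   # srv is now bound to an ephemeral port on HOST
#       srv.listen()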
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise support.TestFailed("tests should never set the "
"SO_REUSEADDR socket option on "
"TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise support.TestFailed("tests should never set the "
"SO_REUSEPORT socket option on "
"TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
def bind_unix_socket(sock, addr):
"""Bind a unix socket, raising SkipTest if PermissionError is raised."""
assert sock.family == socket.AF_UNIX
try:
sock.bind(addr)
except PermissionError:
sock.close()
raise unittest.SkipTest('cannot bind AF_UNIX sockets')
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
_bind_nix_socket_error = None
def skip_unless_bind_unix_socket(test):
"""Decorator for tests requiring a functional bind() for unix sockets."""
if not hasattr(socket, 'AF_UNIX'):
return unittest.skip('No UNIX Sockets')(test)
global _bind_nix_socket_error
if _bind_nix_socket_error is None:
from test.support import TESTFN, unlink
path = TESTFN + "can_bind_unix_socket"
with socket.socket(socket.AF_UNIX) as sock:
try:
sock.bind(path)
_bind_nix_socket_error = False
except OSError as e:
_bind_nix_socket_error = e
finally:
unlink(path)
if _bind_nix_socket_error:
msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error
return unittest.skip(msg)(test)
else:
return test
def get_socket_conn_refused_errs():
"""
Get the different socket error numbers ('errno') which can be received
when a connection is refused.
"""
errors = [errno.ECONNREFUSED]
if hasattr(errno, 'ENETUNREACH'):
# On Solaris, ENETUNREACH is returned sometimes instead of ECONNREFUSED
errors.append(errno.ENETUNREACH)
if hasattr(errno, 'EADDRNOTAVAIL'):
# bpo-31910: socket.create_connection() fails randomly
# with EADDRNOTAVAIL on Travis CI
errors.append(errno.EADDRNOTAVAIL)
if hasattr(errno, 'EHOSTUNREACH'):
# bpo-37583: The destination host cannot be reached
errors.append(errno.EHOSTUNREACH)
if not IPV6_ENABLED:
errors.append(errno.EAFNOSUPPORT)
return errors
_NOT_SET = object()
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=_NOT_SET, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
import nntplib
import urllib.error
if timeout is _NOT_SET:
timeout = support.INTERNET_TIMEOUT
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
# socket.create_connection() fails randomly with
# EADDRNOTAVAIL on Travis CI.
('EADDRNOTAVAIL', 99),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = support.ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
(isinstance(err, urllib.error.HTTPError) and
500 <= err.code <= 599) or
(isinstance(err, urllib.error.URLError) and
(("ConnectionRefusedError" in err.reason) or
("TimeoutError" in err.reason) or
("EOFError" in err.reason))) or
n in captured_errnos):
if not support.verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except nntplib.NNTPTemporaryError as err:
if support.verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
except OSError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], OSError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise OSError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], OSError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
|
# Generated by Django 2.0.4 on 2018-05-07 07:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20180507_0653'),
]
operations = [
migrations.AlterField(
model_name='post',
name='banner_photo',
field=models.ImageField(upload_to='static/media', verbose_name='Image'),
),
]
|
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get_all(isamAppliance, check_mode=False, force=False):
"""
Retrieving a list of all Web Service connections
"""
return isamAppliance.invoke_get("Retrieving a list of all Web Service connections",
"/mga/server_connections/ws/v1")
def get(isamAppliance, name=None, check_mode=False, force=False):
"""
Retrieving a Web Service connection
"""
ret_obj = _get_id(isamAppliance, name=name)
id = ret_obj['data']
if id == {}:
return isamAppliance.create_return_object()
else:
return isamAppliance.invoke_get("Retrieving a Web Service connection",
"/mga/server_connections/ws/{0}/v1".format(id))
def set(isamAppliance, name, connection, description='', locked=False, servers=None,
check_mode=False, force=False):
"""
Creating or Modifying a Web Service connection
"""
if _check_exists(isamAppliance, name=name) is False:
# Force the add - we already know connection does not exist
return add(isamAppliance, name, connection, description, locked, servers, check_mode, True)
else:
# Update request
return update(isamAppliance, connection, description, locked, servers, name, None,
check_mode, force)
def add(isamAppliance, name, connection, description='', locked=False, servers=None,
check_mode=False, force=False):
"""
Creating a Web Service connection
"""
# warnings = []
# if isamAppliance.facts["version"] < "9.0.2.1":
# warnings.append(
# "Appliance is at version: {0}. Enabled server connection type (ws) not supported unless at least 9.0.2.1. Ignoring value.".format(
# isamAppliance.facts["version"]))
# return isamAppliance.create_return_object(warnings=warnings)
if force is True or _check_exists(isamAppliance, name=name) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post(
"Creating a Web Service connection",
"/mga/server_connections/ws/v1",
_create_json(name=name, description=description, locked=locked, servers=servers,
connection=connection))
return isamAppliance.create_return_object()
def delete(isamAppliance, name=None, check_mode=False, force=False):
"""
Deleting a Web Service connection
"""
if force is True or _check_exists(isamAppliance, name=name) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
ret_obj = _get_id(isamAppliance, name=name)
id = ret_obj['data']
return isamAppliance.invoke_delete(
"Deleting a Web Service connection",
"/mga/server_connections/ws/{0}/v1".format(id))
return isamAppliance.create_return_object()
def update(isamAppliance, connection, description='', locked=False, servers=None, name=None,
new_name=None, check_mode=False, force=False):
"""
Modifying a Web Service connection
Use new_name to rename the connection; passwords cannot be compared, so the update will take place every time
"""
    # Look up the connection UUID from its name before issuing the update
    ret_obj = _get_id(isamAppliance, name=name)
    id = ret_obj['data']
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    else:
        json_data = _create_json(name=name, description=description, locked=locked, servers=servers,
                                 connection=connection)
        if new_name is not None:  # Rename condition
            json_data['name'] = new_name
        return isamAppliance.invoke_put(
            "Modifying a Web Service connection",
            "/mga/server_connections/ws/{0}/v1".format(id), json_data)
def _create_json(name, description, locked, servers, connection):
"""
Create a JSON to be used for the REST API call
"""
json = {
"connection": connection,
"type": "ws",
"name": name,
"description": description,
"locked": locked
}
# servers is optional
if servers is not None:
json['servers'] = servers
return json
def _get_id(isamAppliance, name):
"""
Retrieve UUID for named Web Service connection
"""
ret_obj = get_all(isamAppliance)
ret_obj_new = isamAppliance.create_return_object()
for obj in ret_obj['data']:
if obj['name'] == name:
ret_obj_new['data'] = obj['uuid']
return ret_obj_new
def _check_exists(isamAppliance, name=None, id=None):
"""
Check if Web Service connection already exists
"""
ret_obj = get_all(isamAppliance)
for obj in ret_obj['data']:
if (name is not None and obj['name'] == name) or (id is not None and obj['uuid'] == id):
return True
return False
def compare(isamAppliance1, isamAppliance2):
"""
Compare Web Service connections between two appliances
"""
ret_obj1 = get_all(isamAppliance1)
ret_obj2 = get_all(isamAppliance2)
for obj in ret_obj1['data']:
del obj['uuid']
for obj in ret_obj2['data']:
del obj['uuid']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid'])
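# A minimal usage sketch (hypothetical appliance handle; the keys inside `connection` are
# illustrative placeholders, not a documented schema):
#   ret = set(isamAppliance,
#             name='example-ws-conn',
#             connection={'url': 'https://ws.example.com', 'user': 'admin', 'password': 'secret'},
#             description='Example web service connection')
#   ret = delete(isamAppliance, name='example-ws-conn')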
|
import torch
from ..utils.stream import ItemFeature
from .base_infocpler import BaseInfoCpler
class OCRVQAInfoCpler(BaseInfoCpler):
def __init__(self, cfg):
super().__init__(cfg)
def complete_info(self, item_feature: ItemFeature):
tokens = self.tokenizer.tokenize(item_feature.question.strip())
tokens = self.tokenizer.get_limited_tokens(tokens, self.max_seq_length - 2)
tokens, input_lm_label_ids = self.tokenizer.random_mask_tokens(tokens, self.word_mask_ratio)
tokens = [self._CLS_TOKEN] + tokens + [self._SEP_TOEKN]
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(tokens)
input_segment = [0] * len(tokens)
input_lm_label_ids = [-1] * len(tokens)
# while len(input_ids) < self.max_seq_length:
# input_ids.append(int(self.pad_idx))
# input_mask.append(0)
# input_segment.append(0)
# input_lm_label_ids.append(-1)
to_extd_length = self.max_seq_length - len(input_ids)
self.info_extend(to_extd_length, (input_ids, int(self.pad_idx)), (input_mask, 0), (input_segment, 0),
(input_lm_label_ids, -1))
# ocr vectors
ocr_tokens = self.tokenizer.get_limited_tokens(item_feature.ocr_tokens, self.max_ocr_length)
item_feature.ocr_vectors_glove = self.get_tokens_glove_vectors(ocr_tokens)
item_feature.ocr_vectors_order = self.get_tokens_order_vectors(ocr_tokens)
item_feature.ocr_vectors_phoc = self.get_tokens_phoc_vectors(ocr_tokens)
item_feature.ocr_vectors_fasttext = self.get_tokens_fasttext_vectors(ocr_tokens)
# ocr features and bboxes
features_ocr = torch.zeros(
(self.max_ocr_length,
item_feature.features_ocr.shape[1] if item_feature.features_ocr is not None else 2048),
dtype=torch.float)
bbox_ocr_normalized = torch.zeros(
(self.max_ocr_length,
item_feature.ocr_normalized_boxes.shape[1] if item_feature.ocr_normalized_boxes is not None else 4),
dtype=torch.float)
if item_feature.features_ocr is not None:
limit = min(self.max_ocr_length, len(item_feature.features_ocr))
features_ocr[:limit] = torch.tensor(item_feature.features_ocr[:limit])
bbox_ocr_normalized[:limit] = torch.tensor(item_feature.ocr_normalized_boxes[:limit])
item_feature.features_ocr = features_ocr
item_feature.ocr_normalized_boxes = bbox_ocr_normalized
# features and bboxes
img_h = item_feature.image_height
img_w = item_feature.image_width
item_feature.bbox = self._get_bbox_from_normalized(item_feature.obj_normalized_boxes, img_h, img_w)
item_feature.bbox_normalized = item_feature.obj_normalized_boxes
item_feature.bbox_ocr = self._get_bbox_from_normalized(item_feature.ocr_normalized_boxes, img_h, img_w)
item_feature.bbox_ocr_normalized = item_feature.ocr_normalized_boxes
item_feature.input_ids = torch.tensor(input_ids, dtype=torch.long)
item_feature.input_mask = torch.tensor(input_mask, dtype=torch.int)
item_feature.input_segment = torch.tensor(input_segment, dtype=torch.int)
item_feature.input_lm_label_ids = torch.tensor(input_lm_label_ids, dtype=torch.long)
item_feature.qa_ids = [self.qa_ans2id[ans] for ans in item_feature.answers if ans in self.qa_ans2id]
# item_feature.qa_allids = [self.qa_ans2id[ans] for ans in item_feature.all_answers if ans in self.qa_ans2id]
item_feature.answers_scores = self.compute_answers_scores(torch.Tensor(item_feature.qa_ids))
return item_feature
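# Illustrative sketch only: `info_extend` is inherited from BaseInfoCpler and is
# not shown in this file. Assuming it pads each (list, pad_value) pair in place
# with `to_extd_length` copies of the pad value, a minimal equivalent would be:
def _pad_like_info_extend(to_extd_length, *list_value_pairs):
    # Extend every list with its padding value so all sequences reach max_seq_length.
    for seq, pad_value in list_value_pairs:
        seq.extend([pad_value] * to_extd_length)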
|
# Mini Black Heaven Mount Coupon | (2434762)
if sm.getSkillByItem() == 0:  # Check whether the item has a vehicleID stored; 0 if not.
sm.chat("An Error occurred whilst trying to find the mount.")
elif sm.hasSkill(sm.getSkillByItem()):
sm.chat("You already have the 'Mini Black Heaven' mount.")
else:
sm.consumeItem()
sm.giveSkill(sm.getSkillByItem())
sm.chat("Successfully added the 'Mini Black Heaven' mount.")
sm.dispose()
|
import pytest
def pytest_addoption(parser):
parser.addoption(
"--master", action="store", default="", help="IP address of GKE master")
parser.addoption(
"--namespace", action="store", default="", help="namespace of server")
parser.addoption(
"--service", action="store", default="",
help="The name of the mnist K8s service")
@pytest.fixture
def master(request):
return request.config.getoption("--master")
@pytest.fixture
def namespace(request):
return request.config.getoption("--namespace")
@pytest.fixture
def service(request):
return request.config.getoption("--service")
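# Hypothetical example (not part of this conftest): a test module would consume
# these options simply by naming the fixtures as arguments; the endpoint format
# below is an assumption made for illustration.
def test_service_endpoint_example(master, namespace, service):
    endpoint = "http://{0}/api/v1/namespaces/{1}/services/{2}:http/proxy".format(
        master, namespace, service)
    assert endpoint.startswith("http://")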
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import time
import gpp_types
import s1ap_types
import s1ap_wrapper
from integ_tests.s1aptests.s1ap_utils import SessionManagerUtil
import ipaddress
from lte.protos.policydb_pb2 import FlowMatch
class TestAttachServiceWithMultiPdnsAndBearersMtData(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
self._sessionManager_util = SessionManagerUtil()
def tearDown(self):
self._s1ap_wrapper.cleanup()
def test_attach_service_with_multi_pdns_and_bearers_mt_data(self):
"""
Attach a UE + add secondary PDN
+ add 2 dedicated bearers + UE context release
+ trigger MT data + service request
+ PDN disconnect + detach
"""
self._s1ap_wrapper.configUEDevice(1)
req = self._s1ap_wrapper.ue_req
ue_id = req.ue_id
ips = []
# APN of the secondary PDN
ims = {
"apn_name": "ims", # APN-name
"qci": 5, # qci
"priority": 15, # priority
"pre_cap": 0, # preemption-capability
"pre_vul": 0, # preemption-vulnerability
"mbr_ul": 200000000, # MBR UL
"mbr_dl": 100000000, # MBR DL
}
# APN list to be configured
apn_list = [ims]
self._s1ap_wrapper.configAPN(
"IMSI" + "".join([str(i) for i in req.imsi]), apn_list
)
# UL Flow description #1
ulFlow1 = {
"ipv4_dst": "192.168.129.42", # IPv4 destination address
"tcp_dst_port": 5002, # TCP dest port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.UPLINK, # Direction
}
# UL Flow description #2
ulFlow2 = {
"ipv4_dst": "192.168.129.42", # IPv4 destination address
"tcp_dst_port": 5001, # TCP dest port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.UPLINK, # Direction
}
# UL Flow description #3
ulFlow3 = {
"ipv4_dst": "192.168.129.64", # IPv4 destination address
"tcp_dst_port": 5003, # TCP dest port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.UPLINK, # Direction
}
# UL Flow description #4
ulFlow4 = {
"ipv4_dst": "192.168.129.42", # IPv4 destination address
"tcp_dst_port": 5001, # TCP dest port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.UPLINK, # Direction
}
# DL Flow description #1
dlFlow1 = {
"ipv4_src": "192.168.129.42", # IPv4 source address
"tcp_src_port": 5001, # TCP source port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.DOWNLINK, # Direction
}
# DL Flow description #2
dlFlow2 = {
"ipv4_src": "192.168.129.64", # IPv4 source address
"tcp_src_port": 5002, # TCP source port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.DOWNLINK, # Direction
}
# DL Flow description #3
dlFlow3 = {
"ipv4_src": "192.168.129.64", # IPv4 source address
"tcp_src_port": 5003, # TCP source port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.DOWNLINK, # Direction
}
# DL Flow description #4
dlFlow4 = {
"ipv4_src": "192.168.129.42", # IPv4 source address
"tcp_src_port": 5001, # TCP source port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.DOWNLINK, # Direction
}
# Flow lists to be configured
flow_list1 = [
ulFlow1,
ulFlow2,
ulFlow3,
dlFlow1,
dlFlow2,
dlFlow3,
]
flow_list2 = [
ulFlow4,
dlFlow4,
]
# QoS
qos1 = {
"qci": 1, # qci value [1 to 9]
"priority": 1, # Range [0-255]
"max_req_bw_ul": 10000000, # MAX bw Uplink
"max_req_bw_dl": 15000000, # MAX bw Downlink
"gbr_ul": 1000000, # GBR Uplink
"gbr_dl": 2000000, # GBR Downlink
"arp_prio": 1, # ARP priority
"pre_cap": 1, # pre-emption capability
"pre_vul": 1, # pre-emption vulnerability
}
qos2 = {
"qci": 2, # qci value [1 to 9]
"priority": 5, # Range [0-255]
"max_req_bw_ul": 10000000, # MAX bw Uplink
"max_req_bw_dl": 15000000, # MAX bw Downlink
"gbr_ul": 1000000, # GBR Uplink
"gbr_dl": 2000000, # GBR Downlink
"arp_prio": 1, # ARP priority
"pre_cap": 1, # pre-emption capability
"pre_vul": 1, # pre-emption vulnerability
}
policy_id1 = "internet"
policy_id2 = "ims"
print(
"************************* Running End to End attach for UE id ",
ue_id,
)
# Now actually complete the attach
attach = self._s1ap_wrapper._s1_util.attach(
ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
)
addr = attach.esmInfo.pAddr.addrInfo
default_ip = ipaddress.ip_address(bytes(addr[:4]))
ips.append(default_ip)
# Wait on EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
# Delay to ensure S1APTester sends attach complete before sending UE
# context release
print("Sleeping for 5 seconds")
time.sleep(5)
# Add dedicated bearer for default bearer 5
print(
"********************** Adding dedicated bearer to magma.ipv4"
" PDN"
)
print(
"********************** Sending RAR for IMSI",
"".join([str(i) for i in req.imsi]),
)
self._sessionManager_util.create_ReAuthRequest(
"IMSI" + "".join([str(i) for i in req.imsi]),
policy_id1,
flow_list1,
qos1,
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value
)
act_ded_ber_req_oai_apn = response.cast(
s1ap_types.UeActDedBearCtxtReq_t
)
self._s1ap_wrapper.sendActDedicatedBearerAccept(
req.ue_id, act_ded_ber_req_oai_apn.bearerId
)
print("Sleeping for 5 seconds")
time.sleep(5)
# Send PDN Connectivity Request
apn = "ims"
self._s1ap_wrapper.sendPdnConnectivityReq(ue_id, apn)
# Receive PDN CONN RSP/Activate default EPS bearer context request
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value
)
act_def_bearer_req = response.cast(s1ap_types.uePdnConRsp_t)
addr = act_def_bearer_req.m.pdnInfo.pAddr.addrInfo
sec_ip = ipaddress.ip_address(bytes(addr[:4]))
ips.append(sec_ip)
print(
"********************** Sending Activate default EPS bearer "
"context accept for UE id ",
ue_id,
)
print("Sleeping for 5 seconds")
time.sleep(5)
# Add dedicated bearer to 2nd PDN
print("********************** Adding dedicated bearer to ims PDN")
print(
"********************** Sending RAR for IMSI",
"".join([str(i) for i in req.imsi]),
)
self._sessionManager_util.create_ReAuthRequest(
"IMSI" + "".join([str(i) for i in req.imsi]),
policy_id2,
flow_list2,
qos2,
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value
)
act_ded_ber_req_ims_apn = response.cast(
s1ap_types.UeActDedBearCtxtReq_t
)
self._s1ap_wrapper.sendActDedicatedBearerAccept(
req.ue_id, act_ded_ber_req_ims_apn.bearerId
)
print(
"************* Added dedicated bearer",
act_ded_ber_req_ims_apn.bearerId,
)
print("Sleeping for 5 seconds")
time.sleep(5)
dl_flow_rules = {
default_ip: [flow_list1],
sec_ip: [flow_list2],
}
# 1 UL flow is created per bearer
num_ul_flows = 4
# Verify if flow rules are created
self._s1ap_wrapper.s1_util.verify_flow_rules(
num_ul_flows, dl_flow_rules
)
print("*********** Moving UE to idle mode")
print(
"************* Sending UE context release request ",
"for UE id ",
ue_id,
)
# Send UE context release request to move UE to idle mode
rel_req = s1ap_types.ueCntxtRelReq_t()
rel_req.ue_Id = ue_id
rel_req.cause.causeVal = (
gpp_types.CauseRadioNetwork.USER_INACTIVITY.value
)
self._s1ap_wrapper.s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_CNTXT_REL_REQUEST, rel_req
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_CTX_REL_IND.value
)
# Verify if paging flow rules are created
ip_list = [default_ip, sec_ip]
self._s1ap_wrapper.s1_util.verify_paging_flow_rules(ip_list)
print(
"************************* Running UE downlink (UDP) for UE id ",
ue_id,
)
with self._s1ap_wrapper.configDownlinkTest(
req, duration=1, is_udp=True
) as test:
response = self._s1ap_wrapper.s1_util.get_response()
            self.assertEqual(response.msg_type, s1ap_types.tfwCmd.UE_PAGING_IND.value)
# Send service request to reconnect UE
print(
"************************* Sending Service request for UE id ",
ue_id,
)
ser_req = s1ap_types.ueserviceReq_t()
ser_req.ue_Id = ue_id
ser_req.ueMtmsi = s1ap_types.ueMtmsi_t()
ser_req.ueMtmsi.pres = False
ser_req.rrcCause = s1ap_types.Rrc_Cause.TFW_MT_ACCESS.value
self._s1ap_wrapper.s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_SERVICE_REQUEST, ser_req
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value
)
test.verify()
print("Sleeping for 5 seconds")
time.sleep(5)
# Verify if flow rules are created
self._s1ap_wrapper.s1_util.verify_flow_rules(
num_ul_flows, dl_flow_rules
)
pdn_disconnect_req = s1ap_types.uepdnDisconnectReq_t()
pdn_disconnect_req.ue_Id = ue_id
pdn_disconnect_req.epsBearerId = (
act_def_bearer_req.m.pdnInfo.epsBearerId
)
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_PDN_DISCONNECT_REQ, pdn_disconnect_req
)
# Receive UE_DEACTIVATE_BER_REQ
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value
)
print(
"******************* Received deactivate eps bearer context"
" request"
)
# Send DeactDedicatedBearerAccept
deactv_bearer_req = response.cast(s1ap_types.UeDeActvBearCtxtReq_t)
self._s1ap_wrapper.sendDeactDedicatedBearerAccept(
ue_id, deactv_bearer_req.bearerId
)
print("Sleeping for 5 seconds")
time.sleep(5)
print("************************* Running UE detach for UE id ", ue_id)
# Now detach the UE
self._s1ap_wrapper.s1_util.detach(
ue_id, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, True
)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
"""Implements the xonsh parser."""
from xonsh.lazyasd import lazyobject
from xonsh.platform import PYTHON_VERSION_INFO
@lazyobject
def Parser():
if PYTHON_VERSION_INFO > (3, 6):
from xonsh.parsers.v36 import Parser as p
elif PYTHON_VERSION_INFO > (3, 5):
from xonsh.parsers.v35 import Parser as p
else:
from xonsh.parsers.v34 import Parser as p
return p
|
parentheses = input()
stack_opening_brackets = []
pairs = {
'(': ')',
'{': '}',
'[': ']'
}
balanced = True
for el in parentheses:
if el in "({[":
stack_opening_brackets.append(el)
else:
if len(stack_opening_brackets) > 0:
opening_bracket = stack_opening_brackets.pop()
closing_bracket = el
if pairs[opening_bracket] != closing_bracket:
balanced = False
break
else:
balanced = False
break
if balanced and len(stack_opening_brackets) == 0:
print("YES")
else:
print("NO")
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from frontend.tokenizer import *
from frontend.parser import *
from frontend.preprocessor import Preprocessor, PreprocessorException
from frontend.semantic_check import P4SemanticChecker
from frontend.dumper import P4HlirDumper
from frontend.ast import P4Program
from collections import OrderedDict
import hlir.p4 as p4
import itertools
import logging
import json
import pkg_resources
logger = logging.getLogger(__name__)
class HLIR():
def __init__(self, *args):
self.source_files = [] + list(args)
self.source_txt = []
self.preprocessor_args = []
self.primitives = []
self.p4_objects = []
self.p4_actions = OrderedDict()
self.p4_control_flows = OrderedDict()
self.p4_headers = OrderedDict()
self.p4_header_instances = OrderedDict()
self.p4_fields = OrderedDict()
self.p4_field_lists = OrderedDict()
self.p4_field_list_calculations = OrderedDict()
self.p4_parser_exceptions = OrderedDict()
self.p4_parse_value_sets = OrderedDict()
self.p4_parse_states = OrderedDict()
self.p4_counters = OrderedDict()
self.p4_meters = OrderedDict()
self.p4_registers = OrderedDict()
self.p4_nodes = OrderedDict()
self.p4_tables = OrderedDict()
self.p4_action_profiles = OrderedDict()
self.p4_action_selectors = OrderedDict()
self.p4_conditional_nodes = OrderedDict()
self.calculated_fields = []
self.p4_ingress_ptr = {}
self.p4_egress_ptr = None
self.primitives = json.loads(pkg_resources.resource_string('p4_hlir.frontend', 'primitives.json'))
def version(self):
return pkg_resources.require("p4-hlir")[0].version
def add_src_files(self, *args):
self.source_files += args
def add_preprocessor_args (self, *args):
self.preprocessor_args += args
def add_src_txt(self, *args):
self.source_txt += args
def add_primitives (self, primitives_dict):
self.primitives.update(primitives_dict)
def build(self, optimize=True, analyze=True, dump_preprocessed=False):
if len(self.source_files) == 0:
print "no source file to process"
return False
# Preprocess all program text
preprocessed_sources = []
try:
preprocessor = Preprocessor()
preprocessor.args += self.preprocessor_args
for p4_source in self.source_files:
absolute_source = os.path.join(os.getcwd(), p4_source)
if not self._check_source_path(absolute_source):
print "Source file '" + p4_source + "' could not be opened or does not exist."
return False
preprocessed_sources.append(preprocessor.preprocess_file(
absolute_source,
dest='%s.i'%p4_source if dump_preprocessed else None
))
for p4_txt in self.source_txt:
preprocessed_sources.append(preprocessor.preprocess_str(
p4_txt,
dest=None
))
except PreprocessorException as e:
print str(e)
return False
# Parse preprocessed text
all_p4_objects = []
for preprocessed_source in preprocessed_sources:
p4_objects, errors_cnt = P4Parser().parse(preprocessed_source)
if errors_cnt > 0:
print errors_cnt, "errors during parsing"
print "Interrupting compilation"
return False
all_p4_objects += p4_objects
print "parsing successful"
p4_program = P4Program("", -1, all_p4_objects)
# Semantic checking, round 1
sc = P4SemanticChecker()
errors_cnt = sc.semantic_check(p4_program, self.primitives)
if errors_cnt > 0:
print errors_cnt, "errors during semantic checking"
print "Interrupting compilation"
return False
else:
print "semantic checking successful"
# Dump AST to HLIR objects
d = P4HlirDumper()
d.dump_to_p4(self, p4_program, self.primitives)
# Semantic checking, round 2
# TODO: merge these two rounds and try to separate name resolution from
# higher level semantic checks
try:
p4.p4_validate(self)
except p4.p4_compiler_msg as e:
print e
return False
# Perform target-agnostic optimizations
if optimize:
p4.optimize_table_graph(self)
# Analyze program and annotate objects with derived information
if analyze:
p4.p4_dependencies(self)
p4.p4_field_access(self)
return True
def _check_source_path(self, source):
return os.path.isfile(source)
def HLIR_from_txt (program_str, **kwargs):
h = HLIR()
h.add_src_txt(program_str)
if h.build(**kwargs):
return h
else:
return None
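# Hypothetical usage sketch (not part of the module); the source file name and
# preprocessor flag below are assumptions made for illustration.
def _example_build():
    h = HLIR('switch.p4')
    h.add_preprocessor_args('-DTARGET=BMV2')
    if h.build():
        return sorted(h.p4_tables.keys())
    return None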
|
'''
Created on Aug 9, 2017
@author: Hao Wu
'''
from ScopeFoundry import HardwareComponent
from .daq_do_dev import DAQSimpleDOTask
from PyDAQmx import *
import numpy as np
import time
class DAQdoHW(HardwareComponent):
'''
    Hardware Component Class for driving a simple digital output (DO) line
'''
name='daq_do'
def setup(self,channels='Dev2/port0/line2'):
'''
        add settings for the digital output channel
'''
self.settings.New(name='channels',initial=channels,dtype=str,ro=False)
self.settings.New(name='on',initial=False,dtype=bool,ro=False)
def connect(self):
self._dev=DAQSimpleDOTask(self.settings.channels.value())
self.settings.on.hardware_set_func = self._dev.write_bool
def disconnect(self):
try:
self._dev.StopTask()
self._dev.ClearTask()
del self._dev
except AttributeError:
pass
if __name__ == '__main__':
ai=DAQdoHW()
ai.connect()
    print(ai.settings.on.value())
time.sleep(1)
ai.disconnect()
|
# test_malquery.py
# This class tests the malquery service class
import os
import sys
import pytest
# Authentication via test_authorization.py
from tests import test_authorization as Authorization
# Import our sibling src folder into the path
sys.path.append(os.path.abspath('src'))
# Classes to test - manually imported from sibling folder
from falconpy import MalQuery
auth = Authorization.TestAuthorization()
config = auth.getConfigObject()
falcon = MalQuery(auth_object=config)
AllowedResponses = [200, 400, 404, 429] # Adding rate-limiting as an allowed response for now
class TestMalQuery:
def mq_get_quotas(self):
returned = False
if falcon.GetMalQueryQuotasV1()["status_code"] in AllowedResponses:
returned = True
return returned
def mq_test_all_paths(self):
error_checks = True
tests = {
"fuzzy_search": falcon.fuzzy_search(body={
"options": {
"filter_meta": [
"string"
],
"limit": 0
},
"patterns": [
{
"type": "string",
"value": "string"
}
]
}),
"really_fuzzy": falcon.fuzzy_search(filter_meta="whatevs,something_else",
limit=1,
patterns=[{"type": "file", "value": "test"}]
),
"get_download": falcon.get_download(ids="12345678"),
"get_metadata": falcon.get_metadata(ids="12345678"),
"get_request": falcon.get_request(ids="12345678"),
"get_samples": falcon.get_samples(ids="12345678"),
"multi_download": falcon.samples_multidownload(ids="12345678"),
"exact_search": falcon.exact_search(body={}),
"exact_search_too": falcon.exact_search(filter_filetypes="xls,doc",
filter_meta="whatevers,something",
limit=1,
max_date="UTC_Date_Here",
min_date="UTC Date Here",
max_size="200",
min_size="1",
patterns=[
{
"type": "file",
"value": "spreadsheet"
}
]),
"hunt": falcon.hunt(body={}),
"cry_of_the_hunter": falcon.hunt(filter_filetypes=["exe"],
filter_meta="some metadata",
limit=1,
max_date="UTC_Date_Here",
min_date="UTC Date Here",
max_size="200",
min_size="1",
yara_rule="Some Yara rule"
)
}
for key in tests:
if tests[key]["status_code"] not in AllowedResponses:
# print(tests[key])
error_checks = False
pytest.skip("Skipping due to test flakiness")
return error_checks
def test_get_quotas(self):
assert self.mq_get_quotas() is True
def test_all_functionality(self):
assert self.mq_test_all_paths() is True
|
#!/usr/bin/env python
# import required packages
import emcee
import numpy as np
from numpy import exp, log
# import model function from separate file
from mymodel import mymodel
# import post-processing function from theonlinemcmc package
from theonlinemcmc import *
# initialise error code value
errval = 0
# define the log posterior function
def lnprob(theta, x, sigma_gauss, data):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, sigma_gauss, data)
# define the log prior function
def lnprior(theta):
lp = 0.
m,c = theta
if -10 < m < 10:
lp = 0.
else:
return -np.inf
if -10 < c < 10:
lp = 0.
else:
return -np.inf
return lp
# define log likelihood function
def lnlike(theta, x, sigma_gauss, data):
m,c = theta
md = mymodel(m,c,x)
return -0.5*np.sum(((md - data)/sigma_gauss)**2)
# set number of MCMC points
Nmcmc = 1000
Nburnin = 1000
Nens = 100
ndim = 2
# initialise the start ensemble points
try:
mini = -10 + np.random.rand(Nens)*20
cini = -10 + np.random.rand(Nens)*20
pos = np.array([mini, cini]).T
except:
errval = PRIOR_INIT_ERR
# read in the data
if errval == 0:
try:
data = np.loadtxt("data_file.txt")
except:
try:
data = np.loadtxt("data_file.txt", delimiter=",")
except:
errval = DATA_READ_ERR
# read in the abscissa values
if errval == 0:
try:
x = np.loadtxt("abscissa_file.txt")
except:
try:
x = np.loadtxt("abscissa_file.txt", delimiter=",")
except:
errval = ABSCISSA_READ_ERR
# read in sigma (standard deviation) values (there may be nothing here if it is not applicable to your likelihood)
# run the MCMC
if errval == 0:
if len(data) != len(x):
errval = DATA_LENGTH_ERR
argslist = (x, 0.65, data)
if errval == 0:
# set up sampler
try:
sampler = emcee.EnsembleSampler(Nens, ndim, lnprob, args=argslist)
except:
errval = MCMC_INIT_ERR
# run sampler
try:
sampler.run_mcmc(pos, Nmcmc+Nburnin)
# remove burn-in and flatten
samples = sampler.chain[:, Nburnin:, :].reshape((-1, ndim))
lnp = np.reshape(sampler.lnprobability[:, Nburnin:].flatten(), (-1,1))
samples = np.hstack((samples, lnp))
except:
errval = MCMC_RUN_ERR
# output the posterior samples, likelihood and variables
try:
np.savetxt('posterior_samples.txt.gz', samples)
fv = open('variables.txt', 'w')
fv.write("m,c")
fv.close()
except:
errval = POST_OUTPUT_ERR
# run post-processing script
try:
postprocessing(samples, "m,c", x, "x", data, "you@example.com", "http://localhost/results/8112a5333cb1bb472ee14fa5342f6422")
except:
errval = POST_PROCESS_ERR
success = True
if errval != 0:
# run different script in case error codes are encountered
errorpage(errval, "you@example.com", "http://localhost/results/8112a5333cb1bb472ee14fa5342f6422")
success = False
|
from turtle import Turtle
FONT = ("Courier", 20, "normal")
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.hideturtle()
self.penup()
self.level = 1
self.goto(x=-230, y=260)
self.update_scoreboard()
def update_scoreboard(self):
self.clear()
self.write(f"Level: {self.level}", align="center", font=FONT)
def increase_level(self):
self.level += 1
self.update_scoreboard()
def game_over(self):
self.goto(0, 0)
self.write("GAME OVER", align="center", font=FONT)
|
from django.shortcuts import render
from decouple import config
from .models import Images
# Create your views here.
def index(request, **kwargs):
"""
Render single page application and provide context data.
:param request:
:param kwargs:
:return:
"""
context = {
'images': Images.objects.filter(is_active=True),
'googleForm': config('googleForm', default=''),
'googleMaps': config('googleMaps', default=''),
}
return render(request, 'spa/main_SPA.html', context)
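# Hypothetical wiring sketch (not part of views.py): the app's urls.py would
# typically route the root URL to this view. The module path and route name are
# assumptions made for illustration.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
# ]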
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Sparskit(MakefilePackage):
"""SPARSKIT: A basic tool-kit for sparse matrix computations (Version 2).
Made by Yousef Saad, University of Minnesota.
"""
homepage = "https://www-users.cse.umn.edu/~saad/software/SPARSKIT/"
version('develop', sha256='ecdd0a9968d6b45153a328710a42fe87600f0bba0e3c53896090b8ae1c113b7a',
url='http://www-users.cs.umn.edu/~saad/software/SPARSKIT/SPARSKIT2.tar.gz')
    # The library uses BLAS routines, which need to be known when the lib is used.
    # A dependent package should add self.spec['blas'].libs.ld_flags
    # at the end of its link line.
    # But, as is, this package compiles without needing to know about it.
# depends_on('blas', type='run')
variant('pic', default=True,
description='Compile with position independent code.')
variant('debug', default=False,
description='Builds a debug version of the library')
# We provide the standard Make flags here:
# https://spack.readthedocs.io/en/latest/packaging_guide.html?highlight=flag_handler#compiler-flags
def flag_handler(self, name, flags):
spec = self.spec
if '+pic' in spec:
if name == 'fflags':
flags.append(self.compiler.fc_pic_flag)
if name == 'fflags':
if 'gfortran' in self.compiler.fc:
flags.append('-std=legacy')
flags.append('-Wall')
if '+debug' in spec:
if '-g' in self.compiler.debug_flags:
flags.append('-g')
if '-O0' in self.compiler.opt_flags:
flags.append('-O0')
elif '-O' in self.compiler.opt_flags:
flags.append('-O')
else:
if '-O3' in self.compiler.opt_flags:
flags.append('-O3')
elif '-O2' in self.compiler.opt_flags:
flags.append('-O2')
return (None, flags, None)
def edit(self, spec, prefix):
mkfile = FileFilter('makefile')
mkfile.filter(r'^(OPT).*=.+', r'\1= -c $(FFLAGS)')
if os.path.exists('libskit.a'):
os.unlink('libskit.a')
def build(self, spec, prefix):
make('clean')
make('F77={0}'.format(spack_fc))
def install(self, spec, prefix):
mkdirp(prefix.lib)
install('libskit.*', prefix.lib)
@property
def libs(self):
return find_libraries(
"libskit*", root=self.prefix, shared=False, recursive=True
)
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.verification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Intermediate Verification Languages (IVLs).
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Operator, Keyword, Name, Number, \
Punctuation, Text, Generic
__all__ = ['BoogieLexer', 'SilverLexer']
class BoogieLexer(RegexLexer):
"""
For `Boogie <https://boogie.codeplex.com/>`_ source code.
.. versionadded:: 2.1
"""
name = 'Boogie'
aliases = ['boogie']
filenames = ['*.bpl']
tokens = {
'root': [
# Whitespace and Comments
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//[/!](.*?)\n', Comment.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(words((
'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function',
'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires',
'then', 'var', 'while'),
suffix=r'\b'), Keyword),
(words(('const',), suffix=r'\b'), Keyword.Reserved),
(words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type),
include('numbers'),
(r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator),
(r'\{.*?\}', Generic.Emph), #triggers
(r"([{}():;,.])", Punctuation),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'numbers': [
(r'[0-9]+', Number.Integer),
],
}
class SilverLexer(RegexLexer):
"""
For `Silver <https://bitbucket.org/viperproject/silver>`_ source code.
.. versionadded:: 2.2
"""
name = 'Silver'
aliases = ['silver']
filenames = ['*.sil', '*.vpr']
tokens = {
'root': [
# Whitespace and Comments
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//[/!](.*?)\n', Comment.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(words((
'result', 'true', 'false', 'null', 'method', 'function',
'predicate', 'program', 'domain', 'axiom', 'var', 'returns',
'field', 'define', 'fold', 'unfold', 'inhale', 'exhale', 'new', 'assert',
'assume', 'goto', 'while', 'if', 'elseif', 'else', 'fresh',
'constraining', 'Seq', 'Set', 'Multiset', 'union', 'intersection',
'setminus', 'subset', 'unfolding', 'in', 'old', 'forall', 'exists',
'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique',
'apply', 'package', 'folding', 'label', 'forperm'),
suffix=r'\b'), Keyword),
(words(('requires', 'ensures', 'invariant'), suffix=r'\b'), Name.Decorator),
(words(('Int', 'Perm', 'Bool', 'Ref', 'Rational'), suffix=r'\b'), Keyword.Type),
include('numbers'),
(r'[!%&*+=|?:<>/\-\[\]]', Operator),
(r'\{.*?\}', Generic.Emph), #triggers
(r'([{}():;,.])', Punctuation),
# Identifier
(r'[\w$]\w*', Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'numbers': [
(r'[0-9]+', Number.Integer),
],
}
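# Hypothetical usage sketch (not part of the module): rendering a small Boogie
# snippet to HTML with the lexer defined above; the sample program text is an
# assumption made for illustration.
def _example_highlight():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    code = "procedure P() ensures true; { var x: int; x := 1; }"
    return highlight(code, BoogieLexer(), HtmlFormatter())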
|
from DownloadSECFilings import *
# SEC EDGAR system saves all the URL links for each filing under quarterly folders. These files storing
# the URL information are called "master" files.
# 1) You need to call the "createMasterFile" function if you want to keep the master file updated.
# createMasterFile generates a TXT file under the folder provided as the input.
# 2) You need to call the "downloadSECFilings" function if you want to download specific filings that are filed
# within dateStart and dateFinish. The "downloadSECFilings" function first creates a folder structure
# , such as "folderPath/10-K/1000015000/" where 1000015000 represents the CIK number (firm identifier)
# and downloads each file to the corresponding path with a file specific name
# YOU CAN SEE A SAMPLE CALL BELOW to download all 10-Ks reported between January 1st 2000 and June 30, 2001.
# WARNING : YOU NEED TO MAKE SURE THAT YOU HAVE ENOUGH HARD DRIVE SPACE BEFORE YOU DOWNLOAD THE FILINGS.
createMasterFile('C:/path to your folder where you want to download the filings to', 2000, 2001)
downloadSECFilings('C:/path to your folder where you want to download the filings to',
formTypesList=['10-K'],
dateStart=20000101,
dateFinish=20010630,
)
|
try:
tests=int(input())
z=[]
for _ in range(tests):
n,x=[int(xi) for xi in input().split(" ")]
arr=list(map(int,input().rstrip().split()))
m=max(arr)-min(arr)
if m>x:
z.append("NO")
else:
z.append("YES")
for x in z:
print(x)
except:pass
|
from enum import Enum
class Suite(Enum):
Diamond = 1
Heart = 2
Club = 3
Spade = 4
|
from quapy.data import LabelledCollection
from .base import BaseQuantifier
class MaximumLikelihoodPrevalenceEstimation(BaseQuantifier):
"""
The `Maximum Likelihood Prevalence Estimation` (MLPE) method is a lazy method that assumes there is no prior
    probability shift between training and test instances (put another way, that the i.i.d. assumption holds).
The estimation of class prevalence values for any test sample is always (i.e., irrespective of the test sample
itself) the class prevalence seen during training. This method is considered to be a lower-bound quantifier that
any quantification method should beat.
"""
def __init__(self):
self._classes_ = None
def fit(self, data: LabelledCollection):
"""
Computes the training prevalence and stores it.
:param data: the training sample
:return: self
"""
self._classes_ = data.classes_
self.estimated_prevalence = data.prevalence()
return self
def quantify(self, instances):
"""
        Ignores the input instances and returns, as the class prevalence estimates, the training prevalence.
:param instances: array-like (ignored)
:return: the class prevalence seen during training
"""
return self.estimated_prevalence
@property
def classes_(self):
"""
        The classes on which this quantifier has been fit
        :return: array-like with the class labels seen during training
"""
return self._classes_
def get_params(self, deep=True):
"""
Does nothing, since this learner has no parameters.
:param deep: for compatibility with sklearn
:return: `None`
"""
return None
def set_params(self, **parameters):
"""
Does nothing, since this learner has no parameters.
:param parameters: dictionary of param-value pairs (ignored)
"""
pass
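# Hypothetical usage sketch (not part of the module): fitting MLPE on a labelled
# training sample and quantifying a test set. The LabelledCollection constructor
# arguments below are assumptions made for illustration.
def _example_mlpe(train_instances, train_labels, test_instances):
    train = LabelledCollection(train_instances, train_labels)
    mlpe = MaximumLikelihoodPrevalenceEstimation()
    mlpe.fit(train)
    return mlpe.quantify(test_instances)  # always the training prevalence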
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import dis
import inspect
import os.path
from types import FrameType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Type,
Union,
get_type_hints,
)
import testslide.lib
import testslide.mock_callable
if TYPE_CHECKING:
# Hack to enable typing information for mypy
from testslide.mock_callable import _CallableMock, _YieldValuesRunner # noqa: F401
class UndefinedAttribute(BaseException):
"""
Tentative access of an attribute from a StrictMock that is not defined yet.
Inherits from BaseException to avoid being caught by tested code.
"""
def __init__(
self, strict_mock: "StrictMock", name: str, extra_msg: Optional[str] = None
) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
self.extra_msg = extra_msg
def __str__(self) -> str:
message = (
f"'{self.name}' is not set.\n"
f"{repr(self.strict_mock)} must have a value set for this attribute "
"if it is going to be accessed."
)
if self.extra_msg is not None:
message += f"\n{self.extra_msg}"
return message
class NonExistentAttribute(BaseException):
"""
    Tentative setting of an attribute on a StrictMock that is not present
    in the template class.
Inherits from BaseException to avoid being caught by tested code.
"""
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return (
f"'{self.name}' is not part of the API.\n"
f"{self.strict_mock} template class API does not have this "
"attribute so the mock can not have it as well.\n"
"If you are inheriting StrictMock, you can define private "
"attributes, that will not interfere with the API, by prefixing "
"them with '__' (and at most one '_' suffix) "
" (https://docs.python.org/3/tutorial/classes.html#tut-private).\n"
"See also: 'runtime_attrs' at StrictMock.__init__."
)
class NonCallableValue(BaseException):
"""
Raised when trying to set a non callable value to a callable attribute of
a StrictMock instance.
"""
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return (
f"'{self.name}' can not be set with a non-callable value.\n"
f"{self.strict_mock} template class requires this attribute to "
"be callable."
)
class NonAwaitableReturn(BaseException):
"""
    Raised when a coroutine method on a StrictMock is assigned a callable
    that is not a coroutine function.
"""
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return (
f"'{self.name}' can not be set with a callable that does not "
"return an awaitable.\n"
f"{self.strict_mock} template class requires this attribute to "
"be a callable that returns an awaitable (eg: a 'async def' "
"function)."
)
class UnsupportedMagic(BaseException):
"""
Raised when trying to set an unsupported magic attribute to a StrictMock
instance.
"""
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return f"setting '{self.name}' is not supported."
class _DefaultMagic:
CONTEXT_MANAGER_METHODS = ["__enter__", "__exit__", "__aenter__", "__aexit__"]
def __init__(self, strict_mock: "StrictMock", name: str):
self.strict_mock = strict_mock
self.name = name
def __call__(self, *args: Any, **kwargs: Any) -> None:
message = None
if self.name in self.CONTEXT_MANAGER_METHODS:
message = (
"Tip: most context managers can be automatically configured "
"with 'default_context_manager=True'."
)
raise UndefinedAttribute(self.strict_mock, self.name, message)
def __copy__(self) -> "_DefaultMagic":
return type(self)(strict_mock=self.strict_mock, name=self.name)
def __deepcopy__(self, memo: Optional[Dict[Any, Any]] = None) -> "_DefaultMagic":
if memo is None:
memo = {}
self_copy = type(self)(strict_mock=self.strict_mock, name=self.name)
memo[id(self)] = self_copy
return self_copy
class _MethodProxy:
"""
When setting callable attributes, the new value is wrapped by another
function that does signature and async validations. We then need this proxy
around it, so that when the attribute is called, the mock value is called
(the wrapper function which then calls the new value) but all attribute
access is forwarded to the new value.
"""
def __init__(self, value: Any, callable_value: Optional[Callable] = None) -> None:
self.__dict__["_value"] = value
self.__dict__["_callable_value"] = callable_value or value
def __get__(
self, instance: "StrictMock", owner: Optional[Type["StrictMock"]] = None
) -> Union[object, Callable]:
if self.__dict__["_value"] is self.__dict__["_callable_value"]:
return self.__dict__["_callable_value"]
else:
return self
def __getattr__(self, name: str) -> str:
return getattr(self.__dict__["_value"], name)
def __setattr__(self, name: str, value: str) -> None:
return setattr(self.__dict__["_value"], name, value)
def __delattr__(self, name: str) -> None:
return delattr(self.__dict__["_value"], name)
def __call__(self, *args: Any, **kwargs: Any) -> Optional[Any]:
return self.__dict__["_callable_value"](*args, **kwargs)
def __copy__(self) -> "_MethodProxy":
return type(self)(
callable_value=self.__dict__["_callable_value"],
value=self.__dict__["_value"],
)
def __deepcopy__(self, memo: Optional[Dict[Any, Any]] = None) -> "_MethodProxy":
if memo is None:
memo = {}
self_copy = type(self)(
callable_value=copy.deepcopy(self.__dict__["_callable_value"]),
value=copy.deepcopy(self.__dict__["_value"]),
)
memo[id(self)] = self_copy
return self_copy
def __repr__(self) -> str:
# Override repr to have a representation that provides information
# about the wrapped method
return repr(self.__dict__["_value"])
class StrictMock:
"""
Mock object that won't allow any attribute access or method call, unless its
behavior has been explicitly defined. This is meant to be a safer
alternative to Python's standard Mock object, that will always return
another mock when referred by default.
StrictMock is "safe by default", meaning that it will never misbehave by
lack of configuration. It will raise in the following situations:
- Get/Set attribute that's not part of the specification (template or
runtime_attrs).
- Get attribute that is part of the specification, but has not yet been
defined.
- Call a method with different signature from the template.
    When appropriate, raised exceptions inherit from BaseException, in order to
    let them escape the tested code and surface at the test, so we can get a clear
    signal of what is happening: either the mock is missing a required behavior
    or the tested code is misbehaving.
"""
TRIM_PATH_PREFIX = ""
    # All of these magic methods should be OK to be set on the mock and they are
# expected to work as they should. If implemented by the template class,
# they will have default values assigned to them, that raise
# UndefinedAttribute until configured.
__SETTABLE_MAGICS = [
"__abs__",
"__add__",
"__aenter__",
"__aexit__",
"__aiter__",
"__and__",
"__anext__",
"__await__",
"__bool__",
"__bytes__",
"__call__",
"__ceil__",
"__complex__",
"__contains__",
"__delete__",
"__delitem__",
"__divmod__",
"__enter__",
"__enter__",
"__eq__",
"__exit__",
"__exit__",
"__float__",
"__floor__",
"__floordiv__",
"__format__",
"__ge__",
"__get__",
"__getformat__",
"__getinitargs__",
"__getitem__",
"__getnewargs__",
"__getnewargs_ex__",
"__getstate__",
"__gt__",
"__iadd__",
"__iand__",
"__ifloordiv__",
"__ilshift__",
"__imatmul__",
"__imod__",
"__imul__",
"__index__",
"__instancecheck__",
"__int__",
"__invert__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__iter__",
"__iter__",
"__iter__",
"__itruediv__",
"__ixor__",
"__le__",
"__len__",
"__length_hint__",
"__lshift__",
"__lt__",
"__matmul__",
"__missing__",
"__mod__",
"__mul__",
"__name__",
"__ne__",
"__neg__",
"__next__",
"__or__",
"__pos__",
"__pow__",
"__qualname__",
"__radd__",
"__rand__",
"__rdivmod__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rfloordiv__",
"__rlshift__",
"__rmatmul__",
"__rmod__",
"__rmul__",
"__ror__",
"__round__",
"__rpow__",
"__rrshift__",
"__rshift__",
"__rsub__",
"__rtruediv__",
"__rxor__",
"__set__",
"__set_name__",
"__setformat__",
"__setitem__",
"__setstate__",
"__sizeof__",
"__str__",
"__sub__",
"__subclasscheck__",
"__truediv__",
"__trunc__",
"__xor__",
]
    # These magics either won't work or make no sense to be set on a mock
    # instance. Trying to set them will raise UnsupportedMagic.
__UNSETTABLE_MAGICS = [
"__bases__",
"__class__",
"__class_getitem__",
"__copy__",
"__deepcopy__",
"__del__",
"__delattr__",
"__dict__",
"__dir__",
"__getattr__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__mro__",
"__new__",
"__setattr__",
"__slots__",
"__subclasses__",
]
def __new__(
cls,
template: Optional[type] = None,
runtime_attrs: Optional[List[Any]] = None,
name: Optional[str] = None,
default_context_manager: bool = False,
type_validation: bool = True,
attributes_to_skip_type_validation: List[str] = [],
) -> "StrictMock":
"""
For every new instance of StrictMock we dynamically create a subclass of
StrictMock and return an instance of it. This allows us to use this new
subclass dictionary for all attributes, including magic ones, that must
be defined at the class to work.
"""
name = f"{template.__name__}{cls.__name__}" if template else cls.__name__
strict_mock_subclass = type(name, (cls,), {})
return object.__new__(strict_mock_subclass)
def __setup_magic_methods(self) -> None:
"""
Populate all template's magic methods with expected default behavior.
        This is important as things such as bool() depend on them existing
on the object's class __dict__.
https://github.com/facebook/TestSlide/issues/23
"""
if not self._template:
return
implemented_magic_methods = []
for klass in type(self).mro():
if klass is object:
continue
for name in klass.__dict__:
if name.startswith("__") and name.endswith("__"):
implemented_magic_methods.append(name)
for klass in self._template.mro():
if klass is object:
continue
for name in klass.__dict__:
if name in type(self).__dict__:
continue
if name == "__hash__":
if klass.__dict__["__hash__"] is None:
setattr(self, name, None)
else:
setattr(self, name, lambda: id(self))
continue
if (
callable(klass.__dict__[name])
and name in self.__SETTABLE_MAGICS
and name not in self.__UNSETTABLE_MAGICS
and name not in implemented_magic_methods
):
setattr(self, name, _DefaultMagic(self, name))
def __setup_default_context_manager(self, default_context_manager: bool) -> None:
if self._template and default_context_manager:
if hasattr(self._template, "__enter__") and hasattr(
self._template, "__exit__"
):
self.__enter__ = lambda: self
self.__exit__ = lambda exc_type, exc_value, traceback: None
if hasattr(self._template, "__aenter__") and hasattr(
self._template, "__aexit__"
):
async def aenter():
return self
async def aexit(exc_type, exc_value, traceback):
pass
self.__aenter__ = aenter
self.__aexit__ = aexit
def __get_caller_frame(self, depth: int) -> FrameType:
# Adding extra 3 to account for the stack:
# __get_caller_frame
# __get_caller
# __init__
depth += 3
current_frame = inspect.currentframe()
while current_frame:
depth -= 1
if not depth:
break
current_frame = current_frame.f_back
return current_frame # type: ignore
def __get_caller(self, depth: int) -> Optional[str]:
        # Calling inspect.stack() would retrieve the whole stack, including context,
        # which is really slow; this only retrieves the minimum and does not read
        # the file contents.
caller_frame = self.__get_caller_frame(depth)
# loading the context ends up reading files from disk and that might block
# the event loop, so we don't do it.
frameinfo = inspect.getframeinfo(caller_frame, context=0)
filename = frameinfo.filename
lineno = frameinfo.lineno
if self.TRIM_PATH_PREFIX:
split = filename.split(self.TRIM_PATH_PREFIX)
if len(split) == 2 and not split[0]:
filename = split[1]
if os.path.exists(filename):
return "{}:{}".format(filename, lineno)
else:
return None
def __setup_subclass(self):
"""
When StrictMock is subclassed, any attributes defined at the subclass
will override any of StrictMock's validations. In order to overcome
        this, for attributes where it makes sense, we set them at StrictMock's
dynamically created subclass from __new__ using __setattr__, so that
all validations work.
"""
if type(self).mro()[1] == StrictMock:
return
for klass in type(self).mro()[1:]:
if klass == StrictMock:
break
for name in klass.__dict__.keys():
if name in [
"__doc__",
"__init__",
"__module__",
]:
continue
# https://docs.python.org/3/tutorial/classes.html#tut-private
if name.startswith(f"_{type(self).__name__}__") and not name.endswith(
"__"
):
continue
if name == "__hash__" and klass.__dict__["__hash__"] is None:
continue
StrictMock.__setattr__(self, name, getattr(self, name))
def __init__(
self,
template: Optional[type] = None,
runtime_attrs: Optional[List[Any]] = None,
name: Optional[str] = None,
default_context_manager: bool = False,
type_validation: bool = True,
attributes_to_skip_type_validation: List[str] = [],
) -> None:
"""
template: Template class to be used as a template for the mock.
runtime_attrs: Often attributes are created within an instance's
lifecycle, typically from __init__(). To allow mocking such attributes,
specify their names here.
name: an optional name for this mock instance.
default_context_manager: If the template class is a context manager,
setup a mock for __enter__/__aenter__ that yields itself and an empty function
for __exit__/__aexit__.
        type_validation: validate calls to callable attributes against the
        template's method signatures and use type hinting information from the
        template to validate that mock attribute types match them. Type validation
        also happens for callable attribute (instance/static/class method) calls.
        attributes_to_skip_type_validation: do not validate types for these
        attributes of the StrictMock instance.
"""
if template is not None and not inspect.isclass(template):
raise ValueError("Template must be a class.")
self.__dict__["_template"] = template
self.__dict__["_runtime_attrs"] = runtime_attrs or []
self.__dict__["_name"] = name
self.__dict__["_type_validation"] = type_validation
self.__dict__["__caller"] = self.__get_caller(1)
self.__dict__[
"_attributes_to_skip_type_validation"
] = attributes_to_skip_type_validation
caller_frame = inspect.currentframe().f_back # type: ignore
# loading the context ends up reading files from disk and that might block
# the event loop, so we don't do it.
caller_frame_info = inspect.getframeinfo(caller_frame, context=0) # type: ignore
self.__dict__["_caller_frame_info"] = caller_frame_info
self.__setup_magic_methods()
self.__setup_default_context_manager(default_context_manager)
self.__setup_subclass()
@property # type: ignore
def __class__(self) -> type:
return self._template if self._template is not None else type(self)
@property
def _template(self) -> None:
import testslide.mock_constructor # Avoid cyclic dependencies
# If the template class was mocked with mock_constructor(), this will
# return the mocked subclass, which contains all attributes we need for
# introspection.
return testslide.mock_constructor._get_class_or_mock(self.__dict__["_template"])
# FIXME change to __runtime_attrs
@property
def _runtime_attrs(self) -> Optional[List[Any]]:
return self.__dict__["_runtime_attrs"]
def __template_has_attr(self, name: str) -> bool:
def get_class_init(klass: type) -> Callable:
import testslide.mock_constructor # Avoid cyclic dependencies
if not testslide.mock_constructor._is_mocked_class(klass):
return klass.__init__ # type: ignore
# If klass is the mocked subclass, pull the original version of
# __init__ so we can introspect into its implementation (and
# not the __init__ wrapper at the mocked class).
mocked_class = klass
original_class = mocked_class.mro()[1]
return testslide.mock_constructor._get_original_init(
original_class, instance=None, owner=mocked_class
)
def is_runtime_attr() -> bool:
if self._template:
for klass in self._template.mro():
template_init = get_class_init(klass)
if not inspect.isfunction(template_init):
continue
for instruction in dis.get_instructions(template_init):
if (
instruction.opname == "STORE_ATTR"
and name == instruction.argval
):
return True
return False
return (
hasattr(self._template, name)
or name in self._runtime_attrs # type: ignore
or name in getattr(self._template, "__slots__", [])
or is_runtime_attr()
)
@staticmethod
def __is_magic_method(name: str) -> bool:
return name.startswith("__") and name.endswith("__")
def __validate_attribute_type(self, name: str, value: Any) -> None:
if (
not self.__dict__["_type_validation"]
or name in self.__dict__["_attributes_to_skip_type_validation"]
):
return
if self._template is not None:
try:
annotations = get_type_hints(self._template)
except Exception:
# Some modules can throw KeyError : https://bugs.python.org/issue41515
annotations = {}
if name in annotations:
testslide.lib._validate_argument_type(annotations[name], name, value)
def __validate_and_wrap_mock_value(self, name: str, value: Any) -> Any:
if self._template:
if not self.__template_has_attr(name):
if not (
name.startswith(f"_{type(self).__name__}__")
and not name.endswith("__")
):
raise NonExistentAttribute(self, name)
self.__validate_attribute_type(name, value)
if hasattr(self._template, name):
template_value = getattr(self._template, name)
if callable(template_value):
if not callable(value):
raise NonCallableValue(self, name)
if self.__dict__["_type_validation"]:
signature_validation_wrapper = (
testslide.lib._wrap_signature_and_type_validation(
value,
self._template,
name,
self.__dict__["_type_validation"],
)
)
if inspect.iscoroutinefunction(template_value):
async def awaitable_return_validation_wrapper(
*args, **kwargs
):
result_awaitable = signature_validation_wrapper(
*args, **kwargs
)
if not inspect.isawaitable(result_awaitable):
raise NonAwaitableReturn(self, name)
return_value = await result_awaitable
if not testslide.lib._is_wrapped_for_signature_and_type_validation(
# The original value was already wrapped for type
# validation. Skipping additional validation to
# allow, for example, mock_callable to disable
# validation for a very specific mock call rather
# for the whole StrictMock instance
value
) and not isinstance(
# If the return value is a _BaseRunner then type
# validation, if needed, has already been performed
return_value,
testslide.mock_callable._BaseRunner,
):
testslide.lib._validate_return_type(
template_value,
return_value,
self.__dict__["_caller_frame_info"],
)
return return_value
callable_value = awaitable_return_validation_wrapper
else:
def return_validation_wrapper(*args, **kwargs):
return_value = signature_validation_wrapper(
*args, **kwargs
)
if not testslide.lib._is_wrapped_for_signature_and_type_validation(
# The original value was already wrapped for type
# validation. Skipping additional validation to
# allow, for example, mock_callable to disable
# validation for a very specific mock call rather
# for the whole StrictMock instance
value
) and not isinstance(
# If the return value is a _BaseRunner then type
# validation, if needed, has already been performed
return_value,
testslide.mock_callable._BaseRunner,
):
testslide.lib._validate_return_type(
template_value,
return_value,
self.__dict__["_caller_frame_info"],
)
return return_value
callable_value = return_validation_wrapper
else:
callable_value = None
return _MethodProxy(value=value, callable_value=callable_value)
else:
if callable(value):
# We don't really need the proxy here, but it serves the
# double purpose of swallowing self / cls when needed.
return _MethodProxy(value=value)
else:
if callable(value):
# We don't really need the proxy here, but it serves the
# double purpose of swallowing self / cls when needed.
return _MethodProxy(value=value)
return value
def __setattr__(self, name: str, value: Any) -> None:
if self.__is_magic_method(name):
# ...check whether we're allowed to mock...
if (
name in self.__UNSETTABLE_MAGICS
or (name in StrictMock.__dict__ and name not in self.__SETTABLE_MAGICS)
) and name != "__hash__":
raise UnsupportedMagic(self, name)
# ...or if it is something unsupported.
if name not in self.__SETTABLE_MAGICS and name != "__hash__":
raise NotImplementedError(
f"StrictMock does not implement support for {name}"
)
if name == "__hash__" and name in type(self).__dict__:
raise UnsupportedMagic(self, name)
mock_value = self.__validate_and_wrap_mock_value(name, value)
setattr(type(self), name, mock_value)
def __getattr__(self, name: str) -> Any:
if self._template and self.__template_has_attr(name):
raise UndefinedAttribute(self, name)
else:
raise AttributeError(f"'{name}' was not set for {repr(self)}.")
def __delattr__(self, name: str) -> None:
if name in type(self).__dict__:
delattr(type(self), name)
def __repr__(self) -> str:
template_str = (
" template={}.{}".format(self._template.__module__, self._template.__name__) # type: ignore
if self._template
else ""
)
if self.__dict__["_name"]:
name_str = " name={}".format(repr(self.__dict__["_name"]))
else:
name_str = ""
if self.__dict__["__caller"]:
caller_str = " {}".format(self.__dict__["__caller"])
else:
caller_str = ""
return "<StrictMock 0x{:02X}{name}{template}{caller}>".format(
id(self), name=name_str, template=template_str, caller=caller_str
)
def __str__(self) -> str:
return self.__repr__()
def __get_copy(self) -> "StrictMock":
self_copy = StrictMock(
template=self._template,
runtime_attrs=self._runtime_attrs,
name=self._name,
type_validation=self._type_validation,
attributes_to_skip_type_validation=self._attributes_to_skip_type_validation,
)
self_copy.__dict__["__caller"] = self.__get_caller(2)
return self_copy
def __get_copyable_attrs(self, self_copy: "StrictMock") -> List[str]:
return [
name
for name in type(self).__dict__
if name not in self_copy.__dict__
and (
not name.startswith("__")
or not name.endswith("__")
or name in self.__SETTABLE_MAGICS
)
]
def __copy__(self) -> "StrictMock":
self_copy = self.__get_copy()
for name in self.__get_copyable_attrs(self_copy):
setattr(type(self_copy), name, type(self).__dict__[name])
return self_copy
def __deepcopy__(self, memo: Optional[Dict[Any, Any]] = None) -> "StrictMock":
if memo is None:
memo = {}
self_copy = self.__get_copy()
memo[id(self)] = self_copy
for name in self.__get_copyable_attrs(self_copy):
value = copy.deepcopy(type(self).__dict__[name], memo)
setattr(type(self_copy), name, value)
return self_copy
def _extract_StrictMock_template(mock_obj: StrictMock) -> Optional[Any]:
if "_template" in mock_obj.__dict__ and mock_obj._template is not None:
return mock_obj._template
return None
testslide.lib.MOCK_TEMPLATE_EXTRACTORS[StrictMock] = _extract_StrictMock_template # type: ignore
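# Hypothetical usage sketch (not part of the module): a StrictMock built from a
# template only accepts attributes that the template defines, validates callable
# signatures, and raises for anything unconfigured. The _Calculator template
# below is an assumption made for illustration.
class _Calculator:
    def add(self, a: int, b: int) -> int:
        return a + b
def _example_strict_mock():
    mock = StrictMock(template=_Calculator)
    mock.add = lambda a, b: 42  # callable value, checked against the template signature
    assert mock.add(1, 2) == 42
    try:
        mock.mul  # not part of _Calculator's API, so this raises
    except AttributeError:
        pass
    return mock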
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
DATA_ROOT = {
'BIT': './data/BIT',
'UT': './data/ut120',
'highfive': './data/highfive'
}
FRAMES_ROOT = {
'BIT': 'Bit-frames',
}
ANNO_ROOT = {
'BIT': 'BIT-anno/tidy_anno'
}
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
HID_DATASETS = Registry('hid_dataset')
HID_PIPELINES = Registry('hid_pipeline')
def build_dataset(cfg, default_args=None):
dataset = build_from_cfg(cfg, HID_DATASETS, default_args)
return dataset
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A PyTorch dataset.
samples_per_gpu (int): Number of training samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed training.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
if dist:
        # DistributedGroupSampler shuffles the data while ensuring that
        # the samples on each GPU belong to the same group
if shuffle:
sampler = DistributedGroupSampler(
dataset, samples_per_gpu, world_size, rank, seed=seed)
else:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=False, seed=seed)
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=False,
worker_init_fn=init_fn,
**kwargs)
return data_loader
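# Hedged usage sketch (not part of the original module): how the two builders
# above are typically wired together. 'MyHIDDataset' and the annotation file
# are hypothetical placeholders for a dataset class registered in HID_DATASETS.
#
#     dataset = build_dataset(dict(type='MyHIDDataset', ann_file='train.json'))
#     loader = build_dataloader(dataset,
#                               samples_per_gpu=4,
#                               workers_per_gpu=2,
#                               dist=False,
#                               shuffle=True,
#                               seed=42)
#     for batch in loader:
#         pass  # each batch is collated with samples_per_gpu=4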
def worker_init_fn(worker_id, num_workers, rank, seed):
    # The seed of each worker equals num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
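# Worked example (illustrative only): with seed=42, rank=1 and num_workers=2,
# worker 0 of that rank seeds NumPy/random with 2 * 1 + 0 + 42 = 44 and worker
# 1 with 45, so workers on different ranks never share a seed.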
|
from sys import path
from os.path import dirname as dir
import webbrowser
import os
path.append(dir(path[0]))
from tkinter import ttk
import tkinter as tk
from tkinter import *
from ui.Pantalla_TS import *
from ui.Pantalla_AST import *
from ui.Pantalla_Error import *
import tkinter.messagebox
from analizer import interpreter
class Pantalla:
def __init__(self):
self.lexicalErrors = list()
self.syntacticErrors = list()
self.semanticErrors = list()
self.postgreSQL = list()
self.ts = list()
self.inicializarScreen()
def inicializarScreen(self):
        # screen initialization
self.window = Tk()
self.window.geometry("700x750")
self.window.resizable(0, 0)
self.window.title("Query Tool")
self.frame_entrada = Frame(
self.window, height=300, width=520, bd=10, bg="#d3d3d3"
)
self.txt_scroll = Scrollbar(self.frame_entrada)
self.txt_scroll.pack(side=RIGHT, fill=Y)
self.txt_entrada = tk.Text(
self.frame_entrada, yscrollcommand=self.txt_scroll.set, height=15, width=80
)
self.txt_entrada.pack(side=TOP)
self.txt_scroll.config(command=self.txt_entrada.yview)
self.frame_entrada.pack()
        # menu bar definition
navMenu = Menu(self.window)
navMenu.add_command(label="Tabla de Simbolos", command=self.open_ST)
navMenu.add_command(label="AST", command=self.open_AST)
navMenu.add_command(label="AST pdf", command=self.open_PDF)
navMenu.add_command(
label="Reporte de errores",
command=self.open_Reporte,
)
self.window.config(menu=navMenu)
frame_btn = Frame(self.window)
btn = Button(frame_btn, text="Consultar", command=self.analize)
btn.pack(side=LEFT, anchor=E, padx=25, pady=20)
btn_1 = Button(frame_btn, text="Parsear", command=self.parse)
btn_1.pack(side=LEFT, anchor=E, padx=25, pady=20)
frame_btn.pack()
        # notebook creation
self.tabControl = ttk.Notebook(self.window, width=650, height=300)
console_frame = Frame(self.tabControl, height=20, width=150, bg="#d3d3d3")
self.text_Consola = tk.Text(console_frame, height=20, width=150)
self.text_Consola.pack(fill=BOTH)
console_frame.pack(fill=BOTH)
self.tabControl.add(console_frame, text="Consola")
self.tabControl.pack()
self.window.mainloop()
def show_result(self, consults):
        if consults is not None:
i = 0
for consult in consults:
i += 1
                if consult is not None:
frame = Frame(self.tabControl, height=300, width=450, bg="#d3d3d3")
                    # scrollbar creation
table_scroll = Scrollbar(frame, orient="vertical")
table_scrollX = Scrollbar(frame, orient="horizontal")
table = ttk.Treeview(
frame,
yscrollcommand=table_scroll.set,
xscrollcommand=table_scrollX.set,
height=12,
)
table_scroll.config(command=table.yview)
table_scrollX.config(command=table.xview)
self.fill_table(consult[0], consult[1], table)
table_scroll.pack(side=RIGHT, fill=Y)
table_scrollX.pack(side=BOTTOM, fill=X)
table.pack(side=LEFT, fill=BOTH)
frame.pack(fill=BOTH)
self.tabControl.add(frame, text="Consulta " + str(i))
else:
self.text_Consola.insert(
INSERT, "Error: Consulta sin resultado" + "\n"
)
self.tabControl.pack()
def parse(self):
self.refresh()
input = ""
input = self.txt_entrada.get(
"1.0", END
) # variable de almacenamiento de la entrada
result = interpreter.parser(input)
if len(result["lexical"]) + len(result["syntax"]) == 0:
tkinter.messagebox.showerror(
title="Mensaje", message="La consulta no contiene errores"
)
else:
self.lexicalErrors = result["lexical"]
self.syntacticErrors = result["syntax"]
tkinter.messagebox.showerror(
title="Error", message="La consulta contiene errores"
)
def analize(self):
self.refresh()
entrada = ""
entrada = self.txt_entrada.get(
"1.0", END
) # variable de almacenamiento de la entrada
result = interpreter.execution(entrada)
self.lexicalErrors = result["lexical"]
self.syntacticErrors = result["syntax"]
self.semanticErrors = result["semantic"]
self.postgreSQL = result["postgres"]
self.ts = result["symbols"]
self.indexes = result["indexes"]
if (
len(self.lexicalErrors)
+ len(self.syntacticErrors)
+ len(self.semanticErrors)
+ len(self.postgreSQL)
> 0
):
tkinter.messagebox.showerror(
title="Error", message="La consulta contiene errores"
)
if len(self.postgreSQL) > 0:
i = 0
self.text_Consola.insert(INSERT, "-----------ERRORS----------" + "\n")
while i < len(self.postgreSQL):
self.text_Consola.insert(INSERT, self.postgreSQL[i] + "\n")
i += 1
querys = result["querys"]
self.show_result(querys)
messages = result["messages"]
if len(messages) > 0:
i = 0
self.text_Consola.insert(INSERT, "-----------MESSAGES----------" + "\n")
while i < len(messages):
self.text_Consola.insert(INSERT, ">> " + str(messages[i]) + "\n")
i += 1
self.tabControl.pack()
def refresh(self):
tabls = self.tabControl.tabs()
i = 1
while i < len(tabls):
self.tabControl.forget(tabls[i])
i += 1
self.text_Consola.delete("1.0", "end")
self.semanticErrors.clear()
self.syntacticErrors.clear()
self.lexicalErrors.clear()
self.postgreSQL.clear()
self.ts.clear()
def fill_table(
self, columns, rows, table
    ):  # fills the table widget that displays the query result(s)
table["columns"] = columns
"""
Definicion de columnas y encabezado
"""
table.column("#0", width=25, minwidth=50)
i = 0
ancho = int(600 / len(columns))
if ancho < 100:
ancho = 100
while i < len(columns):
table.column(str(i), width=ancho, minwidth=50, anchor=CENTER)
i += 1
table.heading("#0", text="#", anchor=CENTER)
i = 0
while i < len(columns):
table.heading(str(i), text=str(columns[i]), anchor=CENTER)
i += 1
"""
Insercion de filas
"""
i = 0
for row in rows:
i += 1
table.insert(parent="", index="end", iid=i, text=i, values=(row))
    def open_ST(self):  # opens the symbol table window
windowTableS = Pantalla_TS(self.window, self.ts, self.indexes)
    def open_AST(self):  # opens the AST window
windowTableS = Pantalla_AST(self.window)
    def open_Reporte(self):  # opens the error report window
windowTableS = Pantalla_Error(
self.window, self.lexicalErrors, self.syntacticErrors, self.semanticErrors
)
def open_PDF(self):
url = "file:///" + os.path.realpath("test-output/round-table.gv.pdf")
webbrowser.open(url)
class Pantalla2:
def __init__(self):
self.lexicalErrors = list()
self.syntacticErrors = list()
self.semanticErrors = list()
self.postgreSQL = list()
self.ts = list()
def MetodoParser(self, texto):
        # declare the return value
        salida = ""
        # run the parser/executor
result = interpreter.execution(texto)
self.lexicalErrors = result["lexical"]
self.syntacticErrors = result["syntax"]
self.semanticErrors = result["semantic"]
self.postgreSQL = result["postgres"]
self.ts = result["symbols"]
self.indexes = result["indexes"]
if (
len(self.lexicalErrors)
+ len(self.syntacticErrors)
+ len(self.semanticErrors)
+ len(self.postgreSQL)
> 0
):
if len(self.postgreSQL) > 0:
i = 0
salida += "================================================== \n"
salida += " TYTUS ERROR \n"
salida += "================================================== \n"
while i < len(self.postgreSQL):
salida += ">> " + str(self.postgreSQL[i]) + "\n"
i += 1
querys = result["querys"]
messages = result["messages"]
if len(messages) > 0:
i = 0
salida += "==================================================\n"
salida += " TYTUS \n"
salida += "================================================== \n"
while i < len(messages):
salida += ">> " + str(messages[i]) + "\n"
i += 1
return salida
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
# Configuration file for JupyterHub
import os
# pre-spawn settings
NB_UID = 65534
NB_GID = 65534
CUDA = 'cuda' in os.environ['HOSTNODE']
c = get_config()
# read users/teams & images
import os, yaml
with open('/srv/jupyterhub/config.yaml', 'r') as cfgfile:
cfg = yaml.load(cfgfile, Loader=yaml.FullLoader)
team_map = cfg['users']
# Whitelist users and admins  # google: remove @gmail.com
c.Authenticator.allowed_users = list(team_map.keys())
c.Authenticator.admin_users = admin = set()
for u, team in team_map.items():
if 'admin' in team:
admin.add(u)
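# Hedged sketch of the config.yaml layout assumed by the accesses below
# (reconstructed from how `cfg` is indexed in this file; the real file may
# differ). All names and numbers are illustrative placeholders:
#
#     users:
#       alice: [{uid: 1001}, {gid: 100}, teamA, admin, sudo]
#       bob:   [{uid: 1002}, {gid: 100}, teamA]
#     images:
#       teamA: [D, G]
#     available_images:
#       D: {"Data science": "jupyter/datascience-notebook"}
#       G: {"GPU": "cschranz/gpu-jupyter"}
#     mounts:
#       teamA:
#         /data/teamA: [/home/USER/data, rw]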
# Spawn single-user servers as Docker containers
# CustomDockerSpawner
# form to select image
def get_options_form(spawner):
username = spawner.user.name # .split('@')[0]
teams = cfg['users'][username]
images = cfg['images']
# list of image letters for user
img = {k:v for k,v in images.items() if k in teams }
images = [] # unique list
for t,i in img.items():
for k in i:
if k not in images:
images.append(k)
if not CUDA:
images = [i for i in images if i != 'G']
# dict of image label:build
available_images = cfg['available_images']
allowed_images = [v for k,v in available_images.items() if k in images]
images=[]
for i in allowed_images:
images = images | i.items()
allowed_images = dict(images)
allowed_images = dict(sorted(allowed_images.items(), key=lambda x: x[0]))
# prepare form
if len(allowed_images) > 1:
option_t = '<option value="{image}" {selected}>{label}</option>'
options = [
option_t.format(
image=image, label=label, selected='selected' if image == spawner.image else ''
)
for label, image in allowed_images.items()
]
return """
<br><br>
<h3>Select an image</h3><br><br>{havecuda}<br><br><b>User: {username}</b><br><br>
<select class="form-control" name="image" required autofocus>
{options}
</select>
""".format(options=options, username=username, havecuda='All can run CUDA' if CUDA else '')
else:
spawner.image = [v for k,v in allowed_images.items()][0]
c.DockerSpawner.options_form = get_options_form
def set_sudo(spawner):
username = spawner.user.name
teams = cfg['users'][username]
if 'sudo' in teams:
return 'yes'
else:
return 'no'
def set_USER(spawner):
username = spawner.user.name
if username[0:4].isnumeric():
return username.upper()
else:
return username
def set_HOME(spawner):
return '/home/' + spawner.user.name
def set_UID(spawner):
UID = cfg['users'][spawner.user.name][0]['uid']
if UID >= 1 and UID < 65536:
return UID
else:
return 1000
def set_GID(spawner):
GID = cfg['users'][spawner.user.name][1]['gid']
if GID >= 1 and GID < 65536:
return GID
else:
return 100
c.DockerSpawner.environment = {
'NB_USER': set_USER,
'NB_UID': set_UID,
'NB_GID': set_GID,
'NB_UMASK':'002',
'CHOWN_HOME':'yes',
'GRANT_SUDO': set_sudo,
}
home_dir = os.environ.get('HOME_DIR')
# notebook_dir = '/home/' + spawner.user.name
# c.DockerSpawner.notebook_dir = notebook_dir
from dockerspawner import DockerSpawner
class CustomDockerSpawner(DockerSpawner):
# mount volumes by team
def start(self):
username = set_USER(self)
# username = self.user.name
# home dir
self.volumes[f"{home_dir}/{username.split('@')[0]}"] = {
'bind': '/home/' + username ,
'mode': 'rw',
}
# copy system /etc/group file
self.volumes['/etc/group'] = {
'bind': '/tmp/group',
'mode': 'ro',
}
# mount /srv from files in /singleuser/srv/setup
self.volumes[os.environ['JHUB_DIR']+'/singleuser/srv/setup'] = {
'bind': '/srv',
'mode': 'ro',
}
# user specific mounts as in config.yaml
teams = cfg['users'][self.user.name] # lowercase
mounts = cfg['mounts']
mounts = {k:v for k,v in mounts.items() if k in teams }
for k,v in mounts.items():
for h,d in v.items():
self.volumes[h] = { 'bind': d[0].replace('USER',username), 'mode': d[1] }
return super().start()
# c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.JupyterHub.spawner_class = CustomDockerSpawner
# hub runs as 'root',
c.DockerSpawner.extra_create_kwargs = {
'user': 'root',
'hostname': 'hub',
}
# nvidia
# /dev/shm 64M > 16G
if CUDA:
c.DockerSpawner.extra_host_config = {
'runtime': 'nvidia',
'shm_size': '16gb'
}
# JupyterHub requires a single-user instance of the Notebook server, so we
# default to using the `start-singleuser.sh` script included in the
# jupyter/docker-stacks *-notebook images as the Docker run command when
# spawning containers. Optionally, you can override the Docker run command
# using the DOCKER_SPAWN_CMD environment variable.
spawn_cmd = "start-singleuser.sh"
c.DockerSpawner.extra_create_kwargs.update({ 'command': spawn_cmd })
# Connect containers to this Docker network
network_name = os.environ['DOCKER_NETWORK_NAME']
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = network_name
# Pass the network name as argument to spawned containers
c.DockerSpawner.extra_host_config.update({ 'network_mode': network_name })
# Mount the real user's Docker volume on the host to the notebook user's
# notebook directory in the container
#c.DockerSpawner.volumes = { 'jupyterhub-user-{username}': notebook_dir }
# external proxy
c.JupyterHub.cleanup_servers = False
# tells the hub to not stop servers when the hub restarts (proxy runs separately).
c.ConfigurableHTTPProxy.should_start = False
# tells the hub that the proxy should not be started (because you start it yourself).
c.ConfigurableHTTPProxy.auth_token = os.environ.get('CONFIGPROXY_AUTH_TOKEN')
# token for authenticating communication with the proxy.
c.ConfigurableHTTPProxy.api_url = 'http://jupyterproxy:8001'
# the URL which the hub uses to connect to the proxy’s API.
# Remove containers once they are stopped
c.DockerSpawner.remove_containers = True
# User containers will access hub by container name on the Docker network
c.JupyterHub.base_url = '/jhub/'
c.JupyterHub.hub_ip = 'jupyterhub'
c.JupyterHub.hub_port = 8080
# don't need because we are behind an https reverse proxy
# # TLS config: requires generating certificates
# c.JupyterHub.port = 443
# c.JupyterHub.ssl_key = os.environ['SSL_KEY']
# c.JupyterHub.ssl_cert = os.environ['SSL_CERT']
# Persist hub data on volume mounted inside container
data_dir = '/data'
c.JupyterHub.cookie_secret_file = os.path.join(data_dir,
'jupyterhub_cookie_secret')
c.JupyterHub.db_url = f'sqlite:///{data_dir}/jupyterhub.sqlite'
# c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format(
# host=os.environ['POSTGRES_HOST'],
# password=os.environ['POSTGRES_PASSWORD'],
# db=os.environ['POSTGRES_DB'],
# )
# reset database
# c.JupyterHub.reset_db = False
# Authenticate users
'''
# GitHub
c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
c.GitHubOAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL']
# Native
# admin users in c.Authenticator.admin_users are automatically authorized when signup
c.JupyterHub.authenticator_class = 'nativeauthenticator.NativeAuthenticator'
'''
##### multioauth
# https://github.com/jupyterhub/oauthenticator/issues/136
from traitlets import List
from jupyterhub.auth import Authenticator
def url_path_join(*parts):
return '/'.join([p.strip().strip('/') for p in parts])
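# e.g. url_path_join('/github', 'oauth_login') == 'github/oauth_login'
# (leading and trailing slashes are stripped, so the result is a relative path)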
class MultiOAuthenticator(Authenticator):
authenticators = List(help="The subauthenticators to use", config=True)
def __init__(self, *arg, **kwargs):
super().__init__(*arg, **kwargs)
self._authenticators = []
for authenticator_klass, url_scope, configs in self.authenticators:
c = self.trait_values()
c.update(configs)
self._authenticators.append({"instance": authenticator_klass(**c), "url_scope": url_scope})
def get_custom_html(self, base_url):
html = []
for authenticator in self._authenticators:
login_service = authenticator["instance"].login_service
if login_service == 'User/Pass':
url = url_path_join(authenticator["url_scope"], "login")
else:
url = url_path_join(authenticator["url_scope"], "oauth_login")
# html.append(
# f"""
# <div class="service-login">
# <a role="button" class='btn btn-jupyter btn-lg' href='{url}'>
# Sign in with {login_service}
# </a>
# </div>
# """
# )
return "\n".join(html)
def get_handlers(self, app):
routes = []
for _authenticator in self._authenticators:
for path, handler in _authenticator["instance"].get_handlers(app):
class SubHandler(handler):
authenticator = _authenticator["instance"]
routes.append((f'{_authenticator["url_scope"]}{path}', SubHandler))
return routes
c.JupyterHub.authenticator_class = MultiOAuthenticator
from oauthenticator.github import GitHubOAuthenticator
from oauthenticator.google import GoogleOAuthenticator
from nativeauthenticator import NativeAuthenticator
#from oauthenticator.azuread import AzureAdOAuthenticator
c.MultiOAuthenticator.authenticators = [
(GitHubOAuthenticator, '/github', {
'client_id': os.environ['GITHUB_CLIENT_ID'],
'client_secret': os.environ['GITHUB_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GITHUB_CALLBACK_URL']
}),
(GoogleOAuthenticator, '/google', {
'client_id': os.environ['GOOGLE_CLIENT_ID'],
'client_secret': os.environ['GOOGLE_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GOOGLE_CALLBACK_URL'],
'login_service': 'Google'
}),
(NativeAuthenticator, '/', {
'login_service': 'User/Pass'
}),
]
import nativeauthenticator
c.JupyterHub.template_paths = [f"{os.path.dirname(nativeauthenticator.__file__)}/templates/"]
# template modified to allow github/google oauth
# ["/usr/local/lib/python3.8/dist-packages/nativeauthenticator/templates/"]
# google
# https://oauthenticator.readthedocs.io/en/latest/api/gen/oauthenticator.google.html
c.GoogleOAuthenticator.hosted_domain = ['gmail.com']
c.GoogleOAuthenticator.login_service = 'Google'
c.GoogleOAuthenticator.delete_invalid_users = True
c.NativeAuthenticator.check_common_password = True
c.NativeAuthenticator.minimum_password_length = 8
c.NativeAuthenticator.allowed_failed_logins = 3
c.NativeAuthenticator.enable_signup = True
# recaptcha config
# https://www.google.com/recaptcha/admin/site/500725121/settings
c.NativeAuthenticator.recaptcha_key = os.environ['RECAPCHA_KEY']
c.NativeAuthenticator.recaptcha_secret = os.environ['RECAPCHA_SECRET']
c.NativeAuthenticator.tos = 'Acepto las <a href="https://remote.genrisk.org/CDU.html" target="_blank">condiciones de uso</a>'
## enable authentication state
c.MultiOAuthenticator.enable_auth_state = True
import warnings
if 'JUPYTERHUB_CRYPT_KEY' not in os.environ:
warnings.warn(
"Need JUPYTERHUB_CRYPT_KEY env for persistent auth_state.\n"
" export JUPYTERHUB_CRYPT_KEY=$(openssl rand -hex 32)"
)
c.CryptKeeper.keys = [ os.urandom(32) ]
pass
'''
# remove idle notebooks after inactive time
# https://github.com/jupyterhub/jupyterhub-idle-culler
import sys
c.JupyterHub.services = [
{
'name': 'idle-culler',
'admin': True,
'command': [sys.executable, '-m', 'jupyterhub_idle_culler', '--timeout=3600'],
}
]
'''
# max simultaneous users
c.JupyterHub.concurrent_spawn_limit = 10
# user limits
# c.Spawner.cpu_limit = 2 # cores
# c.Spawner.mem_limit = 8G
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
import os
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
if not demisto.params().get('useProxy', False):
    os.environ.pop('HTTP_PROXY', None)
    os.environ.pop('HTTPS_PROXY', None)
    os.environ.pop('http_proxy', None)
    os.environ.pop('https_proxy', None)
''' GLOBAL VARS '''
SERVER_URL_V1 = 'https://www.cymon.io:443/api/nexus/v1'
SERVER_DASHBOARD_URL_V1 = 'https://www.cymon.io:443/api/dashboard/v1'
SERVER_URL_V2 = 'https://api.cymon.io/v2/ioc/search'
VERIFY_CERTIFICATES = False if demisto.params().get('unsecure') else True
DEFAULT_HEADERS = {
"Content-Type": "application/json"
}
''' HELPER FUNCTIONS '''
def cymon_says():
return_error('Cymon service discontinued. Please disable or delete the integration instance.')
def http_request(method, url, headers):
try:
res = requests.request(method,
url,
verify=VERIFY_CERTIFICATES,
headers=headers)
if res.status_code == 200:
return res.json()
# 204 HTTP status code is returned when api rate limit has been exceeded
elif res.status_code == 204:
return_error("You've reached your API call quota.")
elif res.status_code == 404:
return {}
res.raise_for_status()
    except Exception:
        raise
''' DOMAIN COMMAND '''
# def get_domain_full_report(domain):
# report_results = []
#
# from_param = 0
# size_param = 10
# total = None
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# while total is None or total > from_param:
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# hits = response.get('hits', [])
# for hit in hits:
# timestamp = datetime.strptime(
# hit.get('timestamp', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
# '%Y-%m-%dT%H:%M:%S.%fZ')
#
# report_results.append({
# 'Title': hit.get('title', "").title(),
# 'Feed': hit.get('feed'),
# 'Timestamp': timestamp.strftime("%Y-%m-%d %H:%M:%S"),
# # Formatting the timestamp to human readable date and time
# 'Tags': hit.get('tags'),
# 'Hostname': hit.get('ioc', {}).get('hostname'),
# 'IP': hit.get('ioc', {}).get('ip'),
# 'Domain': hit.get('ioc', {}).get('domain'),
# 'Reported By': hit.get('reported_by'),
# 'Location': hit.get('location', {}).get('country')
# })
#
# from_param = from_param + size_param
# total = int(response.get('total', 0))
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# return report_results
# def get_domain_report(domain_full_report):
# reports = {} # type:dict
#
# for report in domain_full_report:
# title = report.get('Title')
# timestamp = datetime.strptime(
# report.get('Timestamp', datetime.now().strftime("%Y-%m-%d %H:%M:%S")), '%Y-%m-%d %H:%M:%S')
#
# if (title in reports and reports.get(title).get('Timestamp') < timestamp) or title not in reports: # type: ignore
# reports.update({title: {
# 'Feed': report.get('Feed'),
# 'Timestamp': timestamp,
# 'Tags': report.get('Tags'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP'),
# 'Domain': report.get('Domain'),
# 'Reported By': report.get('Reported By'),
# 'Location': report.get('Location')
# }})
#
# report_results = []
#
# for report in reports:
# report_results.append({
# 'Title': report,
# 'Feed': reports.get(report).get('Feed'), # type: ignore
# 'Timestamp': reports.get(report).get('Timestamp').strftime("%Y-%m-%d %H:%M:%S"), # type: ignore
# # Formatting the timestamp to human readable date and time
# 'Tags': reports.get(report).get('Tags'), # type: ignore
# 'Hostname': reports.get(report).get('Hostname'), # type: ignore
# 'IP': reports.get(report).get('IP'), # type: ignore
# 'Domain': reports.get(report).get('Domain'), # type: ignore
# 'Reported By': reports.get(report).get('Reported By'), # type: ignore
# 'Location': reports.get(report).get('Location') # type: ignore
# })
#
# return {
# 'reports': report_results,
# 'total': len(domain_full_report)
# }
# def create_domain_command_markdown(domain, total_hits, reports, domain_full_report, is_full_response):
# md = '## Cymon Domain report for: {}\n'.format(domain)
#
# md += '\n'
#
# md += '**Total Hits:** {}'.format(total_hits)
#
# md += '\n'
#
# md += tableToMarkdown("The following reports are the latest malicious hits resolved to the given domain:", reports,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By', 'Domain'])
#
# if is_full_response:
# md += tableToMarkdown("Full report list:", domain_full_report,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By',
# 'Domain'])
#
# return md
# def create_context_domain_command(domain, reports):
# cymon_domain_context_activities = []
# description = 'Reported suspicious activities: '
#
# for report in reports:
# cymon_domain_context_activities.append({
# 'Title': report.get('Title'),
# 'Tags': report.get('Tags'),
# 'Time': report.get('Timestamp'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP')
# })
#
# description += '{}, '.format(report.get('Title'))
#
# description = description[:-2]
#
# context = {
# outputPaths['domain']: {
# 'Name': domain,
# 'Malicious': {
# 'Vendor': 'Cymon',
# 'Description': description
# }
# },
# 'Cymon': {
# 'Domain': {
# 'Activities': cymon_domain_context_activities
# }
# }
# }
#
# return context
# def get_domain_report_command():
# args = demisto.args()
#
# domain = args.get('domain')
# is_full_response = args.get('fullResponse') == 'true'
#
# domain_full_report = get_domain_full_report(domain)
# domain_summarized_report = get_domain_report(domain_full_report)
#
# if len(domain_full_report) == 0:
# return "Domain " + domain + " is not in Cymons's dataset"
#
# markdown = create_domain_command_markdown(domain, domain_summarized_report.get('total'),
# domain_summarized_report.get('reports'), domain_full_report,
# is_full_response)
# context = create_context_domain_command(domain, domain_summarized_report.get('reports'))
#
# return {
# 'Type': entryTypes['note'],
# 'Contents': domain_full_report,
# 'ContentsFormat': formats['json'],
# 'HumanReadable': markdown,
# 'EntryContext': context
# }
''' IP COMMAND '''
# def get_ip_events_sources(ip):
# url = '{}/{}/{}'.format(SERVER_URL_V1, 'ip', ip)
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# return response.get('sources', None)
# def get_ip_events(ip):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'events', 100)
# events = {} # type:dict
#
# next_link = url
#
# while next_link is not None:
# response = http_request('GET', next_link, DEFAULT_HEADERS)
#
# for event in response.get('results', []):
# tag = event.get('tag')
# date = datetime.strptime(
# event.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")), '%Y-%m-%dT%H:%M:%SZ')
#
# if (tag in events and events[tag] < date) or tag not in events:
# events.update({tag: date})
#
# next_link = response.get('next')
#
# for event in events:
# events[event] = events[event].strftime(
# "%Y-%m-%d %H:%M:%S") # Formatting the timestamp to human readable date and time
#
# return events
# def get_ip_location(ip):
# url = '{}/{}/{}'.format(SERVER_DASHBOARD_URL_V1, 'geolocation', ip)
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# lon = response.get('longitude', None)
# lat = response.get('latitude', None)
#
# if not lon or not lat:
# return {}
# else:
# return {
# 'lon': lon,
# 'lat': lat
# }
# def get_ip_domains(ip, max_len):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'domains', max_len)
# domains = []
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# for domain in response.get('results', []):
# date = datetime.strptime(
# domain.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")), '%Y-%m-%dT%H:%M:%SZ')
#
# domains.append({'Hostname': domain.get('name'),
# 'Last Resolved': date.strftime("%Y-%m-%d %H:%M:%S")})
#
# return domains
# def get_ip_urls(ip, max_len):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'urls', max_len)
# urls = {} # type:dict
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# for response_url in response.get('results', []):
# url = response_url.get('location')
# if url.endswith("/"):
# url = url[:-1]
#
# date = datetime.strptime(
# response_url.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
# '%Y-%m-%dT%H:%M:%SZ')
#
# if (url in urls and urls[url] < date) or url not in urls:
# urls.update({url: date})
#
# urls_result = []
# for url in urls:
# urls_result.append({'Url': url, "Last Resolved": urls[url].strftime(
# "%Y-%m-%d %H:%M:%S")}) # Formatting the timestamp to human readable date and time
#
# return urls_result
# def get_ip_asn(ip):
# url = '{}/{}/{}'.format(SERVER_DASHBOARD_URL_V1, 'ipwhois', ip)
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# asn = response.get('asn')
# asn_country_code = response.get('asn_country_code')
#
# if not asn or not asn_country_code:
# return {}
# else:
# return {
# 'asn': asn,
# 'country': asn_country_code
# }
# def create_ip_command_markdown(ip, sources, events, domains, urls, asn):
# md = '## Cymon IP report for: {}\n'.format(ip)
#
# if asn:
# md += 'ASN: **{}** ({})\n'.format(asn.get('asn'), asn.get('country'))
#
# md += '\n'
#
# if events:
# md += '### Reports\n'
# for event in events:
# md += '**{}** (Last reported on: {})\n'.format(event.title(), events[event])
#
# if sources:
# md += '#### Sources\n'
# for source in sources:
# md += '{}\n'.format(source)
#
# if domains and len(domains) > 0:
# md += tableToMarkdown("The following domains were resolved to the given IP address:", domains)
#
# if urls and len(urls) > 0:
# md += tableToMarkdown("The following urls were resolved to the given IP address:", urls)
#
# return md
# def create_ip_command_context(ip, asn, events, domains):
# if events:
# description = 'Reported suspicious activities: '
#
# for event in events:
# description += '{}, '.format(event)
#
# description = description[:-2]
# else:
# description = 'No suspicious activities were reported'
#
# asn_in_context = {} # type:dict
#
# if asn:
# asn_in_context = {
# 'ASN': asn.get('asn'),
# 'Geo': {
# 'Country': asn.get('country')
# }
# }
#
# context = {'Cymon': {
# 'IP': {
# 'Domains': domains
# }
# }, outputPaths['ip']: {
# 'Address': ip,
# 'Malicious': {
# 'Vendor': 'Cymon',
# 'Description': description
# }
# }}
#
# context[outputPaths['ip']].update(asn_in_context)
#
# return context
# def get_ip_report_command():
# args = demisto.args()
#
# full_response = args.get('fullResponse') == 'true'
#
# ip = args.get('ip')
# if not is_ip_valid(ip):
#         return_error('An invalid IP was specified')
#
# sources = get_ip_events_sources(ip)
#
# if not sources:
# return "IP " + ip + " is not in Cymons's dataset"
#
# if full_response:
# max_len = 1000
# else:
# max_len = 50
#
# events = get_ip_events(ip)
# location = get_ip_location(ip)
# domains = get_ip_domains(ip, max_len)
# urls = get_ip_urls(ip, max_len)
# asn = get_ip_asn(ip)
#
# markdown = create_ip_command_markdown(ip, sources, events, domains, urls, asn)
# context = create_ip_command_context(ip, asn, events, domains)
#
# return [
# {
# 'Type': entryTypes['map'],
# 'Contents': {
# 'lat': float(location.get('lat')),
# 'lng': float(location.get('lon'))
# },
# 'ContentsFormat': formats['json']
# },
# {
# 'Type': entryTypes['note'],
# 'Contents': {
# 'events': events,
# 'sources': sources,
# 'location': location,
# 'domains': domains,
# 'urls': urls,
# 'asn': asn
# },
# 'HumanReadable': markdown,
# 'EntryContext': context,
# 'ContentsFormat': formats['json']
# }]
''' EXECUTION CODE '''
try:
command = demisto.command()
if command == 'test-module':
demisto.results('Cymon has been Deprecated and is no longer in service. Please delete the instance.')
elif command == 'ip':
cymon_says()
elif command == 'domain':
cymon_says()
except Exception:
raise
|
import bpy
import mathutils
import compas_blender
from compas.robots.base_artist import BaseRobotModelArtist
__all__ = [
'RobotModelArtist',
]
class RobotModelArtist(BaseRobotModelArtist):
"""Visualizer for robot models inside a Blender environment.
Parameters
----------
model : :class:`compas.robots.RobotModel`
Robot model.
"""
def __init__(self, model, collection=None):
self.collection = collection
super(RobotModelArtist, self).__init__(model)
def transform(self, native_mesh, transformation):
native_mesh.matrix_world = mathutils.Matrix(transformation.matrix) @ native_mesh.matrix_world
    def create_geometry(self, geometry, name=None, color=None):
        # Imported colors take priority over the color parameter
if 'mesh_color.diffuse' in geometry.attributes:
color = geometry.attributes['mesh_color.diffuse']
# If we have a color, we'll discard alpha because draw_mesh is hard coded for a=1
if color:
r, g, b, _a = color
color = (r, g, b)
else:
color = (1., 1., 1.)
if self.collection and self.collection not in bpy.data.collections.keys():
compas_blender.utilities.create_collection(self.collection)
v, f = geometry.to_vertices_and_faces()
native_mesh = compas_blender.draw_mesh(vertices=v, faces=f, name=name, color=color, centroid=False, collection=self.collection)
native_mesh.hide_set(True)
return native_mesh
def redraw(self, timeout=0.0):
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1, time_limit=timeout)
def draw_visual(self):
visuals = super(RobotModelArtist, self).draw_visual()
for visual in visuals:
visual.hide_set(False)
def draw_collision(self):
collisions = super(RobotModelArtist, self).draw_collision()
for collision in collisions:
collision.hide_set(False)
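# Hedged usage sketch (not part of the original module), run from Blender's
# Python console; the URDF path is a hypothetical placeholder:
#
#     from compas.robots import RobotModel
#     model = RobotModel.from_urdf_file('/path/to/robot.urdf')
#     artist = RobotModelArtist(model, collection='Robot')
#     artist.draw_visual()   # un-hide the visual meshes created for each link
#     artist.redraw()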
|
import itertools
import pytest
import numpy as np
from skimage.data import shepp_logan_phantom
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage._shared.utils import convert_to_float
from skimage._shared import testing
from skimage._shared.testing import test_parallel
from skimage._shared._warnings import expected_warnings
PHANTOM = shepp_logan_phantom()[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1,
mode='constant', anti_aliasing=False, multichannel=False)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def test_iradon_bias_circular_phantom():
"""
test that a uniform circular phantom has a small reconstruction bias
"""
pixels = 128
xy = np.arange(-pixels / 2, pixels / 2) + 0.5
x, y = np.meshgrid(xy, xy)
image = x**2 + y**2 <= (pixels/4)**2
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta)
reconstruction_fbp = iradon(sinogram, theta=theta)
error = reconstruction_fbp - image
tol = 5e-5
roi_err = np.abs(np.mean(error))
assert roi_err < tol
def check_radon_center(shape, circle, dtype, preserve_range):
# Create a test image with only a single non-zero pixel at the origin
image = np.zeros(shape, dtype=dtype)
image[(shape[0] // 2, shape[1] // 2)] = 1.
# Calculate the sinogram
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle,
preserve_range=preserve_range)
# The sinogram should be a straight, horizontal line
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
@testing.parametrize("shape", [(16, 16), (17, 17)])
@testing.parametrize("circle", [False, True])
@testing.parametrize("dtype", [np.float64, np.float32, np.uint8, bool])
@testing.parametrize("preserve_range", [False, True])
def test_radon_center(shape, circle, dtype, preserve_range):
check_radon_center(shape, circle, dtype, preserve_range)
@testing.parametrize("shape", [(32, 16), (33, 17)])
@testing.parametrize("circle", [False])
@testing.parametrize("dtype", [np.float64, np.float32, np.uint8, bool])
@testing.parametrize("preserve_range", [False, True])
def test_radon_center_rectangular(shape, circle, dtype, preserve_range):
check_radon_center(shape, circle, dtype, preserve_range)
def check_iradon_center(size, theta, circle):
debug = False
# Create a test sinogram corresponding to a single projection
# with a single non-zero pixel at the rotation center
if circle:
sinogram = np.zeros((size, 1), dtype=float)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
sinogram = np.zeros((diagonal, 1), dtype=float)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
# Compare reconstructions for theta=angle and theta=angle + 180;
# these should be exactly equal
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
sizes_for_test_iradon_center = [16, 17]
thetas_for_test_iradon_center = [0, 90]
circles_for_test_iradon_center = [False, True]
@testing.parametrize("size, theta, circle",
itertools.product(sizes_for_test_iradon_center,
thetas_for_test_iradon_center,
circles_for_test_iradon_center))
def test_iradon_center(size, theta, circle):
check_iradon_center(size, theta, circle)
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image, circle=False), filter_name=filter_type,
interpolation=interpolation_type, circle=False)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
radon_iradon_inputs = list(itertools.product(interpolation_types,
filter_types))
# cubic interpolation is slow; only run one test for it
radon_iradon_inputs.append(('cubic', 'shepp-logan'))
@testing.parametrize("interpolation_type, filter_type",
radon_iradon_inputs)
def test_radon_iradon(interpolation_type, filter_type):
check_radon_iradon(interpolation_type, filter_type)
@pytest.mark.parametrize("filter_type", filter_types)
def test_iradon_new_signature(filter_type):
image = PHANTOM
sinogram = radon(image, circle=False)
with pytest.warns(FutureWarning):
assert np.array_equal(iradon(sinogram, filter=filter_type),
iradon(sinogram, filter_name=filter_type))
def test_iradon_angles():
"""
Test with different number of projections
"""
size = 100
# Synthetic data
image = np.tri(size) + np.tri(size)[::-1]
# Large number of projections: a good quality is expected
nb_angles = 200
theta = np.linspace(0, 180, nb_angles, endpoint=False)
radon_image_200 = radon(image, theta=theta, circle=False)
reconstructed = iradon(radon_image_200, circle=False)
delta_200 = np.mean(abs(_rescale_intensity(image) -
_rescale_intensity(reconstructed)))
assert delta_200 < 0.03
# Lower number of projections
    nb_angles = 80
    theta = np.linspace(0, 180, nb_angles, endpoint=False)
    radon_image_80 = radon(image, theta=theta, circle=False)
# Test whether the sum of all projections is approximately the same
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80, circle=False)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
# Loss of quality when the number of projections is reduced
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
image = np.zeros(shape, dtype=float)
image[slices] = 1.
sinogram = radon(image, theta, circle=False)
reconstructed = iradon(sinogram, theta, circle=False)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
shapes = [(3, 3), (4, 4), (5, 5)]
def generate_test_data_for_radon_iradon_minimal(shapes):
def shape2coordinates(shape):
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
return coordinates
def shape2shapeandcoordinates(shape):
return itertools.product([shape], shape2coordinates(shape))
return itertools.chain.from_iterable([shape2shapeandcoordinates(shape)
for shape in shapes])
@testing.parametrize("shape, coordinate",
generate_test_data_for_radon_iradon_minimal(shapes))
def test_radon_iradon_minimal(shape, coordinate):
check_radon_iradon_minimal(shape, coordinate)
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2], circle=False)
iradon(p, theta=[0, 1, 2], circle=False)
with testing.raises(ValueError):
iradon(p, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
a = np.ones((10, 10))
with expected_warnings(['reconstruction circle']):
radon(a, circle=True)
# Synthetic data, circular symmetry
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
# Synthetic data, random
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
def argmax_shape(a):
return np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square) ==
argmax_shape(sinogram_circle_to_square))
@testing.parametrize("size", (50, 51))
def test_sinogram_circle_to_square(size):
check_sinogram_circle_to_square(size)
def check_radon_iradon_circle(interpolation, shape, output_size):
# Forward and inverse radon on synthetic data
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
# Crop rectangular reconstruction to match circle=True reconstruction
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
# Find the reconstruction circle, set reconstruction to zero outside
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
    assert np.allclose(reconstruction_rectangle, reconstruction_circle)
# if adding more shapes to test data, you might want to look at commit d0f2bac3f
shapes_radon_iradon_circle = ((61, 79), )
interpolations = ('nearest', 'linear')
output_sizes = (None,
min(shapes_radon_iradon_circle[0]),
max(shapes_radon_iradon_circle[0]),
97)
@testing.parametrize("shape, interpolation, output_size",
itertools.product(shapes_radon_iradon_circle,
interpolations, output_sizes))
def test_radon_iradon_circle(shape, interpolation, output_size):
check_radon_iradon_circle(interpolation, shape, output_size)
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
for l in lengths:
theta_ordered = np.linspace(0, 180, l, endpoint=False)
theta_random = np.random.uniform(0, 180, l)
for theta in (theta_random, theta_ordered):
indices = [x for x in order_angles_golden_ratio(theta)]
# no duplicate indices allowed
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8, mode='reflect',
multichannel=False, anti_aliasing=False)
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
sinogram_shifted = np.vstack([np.interp(x + shifts[i], x,
sinogram[:, i])
for i in range(sinogram.shape[1])]).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
@pytest.mark.parametrize("preserve_range", [True, False])
def test_iradon_dtype(preserve_range):
sinogram = np.zeros((16, 1), dtype=int)
sinogram[8, 0] = 1.
sinogram64 = sinogram.astype('float64')
sinogram32 = sinogram.astype('float32')
assert iradon(sinogram, theta=[0],
preserve_range=preserve_range).dtype == 'float64'
assert iradon(sinogram64, theta=[0],
preserve_range=preserve_range).dtype == sinogram64.dtype
assert iradon(sinogram32, theta=[0],
preserve_range=preserve_range).dtype == sinogram32.dtype
def test_radon_dtype():
img = convert_to_float(PHANTOM, False)
img32 = img.astype(np.float32)
assert radon(img).dtype == img.dtype
assert radon(img32).dtype == img32.dtype
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_iradon_sart_dtype(dtype):
sinogram = np.zeros((16, 1), dtype=int)
sinogram[8, 0] = 1.
sinogram64 = sinogram.astype('float64')
sinogram32 = sinogram.astype('float32')
with expected_warnings(['Input data is cast to float']):
assert iradon_sart(sinogram, theta=[0]).dtype == 'float64'
assert iradon_sart(sinogram64, theta=[0]).dtype == sinogram64.dtype
assert iradon_sart(sinogram32, theta=[0]).dtype == sinogram32.dtype
assert iradon_sart(sinogram, theta=[0], dtype=dtype).dtype == dtype
assert iradon_sart(sinogram32, theta=[0], dtype=dtype).dtype == dtype
assert iradon_sart(sinogram64, theta=[0], dtype=dtype).dtype == dtype
def test_iradon_sart_wrong_dtype():
sinogram = np.zeros((16, 1))
with testing.raises(ValueError):
iradon_sart(sinogram, dtype=int)
|
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Basic plotting methods using matplotlib.
These include methods to plot Bloch vectors, histograms, and quantum spheres.
Author: Andrew Cross, Jay Gambetta
"""
from mpl_toolkits.mplot3d import proj3d
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
import numpy as np
from collections import Counter
from functools import reduce
def plot_histogram(data, number_to_keep=None):
"""Plot a histogram of data.
data is a dictionary of {'000': 5, '010': 113, ...}
    number_to_keep is the number of terms to plot; the remaining terms are
    summed into a single bar labelled 'rest'.
"""
if number_to_keep is not None:
data_temp = dict(Counter(data).most_common(number_to_keep))
data_temp["rest"] = sum(data.values()) - sum(data_temp.values())
data = data_temp
labels = sorted(data)
values = np.array([data[key] for key in labels], dtype=float)
pvalues = values / sum(values)
numelem = len(values)
ind = np.arange(numelem) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects = ax.bar(ind, pvalues, width, color='seagreen')
# add some text for labels, title, and axes ticks
ax.set_ylabel('Probabilities', fontsize=12)
ax.set_xticks(ind)
ax.set_xticklabels(labels, fontsize=12)
ax.set_ylim([0., min([1.2, max([1.2 * val for val in pvalues])])])
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%f' % float(height),
ha='center', va='bottom')
plt.show()
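# Hedged usage sketch (not part of the original module); the counts below are
# made-up measurement results:
#
#     counts = {'000': 510, '111': 480, '010': 34}
#     plot_histogram(counts)                    # one bar per outcome
#     plot_histogram(counts, number_to_keep=2)  # top 2 outcomes plus a 'rest' bar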
# Functions used for plotting on the qsphere.
#
# See:
# lex_index:
# https://msdn.microsoft.com/en-us/library/aa289166%28v=vs.71%29.aspx
# n_choose_k: http://stackoverflow.com/questions/
# 2096573/counting-combinations-and-permutations-efficiently
class Arrow3D(FancyArrowPatch):
"""Standard 3D arrow."""
def __init__(self, xs, ys, zs, *args, **kwargs):
"""Create arrow."""
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
"""Draw the arrow."""
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
def compliment(value):
"""Swap 1 and 0 in a vector."""
return ''.join(COMPLEMENT[x] for x in value)
COMPLEMENT = {'1': '0', '0': '1'}
def n_choose_k(n, k):
"""Return the number of combinations."""
if n == 0:
return 0.0
else:
return reduce(lambda x, y: x * y[0] / y[1],
zip(range(n - k + 1, n + 1),
range(1, k + 1)), 1)
def lex_index(n, k, lst):
"""Return the index of a combination."""
assert len(lst) == k, "list should have length k"
comb = list(map(lambda x: n - 1 - x, lst))
dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)])
m = dualm
return int(m)
def bit_string_index(s):
"""Return the index of a string of 0s and 1s."""
n = len(s)
k = s.count("1")
assert s.count("0") == n - k, "s must be a string of 0 and 1"
ones = [pos for pos, char in enumerate(s) if char == "1"]
return lex_index(n, k, ones)
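# Worked example (illustrative): for length-4 strings with two 1s there are
# n_choose_k(4, 2) == 6 combinations, and the helpers above index them so that
# bit_string_index('0011') == 0, bit_string_index('0101') == 1 and
# bit_string_index('0110') == 2; plot_qsphere uses this index to spread states
# of equal weight around one latitude of the sphere.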
def plot_qsphere(data, number_to_keep, number_of_qubits):
"""Plot the qsphere of data."""
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.axes.set_xlim3d(-1.0, 1.0)
ax.axes.set_ylim3d(-1.0, 1.0)
ax.axes.set_zlim3d(-1.0, 1.0)
ax.set_aspect("equal")
ax.axes.grid(False)
# Plot semi-transparent sphere
u = np.linspace(0, 2 * np.pi, 25)
v = np.linspace(0, np.pi, 25)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, rstride=1, cstride=1, color='k', alpha=0.05,
linewidth=0)
# wireframe
# Get rid of the panes
# ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the spines
# ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the ticks
# ax.set_xticks([])
# ax.set_yticks([])
# ax.set_zticks([])
d = number_of_qubits
total_values = sum(data.values())
for key in data:
weight = key.count("1")
zvalue = -2 * weight / d + 1
number_of_divisions = n_choose_k(d, weight)
weight_order = bit_string_index(key)
if weight_order >= number_of_divisions / 2:
com_key = compliment(key)
weight_order_temp = bit_string_index(com_key)
weight_order = np.floor(
number_of_divisions / 2) + weight_order_temp + 1
print(key + " " + str(weight_order))
angle = (weight_order) * 2 * np.pi / number_of_divisions
xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)
yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)
linewidth = 5 * data.get(key) / total_values
print([xvalue, yvalue, zvalue])
a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue], mutation_scale=20,
lw=linewidth, arrowstyle="->", color="k")
ax.add_artist(a)
for weight in range(d + 1):
theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)
z = -2 * weight / d + 1
if weight == 0:
z = z - 0.001
if weight == d:
z = z + 0.001
r = np.sqrt(1 - z**2)
x = r * np.cos(theta)
y = r * np.sin(theta)
ax.plot(x, y, z, 'k')
plt.show()
# Functions used for plotting tomography.
def plot_bloch_vector(bloch, title=""):
"""Plot a Bloch vector.
Plot a sphere, axes, the Bloch vector, and its projections onto each axis.
bloch is a 3-tuple (x, y, z)
title is a string, the plot title
"""
# Set arrow lengths
arlen = 1.3
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect("equal")
# Plot semi-transparent sphere
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, color="b", alpha=0.1)
# Plot arrows (axes, Bloch vector, its projections)
xa = Arrow3D([0, arlen], [0, 0], [0, 0], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
ya = Arrow3D([0, 0], [0, arlen], [0, 0], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
za = Arrow3D([0, 0], [0, 0], [0, arlen], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
a = Arrow3D([0, bloch[0]], [0, bloch[1]], [0, bloch[2]], mutation_scale=20,
lw=2, arrowstyle="simple", color="k")
bax = Arrow3D([0, bloch[0]], [0, 0], [0, 0], mutation_scale=20, lw=2,
arrowstyle="-", color="r")
bay = Arrow3D([0, 0], [0, bloch[1]], [0, 0], mutation_scale=20, lw=2,
arrowstyle="-", color="g")
baz = Arrow3D([0, 0], [0, 0], [0, bloch[2]], mutation_scale=20, lw=2,
arrowstyle="-", color="b")
arrowlist = [xa, ya, za, a, bax, bay, baz]
for arr in arrowlist:
ax.add_artist(arr)
# Rotate the view
ax.view_init(30, 30)
# Annotate the axes, shifts are ad-hoc for this (30, 30) view
xp, yp, _ = proj3d.proj_transform(arlen, 0, 0, ax.get_proj())
plt.annotate("x", xy=(xp, yp), xytext=(-3, -8),
textcoords='offset points', ha='right', va='bottom')
xp, yp, _ = proj3d.proj_transform(0, arlen, 0, ax.get_proj())
plt.annotate("y", xy=(xp, yp), xytext=(6, -5),
textcoords='offset points', ha='right', va='bottom')
xp, yp, _ = proj3d.proj_transform(0, 0, arlen, ax.get_proj())
plt.annotate("z", xy=(xp, yp), xytext=(2, 0),
textcoords='offset points', ha='right', va='bottom')
plt.title(title)
plt.show()
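# Usage sketch (added example): the Bloch vector of the |+> state points along
# +x, so passing (1, 0, 0) draws the state arrow along the x axis together
# with its projections.
def _example_bloch_plus_state():
    plot_bloch_vector((1, 0, 0), title="|+> state")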
# Functions used by randomized benchmarking.
def plot_rb_data(xdata, ydatas, yavg, fit, survival_prob):
"""Plot randomized benchmarking data.
xdata = list of subsequence lengths
ydatas = list of lists of survival probabilities for each sequence
yavg = mean of the survival probabilities at each sequence length
fit = list of fitting parameters [a, b, alpha]
survival_prob = function that computes survival probability
"""
# Plot the result for each sequence
for ydata in ydatas:
plt.plot(xdata, ydata, 'rx')
# Plot the mean
plt.plot(xdata, yavg, 'bo')
# Plot the fit
plt.plot(xdata, survival_prob(xdata, *fit), 'b-')
plt.show()
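# Usage sketch for plot_rb_data (added example). The decay model below,
# a * alpha**m + b, is a common randomized-benchmarking form assumed here only
# for illustration, and the sequence data are synthetic; np refers to the
# numpy import already used elsewhere in this module.
def _example_rb_plot():
    def survival_prob(m, a, b, alpha):
        return a * np.power(alpha, m) + b
    xdata = np.array([1, 2, 4, 8, 16, 32])
    fit = [0.5, 0.5, 0.95]
    ydatas = [survival_prob(xdata, *fit) + np.random.normal(0, 0.01, xdata.size)
              for _ in range(5)]
    yavg = np.mean(ydatas, axis=0)
    plot_rb_data(xdata, ydatas, yavg, fit, survival_prob)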
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# After rejoining, the nodes should report two chain tips; verify that they do.
from test_framework.test_framework import MoselbitTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (MoselbitTestFramework):
def run_test (self):
MoselbitTestFramework.run_test (self)
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all ()
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
|
from torchvision import transforms
from ..base import BaseTransforms
class ResizeCenterCropFlipHVToTensor(BaseTransforms):
def __call__(self, sample):
data_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
])
return data_transforms(sample)
class ResizeCenterCropToTensor(BaseTransforms):
def __call__(self, sample):
data_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
])
return data_transforms(sample)
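# Usage sketch (added example; assumes an HxWx3 uint8 array, which is what
# transforms.ToPILImage expects for RGB input). numpy is imported locally and
# only for this illustration.
def _example_resize_center_crop():
    import numpy as np
    image = np.zeros((300, 400, 3), dtype=np.uint8)
    tensor = ResizeCenterCropToTensor()(image)
    return tensor.shape  # expected: torch.Size([3, 224, 224])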
|
import subprocess
import sys
import setup_util
from os.path import expanduser
home = expanduser("~")
##############
# start(args)
##############
def start(args, logfile, errfile):
setup_util.replace_text("treefrog/config/database.ini", "HostName=.*", "HostName=" + args.database_host)
setup_util.replace_text("treefrog/config/application.ini", "MultiProcessingModule=.*", "MultiProcessingModule=hybrid")
# 1. Generate Makefile
# 2. Compile application
# 3. Clean log files
# 4. Start TreeFrog
try:
subprocess.check_call("qmake -r CONFIG+=release", shell=True, cwd="treefrog", stderr=errfile, stdout=logfile)
subprocess.check_call("make clean", shell=True, cwd="treefrog", stderr=errfile, stdout=logfile)
subprocess.check_call("make -j8", shell=True, cwd="treefrog", stderr=errfile, stdout=logfile)
subprocess.check_call("rm -f log/*.log", shell=True, cwd="treefrog", stderr=errfile, stdout=logfile)
subprocess.check_call("treefrog -d " + home + "/FrameworkBenchmarks/treefrog", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
##############
# stop()
##############
def stop(logfile, errfile):
try:
subprocess.call("treefrog -k abort " + home + "/FrameworkBenchmarks/treefrog", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
|
"""Plot intensity profile of theoretical beam patterns."""
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import binned_statistic as bstat
from frbpoppy.survey import Survey
OBSERVATORIES = [('parkes', 'htru'),
('apertif', 'apertif')]
n = int(1e6)
for obs in OBSERVATORIES:
survey = obs[1]
pattern = obs[0]
s = Survey(survey, gain_pattern=pattern)
int_pro, offset = s.intensity_profile(n_gen=n)
# Sort the values
sorted_int = np.argsort(offset)
int_pro = int_pro[sorted_int]
offset = offset[sorted_int]
# Offset in degrees
offset = offset/60.
bins = 100
bin_means, bin_edges, bin_numbers = bstat(offset,
int_pro,
statistic='mean',
bins=bins)
bin_mins, _, _ = bstat(offset, int_pro, statistic='min', bins=bins)
bin_maxs, _, _ = bstat(offset, int_pro, statistic='max', bins=bins)
center = (bin_edges[:-1] + bin_edges[1:]) / 2
plt.plot(center, bin_means, label=pattern)
plt.fill_between(center, bin_mins, bin_maxs, alpha=0.2)
plt.xlabel(r'Offset ($\degree$)')
plt.ylabel('Intensity Profile')
plt.yscale('log')
plt.legend()
plt.tight_layout()
plt.savefig('plots/int_pro_surveys.pdf')
|
from sciwing.modules.embedders import *
from sciwing.modules.bow_encoder import BOW_Encoder
from sciwing.modules.lstm2vecencoder import LSTM2VecEncoder
from sciwing.modules.lstm2seqencoder import Lstm2SeqEncoder
from sciwing.modules.charlstm_encoder import CharLSTMEncoder
|
import itertools
import datetime
import util
import asyncio
import random
import os
import sys
import traceback
import subprocess
import time
import json
import urllib.request
import ast
from google.cloud import storage
# ========================================================================
def collect_cluster_crashes(v1, namespace, cluster_crashes):
print('collecting cluster crashes / restarts')
pods = v1.list_namespaced_pod(namespace, watch=False)
containers = list(itertools.chain(*[ pod.to_dict()['status']['container_statuses'] for pod in pods.items ]))
mina_containers = list(filter(lambda c: c['name'] in [ 'coda', 'seed', 'coordinator', 'archive' ], containers))
def restarted_recently(c):
if c['restart_count'] == 0:
return False
terminated = c['last_state']['terminated']
if terminated is None:
return False
restart_time = terminated['started_at']
restart_age_seconds = (datetime.datetime.now(datetime.timezone.utc) - restart_time).total_seconds()
# restarted less than 30 minutes ago
return restart_age_seconds <= 30*60
recently_restarted_containers = list(filter(lambda c: restarted_recently(c), mina_containers))
fraction_recently_restarted = len(recently_restarted_containers)/len(mina_containers)
print(len(recently_restarted_containers), 'of', len(mina_containers), 'recently restarted')
cluster_crashes.set(fraction_recently_restarted)
# ========================================================================
def pods_with_no_new_logs(v1, namespace, nodes_with_no_new_logs):
print('counting pods with no new logs')
pods = v1.list_namespaced_pod(namespace, watch=False)
ten_minutes = 10 * 60
count = 0
for pod in pods.items:
containers = pod.status.container_statuses
mina_containers = list(filter(lambda c: c.name in [ 'coda', 'seed', 'coordinator' ], containers))
if len(mina_containers) != 0:
name = pod.metadata.name
recent_logs = v1.read_namespaced_pod_log(name=name, namespace=namespace, since_seconds=ten_minutes, container=mina_containers[0].name)
if len(recent_logs) == 0:
count += 1
total_count = len(pods.items)
fraction_no_new_logs = float(count) / float(total_count)
print(count, 'of', total_count, 'pods have no logs in the last 10 minutes')
nodes_with_no_new_logs.set(fraction_no_new_logs)
# ========================================================================
from node_status_metrics import collect_node_status_metrics
# ========================================================================
def check_google_storage_bucket(v1, namespace, recent_google_bucket_blocks):
print('checking google storage bucket')
bucket = 'mina_network_block_data'
now = time.time()
storage_client = storage.Client()
blobs = list(storage_client.list_blobs(bucket, prefix=namespace))
blob_ages = [ now - b.generation/1e6 for b in blobs ]
newest_age = min([ age for age in blob_ages ])
recent_google_bucket_blocks.set(newest_age)
# ========================================================================
def daemon_containers(v1, namespace):
pods = v1.list_namespaced_pod(namespace, watch=False)
for pod in pods.items:
containers = pod.status.container_statuses
for c in containers:
if c.name in [ 'coda', 'mina', 'seed']:
yield (pod.metadata.name, c.name)
def get_chain_id(v1, namespace):
for (pod_name, container_name) in daemon_containers(v1, namespace):
resp = util.exec_on_pod(v1, namespace, pod_name, container_name, 'mina client status --json')
try:
resp = resp.strip()
if resp[0] != '{':
#first line could be 'Using password from environment variable CODA_PRIVKEY_PASS'
resp = resp.split("\n", 1)[1]
resp_dict = ast.literal_eval(resp.strip())
print("Chain ID: {}".format(resp_dict['chain_id']))
return resp_dict['chain_id']
except Exception as e:
print("Exception when extracting chain id: {}\n mina client status response: {}".format(e, resp))
continue
def check_seed_list_up(v1, namespace, seeds_reachable):
print('checking seed list up')
seed_peers_list_url = os.environ.get('SEED_PEERS_URL')
with urllib.request.urlopen(seed_peers_list_url) as f:
contents = f.read().decode('utf-8')
seeds = ' '.join(contents.split('\n'))
#stdbuf -o0 is to disable buffering
chain_id = get_chain_id(v1, namespace)
if chain_id is None:
print('could not get chain id')
else:
command = 'stdbuf -o0 check_libp2p/check_libp2p ' + chain_id + ' ' + seeds
proc = subprocess.Popen(command,stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, text=True)
for line in proc.stderr.readlines():
print("check_libp2p error: {}".format(line))
val = proc.stdout.read()
print("check_libp2p output: {}".format(val))
proc.stdout.close()
proc.wait()
res = json.loads(val)
#checklibp2p returns whether or not the connection to a peerID errored
fraction_up = sum(res.values())/len(res.values())
seeds_reachable.set(fraction_up)
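# Worked example for the fraction computed above (added illustration): if
# check_libp2p reports {"peerA": true, "peerB": false}, then
# sum(res.values()) / len(res.values()) == 0.5, i.e. half of the seeds in the
# list responded successfully.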
# ========================================================================
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template import Context, Template, TemplateSyntaxError
from django.utils.translation import override
import pytest
from djmoney.models.fields import MoneyPatched
from djmoney.templatetags.djmoney import MoneyLocalizeNode
from moneyed import Money
def render(template, context):
return Template(template).render(Context(context))
class TestMoneyLocalizeNode:
def test_repr(self):
assert repr(MoneyLocalizeNode(Money(5, 'EUR'))) == '<MoneyLocalizeNode 5 EUR>'
def test_invalid_instance(self):
with pytest.raises(Exception) as exc:
MoneyLocalizeNode(Money(5, 'EUR'), amount=15)
assert str(exc.value) == 'You can define either "money" or the "amount" and "currency".'
@pytest.mark.parametrize('template, context, error_text', (
(
'{% load djmoney %}{% money_localize "2.5" "PLN" as NEW_M and blabla %}{{NEW_M}}',
{},
'Wrong number of input data to the tag.'
),
(
'{% load djmoney %}{% money_localize money %}{{NEW_M}}',
{'money': 'Something else'},
'The variable "money" must be an instance of Money.'
),
(
'{% load djmoney %}{% money_localize amount currency %}',
{'amount': None, 'currency': 'PLN'},
'You must define both variables: amount and currency.'
)
))
def test_invalid_input(template, context, error_text):
with pytest.raises(TemplateSyntaxError) as exc:
render(template, context)
assert str(exc.value) == error_text
def assert_template(string, result, context=None):
context = context or {}
with override('pl'):
assert render(string, context) == result
@pytest.mark.parametrize(
'string, result, context',
(
(
'{% load djmoney %}{% money_localize "2.5" "PLN" as NEW_M %}{{NEW_M}}',
'2,50 zł',
{}
),
(
'{% load djmoney %}{% money_localize "2.5" "PLN" %}',
'2,50 zł',
{}
),
(
'{% load djmoney %}{% money_localize amount currency %}',
'2,60 zł',
{'amount': 2.6, 'currency': 'PLN'}
),
(
'{% load djmoney %}{% money_localize money as NEW_M %}{{NEW_M}}',
'2,30 zł',
{'money': Money(2.3, 'PLN')}
),
(
'{% load djmoney %}{% money_localize money off as NEW_M %}{{NEW_M}}',
'2.30 zł',
{'money': Money(2.3, 'PLN')}
),
(
'{% load djmoney %}{% money_localize money off as NEW_M %}{{NEW_M}}',
'0.00 zł',
{'money': Money(0, 'PLN')}
),
(
# with a tag template "money_localize"
'{% load djmoney %}{% money_localize money %}',
'2,30 zł',
{'money': Money(2.3, 'PLN')}
),
(
# without a tag template "money_localize"
'{{ money }}',
'2,30 zł',
{'money': MoneyPatched(2.3, 'PLN')}
),
(
'{% load djmoney %}{% money_localize money off %}',
'2.30 zł',
{'money': Money(2.3, 'PLN')}
),
(
'{% load djmoney %}{% money_localize money on %}',
'2,30 zł',
{'money': Money(2.3, 'PLN')}
)
)
)
def test_tag(string, result, context):
assert_template(string, result, context)
@pytest.mark.parametrize(
'string, result, context',
(
(
# money_localize has a default setting USE_L10N = True
'{% load djmoney %}{% money_localize money %}',
'2,30 zł',
{'money': Money(2.3, 'PLN')}
),
(
# without a tag template "money_localize"
'{{ money }}',
'2.30 zł',
{'money': Money(2.3, 'PLN')}
),
(
'{% load djmoney %}{% money_localize money on %}',
'2,30 zł',
{'money': Money(2.3, 'PLN')}
),
)
)
def test_l10n_off(settings, string, result, context):
settings.USE_L10N = False
assert_template(string, result, context)
def test_forced_l10n():
mp = MoneyPatched(2.3, 'PLN')
mp.use_l10n = True
assert_template('{{ money }}', '2,30 zł', {'money': mp})
|
from . import distributions
from . import functional
from . import nn
|
def mergesort(items):
if len(items) <= 1:
return items
mid = len(items) // 2
left = items[:mid]
right = items[mid:]
left = mergesort(left)
right = mergesort(right)
return merge(left, right)
def merge(left, right):
merged = []
left_index = 0
right_index = 0
while left_index < len(left) and right_index < len(right):
if left[left_index] > right[right_index]:
merged.append(right[right_index])
right_index += 1
else:
merged.append(left[left_index])
left_index += 1
merged += left[left_index:]
merged += right[right_index:]
return merged
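# Worked example of the merge step (added illustration): merging [3, 8] with
# [1, 7] compares the heads repeatedly, yielding 1, 3, 7, and the trailing
# slice appends the leftover 8, giving [1, 3, 7, 8].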
test_list_1 = [8, 3, 1, 7, 0, 10, 2]
test_list_2 = [1, 0]
test_list_3 = [97, 98, 99]
print('{} to {}'.format(test_list_1, mergesort(test_list_1)))
print('{} to {}'.format(test_list_2, mergesort(test_list_2)))
print('{} to {}'.format(test_list_3, mergesort(test_list_3)))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bluebird import BlueBird
for username in BlueBird().get_followers('brunn3is'):
print(username)
|
# !/usr/bin/env python
import re
import requests
import collections
import hashlib
import os
import json
import time
def _prefix(cmd):
prefix = os.path.basename(cmd.replace('-', '_'))
for i in ['.pyc', '.py', '-cli', '-tool', '-util']:
prefix = prefix.replace(i, '')
return prefix.upper()
PREFIX = _prefix(__file__)
CACHE_DIR = '~/.' + PREFIX.lower()
TIME = os.environ.get('TIME_API') is not None
DEFAULT_TIMEOUT = 45
LIST = 'list-'
CREATE = 'create-'
UPDATE = 'update-'
DELETE = 'delete-'
ACTION = 'action-'
TRIM = True
JSON = False
GET_METHOD = 'GET'
POST_METHOD = 'POST'
PUT_METHOD = 'PUT'
DELETE_METHOD = 'DELETE'
HEADERS = {'Accept': 'application/json'}
LIST_METHODS = {'__iter__': True, '__len__': True, '__getitem__': True}
def echo(fn):
def wrapped(*args, **kw):
ret = fn(*args, **kw)
print(fn.__name__, repr(ret))
return ret
return wrapped
def timed_url(fn):
def wrapped(*args, **kw):
if TIME:
start = time.time()
ret = fn(*args, **kw)
delta = time.time() - start
print(delta, args[1], fn.__name__)
return ret
else:
return fn(*args, **kw)
return wrapped
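# Illustrative use of the echo decorator above (added example, not part of the
# original tool): each call prints the wrapped function's name and the repr of
# its return value before passing the result through.
@echo
def _echo_demo(a, b):
    return a + b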
class RestObject(object):
def __init__(self):
pass
def __str__(self):
return self.__repr__()
def __repr__(self):
data = {}
for k, v in self.__dict__.items():
if self._is_public(k, v):
data[k] = v
return repr(data)
def __getattr__(self, k):
if self._is_list() and k in LIST_METHODS:
return getattr(self.data, k)
return getattr(self.__dict__, k)
def __getitem__(self, key):
return self.__dict__[key]
def __iter__(self):
if self._is_list():
return iter(self.data)
else:
data = {}
for k, v in self.__dict__.items():
if self._is_public(k, v):
data[k] = v
return iter(data.keys())
def __len__(self):
if self._is_list():
return len(self.data)
else:
data = {}
for k, v in self.__dict__.items():
if self._is_public(k, v):
data[k] = v
return len(data)
@staticmethod
def _is_public(k, v):
return not callable(v)
def _is_list(self):
return 'data' in self.__dict__ and isinstance(self.data, list)
def data_dict(self):
data = {}
for k, v in self.__dict__.items():
if self._is_public(k, v):
data[k] = v
return data
class Schema(object):
def __init__(self, text, obj):
self.text = text
self.types = {}
for t in obj:
if t.type != 'schema':
continue
self.types[t.id] = t
t.creatable = False
try:
if POST_METHOD in t.collectionMethods:
t.creatable = True
except AttributeError:
pass
t.updatable = False
try:
if PUT_METHOD in t.resourceMethods:
t.updatable = True
except AttributeError:
pass
t.deletable = False
try:
if DELETE_METHOD in t.resourceMethods:
t.deletable = True
except AttributeError:
pass
t.listable = False
try:
if GET_METHOD in t.collectionMethods:
t.listable = True
except AttributeError:
pass
if not hasattr(t, 'collectionFilters'):
t.collectionFilters = {}
def __str__(self):
return str(self.text)
def __repr__(self):
return repr(self.text)
class ApiError(Exception):
def __init__(self, obj):
self.error = obj
try:
msg = '{} : {}\n\t{}'.format(obj.code, obj.message, obj)
super(ApiError, self).__init__(self, msg)
except Exception:
super(ApiError, self).__init__(self, 'API Error')
class ClientApiError(Exception):
pass
class Client(object):
def __init__(self, access_key=None, secret_key=None, url=None, cache=False,
cache_time=86400, strict=False, headers=None, token=None,
verify=True, **kw):
if verify == 'False':
verify = False
self._headers = HEADERS.copy()
if headers is not None:
for k, v in headers.items():
self._headers[k] = v
if token is not None:
self.token = token
self._headers['Authorization'] = 'Bearer ' + token
self._access_key = access_key
self._secret_key = secret_key
if self._access_key is None:
self._auth = None
else:
self._auth = (self._access_key, self._secret_key)
self._url = url
self._cache = cache
self._cache_time = cache_time
self._strict = strict
self.schema = None
self._session = requests.Session()
self._session.verify = verify
if not self._cache_time:
self._cache_time = 60 * 60 * 24 # 24 Hours
self._load_schemas()
def valid(self):
return self._url is not None and self.schema is not None
def object_hook(self, obj):
if isinstance(obj, list):
return [self.object_hook(x) for x in obj]
if isinstance(obj, dict):
result = RestObject()
for k, v in obj.items():
setattr(result, k, self.object_hook(v))
for link in ['next', 'prev']:
try:
url = getattr(result.pagination, link)
if url is not None:
setattr(result, link, lambda url=url: self._get(url))
except AttributeError:
pass
if hasattr(result, 'type') and isinstance(getattr(result, 'type'),
str):
if hasattr(result, 'links'):
for link_name, link in result.links.items():
def cb_link(_link=link, **kw):
return self._get(_link, data=kw)
if hasattr(result, link_name):
setattr(result, link_name + '_link', cb_link)
else:
setattr(result, link_name, cb_link)
if hasattr(result, 'actions'):
for link_name, link in result.actions.items():
def cb_action(_link_name=link_name, _result=result,
*args, **kw):
return self.action(_result, _link_name,
*args, **kw)
if hasattr(result, link_name):
setattr(result, link_name + '_action', cb_action)
else:
setattr(result, link_name, cb_action)
return result
return obj
def object_pairs_hook(self, pairs):
ret = collections.OrderedDict()
for k, v in pairs:
ret[k] = v
return self.object_hook(ret)
def _get(self, url, data=None):
return self._unmarshall(self._get_raw(url, data=data))
def _error(self, text):
raise ApiError(self._unmarshall(text))
@timed_url
def _get_raw(self, url, data=None):
r = self._get_response(url, data)
return r.text
def _get_response(self, url, data=None):
r = self._session.get(url, auth=self._auth, params=data,
headers=self._headers)
if r.status_code < 200 or r.status_code >= 300:
self._error(r.text)
return r
@timed_url
def _post(self, url, data=None):
r = self._session.post(url, auth=self._auth, data=self._marshall(data),
headers=self._headers)
if r.status_code < 200 or r.status_code >= 300:
self._error(r.text)
print(r.status_code)
return self._unmarshall(r.text)
@timed_url
def _put(self, url, data=None):
r = self._session.put(url, auth=self._auth, data=self._marshall(data),
headers=self._headers)
if r.status_code < 200 or r.status_code >= 300:
self._error(r.text)
return self._unmarshall(r.text)
@timed_url
def _delete(self, url):
r = self._session.delete(url, auth=self._auth, headers=self._headers)
if r.status_code < 200 or r.status_code >= 300:
self._error(r.text)
return self._unmarshall(r.text)
def _unmarshall(self, text):
if text is None or text == '':
return text
obj = json.loads(text, object_hook=self.object_hook,
object_pairs_hook=self.object_pairs_hook)
return obj
def _marshall(self, obj, indent=None, sort_keys=False):
if obj is None:
return None
return json.dumps(self._to_dict(obj), indent=indent, sort_keys=sort_keys)
def _load_schemas(self, force=False):
if self.schema and not force:
return
schema_text = self._get_cached_schema()
if force or not schema_text:
response = self._get_response(self._url)
schema_url = response.headers.get('X-API-Schemas')
if schema_url is not None and self._url != schema_url:
schema_text = self._get_raw(schema_url)
else:
schema_text = response.text
self._cache_schema(schema_text)
obj = self._unmarshall(schema_text)
schema = Schema(schema_text, obj)
if len(schema.types) > 0:
self._bind_methods(schema)
self.schema = schema
def reload_schema(self):
self._load_schemas(force=True)
def by_id(self, type, id, **kw):
id = str(id)
url = self.schema.types[type].links.collection
if url.endswith('/'):
url += id
else:
url = '/'.join([url, id])
try:
return self._get(url, self._to_dict(**kw))
except ApiError as e:
if e.error.status == 404:
return None
else:
raise e
def update_by_id(self, type, id, *args, **kw):
url = self.schema.types[type].links.collection
if url.endswith('/'):
url = url + id
else:
url = '/'.join([url, id])
return self._put_and_retry(url, *args, **kw)
def update(self, obj, *args, **kw):
url = obj.links.self
return self._put_and_retry(url, *args, **kw)
def _put_and_retry(self, url, *args, **kw):
retries = kw.get('retries', 3)
for i in range(retries):
try:
return self._put(url, data=self._to_dict(*args, **kw))
except ApiError as e:
if i == retries-1:
raise e
if e.error.status == 409:
time.sleep(.1)
else:
raise e
def _post_and_retry(self, url, *args, **kw):
retries = kw.get('retries', 3)
for i in range(retries):
try:
return self._post(url, data=self._to_dict(*args, **kw))
except ApiError as e:
if i == retries-1:
raise e
if e.error.status == 409:
time.sleep(.1)
else:
raise e
def _validate_list(self, type, **kw):
if not self._strict:
return
collection_filters = self.schema.types[type].collectionFilters
for k in kw:
if hasattr(collection_filters, k):
return
for filter_name, filter_value in collection_filters.items():
for m in filter_value.modifiers:
if k == '_'.join([filter_name, m]):
return
raise ClientApiError(k + ' is not a searchable field')
def list(self, type, **kw):
if type not in self.schema.types:
raise ClientApiError(type + ' is not a valid type')
self._validate_list(type, **kw)
collection_url = self.schema.types[type].links.collection
return self._get(collection_url, data=self._to_dict(**kw))
def reload(self, obj):
return self.by_id(obj.type, obj.id)
def create(self, type, *args, **kw):
collection_url = self.schema.types[type].links.collection
return self._post(collection_url, data=self._to_dict(*args, **kw))
def delete(self, *args):
for i in args:
if isinstance(i, RestObject):
return self._delete(i.links.self)
def action(self, obj, action_name, *args, **kw):
url = getattr(obj.actions, action_name)
return self._post_and_retry(url, *args, **kw)
def _is_list(self, obj):
if isinstance(obj, list):
return True
if isinstance(obj, RestObject) and 'type' in obj.__dict__ and \
obj.type == 'collection':
return True
return False
def _to_value(self, value):
if isinstance(value, dict):
ret = {}
for k, v in value.items():
ret[k] = self._to_value(v)
return ret
if isinstance(value, list):
ret = []
for v in value:
ret.append(self._to_value(v))
return ret
if isinstance(value, RestObject):
ret = {}
for k, v in vars(value).items():
if not k.startswith('_') and \
not isinstance(v, RestObject) and not callable(v):
ret[k] = self._to_value(v)
elif not k.startswith('_') and isinstance(v, RestObject):
ret[k] = self._to_dict(v)
return ret
return value
def _to_dict(self, *args, **kw):
if len(kw) == 0 and len(args) == 1 and self._is_list(args[0]):
ret = []
for i in args[0]:
ret.append(self._to_dict(i))
return ret
ret = {}
for i in args:
value = self._to_value(i)
if isinstance(value, dict):
for k, v in value.items():
ret[k] = v
for k, v in kw.items():
ret[k] = self._to_value(v)
return ret
@staticmethod
def _type_name_variants(name):
ret = [name]
python_name = re.sub(r'([a-z])([A-Z])', r'\1_\2', name)
if python_name != name:
ret.append(python_name.lower())
return ret
def _bind_methods(self, schema):
bindings = [
('list', 'collectionMethods', GET_METHOD, self.list),
('by_id', 'collectionMethods', GET_METHOD, self.by_id),
('update_by_id', 'resourceMethods', PUT_METHOD, self.update_by_id),
('create', 'collectionMethods', POST_METHOD, self.create)
]
for type_name, typ in schema.types.items():
for name_variant in self._type_name_variants(type_name):
for method_name, type_collection, test_method, m in bindings:
# double lambda for lexical binding hack, I'm sure there's
# a better way to do this
def cb_bind(type_name=type_name, method=m):
def _cb(*args, **kw):
return method(type_name, *args, **kw)
return _cb
if test_method in getattr(typ, type_collection, []):
setattr(self, '_'.join([method_name, name_variant]),
cb_bind())
def _get_schema_hash(self):
h = hashlib.new('sha1')
h.update(self._url.encode('utf-8'))
if self._access_key is not None:
h.update(self._access_key.encode('utf-8'))
return h.hexdigest()
def _get_cached_schema_file_name(self):
if not self._cache:
return None
h = self._get_schema_hash()
cachedir = os.path.expanduser(CACHE_DIR)
if not cachedir:
return None
if not os.path.exists(cachedir):
os.mkdir(cachedir)
return os.path.join(cachedir, 'schema-' + h + '.json')
def _cache_schema(self, text):
cached_schema = self._get_cached_schema_file_name()
if not cached_schema:
return None
with open(cached_schema, 'w') as f:
f.write(text)
def _get_cached_schema(self):
if not self._cache:
return None
cached_schema = self._get_cached_schema_file_name()
if not cached_schema:
return None
if os.path.exists(cached_schema):
mod_time = os.path.getmtime(cached_schema)
if time.time() - mod_time < self._cache_time:
with open(cached_schema) as f:
data = f.read()
return data
return None
def wait_success(self, obj, timeout=-1):
obj = self.wait_transitioning(obj, timeout)
if obj.transitioning != 'no':
raise ClientApiError(obj.transitioningMessage)
return obj
def wait_transitioning(self, obj, timeout=-1, sleep=0.01):
timeout = _get_timeout(timeout)
start = time.time()
obj = self.reload(obj)
while obj.transitioning == 'yes':
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
obj = self.reload(obj)
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] to be done after {} seconds'
msg = msg.format(obj.type, obj.id, delta)
raise Exception(msg)
return obj
def _get_timeout(timeout):
if timeout == -1:
return DEFAULT_TIMEOUT
return timeout
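# Usage sketch (added example, not part of the original client). The URL and
# credentials are placeholders, and 'project' stands in for whatever type
# names the server's schema actually exposes; _bind_methods also attaches
# per-type helpers such as list_<type> and by_id_<type>.
#
#   client = Client(url='http://localhost:8080/v1',
#                   access_key='ACCESS', secret_key='SECRET')
#   if client.valid():
#       for item in client.list('project'):
#           print(item.id)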
if __name__ == '__main__':
print("This cli has been deprecated in favor of " +
"https://github.com/rancher/cli")
|
import json
import os
from mininet.topo import Topo
import subprocess
import tempfile
class Mapper(object):
def __init__(self, topo, physical_network_file, mapper):
self.topo = topo
self.physical_network_file = physical_network_file
self.mapper = mapper
self.topo_file = self.create_topo_file(topo)
self.mapping_json_path = self._run_python3_distriopt(virtual_topo_file=self.topo_file,
physical_topo_file=physical_network_file,
mapper=mapper)
@staticmethod
def check_valid_path(physical_network_file):
pass
def create_topo_file(self,topo):
assert isinstance(topo, Topo), "Invalid Network Format"
fd, filename = tempfile.mkstemp(suffix=".json")
os.close(fd)
json_topo={"nodes": {}, "links": {}}
for node in topo.nodes():
attrs = {"cores": topo.nodeInfo(node).get("cores", 1),
"memory": topo.nodeInfo(node).get("memory", 100)}
json_topo["nodes"][node] = attrs
for (u, v, attrs) in topo.iterLinks(withInfo=True):
rate = attrs["bw"]
edge_attrs = {"rate": rate}
json_topo["links"][" ".join((u,v))]= edge_attrs
with open(filename, "w") as f:
json.dump(json_topo, f)
return filename
def create_mapping(self):
with open(self.mapping_json_path, "r") as f:
mapping = json.load(f)
if "Infeasible" in mapping:
print("MAPPING INFEASIBLE")
exit(1)
elif "mapping" in mapping:
mapping = mapping["mapping"]
return mapping
else:
raise ValueError("Returned value by the script not managed {}".format(mapping))
def _run_python3_distriopt(self,virtual_topo_file, physical_topo_file, mapper, python3_script="/root/MaxiNet/MaxiNet/Frontend/distriopt_runner.py"):
python3_command = "python3 {} {} {} {}".format(python3_script,virtual_topo_file,physical_topo_file,mapper) # launch python3 script using bash
process = subprocess.Popen(python3_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
# return the temporary path for the mapping, decoded from bytes and stripped
# of the trailing newline so it can be passed straight to open()
return output.decode().strip()
|
"""
Time Series API For Digital Portals
Time series data, end-of-day or intraday, tick-by-tick or subsampled. Additional vendor-specific endpoints provide a modified interface for seamless integration with the ChartIQ chart library. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.TimeSeriesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.TimeSeriesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_response2001_data import InlineResponse2001Data
from fds.sdk.TimeSeriesAPIforDigitalPortals.model.inline_response2001_meta import InlineResponse2001Meta
globals()['InlineResponse2001Data'] = InlineResponse2001Data
globals()['InlineResponse2001Meta'] = InlineResponse2001Meta
class InlineResponse2001(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (InlineResponse2001Data,), # noqa: E501
'meta': (InlineResponse2001Meta,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'meta': 'meta', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse2001 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (InlineResponse2001Data): [optional] # noqa: E501
meta (InlineResponse2001Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse2001 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (InlineResponse2001Data): [optional] # noqa: E501
meta (InlineResponse2001Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
import unittest
def soma(param, param1):
return param + param1
class BasicoTests(unittest.TestCase):
def test_soma(self):
resultado = soma(1, 2)
self.assertEqual(3, resultado)
resultado = soma(3, 2)
self.assertEqual(5, resultado)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
Test suite specifically targeting ICC profiles
"""
# Standard library imports ...
from datetime import datetime
try:
import importlib.resources as ir
except ImportError: # pragma: no cover
# before 3.7
import importlib_resources as ir
import struct
import tempfile
import unittest
import warnings
# Third party library imports
import numpy as np
# Local imports
import glymur
from glymur import Jp2k
from glymur._iccprofile import _ICCProfile
from glymur.jp2box import (
ColourSpecificationBox, ContiguousCodestreamBox, FileTypeBox,
ImageHeaderBox, JP2HeaderBox, JPEG2000SignatureBox, InvalidJp2kError
)
from glymur.core import SRGB
from . import fixtures, data
class TestColourSpecificationBox(fixtures.TestCommon):
"""Test suite for colr box instantiation."""
def setUp(self):
super(TestColourSpecificationBox, self).setUp()
j2k = Jp2k(self.j2kfile)
codestream = j2k.get_codestream()
height = codestream.segment[1].ysiz
width = codestream.segment[1].xsiz
num_components = len(codestream.segment[1].xrsiz)
self.jp2b = JPEG2000SignatureBox()
self.ftyp = FileTypeBox()
self.jp2h = JP2HeaderBox()
self.jp2c = ContiguousCodestreamBox()
self.ihdr = ImageHeaderBox(height=height, width=width,
num_components=num_components)
self.icc_profile = ir.read_binary(data, 'sgray.icc')
def test_bad_method_printing(self):
"""
SCENARIO: An ICC profile is both too short and has an invalid method
value.
EXPECTED RESULT: Warnings are issued. Printing the string
representation should not error out.
"""
with ir.path(data, 'issue405.dat') as path:
with path.open('rb') as f:
f.seek(8)
with warnings.catch_warnings():
# Lots of things wrong with this file.
warnings.simplefilter('ignore')
box = ColourSpecificationBox.parse(f, length=80, offset=0)
str(box)
def test_colr_with_out_enum_cspace(self):
"""must supply an enumerated colorspace when writing"""
j2k = Jp2k(self.j2kfile)
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
boxes[2].box = [self.ihdr, ColourSpecificationBox(colorspace=None)]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(InvalidJp2kError):
j2k.wrap(tfile.name, boxes=boxes)
def test_missing_colr_box(self):
"""jp2h must have a colr box"""
j2k = Jp2k(self.j2kfile)
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
boxes[2].box = [self.ihdr]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(InvalidJp2kError):
j2k.wrap(tfile.name, boxes=boxes)
def test_bad_approx_jp2_field(self):
"""JP2 has requirements for approx field"""
j2k = Jp2k(self.j2kfile)
boxes = [self.jp2b, self.ftyp, self.jp2h, self.jp2c]
colr = ColourSpecificationBox(colorspace=SRGB, approximation=1)
boxes[2].box = [self.ihdr, colr]
with open(self.temp_jp2_filename, mode='wb') as tfile:
with self.assertRaises(InvalidJp2kError):
j2k.wrap(tfile.name, boxes=boxes)
def test_default_colr(self):
"""basic colr instantiation"""
colr = ColourSpecificationBox(colorspace=SRGB)
self.assertEqual(colr.method, glymur.core.ENUMERATED_COLORSPACE)
self.assertEqual(colr.precedence, 0)
self.assertEqual(colr.approximation, 0)
self.assertEqual(colr.colorspace, SRGB)
self.assertIsNone(colr.icc_profile)
def test_icc_profile(self):
"""basic colr box with ICC profile"""
colr = ColourSpecificationBox(icc_profile=self.icc_profile)
self.assertEqual(colr.method, glymur.core.ENUMERATED_COLORSPACE)
self.assertEqual(colr.precedence, 0)
self.assertEqual(colr.approximation, 0)
icc_profile = _ICCProfile(colr.icc_profile)
self.assertEqual(icc_profile.header['Version'], '2.1.0')
self.assertEqual(icc_profile.header['Color Space'], 'gray')
self.assertIsNone(icc_profile.header['Datetime'])
# Only True for version4
self.assertFalse('Profile Id' in icc_profile.header.keys())
def test_colr_with_bad_color(self):
"""
SCENARIO: A colr box has an invalid colorspace.
EXPECTED RESULT: An InvalidJp2kError is raised when attempting to
write the box.
"""
with self.assertWarns(UserWarning):
# A warning is issued due to the bad colorspace.
colr = ColourSpecificationBox(colorspace=-1, approximation=0)
with tempfile.TemporaryFile() as tfile:
with self.assertRaises(InvalidJp2kError):
colr.write(tfile)
def test_write_colr_with_bad_method(self):
"""
SCENARIO: A colr box has an invalid method value.
EXPECTED RESULT: InvalidJp2kError
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
colr = ColourSpecificationBox(colorspace=SRGB, method=5)
with tempfile.TemporaryFile() as tfile:
with self.assertRaises(InvalidJp2kError):
colr.write(tfile)
class TestSuite(unittest.TestCase):
"""Test suite for ICC Profile code."""
def setUp(self):
self.buffer = ir.read_binary(data, 'sgray.icc')
def test_bad_rendering_intent(self):
"""
The rendering intent is not in the range 0-4.
It should be classified as 'unknown'
"""
intent = struct.pack('>I', 10)
self.buffer = self.buffer[:64] + intent + self.buffer[68:]
icc_profile = _ICCProfile(self.buffer)
self.assertEqual(icc_profile.header['Rendering Intent'], 'unknown')
def test_version4(self):
"""
ICC profile is version 4
"""
leadoff = struct.pack('>IIBB', 416, 0, 4, 0)
self.buffer = leadoff + self.buffer[10:]
icc_profile = _ICCProfile(self.buffer)
self.assertEqual(icc_profile.header['Version'], '4.0.0')
self.assertTrue('Profile Id' in icc_profile.header.keys())
def test_icc_profile(self):
"""
SCENARIO: The ColourDefinitionBox has an ICC profile.
EXPECTED RESULT: Verify the ICC profile metadata.
"""
with ir.path(data, 'text_GBR.jp2') as path:
with self.assertWarns(UserWarning):
# The brand is wrong, this is JPX, not JP2.
j = Jp2k(path)
box = j.box[3].box[1]
self.assertEqual(box.icc_profile_header['Size'], 1328)
self.assertEqual(box.icc_profile_header['Color Space'], 'RGB')
self.assertEqual(box.icc_profile_header['Connection Space'], 'XYZ')
self.assertEqual(box.icc_profile_header['Datetime'],
datetime(2009, 2, 25, 11, 26, 11))
self.assertEqual(box.icc_profile_header['File Signature'], 'acsp')
self.assertEqual(box.icc_profile_header['Platform'], 'APPL')
self.assertEqual(box.icc_profile_header['Flags'],
'not embedded, can be used independently')
self.assertEqual(box.icc_profile_header['Device Manufacturer'], 'appl')
self.assertEqual(box.icc_profile_header['Device Model'], '')
self.assertEqual(box.icc_profile_header['Device Attributes'],
('reflective, glossy, positive media polarity, '
'color media'))
self.assertEqual(box.icc_profile_header['Rendering Intent'],
'perceptual')
np.testing.assert_almost_equal(box.icc_profile_header['Illuminant'],
np.array([0.9642023, 1.0, 0.824905]),
decimal=6)
self.assertEqual(box.icc_profile_header['Creator'], 'appl')
|
import sys
from copy import deepcopy
from random import SystemRandom
from .aggregation_info import AggregationInfo
from .ec import (AffinePoint, JacobianPoint, default_ec, generator_Fq,
hash_to_point_Fq2, hash_to_point_prehashed_Fq2, y_for_x)
from .fields import Fq
from .signature import Signature
from .threshold import Threshold
from .util import hash256, hmac256
RNG = SystemRandom()
class PublicKey:
"""
Public keys are G1 elements, which are elliptic curve points (x, y), where
each x, y is a 381 bit Fq element. The serialized representation is just
the x value, and thus 48 bytes. (With the 1st bit determining the valid y).
"""
PUBLIC_KEY_SIZE = 48
def __init__(self, value):
self.value = value
@staticmethod
def from_bytes(buffer):
bit1 = buffer[0] & 0x80
buffer = bytes([buffer[0] & 0x1f]) + buffer[1:]
x = Fq(default_ec.q, int.from_bytes(buffer, "big"))
y_values = y_for_x(Fq(default_ec.q, x))
y_values.sort()
y = y_values[0]
if bit1:
y = y_values[1]
return PublicKey(AffinePoint(x, y, False, default_ec).to_jacobian())
@staticmethod
def from_g1(g1_el):
assert type(g1_el) == JacobianPoint
return PublicKey(g1_el)
def get_fingerprint(self):
ser = self.serialize()
return int.from_bytes(hash256(ser)[:4], "big")
def serialize(self):
return self.value.serialize()
def size(self):
return self.PUBLIC_KEY_SIZE
def __eq__(self, other):
return self.value.serialize() == other.value.serialize()
def __hash__(self):
return int.from_bytes(self.value.serialize(), "big")
def __lt__(self, other):
return self.value.serialize() < other.value.serialize()
def __str__(self):
return "PublicKey(" + self.value.to_affine().__str__() + ")"
def __repr__(self):
return "PublicKey(" + self.value.to_affine().__repr__() + ")"
def __deepcopy__(self, memo):
return PublicKey.from_g1(deepcopy(self.value, memo))
class PrivateKey:
"""
Private keys are just random integers between 1 and the group order.
"""
PRIVATE_KEY_SIZE = 32
def __init__(self, value):
self.value = value
@staticmethod
def from_bytes(buffer):
return PrivateKey(int.from_bytes(buffer, "big"))
@staticmethod
def from_seed(seed):
hashed = hmac256(seed, b"BLS private key seed")
return PrivateKey(int.from_bytes(hashed, "big") % default_ec.n)
@staticmethod
def new_threshold(T, N):
"""
Create a new private key with associated data suitable for
T of N threshold signatures under a Joint-Feldman scheme.
After the dealing phase, one needs cooperation of T players
out of N in order to sign a message with the master key pair.
Return:
- poly[0] - your share of the master secret key
- commitments to your polynomial P
- secret_fragments[j] = P(j), to be sent to player j
(All N secret_fragments[j] can be combined to make a secret share.)
"""
assert 1 <= T <= N
g1 = generator_Fq()
poly = [Fq(default_ec.n, RNG.randint(1, default_ec.n - 1))
for _ in range(T)]
commitments = [g1 * c for c in poly]
secret_fragments = [sum(c * pow(x, i, default_ec.n) for i, c in enumerate(poly))
for x in range(1, N+1)]
return PrivateKey(poly[0]), commitments, secret_fragments
def get_public_key(self):
return PublicKey.from_g1((self.value * generator_Fq())
.to_jacobian())
def sign(self, m):
r = hash_to_point_Fq2(m).to_jacobian()
aggregation_info = AggregationInfo.from_msg(self.get_public_key(), m)
return Signature.from_g2(self.value * r, aggregation_info)
def sign_prehashed(self, h):
r = hash_to_point_prehashed_Fq2(h).to_jacobian()
aggregation_info = AggregationInfo.from_msg_hash(self.get_public_key(),
h)
return Signature.from_g2(self.value * r, aggregation_info)
def sign_threshold(self, m, player, players):
"""
As the given player out of a list of player indices,
return a signature share for the given message.
"""
assert player in players
r = hash_to_point_Fq2(m).to_jacobian()
i = players.index(player)
lambs = Threshold.lagrange_coeffs_at_zero(players)
return Signature.from_g2(self.value * (r * lambs[i]))
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
def __hash__(self):
return self.value
def serialize(self):
return self.value.to_bytes(self.PRIVATE_KEY_SIZE, "big")
def size(self):
return self.PRIVATE_KEY_SIZE
def __str__(self):
return "PrivateKey(" + hex(self.value) + ")"
def __repr__(self):
return "PrivateKey(" + hex(self.value) + ")"
class ExtendedPrivateKey:
version = 1
EXTENDED_PRIVATE_KEY_SIZE = 77
def __init__(self, version, depth, parent_fingerprint,
child_number, chain_code, private_key):
self.version = version
self.depth = depth
self.parent_fingerprint = parent_fingerprint
self.child_number = child_number
self.chain_code = chain_code
self.private_key = private_key
@staticmethod
def from_seed(seed):
i_left = hmac256(seed + bytes([0]), b"BLS HD seed")
i_right = hmac256(seed + bytes([1]), b"BLS HD seed")
sk_int = int.from_bytes(i_left, "big") % default_ec.n
sk = PrivateKey.from_bytes(
sk_int.to_bytes(PrivateKey.PRIVATE_KEY_SIZE, "big"))
return ExtendedPrivateKey(ExtendedPrivateKey.version, 0, 0,
0, i_right, sk)
def private_child(self, i):
if (self.depth >= 255):
raise Exception("Cannot go further than 255 levels")
# Hardened keys have i >= 2^31. Non-hardened have i < 2^31
hardened = (i >= (2 ** 31))
if (hardened):
hmac_input = self.private_key.serialize()
else:
hmac_input = self.private_key.get_public_key().serialize()
hmac_input += i.to_bytes(4, "big")
i_left = hmac256(hmac_input + bytes([0]), self.chain_code)
i_right = hmac256(hmac_input + bytes([1]), self.chain_code)
sk_int = ((int.from_bytes(i_left, "big") + self.private_key.value)
% default_ec.n)
sk = PrivateKey.from_bytes(
sk_int.to_bytes(PrivateKey.PRIVATE_KEY_SIZE, "big"))
return ExtendedPrivateKey(ExtendedPrivateKey.version, self.depth + 1,
self.private_key.get_public_key()
.get_fingerprint(), i,
i_right, sk)
def public_child(self, i):
return self.private_child(i).get_extended_public_key()
def get_extended_public_key(self):
serialized = (self.version.to_bytes(4, "big") +
bytes([self.depth]) +
self.parent_fingerprint.to_bytes(4, "big") +
self.child_number.to_bytes(4, "big") +
self.chain_code +
self.private_key.get_public_key().serialize())
return ExtendedPublicKey.from_bytes(serialized)
def get_private_key(self):
return self.private_key
def get_public_key(self):
return self.private_key.get_public_key()
def size(self):
return self.EXTENDED_PRIVATE_KEY_SIZE
def serialize(self):
return (self.version.to_bytes(4, "big") +
bytes([self.depth]) +
self.parent_fingerprint.to_bytes(4, "big") +
self.child_number.to_bytes(4, "big") +
self.chain_code +
self.private_key.serialize())
def __eq__(self, other):
return self.serialize() == other.serialize()
def __hash__(self):
return int.from_bytes(self.serialize(), "big")
class ExtendedPublicKey:
EXTENDED_PUBLIC_KEY_SIZE = 93
def __init__(self, version, depth, parent_fingerprint,
child_number, chain_code, public_key):
self.version = version
self.depth = depth
self.parent_fingerprint = parent_fingerprint
self.child_number = child_number
self.chain_code = chain_code
self.public_key = public_key
@staticmethod
def from_bytes(serialized):
version = int.from_bytes(serialized[:4], "big")
depth = int.from_bytes(serialized[4:5], "big")
parent_fingerprint = int.from_bytes(serialized[5:9], "big")
child_number = int.from_bytes(serialized[9:13], "big")
chain_code = serialized[13:45]
public_key = PublicKey.from_bytes(serialized[45:])
return ExtendedPublicKey(version, depth, parent_fingerprint,
child_number, chain_code, public_key)
def public_child(self, i):
if (self.depth >= 255):
raise Exception("Cannot go further than 255 levels")
# Hardened keys have i >= 2^31. Non-hardened have i < 2^31
if i >= (2 ** 31):
raise Exception("Cannot derive hardened children from public key")
hmac_input = self.public_key.serialize() + i.to_bytes(4, "big")
i_left = hmac256(hmac_input + bytes([0]), self.chain_code)
i_right = hmac256(hmac_input + bytes([1]), self.chain_code)
sk_left_int = (int.from_bytes(i_left, "big") % default_ec.n)
sk_left = PrivateKey.from_bytes(
sk_left_int.to_bytes(PrivateKey.PRIVATE_KEY_SIZE, "big"))
new_pk = PublicKey.from_g1(sk_left.get_public_key().value +
self.public_key.value)
return ExtendedPublicKey(self.version, self.depth + 1,
self.public_key.get_fingerprint(), i,
i_right, new_pk)
def get_public_key(self):
return self.public_key
def size(self):
return self.EXTENDED_PUBLIC_KEY_SIZE
def serialize(self):
return (self.version.to_bytes(4, "big") +
bytes([self.depth]) +
self.parent_fingerprint.to_bytes(4, "big") +
self.child_number.to_bytes(4, "big") +
self.chain_code +
self.public_key.serialize())
def __eq__(self, other):
return self.serialize() == other.serialize()
def __hash__(self):
return int.from_bytes(self.serialize(), "big")
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
#!usr/bin/env python
import sys
import re
import shelve
from parameter.common_parameters import common_parameters
import utils.setting_utils as utils
utils.now_time("phastcons_score_list script starting...")
p = utils.Bunch(common_parameters)
def main():
utils.now_time("Input_file: " + p.phastcons_score_list_db_input)
utils.now_time("Reference_file: " + p.phastcons_score_list_reference)
utils.now_time("Output_file: " + p.phastcons_score_list_db_output)
output_merge = p.phastcons_score_list_db_output + 'phastCons46way_Refseq_for_MIRAGE_CDS.db' #'phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
output_merge_shelve = shelve.open(output_merge)
#for x in ['chr21']:
for x in ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY','chrM']:
ref_s = p.phastcons_score_list_reference #mirBase, Refseq etc...
ref_file = open(ref_s,'r')
input_s = p.phastcons_score_list_db_input + x + '.phastCons46way_Refseq_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19.db'
output_s = p.phastcons_score_list_db_output + x + '.phastCons46way_Refseq_for_MIRAGE_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
input_shelve = shelve.open(input_s)
output_shelve = shelve.open(output_s)
score_list_dict = {}
for line in ref_file:
line = line.rstrip()
data = line.split("\t")
chrom = data[0]
if not chrom == x:
continue
strand = data[5]
if len(data) >= 12: # 12-column BED format (with exon blocks)
exon_block = data[10].split(',')
exon_block.pop() #Remove the last item ''
exon_st = data[11].split(',')
exon_st.pop() #Remove the last item ''
name = data[3]
score_list_dict[name] = []
for y in range(len(exon_block)):
st = int(data[1]) + int(exon_st[y])
ed = int(data[1]) + int(exon_st[y]) + int(exon_block[y])
length = ed - st
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
elif len(data) >= 6: # 6-column BED format (chr, st, ed, name, score, strand)
st = int(data[1])
ed = int(data[2])
length = ed - st
name = data[3]
score_list_dict[name] = []
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
else:
print('ERROR: Your BED file has fewer than six columns.')
print('A BED file needs at least six columns [chr, st, ed, name, score, strand] here...')
sys.exit(1)
output_shelve.update(score_list_dict)
output_merge_shelve.update(score_list_dict)
input_shelve.close()
output_shelve.close()
utils.now_time("phastcons_score_list script was successfully finished!!")
output_merge_shelve.close()
if __name__ == '__main__':
main()
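# Minimal read-back sketch (illustration only, not part of the original
# pipeline): open the merged shelve DB written by main() and peek at one
# stored score list. The default path below assumes an empty output-directory
# prefix; in practice prepend p.phastcons_score_list_db_output.
def peek_merged_db(path='phastCons46way_Refseq_for_MIRAGE_CDS.db'):
    with shelve.open(path) as db:
        for name, scores in db.items():
            # Each key is a BED 'name' field; each value is its per-base score list.
            return name, len(scores)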
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import sys
# TODO make this less brittle
sys.path = [
os.path.join(os.path.dirname(__file__), "../"),
# os.path.join(os.path.dirname(__file__), '../build-bundledmagnum/src/deps/magnum-bindings/src/python/')
] + sys.path
import habitat_sim
# TODO: remove once m.css handles class hierarchies better
habitat_sim.logging.HabitatSimFormatter.formatStack.__doc__ = ""
# Monkey patch the registry to be the _Registry class instead of the singleton for docs
habitat_sim.registry = type(habitat_sim.registry)
# TODO: remove once utils/__init__.py is removed again
habitat_sim.utils.__all__.remove("quat_from_angle_axis")
habitat_sim.utils.__all__.remove("quat_rotate_vector")
PROJECT_TITLE = "Habitat"
PROJECT_SUBTITLE = "Sim Docs"
PROJECT_LOGO = "habitat.svg"
FAVICON = "habitat-blue.png"
MAIN_PROJECT_URL = "/"
INPUT_MODULES = [habitat_sim]
INPUT_DOCS = ["docs.rst", "gfx.rst", "noise_models.rst"]
INPUT_PAGES = [
"pages/index.rst",
"pages/new-actions.rst",
"pages/attributesJSON.rst",
"pages/stereo-agent.rst",
"pages/lighting-setups.rst",
"pages/image-extractor.rst",
"pages/asset-viewer-tutorial.rst",
"pages/managed-rigid-object-tutorial.rst",
"pages/logging.rst",
]
PLUGINS = [
"m.abbr",
"m.code",
"m.components",
"m.dox",
"m.gh",
"m.htmlsanity",
"m.images",
"m.link",
"m.math",
"m.sphinx",
]
CLASS_INDEX_EXPAND_LEVELS = 2
NAME_MAPPING = {
# I have no idea what is going on with this thing -- it reports itself as
# being from the builtins module?
"quaternion": "quaternion.quaternion",
# TODO: remove once the inventory file contains this info
"_magnum": "magnum",
}
PYBIND11_COMPATIBILITY = True
ATTRS_COMPATIBILITY = True
OUTPUT = "../build/docs/habitat-sim/"
LINKS_NAVBAR1 = [
(
"Pages",
"pages",
[
("Add new actions", "new-actions"),
("Attributes JSON", "attributesJSON"),
("Stereo agent", "stereo-agent"),
("Lighting Setups", "lighting-setups"),
("Image extraction", "image-extractor"),
("View Assets in Habitat-sim", "asset-viewer-tutorial"),
("Managed Rigid Object Tutorial", "managed-rigid-object-tutorial"),
],
),
("Classes", "classes", []),
]
# When adding new pages / tutorials to LINKS_NAVBAR, the same option in
# Doxyfile-mcss needs to be updated accordingly to keep the C++ and Python
# navbar in sync.
LINKS_NAVBAR2 = [
("C++ API", "./cpp.html", []),
("Habitat Lab Docs", "../habitat-lab/index.html", []),
]
FINE_PRINT = f"""
| {PROJECT_TITLE} {PROJECT_SUBTITLE}. Copyright © 2021 Facebook AI Research.
| `Terms of Use </terms-of-use>`_ `Data Policy </data-policy>`_ `Cookie Policy </cookie-policy>`_
| Created with `m.css Python doc generator <https://mcss.mosra.cz/documentation/python/>`_."""
THEME_COLOR = "#478cc3"
STYLESHEETS = [
"https://fonts.googleapis.com/css?family=Source+Sans+Pro:400,400i,600,600i%7CSource+Code+Pro:400,400i,600",
"theme.compiled.css",
]
M_SPHINX_INVENTORIES = [
("python.inv", "https://docs.python.org/3/", [], ["m-doc-external"]),
("numpy.inv", "https://docs.scipy.org/doc/numpy/", [], ["m-doc-external"]),
(
"quaternion.inv",
"https://quaternion.readthedocs.io/en/latest/",
[],
["m-doc-external"],
),
(
"magnum-bindings.inv",
"https://doc.magnum.graphics/python/",
[],
["m-doc-external"],
),
]
M_SPHINX_INVENTORY_OUTPUT = "objects.inv"
M_SPHINX_PARSE_DOCSTRINGS = True
M_HTMLSANITY_SMART_QUOTES = True
# Will people hate me if I enable this?
# M_HTMLSANITY_HYPHENATION = True
_hex_colors_src = re.compile(
r"""<span class="s2">"0x(?P<hex>[0-9a-f]{6})"</span>"""
)
_hex_colors_dst = r"""<span class="s2">"0x\g<hex>"</span><span class="m-code-color" style="background-color: #\g<hex>;"></span>"""
M_CODE_FILTERS_POST = {
("Python", "string_hex_colors"): lambda code: _hex_colors_src.sub(
_hex_colors_dst, code
)
}
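# Illustrative sketch (not used by the build): what the string_hex_colors
# post-filter above does to one highlighted string literal. The sample markup
# below is an assumption chosen only to match _hex_colors_src.
_hex_filter_example_in = '<span class="s2">"0xff8800"</span>'
_hex_filter_example_out = _hex_colors_src.sub(_hex_colors_dst, _hex_filter_example_in)
# _hex_filter_example_out now carries an extra <span class="m-code-color">
# swatch styled with background-color: #ff8800 right after the literal.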
M_DOX_TAGFILES = [
(
"corrade.tag",
"https://doc.magnum.graphics/corrade/",
["Corrade::"],
["m-doc-external"],
),
(
"magnum.tag",
"https://doc.magnum.graphics/magnum/",
["Magnum::"],
["m-doc-external"],
),
("../build/docs/habitat-cpp.tag", "../habitat-sim/", [], ["m-doc-external"]),
]
|
"""The tests for Monoprice Media player platform."""
from collections import defaultdict
from serial import SerialException
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_VOLUME_LEVEL,
DOMAIN as MEDIA_PLAYER_DOMAIN,
SERVICE_SELECT_SOURCE,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.monoprice.const import (
CONF_SOURCES,
DOMAIN,
SERVICE_RESTORE,
SERVICE_SNAPSHOT,
)
from homeassistant.const import (
CONF_PORT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
)
from homeassistant.helpers.entity_component import async_update_entity
from tests.async_mock import patch
from tests.common import MockConfigEntry
MOCK_CONFIG = {CONF_PORT: "fake port", CONF_SOURCES: {"1": "one", "3": "three"}}
MOCK_OPTIONS = {CONF_SOURCES: {"2": "two", "4": "four"}}
ZONE_1_ID = "media_player.zone_11"
ZONE_2_ID = "media_player.zone_12"
class AttrDict(dict):
"""Helper class for mocking attributes."""
def __setattr__(self, name, value):
"""Set attribute."""
self[name] = value
def __getattr__(self, item):
"""Get attribute."""
return self[item]
class MockMonoprice:
"""Mock for pymonoprice object."""
def __init__(self):
"""Init mock object."""
self.zones = defaultdict(
lambda: AttrDict(power=True, volume=0, mute=True, source=1)
)
def zone_status(self, zone_id):
"""Get zone status."""
status = self.zones[zone_id]
status.zone = zone_id
return AttrDict(status)
def set_source(self, zone_id, source_idx):
"""Set source for zone."""
self.zones[zone_id].source = source_idx
def set_power(self, zone_id, power):
"""Turn zone on/off."""
self.zones[zone_id].power = power
def set_mute(self, zone_id, mute):
"""Mute/unmute zone."""
self.zones[zone_id].mute = mute
def set_volume(self, zone_id, volume):
"""Set volume for zone."""
self.zones[zone_id].volume = volume
def restore_zone(self, zone):
"""Restore zone status."""
self.zones[zone.zone] = AttrDict(zone)
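# Illustrative sketch (not a collected test): MockMonoprice keeps simple
# per-zone state that the tests below drive through Home Assistant services.
def _example_mock_usage():
    mock = MockMonoprice()
    mock.set_volume(11, 19)
    # zone_status returns a copy of the stored AttrDict with `zone` filled in.
    assert mock.zone_status(11).volume == 19
    return mock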
async def test_cannot_connect(hass):
"""Test connection error."""
with patch(
"homeassistant.components.monoprice.get_monoprice", side_effect=SerialException,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
# setup_component(self.hass, DOMAIN, MOCK_CONFIG)
# self.hass.async_block_till_done()
await hass.async_block_till_done()
assert hass.states.get(ZONE_1_ID) is None
async def _setup_monoprice(hass, monoprice):
with patch(
"homeassistant.components.monoprice.get_monoprice", new=lambda *a: monoprice,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
# setup_component(self.hass, DOMAIN, MOCK_CONFIG)
# self.hass.async_block_till_done()
await hass.async_block_till_done()
async def _setup_monoprice_with_options(hass, monoprice):
with patch(
"homeassistant.components.monoprice.get_monoprice", new=lambda *a: monoprice,
):
config_entry = MockConfigEntry(
domain=DOMAIN, data=MOCK_CONFIG, options=MOCK_OPTIONS
)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
# setup_component(self.hass, DOMAIN, MOCK_CONFIG)
# self.hass.async_block_till_done()
await hass.async_block_till_done()
async def _call_media_player_service(hass, name, data):
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN, name, service_data=data, blocking=True
)
async def _call_homeassistant_service(hass, name, data):
await hass.services.async_call(
"homeassistant", name, service_data=data, blocking=True
)
async def _call_monoprice_service(hass, name, data):
await hass.services.async_call(DOMAIN, name, service_data=data, blocking=True)
async def test_service_calls_with_entity_id(hass):
"""Test snapshot save/restore service calls."""
await _setup_monoprice(hass, MockMonoprice())
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
# Saving existing values
await _call_monoprice_service(hass, SERVICE_SNAPSHOT, {"entity_id": ZONE_1_ID})
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "three"}
)
# Restoring other media player to its previous state
# The zone should not be restored
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": ZONE_2_ID})
await hass.async_block_till_done()
# Checking that values were not (!) restored
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1.0
assert state.attributes[ATTR_INPUT_SOURCE] == "three"
# Restoring media player to its previous state
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": ZONE_1_ID})
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_service_calls_with_all_entities(hass):
"""Test snapshot save/restore service calls."""
await _setup_monoprice(hass, MockMonoprice())
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
# Saving existing values
await _call_monoprice_service(hass, SERVICE_SNAPSHOT, {"entity_id": "all"})
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "three"}
)
# Restoring media player to its previous state
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": "all"})
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_service_calls_without_relevant_entities(hass):
"""Test snapshot save/restore service calls."""
await _setup_monoprice(hass, MockMonoprice())
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
# Saving existing values
await _call_monoprice_service(hass, SERVICE_SNAPSHOT, {"entity_id": "all"})
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "three"}
)
# Restoring media player to its previous state
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": "light.demo"})
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1.0
assert state.attributes[ATTR_INPUT_SOURCE] == "three"
async def test_restore_without_snapshot(hass):
"""Test restore when snapshot wasn't called."""
await _setup_monoprice(hass, MockMonoprice())
with patch.object(MockMonoprice, "restore_zone") as method_call:
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": ZONE_1_ID})
await hass.async_block_till_done()
assert not method_call.called
async def test_update(hass):
"""Test updating values from monoprice."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
monoprice.set_source(11, 3)
monoprice.set_volume(11, 38)
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1.0
assert state.attributes[ATTR_INPUT_SOURCE] == "three"
async def test_failed_update(hass):
"""Test updating failure from monoprice."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
monoprice.set_source(11, 3)
monoprice.set_volume(11, 38)
with patch.object(MockMonoprice, "zone_status", side_effect=SerialException):
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_empty_update(hass):
"""Test updating with no state from monoprice."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
monoprice.set_source(11, 3)
monoprice.set_volume(11, 38)
with patch.object(MockMonoprice, "zone_status", return_value=None):
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_supported_features(hass):
"""Test supported features property."""
await _setup_monoprice(hass, MockMonoprice())
state = hass.states.get(ZONE_1_ID)
assert (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_STEP
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
== state.attributes["supported_features"]
)
async def test_source_list(hass):
"""Test source list property."""
await _setup_monoprice(hass, MockMonoprice())
state = hass.states.get(ZONE_1_ID)
# Note: the list is sorted by source number, not by name
assert state.attributes[ATTR_INPUT_SOURCE_LIST] == ["one", "three"]
async def test_source_list_with_options(hass):
"""Test source list property."""
await _setup_monoprice_with_options(hass, MockMonoprice())
state = hass.states.get(ZONE_1_ID)
# Note: the list is sorted by source number, not by name
assert state.attributes[ATTR_INPUT_SOURCE_LIST] == ["two", "four"]
async def test_select_source(hass):
"""Test source selection methods."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(
hass,
SERVICE_SELECT_SOURCE,
{"entity_id": ZONE_1_ID, ATTR_INPUT_SOURCE: "three"},
)
assert monoprice.zones[11].source == 3
# Trying to set unknown source
await _call_media_player_service(
hass,
SERVICE_SELECT_SOURCE,
{"entity_id": ZONE_1_ID, ATTR_INPUT_SOURCE: "no name"},
)
assert monoprice.zones[11].source == 3
async def test_unknown_source(hass):
"""Test behavior when device has unknown source."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
monoprice.set_source(11, 5)
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes.get(ATTR_INPUT_SOURCE) is None
async def test_turn_on_off(hass):
"""Test turning on the zone."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(hass, SERVICE_TURN_OFF, {"entity_id": ZONE_1_ID})
assert not monoprice.zones[11].power
await _call_media_player_service(hass, SERVICE_TURN_ON, {"entity_id": ZONE_1_ID})
assert monoprice.zones[11].power
async def test_mute_volume(hass):
"""Test mute functionality."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.5}
)
await _call_media_player_service(
hass, SERVICE_VOLUME_MUTE, {"entity_id": ZONE_1_ID, "is_volume_muted": False}
)
assert not monoprice.zones[11].mute
await _call_media_player_service(
hass, SERVICE_VOLUME_MUTE, {"entity_id": ZONE_1_ID, "is_volume_muted": True}
)
assert monoprice.zones[11].mute
async def test_volume_up_down(hass):
"""Test increasing volume by one."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
assert monoprice.zones[11].volume == 0
await _call_media_player_service(
hass, SERVICE_VOLUME_DOWN, {"entity_id": ZONE_1_ID}
)
# should not go below zero
assert monoprice.zones[11].volume == 0
await _call_media_player_service(hass, SERVICE_VOLUME_UP, {"entity_id": ZONE_1_ID})
assert monoprice.zones[11].volume == 1
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
assert monoprice.zones[11].volume == 38
await _call_media_player_service(hass, SERVICE_VOLUME_UP, {"entity_id": ZONE_1_ID})
# should not go above 38
assert monoprice.zones[11].volume == 38
await _call_media_player_service(
hass, SERVICE_VOLUME_DOWN, {"entity_id": ZONE_1_ID}
)
assert monoprice.zones[11].volume == 37
|
"""Character count
This program counts how often each character appears in a string.
"""
def main():
message = 'It was a bright cold day in April, and the clocks were striking thirteen.'
"""str: Message to count characters."""
count = {}
"""dict: Characters as keys and counts as values."""
for character in message:
count.setdefault(character, 0)
count[character] = count[character] + 1
print(count)
if __name__ == '__main__':
main()
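# Equivalent approach (illustrative alternative, not part of the original
# exercise): collections.Counter performs the same per-character tally.
def count_with_counter(text):
    from collections import Counter
    return dict(Counter(text))
# count_with_counter('hello') -> {'h': 1, 'e': 1, 'l': 2, 'o': 1}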
|
import sys
import os
import torch
import argparse
import collections
uer_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, uer_dir)
from scripts.convert_bert_from_huggingface_to_uer import convert_bert_transformer_encoder_from_huggingface_to_uer
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--input_model_path", type=str, default="huggingface_model.bin",
help=".")
parser.add_argument("--output_model_path", type=str, default="pytorch_model.bin",
help=".")
parser.add_argument("--layers_num", type=int, default=12, help=".")
args = parser.parse_args()
path = args.input_model_path
input_model = torch.load(args.input_model_path, map_location='cpu')
output_model = collections.OrderedDict()
output_model["embedding.word_embedding.weight"] = input_model["bert.embeddings.word_embeddings.weight"]
output_model["embedding.position_embedding.weight"] = input_model["bert.embeddings.position_embeddings.weight"]
output_model["embedding.segment_embedding.weight"] = torch.cat((torch.Tensor([[0]*input_model["bert.embeddings.token_type_embeddings.weight"].size()[1]]), input_model["bert.embeddings.token_type_embeddings.weight"]), dim=0)
output_model["embedding.layer_norm.gamma"] = input_model["bert.embeddings.LayerNorm.weight"]
output_model["embedding.layer_norm.beta"] = input_model["bert.embeddings.LayerNorm.bias"]
convert_bert_transformer_encoder_from_huggingface_to_uer(input_model, output_model, args.layers_num)
output_model["output_layer_1.weight"] = input_model["bert.pooler.dense.weight"]
output_model["output_layer_1.bias"] = input_model["bert.pooler.dense.bias"]
output_model["output_layer_2.weight"] = input_model["classifier.weight"]
output_model["output_layer_2.bias"] = input_model["classifier.bias"]
torch.save(output_model, args.output_model_path)
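# Optional sanity check (illustration only, not part of the original
# conversion script): list every converted tensor's name and shape so any
# mismatch against the expected UER layout is easy to spot.
for name, tensor in output_model.items():
    print(name, tuple(tensor.shape))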
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.4425
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from lusid.api_client import ApiClient
from lusid.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class DataTypesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_data_type(self, **kwargs): # noqa: E501
"""[BETA] CreateDataType: Create data type definition # noqa: E501
Create a new data type definition Data types cannot be created in either the \"default\" or \"system\" scopes. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_data_type(async_req=True)
>>> result = thread.get()
:param create_data_type_request: The definition of the new data type
:type create_data_type_request: CreateDataTypeRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DataType
"""
kwargs['_return_http_data_only'] = True
return self.create_data_type_with_http_info(**kwargs) # noqa: E501
def create_data_type_with_http_info(self, **kwargs): # noqa: E501
"""[BETA] CreateDataType: Create data type definition # noqa: E501
Create a new data type definition Data types cannot be created in either the \"default\" or \"system\" scopes. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_data_type_with_http_info(async_req=True)
>>> result = thread.get()
:param create_data_type_request: The definition of the new data type
:type create_data_type_request: CreateDataTypeRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(DataType, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'create_data_type_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_data_type" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'create_data_type_request' in local_var_params:
body_params = local_var_params['create_data_type_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.4425'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
201: "DataType",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/datatypes', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_data_type(self, scope, code, **kwargs): # noqa: E501
"""GetDataType: Get data type definition # noqa: E501
Get the definition of a specified data type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_data_type(scope, code, async_req=True)
>>> result = thread.get()
:param scope: The scope of the data type (required)
:type scope: str
:param code: The code of the data type (required)
:type code: str
:param as_at: The asAt datetime at which to retrieve the data type definition. Defaults to returning the latest version of the data type definition if not specified.
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DataType
"""
kwargs['_return_http_data_only'] = True
return self.get_data_type_with_http_info(scope, code, **kwargs) # noqa: E501
def get_data_type_with_http_info(self, scope, code, **kwargs): # noqa: E501
"""GetDataType: Get data type definition # noqa: E501
Get the definition of a specified data type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_data_type_with_http_info(scope, code, async_req=True)
>>> result = thread.get()
:param scope: The scope of the data type (required)
:type scope: str
:param code: The code of the data type (required)
:type code: str
:param as_at: The asAt datetime at which to retrieve the data type definition. Defaults to returning the latest version of the data type definition if not specified.
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(DataType, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'scope',
'code',
'as_at'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_data_type" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `get_data_type`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `get_data_type`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `get_data_type`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_data_type`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_data_type`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_data_type`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope' in local_var_params:
path_params['scope'] = local_var_params['scope'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "DataType",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/datatypes/{scope}/{code}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
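    # Illustrative call pattern (comment only; the client setup below is an
    # assumption, not generated code):
    #   api = DataTypesApi(ApiClient())
    #   data_type = api.get_data_type("my-scope", "my-code")
    # Pass async_req=True to receive a thread instead and call .get() on it.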
def get_units_from_data_type(self, scope, code, **kwargs): # noqa: E501
"""[EARLY ACCESS] GetUnitsFromDataType: Get units from data type # noqa: E501
Get the definitions of the specified units bound to a specific data type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_units_from_data_type(scope, code, async_req=True)
>>> result = thread.get()
:param scope: The scope of the data type (required)
:type scope: str
:param code: The code of the data type (required)
:type code: str
:param units: One or more unit identifiers for which the definition is being requested
:type units: list[str]
:param filter: Optional. Expression to filter the result set. For example, to filter on the Schema, use \"schema eq 'string'\" Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:type filter: str
:param as_at: Optional. The as at of the requested data type
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ResourceListOfIUnitDefinitionDto
"""
kwargs['_return_http_data_only'] = True
return self.get_units_from_data_type_with_http_info(scope, code, **kwargs) # noqa: E501
def get_units_from_data_type_with_http_info(self, scope, code, **kwargs): # noqa: E501
"""[EARLY ACCESS] GetUnitsFromDataType: Get units from data type # noqa: E501
Get the definitions of the specified units bound to a specific data type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_units_from_data_type_with_http_info(scope, code, async_req=True)
>>> result = thread.get()
:param scope: The scope of the data type (required)
:type scope: str
:param code: The code of the data type (required)
:type code: str
:param units: One or more unit identifiers for which the definition is being requested
:type units: list[str]
:param filter: Optional. Expression to filter the result set. For example, to filter on the Schema, use \"schema eq 'string'\" Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:type filter: str
:param as_at: Optional. The as at of the requested data type
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfIUnitDefinitionDto, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'scope',
'code',
'units',
'filter',
'as_at'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_units_from_data_type" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `get_units_from_data_type`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `get_units_from_data_type`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `get_units_from_data_type`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_units_from_data_type`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_units_from_data_type`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_units_from_data_type`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) > 16384): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_units_from_data_type`, length must be less than or equal to `16384`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) < 0): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_units_from_data_type`, length must be greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'filter' in local_var_params and not re.search(r'^[\s\S]*$', local_var_params['filter']): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `get_units_from_data_type`, must conform to the pattern `/^[\s\S]*$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope' in local_var_params:
path_params['scope'] = local_var_params['scope'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
if 'units' in local_var_params and local_var_params['units'] is not None: # noqa: E501
query_params.append(('units', local_var_params['units'])) # noqa: E501
collection_formats['units'] = 'multi' # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfIUnitDefinitionDto",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/datatypes/{scope}/{code}/units', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def list_data_type_summaries(self, **kwargs): # noqa: E501
"""[EXPERIMENTAL] ListDataTypeSummaries: List all data type summaries, without the reference data # noqa: E501
List all data type summaries # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_data_type_summaries(async_req=True)
>>> result = thread.get()
:param as_at: The asAt datetime at which to list the data type summaries. Defaults to returning the latest version of each summary if not specified.
:type as_at: datetime
:param page: The pagination token to use to continue listing data type summaries. This value is returned from the previous call. If a pagination token is provided, the filter, effectiveAt and asAt fields must not have changed since the original request. Also, if set, a start value cannot be provided.
:type page: str
:param start: When paginating, skip this number of results.
:type start: int
:param limit: When paginating, limit the results to this number. Defaults to 100 if not specified.
:type limit: int
:param filter: Optional. Expression to filter the result set. For example, to filter on the Scope, use \"id.scope eq 'myscope'\", to filter on Schema, use \"schema eq 'string'\", to filter on AcceptableValues use \"acceptableValues any (~ eq 'value')\" Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:type filter: str
:param sort_by: Sort the results by these fields. Use the '-' sign to denote descending order e.g. -MyFieldName.
:type sort_by: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: PagedResourceListOfDataTypeSummary
"""
kwargs['_return_http_data_only'] = True
return self.list_data_type_summaries_with_http_info(**kwargs) # noqa: E501
def list_data_type_summaries_with_http_info(self, **kwargs): # noqa: E501
"""[EXPERIMENTAL] ListDataTypeSummaries: List all data type summaries, without the reference data # noqa: E501
List all data type summaries # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_data_type_summaries_with_http_info(async_req=True)
>>> result = thread.get()
:param as_at: The asAt datetime at which to list the data type summaries. Defaults to returning the latest version of each summary if not specified.
:type as_at: datetime
:param page: The pagination token to use to continue listing data type summaries. This value is returned from the previous call. If a pagination token is provided, the filter, effectiveAt and asAt fields must not have changed since the original request. Also, if set, a start value cannot be provided.
:type page: str
:param start: When paginating, skip this number of results.
:type start: int
:param limit: When paginating, limit the results to this number. Defaults to 100 if not specified.
:type limit: int
:param filter: Optional. Expression to filter the result set. For example, to filter on the Scope, use \"id.scope eq 'myscope'\", to filter on Schema, use \"schema eq 'string'\", to filter on AcceptableValues use \"acceptableValues any (~ eq 'value')\" Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:type filter: str
:param sort_by: Sort the results by these fields. Use the '-' sign to denote descending order e.g. -MyFieldName.
:type sort_by: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(PagedResourceListOfDataTypeSummary, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'as_at',
'page',
'start',
'limit',
'filter',
'sort_by'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_data_type_summaries" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('page' in local_var_params and # noqa: E501
len(local_var_params['page']) > 500): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `list_data_type_summaries`, length must be less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('page' in local_var_params and # noqa: E501
len(local_var_params['page']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `list_data_type_summaries`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'page' in local_var_params and not re.search(r'^[a-zA-Z0-9\+\/]*={0,3}$', local_var_params['page']): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `list_data_type_summaries`, must conform to the pattern `/^[a-zA-Z0-9\+\/]*={0,3}$/`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 5000: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `list_data_type_summaries`, must be a value less than or equal to `5000`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `list_data_type_summaries`, must be a value greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) > 16384): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `list_data_type_summaries`, length must be less than or equal to `16384`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) < 0): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `list_data_type_summaries`, length must be greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'filter' in local_var_params and not re.search(r'^[\s\S]*$', local_var_params['filter']): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `list_data_type_summaries`, must conform to the pattern `/^[\s\S]*$/`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'start' in local_var_params and local_var_params['start'] is not None: # noqa: E501
query_params.append(('start', local_var_params['start'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
if 'sort_by' in local_var_params and local_var_params['sort_by'] is not None: # noqa: E501
query_params.append(('sortBy', local_var_params['sort_by'])) # noqa: E501
collection_formats['sortBy'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "PagedResourceListOfDataTypeSummary",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/datatypes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def list_data_types(self, scope, **kwargs): # noqa: E501
"""ListDataTypes: List data types # noqa: E501
List all data types in a specified scope # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_data_types(scope, async_req=True)
>>> result = thread.get()
:param scope: The requested scope of the data types (required)
:type scope: str
:param as_at: The as at of the requested data types
:type as_at: datetime
:param include_system: Whether to additionally include those data types in the \"system\" scope
:type include_system: bool
:param sort_by: Optional. Order the results by these fields. Use the '-' sign to denote descending order e.g. -MyFieldName
:type sort_by: list[str]
:param start: Optional. When paginating, skip this number of results
:type start: int
:param limit: Optional. When paginating, limit the number of returned results to this many.
:type limit: int
:param filter: Optional. Expression to filter the result set. For example, to filter on the Display Name, use \"displayName eq 'string'\" Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:type filter: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ResourceListOfDataType
"""
kwargs['_return_http_data_only'] = True
return self.list_data_types_with_http_info(scope, **kwargs) # noqa: E501
def list_data_types_with_http_info(self, scope, **kwargs): # noqa: E501
"""ListDataTypes: List data types # noqa: E501
List all data types in a specified scope # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_data_types_with_http_info(scope, async_req=True)
>>> result = thread.get()
:param scope: The requested scope of the data types (required)
:type scope: str
:param as_at: The as at of the requested data types
:type as_at: datetime
:param include_system: Whether to additionally include those data types in the \"system\" scope
:type include_system: bool
:param sort_by: Optional. Order the results by these fields. Use the '-' sign to denote descending order e.g. -MyFieldName
:type sort_by: list[str]
:param start: Optional. When paginating, skip this number of results
:type start: int
:param limit: Optional. When paginating, limit the number of returned results to this many.
:type limit: int
:param filter: Optional. Expression to filter the result set. For example, to filter on the Display Name, use \"displayName eq 'string'\" Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:type filter: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfDataType, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'scope',
'as_at',
'include_system',
'sort_by',
'start',
'limit',
'filter'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_data_types" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `list_data_types`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `list_data_types`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `list_data_types`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) > 16384): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `list_data_types`, length must be less than or equal to `16384`") # noqa: E501
if self.api_client.client_side_validation and ('filter' in local_var_params and # noqa: E501
len(local_var_params['filter']) < 0): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `list_data_types`, length must be greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'filter' in local_var_params and not re.search(r'^[\s\S]*$', local_var_params['filter']): # noqa: E501
raise ApiValueError("Invalid value for parameter `filter` when calling `list_data_types`, must conform to the pattern `/^[\s\S]*$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope' in local_var_params:
path_params['scope'] = local_var_params['scope'] # noqa: E501
query_params = []
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'include_system' in local_var_params and local_var_params['include_system'] is not None: # noqa: E501
query_params.append(('includeSystem', local_var_params['include_system'])) # noqa: E501
if 'sort_by' in local_var_params and local_var_params['sort_by'] is not None: # noqa: E501
query_params.append(('sortBy', local_var_params['sort_by'])) # noqa: E501
collection_formats['sortBy'] = 'multi' # noqa: E501
if 'start' in local_var_params and local_var_params['start'] is not None: # noqa: E501
query_params.append(('start', local_var_params['start'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfDataType",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/datatypes/{scope}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def update_data_type(self, scope, code, update_data_type_request, **kwargs): # noqa: E501
"""[EXPERIMENTAL] UpdateDataType: Update data type definition # noqa: E501
        Update the definition of the specified existing data type. Not all elements within a data type definition are modifiable due to the potential implications for data already stored against the types  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_data_type(scope, code, update_data_type_request, async_req=True)
>>> result = thread.get()
:param scope: The scope of the data type (required)
:type scope: str
:param code: The code of the data type (required)
:type code: str
:param update_data_type_request: The updated definition of the data type (required)
:type update_data_type_request: UpdateDataTypeRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DataType
"""
kwargs['_return_http_data_only'] = True
return self.update_data_type_with_http_info(scope, code, update_data_type_request, **kwargs) # noqa: E501
def update_data_type_with_http_info(self, scope, code, update_data_type_request, **kwargs): # noqa: E501
"""[EXPERIMENTAL] UpdateDataType: Update data type definition # noqa: E501
        Update the definition of the specified existing data type. Not all elements within a data type definition are modifiable due to the potential implications for data already stored against the types  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_data_type_with_http_info(scope, code, update_data_type_request, async_req=True)
>>> result = thread.get()
:param scope: The scope of the data type (required)
:type scope: str
:param code: The code of the data type (required)
:type code: str
:param update_data_type_request: The updated definition of the data type (required)
:type update_data_type_request: UpdateDataTypeRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: response data without HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(DataType, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'scope',
'code',
'update_data_type_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_data_type" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'update_data_type_request' is set
if self.api_client.client_side_validation and ('update_data_type_request' not in local_var_params or # noqa: E501
local_var_params['update_data_type_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `update_data_type_request` when calling `update_data_type`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `update_data_type`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `update_data_type`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `update_data_type`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `update_data_type`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `update_data_type`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `update_data_type`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope' in local_var_params:
path_params['scope'] = local_var_params['scope'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'update_data_type_request' in local_var_params:
body_params = local_var_params['update_data_type_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.4425'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "DataType",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/datatypes/{scope}/{code}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def update_reference_values(self, scope, code, field_value, **kwargs): # noqa: E501
"""[EXPERIMENTAL] UpdateReferenceValues: Update reference data on a data type # noqa: E501
Replaces the whole set of reference values # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_reference_values(scope, code, field_value, async_req=True)
>>> result = thread.get()
:param scope: The scope of the data type (required)
:type scope: str
:param code: The code of the data type (required)
:type code: str
:param field_value: The updated reference values (required)
:type field_value: list[FieldValue]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DataType
"""
kwargs['_return_http_data_only'] = True
return self.update_reference_values_with_http_info(scope, code, field_value, **kwargs) # noqa: E501
def update_reference_values_with_http_info(self, scope, code, field_value, **kwargs): # noqa: E501
"""[EXPERIMENTAL] UpdateReferenceValues: Update reference data on a data type # noqa: E501
Replaces the whole set of reference values # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_reference_values_with_http_info(scope, code, field_value, async_req=True)
>>> result = thread.get()
:param scope: The scope of the data type (required)
:type scope: str
:param code: The code of the data type (required)
:type code: str
:param field_value: The updated reference values (required)
:type field_value: list[FieldValue]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: response data without HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(DataType, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'scope',
'code',
'field_value'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_reference_values" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'field_value' is set
if self.api_client.client_side_validation and ('field_value' not in local_var_params or # noqa: E501
local_var_params['field_value'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `field_value` when calling `update_reference_values`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `update_reference_values`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('scope' in local_var_params and # noqa: E501
len(local_var_params['scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `update_reference_values`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `scope` when calling `update_reference_values`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `update_reference_values`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `update_reference_values`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `update_reference_values`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope' in local_var_params:
path_params['scope'] = local_var_params['scope'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'field_value' in local_var_params:
body_params = local_var_params['field_value']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.4425'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "DataType",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/datatypes/{scope}/{code}/referencedatavalues', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
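# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated SDK): how the DataTypesApi
# methods above might be called once an authenticated api instance exists.
# The scope value and the `api` argument are illustrative assumptions; only
# parameters documented in the docstrings above are used.
def _data_types_demo(api):
    # synchronous call: returns a ResourceListOfDataType directly
    page = api.list_data_types("my-scope", limit=10, include_system=True)
    # asynchronous call: returns the request thread; fetch the result with .get()
    thread = api.list_data_types("my-scope", async_req=True)
    async_page = thread.get()
    return page, async_page
# ---------------------------------------------------------------------------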
|
import inspect
from typing import Iterable, Optional
from tqdm import tqdm
from ..utils.translations import trans
_tqdm_kwargs = {
p.name
for p in inspect.signature(tqdm.__init__).parameters.values()
if p.kind is not inspect.Parameter.VAR_KEYWORD and p.name != "self"
}
class progress(tqdm):
"""This class inherits from tqdm and provides an interface for
progress bars in the napari viewer. Progress bars can be created
directly by wrapping an iterable or by providing a total number
of expected updates.
See tqdm.tqdm API for valid args and kwargs:
https://tqdm.github.io/docs/tqdm/
    Any keyword arguments to the :class:`ProgressBar` `QWidget`
    are also accepted and will be passed to the ``ProgressBar``.
Examples
--------
>>> def long_running(steps=10, delay=0.1):
... for i in progress(range(steps)):
... sleep(delay)
it can also be used as a context manager:
>>> def long_running(steps=10, repeats=4, delay=0.1):
... with progress(range(steps)) as pbr:
... for i in pbr:
... sleep(delay)
or equivalently, using the `progrange` shorthand
... with progrange(steps) as pbr:
... for i in pbr:
... sleep(delay)
For manual updates:
>>> def manual_updates(total):
... pbr = progress(total=total)
... sleep(10)
... pbr.set_description("Step 1 Complete")
... pbr.update(1)
    ...     # must call pbr.close() when used outside a for loop
    ...     # or context manager
... pbr.close()
"""
monitor_interval = 0 # set to 0 to disable the thread
def __init__(
self,
iterable: Optional[Iterable] = None,
desc: Optional[str] = None,
total: Optional[int] = None,
nest_under: Optional['progress'] = None,
*args,
**kwargs,
) -> None:
kwargs = kwargs.copy()
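        # any keyword arguments tqdm.__init__ does not recognise are assumed to be
        # options for the napari ProgressBar QWidget and are forwarded to get_pbar below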
pbar_kwargs = {k: kwargs.pop(k) for k in set(kwargs) - _tqdm_kwargs}
self._group_token = None
# get progress bar added to viewer
try:
from .._qt.dialogs.activity_dialog import get_pbar
pbar = get_pbar(self, nest_under=nest_under, **pbar_kwargs)
except ImportError:
pbar = None
if pbar is not None:
kwargs['gui'] = True
self._pbar = pbar
super().__init__(iterable, desc, total, *args, **kwargs)
if not self._pbar:
return
if self.total is not None:
self._pbar.setRange(self.n, self.total)
self._pbar._set_value(self.n)
else:
self._pbar.setRange(0, 0)
self.total = 0
if desc:
self.set_description(desc)
else:
self.set_description(trans._("progress"))
def display(self, msg: str = None, pos: int = None) -> None:
"""Update the display."""
if not self._pbar:
return super().display(msg=msg, pos=pos)
if self.total != 0:
etas = str(self).split('|')[-1]
try:
self._pbar._set_value(self.n)
self._pbar._set_eta(etas)
except AttributeError:
pass
def increment_with_overflow(self):
"""Update if not exceeding total, else set indeterminate range."""
if self.n == self.total:
self.total = 0
if self._pbar:
self._pbar.setRange(0, 0)
else:
self.update(1)
def set_description(self, desc):
"""Update progress bar description"""
super().set_description(desc, refresh=True)
if self._pbar:
self._pbar._set_description(self.desc)
def close(self):
"""Closes and deletes the progress bar widget"""
if self.disable:
return
if self._pbar:
self.close_pbar()
super().close()
def close_pbar(self):
if self.disable or not self._pbar:
return
from napari._qt.widgets.qt_progress_bar import (
ProgressBar,
ProgressBarGroup,
)
parent_widget = self._pbar.parent()
self._pbar.close()
self._pbar.deleteLater()
if isinstance(parent_widget, ProgressBarGroup):
pbar_children = [
child
for child in parent_widget.children()
if isinstance(child, ProgressBar)
]
if not any(child.isVisible() for child in pbar_children):
parent_widget.close()
self._pbar = None
def progrange(*args, **kwargs):
"""Shorthand for `progress(range(*args), **kwargs)`.
Adds tqdm based progress bar to napari viewer, if it
exists, and returns the wrapped range object.
Returns
-------
progress
wrapped range object
"""
return progress(range(*args), **kwargs)
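# Hedged sketch (not part of this module): nesting one progress bar under another
# via the ``nest_under`` argument accepted by ``progress.__init__`` above. The
# loop sizes and sleep duration are arbitrary illustration values.
def _nested_progress_demo():
    from time import sleep
    outer = progress(range(3), desc="outer loop")
    for _ in outer:
        for _ in progress(range(5), desc="inner loop", nest_under=outer):
            sleep(0.01)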
|
from flask import Blueprint, render_template
from macronizer_cores import db
# create error blueprint
errors = Blueprint('errors', __name__)
# SECTION - routes
# NOTE - app_errorhandler() is a Blueprint method that registers the handler application-wide, unlike errorhandler(), which only handles errors raised from within this blueprint
@errors.app_errorhandler(404)
def page_not_found(e):
'''Handle 404 error'''
return render_template('errors/404.html'), 404
@errors.app_errorhandler(500)
def internal_server_error(e):
'''Handle 500 error'''
db.session.rollback()
return render_template('errors/500.html'), 500
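# Hedged sketch (not part of this module): the blueprint above still has to be
# attached to the application. In an app-factory layout this would typically
# happen inside the factory; the helper below only shows the registration call.
def register_error_handlers(app):
    '''Attach the errors blueprint to a flask.Flask instance'''
    app.register_blueprint(errors)
    return app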
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 3.2.5 on 2021-08-03 02:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('categories', '0009_auto_20210413_1702'),
]
operations = [
migrations.CreateModel(
name='SyncProgress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True)),
('update_time', models.DateTimeField(auto_now=True)),
('task_id', models.UUIDField(db_index=True, verbose_name='任务id')),
('step', models.CharField(choices=[('users', '用户数据更新'), ('departments', '组织数据更新'), ('users_relationship', '用户间关系数据更新'), ('dept_user_relationship', '用户和组织关系数据更新')], max_length=32, verbose_name='同步步骤')),
('status', models.CharField(choices=[('successful', '成功'), ('failed', '失败'), ('running', '同步中')], default='running', max_length=16, verbose_name='状态')),
('successful_count', models.IntegerField(verbose_name='同步成功数量', default=0)),
('failed_count', models.IntegerField(verbose_name='同步失败数量', default=0)),
],
),
migrations.AlterField(
model_name='profilecategory',
name='type',
field=models.CharField(choices=[('local', '本地目录'), ('mad', 'Microsoft Active Directory'), ('ldap', 'OpenLDAP'), ('tof', 'TOF'), ('custom', '自定义目录'), ('pluggable', '可插拔目录')], max_length=32, verbose_name='类型'),
),
migrations.CreateModel(
name='SyncProgressLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True)),
('update_time', models.DateTimeField(auto_now=True)),
('logs', models.TextField(verbose_name='日志')),
('failed_records', models.JSONField(default=list)),
('progress', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='log', to='categories.syncprogress')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='syncprogress',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='categories.profilecategory', verbose_name='用户目录'),
),
migrations.AlterUniqueTogether(
name='syncprogress',
unique_together={('category', 'step', 'task_id')},
),
]
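# Hedged illustration (not part of this migration): once applied, the sync progress
# of a given task could be inspected roughly like this; the import path
# ``bkuser_core.categories.models`` is an assumption about the project layout.
#
#   from bkuser_core.categories.models import SyncProgress
#   SyncProgress.objects.filter(task_id=task_id).values("step", "status", "successful_count", "failed_count")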
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
Tasks represent atomic operations such as processes.
"""
import os, shutil, re, tempfile, time, pprint
from waflib import Context, Utils, Logs, Errors
from binascii import hexlify
# task states
NOT_RUN = 0
"""The task was not executed yet"""
MISSING = 1
"""The task has been executed but the files have not been created"""
CRASHED = 2
"""The task execution returned a non-zero exit status"""
EXCEPTION = 3
"""An exception occured in the task execution"""
SKIPPED = 8
"""The task did not have to be executed"""
SUCCESS = 9
"""The task was successfully executed"""
ASK_LATER = -1
"""The task is not ready to be executed"""
SKIP_ME = -2
"""The task does not need to be executed"""
RUN_ME = -3
"""The task must be executed"""
COMPILE_TEMPLATE_SHELL = '''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
wd = getattr(tsk, 'cwd', None)
p = env.get_flat
tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s
return tsk.exec_command(cmd, cwd=wd, env=env.env or None)
'''
COMPILE_TEMPLATE_NOSHELL = '''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
wd = getattr(tsk, 'cwd', None)
def to_list(xx):
if isinstance(xx, str): return [xx]
return xx
tsk.last_cmd = lst = []
%s
lst = [x for x in lst if x]
return tsk.exec_command(lst, cwd=wd, env=env.env or None)
'''
def cache_outputs(cls):
"""
Task class decorator applied to all task classes by default unless they define the attribute 'nocache'::
from waflib import Task
class foo(Task.Task):
nocache = True
	If bld.cache_global is defined and if the task instance produces output nodes,
	the files will be copied into a folder in the cache directory.
	The files may also be retrieved from that folder, if it exists.
"""
m1 = cls.run
def run(self):
bld = self.generator.bld
if bld.cache_global and not bld.nocache:
if self.can_retrieve_cache():
return 0
return m1(self)
cls.run = run
m2 = cls.post_run
def post_run(self):
bld = self.generator.bld
ret = m2(self)
if bld.cache_global and not bld.nocache:
self.put_files_cache()
return ret
cls.post_run = post_run
return cls
classes = {}
"class tasks created by user scripts or Waf tools are kept in this dict name -> class object"
class store_task_type(type):
"""
Metaclass: store the task classes into :py:const:`waflib.Task.classes`, or to the dict pointed
by the class attribute 'register'.
The attribute 'run_str' will be processed to compute a method 'run' on the task class
The decorator :py:func:`waflib.Task.cache_outputs` is also applied to the class
"""
def __init__(cls, name, bases, dict):
super(store_task_type, cls).__init__(name, bases, dict)
name = cls.__name__
if name.endswith('_task'):
name = name.replace('_task', '')
if name != 'evil' and name != 'TaskBase':
global classes
if getattr(cls, 'run_str', None):
# if a string is provided, convert it to a method
(f, dvars) = compile_fun(cls.run_str, cls.shell)
cls.hcode = cls.run_str
cls.run_str = None
cls.run = f
cls.vars = list(set(cls.vars + dvars))
cls.vars.sort()
elif getattr(cls, 'run', None) and not 'hcode' in cls.__dict__:
# getattr(cls, 'hcode') would look in the upper classes
cls.hcode = Utils.h_fun(cls.run)
if not getattr(cls, 'nocache', None):
cls = cache_outputs(cls)
# be creative
getattr(cls, 'register', classes)[name] = cls
evil = store_task_type('evil', (object,), {})
"Base class provided to avoid writing a metaclass, so the code can run in python 2.6 and 3.x unmodified"
class TaskBase(evil):
"""
Base class for all Waf tasks, which should be seen as an interface.
For illustration purposes, instances of this class will execute the attribute
'fun' in :py:meth:`waflib.Task.TaskBase.run`. When in doubt, create
subclasses of :py:class:`waflib.Task.Task` instead.
Subclasses should override these methods:
#. __str__: string to display to the user
#. runnable_status: ask the task if it should be run, skipped, or if we have to ask later
#. run: let threads execute the task
#. post_run: let threads update the data regarding the task (cache)
"""
color = 'GREEN'
"""Color for the console display, see :py:const:`waflib.Logs.colors_lst`"""
ext_in = []
"""File extensions that objects of this task class might use"""
ext_out = []
"""File extensions that objects of this task class might create"""
before = []
"""List of task class names to execute before instances of this class"""
after = []
"""List of task class names to execute after instances of this class"""
hcode = ''
"""String representing an additional hash for the class representation"""
def __init__(self, *k, **kw):
"""
The base task class requires a task generator, which will be itself if missing
"""
self.hasrun = NOT_RUN
try:
self.generator = kw['generator']
except KeyError:
self.generator = self
def __repr__(self):
"for debugging purposes"
return '\n\t{task %r: %s %s}' % (self.__class__.__name__, id(self), str(getattr(self, 'fun', '')))
def __str__(self):
"string to display to the user"
if hasattr(self, 'fun'):
return 'executing: %s\n' % self.fun.__name__
return self.__class__.__name__ + '\n'
def __hash__(self):
"Very fast hashing scheme but not persistent (replace/implement in subclasses and see :py:meth:`waflib.Task.Task.uid`)"
return id(self)
def exec_command(self, cmd, **kw):
"""
Wrapper for :py:meth:`waflib.Context.Context.exec_command` which sets a current working directory to ``build.variant_dir``
:return: the return code
:rtype: int
"""
bld = self.generator.bld
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
return bld.exec_command(cmd, **kw)
def runnable_status(self):
"""
State of the task
:return: a task state in :py:const:`waflib.Task.RUN_ME`, :py:const:`waflib.Task.SKIP_ME` or :py:const:`waflib.Task.ASK_LATER`.
:rtype: int
"""
return RUN_ME
def process(self):
"""
Assume that the task has had a new attribute ``master`` which is an instance of :py:class:`waflib.Runner.Parallel`.
Execute the task and then put it back in the queue :py:attr:`waflib.Runner.Parallel.out` (may be replaced by subclassing).
"""
m = self.master
if m.stop:
m.out.put(self)
return
# remove the task signature immediately before it is executed
# in case of failure the task will be executed again
try:
del self.generator.bld.task_sigs[self.uid()]
except KeyError:
pass
try:
self.generator.bld.returned_tasks.append(self)
self.log_display(self.generator.bld)
ret = self.run()
except Exception:
self.err_msg = Utils.ex_stack()
self.hasrun = EXCEPTION
# TODO cleanup
m.error_handler(self)
m.out.put(self)
return
if ret:
self.err_code = ret
self.hasrun = CRASHED
else:
try:
self.post_run()
except Errors.WafError:
pass
except Exception:
self.err_msg = Utils.ex_stack()
self.hasrun = EXCEPTION
else:
self.hasrun = SUCCESS
if self.hasrun != SUCCESS:
m.error_handler(self)
m.out.put(self)
def run(self):
"""
Called by threads to execute the tasks. The default is empty and meant to be overridden in subclasses.
It is a bad idea to create nodes in this method (so, no node.ant_glob)
:rtype: int
"""
if hasattr(self, 'fun'):
return self.fun(self)
return 0
def post_run(self):
"Update the cache files (executed by threads). Override in subclasses."
pass
def log_display(self, bld):
"Write the execution status on the context logger"
bld.to_log(self.display())
def display(self):
"""
Return an execution status for the console, the progress bar, or the IDE output.
:rtype: string
"""
col1 = Logs.colors(self.color)
col2 = Logs.colors.NORMAL
master = self.master
def cur():
# the current task position, computed as late as possible
tmp = -1
if hasattr(master, 'ready'):
tmp -= master.ready.qsize()
return master.processed + tmp
if self.generator.bld.progress_bar == 1:
return self.generator.bld.progress_line(cur(), master.total, col1, col2)
if self.generator.bld.progress_bar == 2:
ela = str(self.generator.bld.timer)
try:
ins = ','.join([n.name for n in self.inputs])
except AttributeError:
ins = ''
try:
outs = ','.join([n.name for n in self.outputs])
except AttributeError:
outs = ''
return '|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n' % (master.total, cur(), ins, outs, ela)
s = str(self)
if not s:
return None
total = master.total
n = len(str(total))
fs = '[%%%dd/%%%dd] %%s%%s%%s' % (n, n)
return fs % (cur(), total, col1, s, col2)
def attr(self, att, default=None):
"""
Retrieve an attribute from the instance or from the class.
:param att: variable name
:type att: string
:param default: default value
"""
ret = getattr(self, att, self)
if ret is self: return getattr(self.__class__, att, default)
return ret
def hash_constraints(self):
"""
Identify a task type for all the constraints relevant for the scheduler: precedence, file production
:return: a hash value
:rtype: string
"""
cls = self.__class__
tup = (str(cls.before), str(cls.after), str(cls.ext_in), str(cls.ext_out), cls.__name__, cls.hcode)
h = hash(tup)
return h
def format_error(self):
"""
Error message to display to the user when a build fails
:rtype: string
"""
msg = getattr(self, 'last_cmd', '')
		# Format msg to be more readable
output = ''
for i in msg:
if not isinstance(i, str):
output += str(i) + ' '
else:
output += i + ' '
msg = output[:len(output)-1]
name = self.__class__.__name__.replace('_task', '') + ' (' + self.env['PLATFORM'] + '|' + self.env['CONFIGURATION'] + ')'
if getattr(self, "err_msg", None):
return self.err_msg
elif not self.hasrun:
return 'task in %r was not executed for some reason: %r' % (name, self)
elif self.hasrun == CRASHED:
try:
return ' -> task in %r failed (exit status %r): %r\n%r' % (name, self.err_code, self, msg)
except AttributeError:
return ' -> task in %r failed: %r\n%r' % (name, self, msg)
elif self.hasrun == MISSING:
return ' -> missing files in %r: %r\n%r' % (name, self, msg)
else:
return 'invalid status for task in %r: %r' % (name, self.hasrun)
def colon(self, var1, var2):
"""
private function for the moment
used for scriptlet expressions such as ${FOO_ST:FOO}, for example, if
env.FOO_ST = ['-a', '-b']
env.FOO = ['1', '2']
then the result will be ['-a', '-b', '1', '-a', '-b', '2']
"""
tmp = self.env[var1]
if isinstance(var2, str):
it = self.env[var2]
else:
it = var2
if isinstance(tmp, str):
return [tmp % x for x in it]
else:
if Logs.verbose and not tmp and it:
Logs.warn('Missing env variable %r for task %r (generator %r)' % (var1, self, self.generator))
lst = []
for y in it:
lst.extend(tmp)
lst.append(y)
return lst
class Task(TaskBase):
"""
This class deals with the filesystem (:py:class:`waflib.Node.Node`). The method :py:class:`waflib.Task.Task.runnable_status`
uses a hash value (from :py:class:`waflib.Task.Task.signature`) which is persistent from build to build. When the value changes,
the task has to be executed. The method :py:class:`waflib.Task.Task.post_run` will assign the task signature to the output
nodes (if present).
"""
vars = []
"""Variables to depend on (class attribute used for :py:meth:`waflib.Task.Task.sig_vars`)"""
shell = False
"""Execute the command with the shell (class attribute)"""
def __init__(self, *k, **kw):
TaskBase.__init__(self, *k, **kw)
self.env = kw['env']
"""ConfigSet object (make sure to provide one)"""
self.inputs = []
"""List of input nodes, which represent the files used by the task instance"""
self.outputs = []
"""List of output nodes, which represent the files created by the task instance"""
self.dep_nodes = []
"""List of additional nodes to depend on"""
self.run_after = set([])
"""Set of tasks that must be executed before this one"""
self.sig_debug_output = ''
"""String output detailing a signature mismatch"""
self.sig_implicit_debug_log = ''
"""String output to aggregate implicit deps for logging purposes"""
# Additionally, you may define the following
#self.dep_vars = 'PREFIX DATADIR'
def __str__(self):
"string to display to the user"
env = self.env
src_str = ' '.join([a.nice_path() for a in self.inputs])
tgt_str = ' '.join([a.nice_path() for a in self.outputs])
if self.outputs and self.inputs: sep = ' -> '
else: sep = ''
name = self.__class__.__name__.replace('_task', '') + ' (' + env['PLATFORM'] + '|' + env['CONFIGURATION'] + ')'
return '%s: %s%s%s\n' % (name, src_str, sep, tgt_str)
def __repr__(self):
"for debugging purposes"
try:
ins = ",".join([x.name for x in self.inputs])
outs = ",".join([x.name for x in self.outputs])
except AttributeError:
ins = ",".join([str(x) for x in self.inputs])
outs = ",".join([str(x) for x in self.outputs])
return "".join(['\n\t{task %r: ' % id(self), self.__class__.__name__, " ", ins, " -> ", outs, '}'])
def uid(self):
"""
Return an identifier used to determine if tasks are up-to-date. Since the
identifier will be stored between executions, it must be:
- unique: no two tasks return the same value (for a given build context)
- the same for a given task instance
By default, the node paths, the class name, and the function are used
as inputs to compute a hash.
The pointer to the object (python built-in 'id') will change between build executions,
and must be avoided in such hashes.
:return: hash value
:rtype: string
"""
try:
return self.uid_
except AttributeError:
# this is not a real hot zone, but we want to avoid surprises here
m = Utils.md5()
up = m.update
up(self.__class__.__name__.encode())
deplist = [k.abspath().encode() for k in self.inputs + self.outputs]
dep_bld_sigs_str = "".join(deplist)
up(dep_bld_sigs_str)
self.uid_ = m.digest()
return self.uid_
def set_inputs(self, inp):
"""
Append the nodes to the *inputs*
:param inp: input nodes
:type inp: node or list of nodes
"""
if isinstance(inp, list): self.inputs += inp
else: self.inputs.append(inp)
def set_outputs(self, out):
"""
Append the nodes to the *outputs*
:param out: output nodes
:type out: node or list of nodes
"""
if isinstance(out, list): self.outputs += out
else: self.outputs.append(out)
def set_run_after(self, task):
"""
Run this task only after *task*. Affect :py:meth:`waflib.Task.runnable_status`
You probably want to use tsk.run_after.add(task) directly
:param task: task
:type task: :py:class:`waflib.Task.Task`
"""
assert isinstance(task, TaskBase)
self.run_after.add(task)
def signature(self):
"""
		Task signatures are stored between build executions; they are used to track the changes
made to the input nodes (not to the outputs!). The signature hashes data from various sources:
* explicit dependencies: files listed in the inputs (list of node objects) :py:meth:`waflib.Task.Task.sig_explicit_deps`
* implicit dependencies: list of nodes returned by scanner methods (when present) :py:meth:`waflib.Task.Task.sig_implicit_deps`
* hashed data: variables/values read from task.__class__.vars/task.env :py:meth:`waflib.Task.Task.sig_vars`
If the signature is expected to give a different result, clear the cache kept in ``self.cache_sig``::
from waflib import Task
class cls(Task.Task):
def signature(self):
				sig = super(cls, self).signature()
				delattr(self, 'cache_sig')
				return super(cls, self).signature()
"""
try: return self.cache_sig
except AttributeError: pass
self.m = Utils.md5()
self.m.update(self.hcode.encode())
# explicit deps
exp_deps = self.sig_explicit_deps()
self.m.update(exp_deps)
# env vars
self.sig_vars()
# implicit deps / scanner results
if self.scan:
imp_deps = self.sig_implicit_deps()
self.m.update(imp_deps)
ret = self.cache_sig = self.m.digest()
return ret
def runnable_status(self):
"""
Override :py:meth:`waflib.Task.TaskBase.runnable_status` to determine if the task is ready
to be run (:py:attr:`waflib.Task.Task.run_after`)
"""
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
bld = self.generator.bld
self.sig_debug_output = ""
# first compute the signature
try:
new_sig = self.signature()
except Errors.TaskNotReady:
return ASK_LATER
# compare the signature to a signature computed previously
key = self.uid()
try:
prev_sig = bld.task_sigs[key]
except KeyError:
Logs.debug("task: task %r must run as it was never run before or the task code changed" % self)
return RUN_ME
if new_sig != prev_sig:
if Logs.sig_delta:
sig_debug_path = os.path.join('TaskSigDeltaOutput','{}_{}_{}.log'.format(self.__class__.__name__, self.inputs[0].name.replace(".", ""), time.time()))
sig_debug_node = self.generator.bld.bldnode.find_or_declare(sig_debug_path)
sig_debug_node.write(self.sig_debug_output)
return RUN_ME
# compare the signatures of the outputs
for node in self.outputs:
try:
if node.sig != new_sig:
return RUN_ME
except AttributeError:
Logs.debug("task: task %r must run as the output nodes do not exist" % self)
return RUN_ME
return SKIP_ME
def post_run(self):
"""
Called after successful execution to update the cache data :py:class:`waflib.Node.Node` sigs
and :py:attr:`waflib.Build.BuildContext.task_sigs`.
The node signature is obtained from the task signature, but the output nodes may also get the signature
of their contents. See the class decorator :py:func:`waflib.Task.update_outputs` if you need this behaviour.
"""
bld = self.generator.bld
sig = self.signature()
for node in self.outputs:
# check if the node exists ..
try:
os.stat(node.abspath())
except OSError:
self.hasrun = MISSING
self.err_msg = '-> missing file: %r' % node.abspath()
raise Errors.WafError(self.err_msg)
# important, store the signature for the next run
node.sig = node.cache_sig = sig
bld.task_sigs[self.uid()] = self.cache_sig
def sig_explicit_deps(self):
"""
Used by :py:meth:`waflib.Task.Task.signature`, hash :py:attr:`waflib.Task.Task.inputs`
and :py:attr:`waflib.Task.Task.dep_nodes` signatures.
:rtype: hash value
"""
bld = self.generator.bld
bld_sigs = []
exp_output = ''
# the inputs
for x in self.inputs + self.dep_nodes:
try:
bld_sig = x.get_bld_sig()
if Logs.sig_delta:
exp_output += '{} {} {}\n'.format(x.name, x.abspath(), hexlify(bld_sig))
bld_sigs.append(bld_sig)
except (AttributeError, TypeError, IOError):
Logs.warn('Missing signature for node %r (required by %r)' % (x, self))
continue # skip adding the signature to the calculation, but continue adding other dependencies
# manual dependencies, they can slow down the builds
if bld.deps_man:
additional_deps = bld.deps_man
for x in self.inputs + self.outputs:
try:
d = additional_deps[id(x)]
except KeyError:
continue
for v in d:
v_name = v.name
if isinstance(v, bld.root.__class__):
try:
v = v.get_bld_sig()
except AttributeError:
raise Errors.WafError('Missing node signature for %r (required by %r)' % (v, self))
elif hasattr(v, '__call__'):
v = v() # dependency is a function, call it
if Logs.sig_delta:
exp_output += '{} {}\n'.format(v_name, hexlify(v))
bld_sigs.append(v)
dep_bld_sigs_str = "".join(bld_sigs)
m = Utils.md5()
m.update(dep_bld_sigs_str)
explicit_sig = m.digest()
if Logs.sig_delta:
key = self.uid()
prev_sig = bld.task_sigs.get((key, 'exp'), [])
if prev_sig and prev_sig != explicit_sig:
self.capture_signature_log('\nExplicit(Old):\n')
self.capture_signature_log(bld.last_build['exp_deps'].get(key,''))
self.capture_signature_log('\nExplicit(New):\n')
self.capture_signature_log(exp_output)
bld.last_build['exp_deps'][key] = exp_output
bld.task_sigs[(key, 'exp')] = explicit_sig
return explicit_sig
def sig_vars(self):
"""
Used by :py:meth:`waflib.Task.Task.signature`, hash :py:attr:`waflib.Task.Task.env` variables/values
:rtype: hash value
"""
bld = self.generator.bld
env = self.env
upd = self.m.update
key = self.uid()
# dependencies on the environment vars
act_sig = bld.hash_env_vars(env, self.__class__.vars)
upd(act_sig)
if Logs.sig_delta:
prev_act_sig = bld.task_sigs.get((key, 'env'), [])
prev_dep_sig = bld.task_sigs.get((key, 'dep_vars'), [])
bld.task_sigs[(key, 'env')] = act_sig
# additional variable dependencies, if provided
dep_vars = getattr(self, 'dep_vars', None)
dep_sig = ''
if dep_vars:
dep_sig = bld.hash_env_vars(env, dep_vars)
upd(dep_sig)
if Logs.sig_delta:
bld.task_sigs[(key, 'dep_vars')] = dep_sig
if prev_dep_sig and prev_dep_sig != dep_sig:
self.capture_signature_log('\nDep Vars:\n'+pprint.pformat(dep_vars))
if Logs.sig_delta:
if (prev_act_sig and prev_act_sig != act_sig) or (prev_dep_sig and prev_dep_sig != dep_sig):
self.capture_signature_log('\nEnv(Current):\n'+pprint.pformat(bld.debug_get_env_vars(env, self.__class__.vars)))
prev_env = bld.last_build['env'].get(key, {})
self.capture_signature_log('\nEnv(Last):\n'+pprint.pformat(prev_env))
bld.last_build['env'][key] = bld.debug_get_env_vars(env, self.__class__.vars)
return self.m.digest()
scan = None
"""
This method, when provided, returns a tuple containing:
* a list of nodes corresponding to real files
* a list of names for files not found in path_lst
For example::
from waflib.Task import Task
class mytask(Task):
def scan(self, node):
return ((), ())
The first and second lists are stored in :py:attr:`waflib.Build.BuildContext.node_deps` and
:py:attr:`waflib.Build.BuildContext.raw_deps` respectively.
"""
def sig_implicit_deps(self):
"""
Used by :py:meth:`waflib.Task.Task.signature` hashes node signatures obtained by scanning for dependencies (:py:meth:`waflib.Task.Task.scan`).
:rtype: hash value
"""
bld = self.generator.bld
# get the task signatures from previous runs
key = self.uid()
prev = bld.task_sigs.get((key, 'imp'), [])
# for issue #379
if prev:
try:
# if a dep is deleted, it will be missing. Don't warn, the signature will be different
sid = self.compute_sig_implicit_deps(False)
if prev == sid:
return prev
except Exception:
# when a file was renamed (IOError usually), remove the stale nodes (headers in folders without source files)
# this will break the order calculation for headers created during the build in the source directory (should be uncommon)
# the behaviour will differ when top != out
deps = bld.node_deps.get(self.uid(), [])
for x in deps:
if x.is_child_of(bld.srcnode):
try:
os.stat(x.abspath())
except OSError:
try:
del x.parent.children[x.name]
except KeyError:
pass
# the previous signature and the current signature don't match, delete the implicit deps, and cause a rescan below
del bld.task_sigs[(key, 'imp')]
# no previous run or the signature of the dependencies has changed, rescan the dependencies
(nodes, names) = self.scan()
if Logs.verbose:
Logs.debug('deps: scanner for %s returned %s %s' % (str(self), str(nodes), str(names)))
# store the dependencies in the cache
bld.node_deps[key] = nodes
bld.raw_deps[key] = names
# recompute the signature and return it
old_sig_debug_log = self.sig_implicit_debug_log
bld.task_sigs[(key, 'imp')] = sig = self.compute_sig_implicit_deps(False)
# Make the equality check since it's possible we didn't have a prior imp key but had prior nodes
# and said nodes didn't change
if Logs.sig_delta and old_sig_debug_log != self.sig_implicit_debug_log:
self.capture_signature_log('\nImplicit(Old):\n')
self.capture_signature_log(old_sig_debug_log)
self.capture_signature_log('\nImplicit(New):\n')
self.capture_signature_log(self.sig_implicit_debug_log)
return sig
def compute_sig_implicit_deps(self, warn_on_missing=True):
"""
Used by :py:meth:`waflib.Task.Task.sig_implicit_deps` for computing the actual hash of the
:py:class:`waflib.Node.Node` returned by the scanner.
:return: hash value
:rtype: string
"""
bld = self.generator.bld
self.are_implicit_nodes_ready()
self.sig_implicit_debug_log = ''
		# a scanner may return a node that has no signature yet;
		# just ignore the error and let the user figure it out from the compiler output
		# (waf -k behaviour)
deps = bld.node_deps.get(self.uid(), [])
bld_sigs = []
for k in deps:
try:
bld_sig = k.get_bld_sig()
if Logs.sig_delta:
self.sig_implicit_debug_log += ('{} {}\n'.format(k.name, hexlify(bld_sig)))
except:
if warn_on_missing:
Logs.warn('Missing signature for node %r (dependency will not be tracked)' % k)
continue # skip adding the signature to the calculation, but continue adding other dependencies
bld_sigs.append(bld_sig)
dep_bld_sigs_str = "".join(bld_sigs)
m = Utils.md5()
m.update(dep_bld_sigs_str)
return m.digest()
def are_implicit_nodes_ready(self):
"""
For each node returned by the scanner, see if there is a task behind it, and force the build order
The performance impact on null builds is nearly invisible (1.66s->1.86s), but this is due to
		aggressive caching (1.86s->28s)
"""
bld = self.generator.bld
try:
cache = bld.dct_implicit_nodes
except AttributeError:
bld.dct_implicit_nodes = cache = {}
try:
dct = cache[bld.cur]
except KeyError:
dct = cache[bld.cur] = {}
for tsk in bld.cur_tasks:
for x in tsk.outputs:
dct[x] = tsk
# find any dependency that is not part of the run_after set already
deps = bld.node_deps.get(self.uid(), [])
deps_missing_from_runafter = [dct[i] for i in deps if i in dct and dct[i] not in self.run_after]
# verify that all tasks have not already run
for tsk in deps_missing_from_runafter:
if not tsk.hasrun:
#print "task is not ready..."
raise Errors.TaskNotReady('not ready')
# update the run_after tasks
self.run_after.update(deps_missing_from_runafter)
def capture_signature_log(self, output):
"""
Logging function to aggregate info on what caused the task signature to change between runs
"""
if hasattr(self, 'sig_debug_output'):
self.sig_debug_output += output
def can_retrieve_cache(self):
"""
Used by :py:meth:`waflib.Task.cache_outputs`
Retrieve build nodes from the cache
update the file timestamps to help cleaning the least used entries from the cache
additionally, set an attribute 'cached' to avoid re-creating the same cache files
Suppose there are files in `cache/dir1/file1` and `cache/dir2/file2`:
#. read the timestamp of dir1
#. try to copy the files
#. look at the timestamp again, if it has changed, the data may have been corrupt (cache update by another process)
#. should an exception occur, ignore the data
"""
if not getattr(self, 'outputs', None):
return None
sig = self.signature()
ssig = Utils.to_hex(self.uid()) + Utils.to_hex(sig)
# first try to access the cache folder for the task
dname = os.path.join(self.generator.bld.cache_global, ssig)
try:
t1 = os.stat(dname).st_mtime
except OSError:
return None
for node in self.outputs:
orig = os.path.join(dname, node.name)
try:
shutil.copy2(orig, node.abspath())
# mark the cache file as used recently (modified)
os.utime(orig, None)
except (OSError, IOError):
Logs.debug('task: failed retrieving file')
return None
# is it the same folder?
try:
t2 = os.stat(dname).st_mtime
except OSError:
return None
if t1 != t2:
return None
for node in self.outputs:
node.sig = sig
if self.generator.bld.progress_bar < 1:
self.generator.bld.to_log('restoring from cache %r\n' % node.abspath())
self.cached = True
return True
def put_files_cache(self):
"""
Used by :py:func:`waflib.Task.cache_outputs` to store the build files in the cache
"""
# file caching, if possible
# try to avoid data corruption as much as possible
if getattr(self, 'cached', None):
return None
if not getattr(self, 'outputs', None):
return None
sig = self.signature()
ssig = Utils.to_hex(self.uid()) + Utils.to_hex(sig)
dname = os.path.join(self.generator.bld.cache_global, ssig)
tmpdir = tempfile.mkdtemp(prefix=self.generator.bld.cache_global + os.sep + 'waf')
try:
shutil.rmtree(dname)
except Exception:
pass
try:
for node in self.outputs:
dest = os.path.join(tmpdir, node.name)
shutil.copy2(node.abspath(), dest)
except (OSError, IOError):
try:
shutil.rmtree(tmpdir)
except Exception:
pass
else:
try:
os.rename(tmpdir, dname)
except OSError:
try:
shutil.rmtree(tmpdir)
except Exception:
pass
else:
try:
os.chmod(dname, Utils.O755)
except Exception:
pass
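# Hedged illustration (not in the original source): a minimal user-defined task
# class. The store_task_type metaclass turns ``run_str`` into a compiled ``run``
# method via compile_fun below, and adds the referenced env variables to ``vars``.
#
#   class copy_file(Task):
#       run_str = 'cp ${SRC} ${TGT}'
#       color = 'BLUE'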
def is_before(t1, t2):
"""
Return a non-zero value if task t1 is to be executed before task t2::
t1.ext_out = '.h'
t2.ext_in = '.h'
t2.after = ['t1']
t1.before = ['t2']
waflib.Task.is_before(t1, t2) # True
:param t1: task
:type t1: :py:class:`waflib.Task.TaskBase`
:param t2: task
:type t2: :py:class:`waflib.Task.TaskBase`
"""
to_list = Utils.to_list
for k in to_list(t2.ext_in):
if k in to_list(t1.ext_out):
return 1
if t1.__class__.__name__ in to_list(t2.after):
return 1
if t2.__class__.__name__ in to_list(t1.before):
return 1
return 0
def set_file_constraints(tasks):
"""
Adds tasks to the task 'run_after' attribute based on the task inputs and outputs
:param tasks: tasks
:type tasks: list of :py:class:`waflib.Task.TaskBase`
"""
ins = Utils.defaultdict(set)
outs = Utils.defaultdict(set)
for x in tasks:
for a in getattr(x, 'inputs', []) + getattr(x, 'dep_nodes', []):
ins[id(a)].add(x)
for a in getattr(x, 'outputs', []):
outs[id(a)].add(x)
links = set(ins.keys()).intersection(outs.keys())
for k in links:
for a in ins[k]:
a.run_after.update(outs[k])
def set_precedence_constraints(tasks):
"""
Add tasks to the task 'run_after' attribute based on the after/before/ext_out/ext_in attributes
:param tasks: tasks
:type tasks: list of :py:class:`waflib.Task.TaskBase`
"""
cstr_groups = Utils.defaultdict(list)
for x in tasks:
h = x.hash_constraints()
cstr_groups[h].append(x)
keys = list(cstr_groups.keys())
maxi = len(keys)
# this list should be short
for i in range(maxi):
t1 = cstr_groups[keys[i]][0]
for j in range(i + 1, maxi):
t2 = cstr_groups[keys[j]][0]
# add the constraints based on the comparisons
if is_before(t1, t2):
a = i
b = j
elif is_before(t2, t1):
a = j
b = i
else:
continue
aval = set(cstr_groups[keys[a]])
for x in cstr_groups[keys[b]]:
x.run_after.update(aval)
def funex(c):
"""
Compile a function by 'exec'
:param c: function to compile
:type c: string
:return: the function 'f' declared in the input string
:rtype: function
"""
dc = {}
exec(c, dc)
return dc['f']
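# Hedged illustration (not in the original source): funex returns the function
# declared in the source string, e.g.
#   trivial = funex("def f(tsk):\n\treturn 0")
#   assert trivial.__name__ == 'f'
# which is exactly how the COMPILE_TEMPLATE_* strings above become ``run`` methods.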
reg_act = re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})", re.M)
def compile_fun_shell(line):
"""
Create a compiled function to execute a process with the shell
WARNING: this method may disappear anytime, so use compile_fun instead
"""
extr = []
def repl(match):
g = match.group
if g('dollar'): return "$"
elif g('backslash'): return '\\\\'
elif g('subst'): extr.append((g('var'), g('code'))); return "%s"
return None
line = reg_act.sub(repl, line) or line
parm = []
dvars = []
app = parm.append
for (var, meth) in extr:
if var == 'SRC':
if meth: app('tsk.inputs%s' % meth)
else: app('" ".join([a.path_from(bld.bldnode) for a in tsk.inputs])')
elif var == 'ABS_SRC':
if meth: app('tsk.inputs%s' % meth)
else: app('" ".join([\'"{}"\'.format(a.abspath()) for a in tsk.inputs])')
elif var == 'TGT':
if meth: app('tsk.outputs%s' % meth)
else: app('" ".join([a.path_from(bld.bldnode) for a in tsk.outputs])')
elif meth:
if meth.startswith(':'):
m = meth[1:]
if m == 'SRC':
m = '[a.path_from(bld.bldnode) for a in tsk.inputs]'
if m == 'ABS_SRC':
m = '[a.abspath() for a in tsk.inputs]'
elif m == 'TGT':
m = '[a.path_from(bld.bldnode) for a in tsk.outputs]'
elif m[:3] not in ('tsk', 'gen', 'bld'):
dvars.extend([var, meth[1:]])
m = '%r' % m
app('" ".join(tsk.colon(%r, %s))' % (var, m))
else:
app('%s%s' % (var, meth))
else:
if not var in dvars: dvars.append(var)
app("p('%s')" % var)
if parm: parm = "%% (%s) " % (',\n\t\t'.join(parm))
else: parm = ''
c = COMPILE_TEMPLATE_SHELL % (line, parm)
Logs.debug('action: %s' % c.strip().splitlines())
return (funex(c), dvars)
def compile_fun_noshell(line):
"""
Create a compiled function to execute a process without the shell
WARNING: this method may disappear anytime, so use compile_fun instead
"""
extr = []
def repl(match):
g = match.group
if g('dollar'): return "$"
elif g('subst'): extr.append((g('var'), g('code'))); return "<<|@|>>"
return None
line2 = reg_act.sub(repl, line)
params = line2.split('<<|@|>>')
assert(extr)
buf = []
dvars = []
app = buf.append
for x in range(len(extr)):
params[x] = params[x].strip()
if params[x]:
app("lst.extend(%r)" % params[x].split())
(var, meth) = extr[x]
if var == 'SRC':
if meth: app('lst.append(tsk.inputs%s)' % meth)
else: app("lst.extend([a.abspath() for a in tsk.inputs])")
elif var == 'TGT':
if meth: app('lst.append(tsk.outputs%s)' % meth)
else: app("lst.extend([a.abspath() for a in tsk.outputs])")
elif meth:
if meth.startswith(':'):
m = meth[1:]
if m == 'SRC':
m = '[a.abspath() for a in tsk.inputs]'
elif m == 'TGT':
m = '[a.abspath() for a in tsk.outputs]'
elif m[:3] not in ('tsk', 'gen', 'bld'):
dvars.extend([var, m])
m = '%r' % m
app('lst.extend(tsk.colon(%r, %s))' % (var, m))
else:
app('lst.extend(gen.to_list(%s%s))' % (var, meth))
else:
app('lst.extend(to_list(env[%r]))' % var)
if not var in dvars: dvars.append(var)
if extr:
if params[-1]:
app("lst.extend(%r)" % params[-1].split())
fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf)
Logs.debug('action: %s' % fun.strip().splitlines())
return (funex(fun), dvars)
def compile_fun(line, shell=False):
"""
Parse a string expression such as "${CC} ${SRC} -o ${TGT}" and return a pair containing:
* the function created (compiled) for use as :py:meth:`waflib.Task.TaskBase.run`
* the list of variables that imply a dependency from self.env
for example::
from waflib.Task import compile_fun
compile_fun('${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}')
def build(bld):
bld(source='wscript', rule='echo "foo\\${SRC[0].name}\\bar"')
The env variables (CXX, ..) on the task must not hold dicts (their ordering is not deterministic)
The reserved keywords *TGT* and *SRC* represent the task input and output nodes
"""
if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0:
shell = True
if shell:
return compile_fun_shell(line)
else:
return compile_fun_noshell(line)
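# Hedged usage sketch (not part of the original module): for a plain rule string the
# non-shell variant is chosen, and only bare env variables end up in dvars, since SRC and
# TGT are expanded from the task's input/output nodes.
#
#     fun, dvars = compile_fun('${CC} ${SRC} -o ${TGT}')
#     assert dvars == ['CC']
#     cls.run, cls.vars = fun, dvars   # roughly what the task metaclass does with run_str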
def task_factory(name, func=None, vars=None, color='GREEN', ext_in=[], ext_out=[], before=[], after=[], shell=False, scan=None):
"""
Deprecated. Return a new task subclass with the function ``run`` compiled from the line given.
Provided for compatibility with waf 1.4-1.5, when we did not have the metaclass to register new classes (will be removed in Waf 1.8)
:param func: method run
:type func: string or function
:param vars: list of variables to hash
:type vars: list of string
:param color: color to use
:type color: string
:param shell: when *func* is a string, enable/disable the use of the shell
:type shell: bool
:param scan: method scan
:type scan: function
:rtype: :py:class:`waflib.Task.Task`
"""
params = {
'vars': vars or [], # function arguments are static, and this one may be modified by the class
'color': color,
'name': name,
'ext_in': Utils.to_list(ext_in),
'ext_out': Utils.to_list(ext_out),
'before': Utils.to_list(before),
'after': Utils.to_list(after),
'shell': shell,
'scan': scan,
}
if isinstance(func, str):
params['run_str'] = func
else:
params['run'] = func
cls = type(Task)(name, (Task,), params)
global classes
classes[name] = cls
return cls
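# Hedged example (not part of the original module): building a trivial task class from a
# rule string; the class name 'copy' and the ${CP} variable are illustrative only.
#
#     copy_cls = task_factory('copy', '${CP} ${SRC} ${TGT}', vars=['CP'], color='BLUE')
#     # the rule is compiled without a shell by default (shell=False)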
def always_run(cls):
"""
Task class decorator
Set all task instances of this class to be executed whenever a build is started
The task signature is calculated, but the result of the comparison between
task signatures is bypassed
"""
old = cls.runnable_status
def always(self):
ret = old(self)
if ret == SKIP_ME:
ret = RUN_ME
return ret
cls.runnable_status = always
return cls
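# Hedged example (not part of the original module): forcing a task class to run on every
# build; the class name and rule below are illustrative only.
#
#     @always_run
#     class version_header(Task):
#         run_str = '${PYTHON} gen_version.py ${TGT}'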
def update_outputs(cls):
"""
Task class decorator
Use this decorator if you want to create files in the source directory. For example, to keep *foo.txt* in the source
directory, create it first and declare::
def build(bld):
bld(rule='cp ${SRC} ${TGT}', source='wscript', target='foo.txt', update_outputs=True)
"""
old_post_run = cls.post_run
def post_run(self):
old_post_run(self)
for node in self.outputs:
node.sig = node.cache_sig = Utils.h_file(node.abspath())
self.generator.bld.task_sigs[node.abspath()] = self.uid() # issue #1017
cls.post_run = post_run
old_runnable_status = cls.runnable_status
def runnable_status(self):
status = old_runnable_status(self)
if status != RUN_ME:
return status
try:
# by default, we check that the output nodes have the signature of the task
# perform a second check, returning 'SKIP_ME' as we are expecting that
# the signatures do not match
bld = self.generator.bld
prev_sig = bld.task_sigs[self.uid()]
if prev_sig == self.signature():
for x in self.outputs:
if not x.sig or bld.task_sigs[x.abspath()] != self.uid():
return RUN_ME
return SKIP_ME
except KeyError:
pass
except IndexError:
pass
except AttributeError:
pass
return RUN_ME
cls.runnable_status = runnable_status
return cls
|
"""Tests for the `dictionaries` module."""
from __future__ import division, print_function, absolute_import
import foampy
from foampy.dictionaries import *
def test_replace_value():
"""Test the `replace_value` function."""
print("\nTesting dictionaries.replace_value")
orig = read_single_line_value(dictname="blockMeshDict",
keyword="convertToMeters",
casedir="./test", dtype=int)
replace_value("test/system/blockMeshDict", "convertToMeters", 555)
assert read_single_line_value(dictname="blockMeshDict",
keyword="convertToMeters",
casedir="./test") == 555
replace_value("test/system/blockMeshDict", "convertToMeters", orig)
assert read_single_line_value(dictname="blockMeshDict",
keyword="convertToMeters",
casedir="./test") == orig
def test_build_header():
"""Test the `dictionaries.build_header` function."""
print("\nTesting dictionaries.build_header")
h = build_header("blockMeshDict", incl_foamfile=True)
print(h)
assert h == r"""/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 2.3.x |
| \\ / A nd | Web: www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object blockMeshDict;
}"""
h = build_header("blockMeshDict", incl_foamfile=False)
print(h)
assert h == r"""/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 2.3.x |
| \\ / A nd | Web: www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/"""
|
import os
from time import sleep
#
# The azure library provides access to services made available by the
# Microsoft Azure platform, such as storage and messaging.
#
# See https://go.microsoft.com/fwlink/?linkid=254360 for documentation and
# example code.
#
from azure.servicebus import ServiceBusService
from azure.storage import CloudStorageAccount
#
# The CloudStorageAccount provides factory methods for the queue, table, and
# blob services.
#
# See https://go.microsoft.com/fwlink/?linkid=246933 for Storage documentation.
#
STORAGE_ACCOUNT_NAME = '__paste_your_storage_account_name_here__'
STORAGE_ACCOUNT_KEY = '__paste_your_storage_key_here__'
if os.environ.get('EMULATED', '').lower() == 'true':
# Running in the emulator, so use the development storage account
storage_account = CloudStorageAccount(None, None)
else:
storage_account = CloudStorageAccount(STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY)
blob_service = storage_account.create_blob_service()
table_service = storage_account.create_table_service()
queue_service = storage_account.create_queue_service()
#
# Service Bus is a messaging solution for applications. It sits between
# components of your applications and enables them to exchange messages in a
# loosely coupled way for improved scale and resiliency.
#
# See https://go.microsoft.com/fwlink/?linkid=246934 for Service Bus documentation.
#
SERVICE_BUS_NAMESPACE = '__paste_your_service_bus_namespace_here__'
SERVICE_BUS_KEY = '__paste_your_service_bus_key_here__'
bus_service = ServiceBusService(SERVICE_BUS_NAMESPACE, SERVICE_BUS_KEY, issuer='owner')
if __name__ == '__main__':
while True:
#
# Write your worker process here.
#
# You will probably want to call a blocking function such as
# bus_service.receive_queue_message('queue name', timeout=seconds)
# to avoid consuming 100% CPU time while your worker has no work.
#
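#
# Hedged sketch (not part of the template): a typical loop body receives a message from a
# queue and deletes it once handled; 'taskqueue' and process() are placeholders, not names
# defined elsewhere in this file.
#
#     msg = bus_service.receive_queue_message('taskqueue', peek_lock=True, timeout=60)
#     if msg.body is not None:
#         process(msg.body)
#         msg.delete()
#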
sleep(1.0)
|
import torch.nn as nn
from ..registry import HEADS
from ..utils import ConvModule
from mmdetection.core import auto_fp16
@HEADS.register_module
class MGANHead(nn.Module):
def __init__(self,
num_convs=2,
roi_feat_size=7,
in_channels=512,
conv_out_channels=512,
conv_cfg=None,
norm_cfg=None):
super(MGANHead, self).__init__()
self.num_convs = num_convs
self.roi_feat_size = roi_feat_size
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
logits_in_channel = self.conv_out_channels
self.conv_logits = nn.Conv2d(logits_in_channel, 1, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
x = self.conv_logits(x).sigmoid() * x
return x
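# Hedged usage sketch (not part of the original module): with the default arguments the
# head maps RoI features of shape (N, 512, 7, 7) to a mask-gated feature map of the same
# shape; the random tensor below is illustrative only.
#
#     import torch
#     head = MGANHead()
#     gated = head(torch.rand(2, 512, 7, 7))   # -> torch.Size([2, 512, 7, 7])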
|
import redis
host = '127.0.0.1'
port = 6379
client = redis.Redis(host=host, port=port)
# positional args map to ex=None (no TTL in seconds) and px=1000 (expire after 1000 ms)
client.set("redis", "value111", None, 1000)
print(str(client.get("redis")))
# get() returns None for a missing key, so test identity instead of comparing with !=
value = client.get("redis1")
if value is not None and len(value) > 0:
    print("sss")
else:
    print("no")
|
from onegov.ballot import Ballot
from onegov.ballot import BallotCollection
from onegov.ballot import Candidate
from onegov.ballot import CandidateCollection
from onegov.ballot import Election
from onegov.ballot import ElectionCollection
from onegov.ballot import ElectionCompound
from onegov.ballot import ElectionCompoundCollection
from onegov.ballot import List
from onegov.ballot import ListCollection
from onegov.ballot import Vote
from onegov.ballot import VoteCollection
from onegov.core.converters import extended_date_converter
from onegov.core.i18n import SiteLocale
from onegov.election_day import ElectionDayApp
from onegov.election_day.collections import ArchivedResultCollection
from onegov.election_day.collections import DataSourceCollection
from onegov.election_day.collections import DataSourceItemCollection
from onegov.election_day.collections import EmailSubscriberCollection
from onegov.election_day.collections import ScreenCollection
from onegov.election_day.collections import SearchableArchivedResultCollection
from onegov.election_day.collections import SmsSubscriberCollection
from onegov.election_day.collections import SubscriberCollection
from onegov.election_day.collections import UploadTokenCollection
from onegov.election_day.models import DataSource
from onegov.election_day.models import DataSourceItem
from onegov.election_day.models import Principal
from onegov.election_day.models import Screen
from onegov.election_day.models import Subscriber
from onegov.election_day.models import UploadToken
from onegov.user import Auth
from uuid import UUID
@ElectionDayApp.path(
model=Auth,
path='/auth'
)
def get_auth(request, to='/'):
return Auth.from_request(request, to)
@ElectionDayApp.path(
model=Principal,
path='/'
)
def get_principal(app):
return app.principal
@ElectionDayApp.path(
model=ElectionCollection,
path='/manage/elections',
converters=dict(
page=int
)
)
def get_manage_elections(app, page=0):
return ElectionCollection(app.session(), page=page)
@ElectionDayApp.path(
model=ElectionCompoundCollection,
path='/manage/election-compounds',
converters=dict(
page=int
)
)
def get_manage_election_compounds(app, page=0):
return ElectionCompoundCollection(app.session(), page=page)
@ElectionDayApp.path(
model=VoteCollection,
path='/manage/votes',
converters=dict(
page=int
)
)
def get_manage_votes(app, page=0):
return VoteCollection(app.session(), page=page)
@ElectionDayApp.path(
model=SmsSubscriberCollection,
path='/manage/subscribers/sms',
converters=dict(
page=int
)
)
def get_manage_sms_subscribers(app, page=0, term=None):
return SmsSubscriberCollection(app.session(), page=page, term=term)
@ElectionDayApp.path(
model=EmailSubscriberCollection,
path='/manage/subscribers/email',
converters=dict(
page=int
)
)
def get_manage_email_subscribers(app, page=0, term=None):
return EmailSubscriberCollection(app.session(), page=page, term=term)
@ElectionDayApp.path(
model=UploadTokenCollection,
path='/manage/upload-tokens'
)
def get_manage_upload_tokens(app):
return UploadTokenCollection(app.session())
@ElectionDayApp.path(
model=DataSourceCollection,
path='/manage/sources',
converters=dict(
page=int
)
)
def get_manage_data_sources(app, page=0):
return DataSourceCollection(app.session(), page=page)
@ElectionDayApp.path(
model=DataSourceItemCollection,
path='/manage/source/{id}/items',
converters=dict(
id=UUID,
page=int
)
)
def get_manage_data_source_items(app, id, page=0):
return DataSourceItemCollection(app.session(), id, page=page)
@ElectionDayApp.path(
model=Election,
path='/election/{id}',
)
def get_election(app, id):
return ElectionCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=Candidate,
path='/candidate/{id}',
converters=dict(
id=UUID
)
)
def get_candidate(app, id):
return CandidateCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=List,
path='/list/{id}',
converters=dict(
id=UUID
)
)
def get_list(app, id):
return ListCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=ElectionCompound,
path='/elections/{id}'
)
def get_election_compound(app, id):
return ElectionCompoundCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=Vote,
path='/vote/{id}'
)
def get_vote(app, id):
return VoteCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=Ballot,
path='/ballot/{id}',
converters=dict(
id=UUID
)
)
def get_ballot(app, id):
return BallotCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=Subscriber,
path='/subscriber/{id}',
converters=dict(
id=UUID
)
)
def get_subscriber(app, id):
return SubscriberCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=UploadToken,
path='/upload-token/{id}',
converters=dict(
id=UUID
)
)
def get_upload_token(app, id):
return UploadTokenCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=DataSource,
path='/data-source/{id}',
converters=dict(
id=UUID
)
)
def get_data_source(app, id):
return DataSourceCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=DataSourceItem,
path='/data-source-item/{id}',
converters=dict(
id=UUID
)
)
def get_data_source_item(app, id):
return DataSourceItemCollection(app.session()).by_id(id)
@ElectionDayApp.path(
model=ArchivedResultCollection,
path='/archive/{date}'
)
def get_archive_by_year(app, date):
return ArchivedResultCollection(app.session(), date)
@ElectionDayApp.path(
model=SearchableArchivedResultCollection,
path='archive-search/{item_type}',
converters=dict(
from_date=extended_date_converter,
to_date=extended_date_converter,
domains=[str],
answers=[str],
page=int
)
)
def get_archive_search(
app,
from_date=None,
to_date=None,
answers=None,
item_type=None,
domains=None,
term=None,
page=0
):
return SearchableArchivedResultCollection.for_item_type(
app.session(),
item_type,
to_date=to_date,
from_date=from_date,
answers=answers,
domains=domains,
term=term,
page=page
)
@ElectionDayApp.path(
model=SiteLocale,
path='/locale/{locale}'
)
def get_locale(request, app, locale, to=None):
to = to or request.link(app.principal)
return SiteLocale.for_path(app, locale, to)
@ElectionDayApp.path(
model=ScreenCollection,
path='/manage/screens',
converters=dict(
page=int
)
)
def get_manage_screens(app, page=0):
return ScreenCollection(app.session(), page)
@ElectionDayApp.path(
model=Screen,
path='/screen/{number}',
converters=dict(
number=int
)
)
def get_screen(app, number):
return ScreenCollection(app.session()).by_number(number)
|
#!/usr/bin/env python3
import os
import math
from numbers import Number
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.drive_helpers import get_lag_adjusted_curvature
from selfdrive.controls.lib.longcontrol import LongControl
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI, EON
from selfdrive.manager.process_config import managed_processes
SOFT_DISABLE_TIME = 3 # seconds
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees
REPLAY = "REPLAY" in os.environ
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned",
"logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"} | \
{k for k, v in managed_processes.items() if not v.enabled}
ACTUATOR_FIELDS = set(car.CarControl.Actuators.schema.fields.keys())
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
ButtonEvent = car.CarState.ButtonEvent
SafetyModel = car.CarParams.SafetyModel
IGNORED_SAFETY_MODES = [SafetyModel.silent, SafetyModel.noOutput]
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.camera_packets = ["roadCameraState", "driverCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
params = Params()
self.joystick_mode = params.get_bool("JoystickDebugMode")
joystick_packet = ['testJoystick'] if self.joystick_mode else []
self.sm = sm
if self.sm is None:
ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState'] + self.camera_packets + joystick_packet,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
if TICI:
self.log_sock = messaging.sub_sock('androidLog')
# wait for one pandaState and one CAN packet
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])
# read params
self.is_metric = params.get_bool("IsMetric")
self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
community_feature_toggle = params.get_bool("CommunityFeaturesToggle")
openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
passive = params.get_bool("Passive") or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
controller_available = self.CI.CC is not None and not passive and not self.CP.dashcamOnly
community_feature = self.CP.communityFeature or \
self.CP.fingerprintSource == car.CarParams.FingerprintSource.can
community_feature_disallowed = community_feature and (not community_feature_toggle)
self.read_only = not car_recognized or not controller_available or \
self.CP.dashcamOnly or community_feature_disallowed
if self.read_only:
safety_config = car.CarParams.SafetyConfig.new_message()
safety_config.safetyModel = car.CarParams.SafetyModel.noOutput
self.CP.safetyConfigs = [safety_config]
# Write CarParams for radard
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP)
self.VM = VehicleModel(self.CP)
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.LaC = LatControlAngle(self.CP)
elif self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP)
self.initialized = False
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.cruise_mismatch_counter = 0
self.can_rcv_error_counter = 0
self.last_blinker_frame = 0
self.saturated_count = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
self.button_timers = {ButtonEvent.Type.decelCruise: 0, ButtonEvent.Type.accelCruise: 0}
self.last_actuators = car.CarControl.Actuators.new_message()
# TODO: no longer necessary, aside from process replay
self.sm['liveParameters'].valid = True
self.startup_event = get_startup_event(car_recognized, controller_available, len(self.CP.carFw) > 0)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if community_feature_disallowed and car_recognized and not self.CP.dashcamOnly:
self.events.add(EventName.communityFeatureDisallowed, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
if len(self.CP.carFw) > 0:
set_offroad_alert("Offroad_CarUnrecognized", True)
else:
set_offroad_alert("Offroad_NoFirmware", True)
elif self.read_only:
self.events.add(EventName.dashcamMode, static=True)
elif self.joystick_mode:
self.events.add(EventName.joystickDebug, static=True)
self.startup_event = None
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
# Create events for battery, temperature, disk space, and memory
if EON and (self.sm['peripheralState'].pandaType != PandaType.uno) and \
self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, OP should not be allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7 and not SIMULATION:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
# TODO: make tici threshold the same
if self.sm['deviceState'].memoryUsagePercent > (90 if TICI else 65) and not SIMULATION:
self.events.add(EventName.lowMemory)
# TODO: enable this once loggerd CPU usage is more reasonable
#cpus = list(self.sm['deviceState'].cpuUsagePercent)[:(-1 if EON else None)]
#if max(cpus, default=0) > 95 and not SIMULATION:
# self.events.add(EventName.highCpuUsage)
# Alert if fan isn't spinning for 5 seconds
if self.sm['peripheralState'].pandaType in [PandaType.uno, PandaType.dos]:
if self.sm['peripheralState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
if not CS.canValid:
self.events.add(EventName.canError)
for i, pandaState in enumerate(self.sm['pandaStates']):
# All pandas must match the list of safetyConfigs, and if outside this list, must be silent or noOutput
if i < len(self.CP.safetyConfigs):
safety_mismatch = pandaState.safetyModel != self.CP.safetyConfigs[i].safetyModel or pandaState.safetyParam != self.CP.safetyConfigs[i].safetyParam
else:
safety_mismatch = pandaState.safetyModel not in IGNORED_SAFETY_MODES
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
self.events.add(EventName.relayMalfunction)
# Check for HW or system issues
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid["pandaStates"]:
self.events.add(EventName.usbError)
elif not self.sm.all_alive_and_valid() or self.can_rcv_error:
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
invalid = [s for s, valid in self.sm.valid.items() if not valid]
not_alive = [s for s, alive in self.sm.alive.items() if not alive]
cloudlog.event("commIssue", invalid=invalid, not_alive=not_alive)
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if not self.sm['lateralPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
for pandaState in self.sm['pandaStates']:
if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
self.events.add(EventName.relayMalfunction)
if not REPLAY:
# Check for mismatch between openpilot and car's PCM
cruise_mismatch = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
self.cruise_mismatch_counter = self.cruise_mismatch_counter + 1 if cruise_mismatch else 0
if self.cruise_mismatch_counter > int(3. / DT_CTRL):
self.events.add(EventName.cruiseMismatch)
# Check for FCW
stock_long_is_braking = self.enabled and not self.CP.openpilotLongitudinalControl and CS.aEgo < -1.5
model_fcw = self.sm['modelV2'].meta.hardBrakePredicted and not CS.brakePressed and not stock_long_is_braking
planner_fcw = self.sm['longitudinalPlan'].fcw and self.enabled
if planner_fcw or model_fcw:
self.events.add(EventName.fcw)
if TICI:
logs = messaging.drain_sock(self.log_sock, wait_for_one=False)
messages = []
for m in logs:
try:
messages.append(m.androidLog.message)
except UnicodeDecodeError:
pass
for err in ["ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED"]:
for m in messages:
if err not in m:
continue
csid = m.split("CSID:")[-1].split(" ")[0]
evt = {"0": EventName.roadCameraError, "1": EventName.wideRoadCameraError,
"2": EventName.driverCameraError}.get(csid, None)
if evt is not None:
self.events.add(evt)
# TODO: fix simulator
if not SIMULATION:
if not NOSENSOR:
if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000):
# Don't show in the first 1 km to allow for driving out of a garage. This event shows after 5 minutes
self.events.add(EventName.noGps)
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
if self.sm['liveLocationKalman'].excessiveResets:
self.events.add(EventName.localizerMalfunction)
# Check if all manager processes are running
not_running = {p.name for p in self.sm['managerState'].processes if not p.running}
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
speeds = self.sm['longitudinalPlan'].speeds
if len(speeds) > 1:
v_future = speeds[-1]
else:
v_future = 100.0
if CS.brakePressed and v_future >= self.CP.vEgoStarting \
and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
all_valid = CS.canValid and self.sm.all_alive_and_valid()
if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 3.5 or SIMULATION):
if not self.read_only:
self.CI.init(self.CP, self.can_sock, self.pm.sock['sendcan'])
self.initialized = True
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_rcv_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However, the panda status arrives on a different
# socket than the CAN messages, and one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples, then we trigger the disengagement.
if not self.enabled:
self.mismatch_counter = 0
# All pandas not in silent mode must have controlsAllowed when openpilot is enabled
if any(not ps.controlsAllowed and self.enabled for ps in self.sm['pandaStates']
if ps.safetyModel not in IGNORED_SAFETY_MODES):
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not self.CP.pcmCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.button_timers, self.enabled, self.is_metric)
elif self.CP.pcmCruise and CS.cruiseState.enabled:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# decrement the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL)
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
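# Added summary comment (not from the original source) of the transitions above:
#   disabled      -> preEnabled or enabled on ET.ENABLE (blocked by ET.NO_ENTRY)
#   enabled       -> softDisabling on ET.SOFT_DISABLE; disabled on USER/IMMEDIATE_DISABLE
#   softDisabling -> back to enabled when the condition clears; disabled when the timer expires
#   preEnabled    -> enabled once ET.PRE_ENABLE clears
# 'active' means enabled or softDisabling; 'enabled' additionally includes preEnabled.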
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
# Update VehicleModel
params = self.sm['liveParameters']
x = max(params.stiffnessFactor, 0.1)
sr = max(params.steerRatio, 0.1)
self.VM.update_params(x, sr)
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
actuators.longControlState = self.LoC.long_control_state
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
if not self.joystick_mode:
# accel PID loop
pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_kph * CV.KPH_TO_MS)
actuators.accel = self.LoC.update(self.active, CS, self.CP, long_plan, pid_accel_limits)
# Steering PID loop and lateral MPC
lat_active = self.active and not CS.steerWarning and not CS.steerError and CS.vEgo > self.CP.minSteerSpeed
desired_curvature, desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
lat_plan.psis,
lat_plan.curvatures,
lat_plan.curvatureRates)
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(lat_active, CS, self.CP, self.VM, params, self.last_actuators,
desired_curvature, desired_curvature_rate)
else:
lac_log = log.ControlsState.LateralDebugState.new_message()
if self.sm.rcv_frame['testJoystick'] > 0 and self.active:
actuators.accel = 4.0*clip(self.sm['testJoystick'].axes[0], -1, 1)
steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
# max angle is 45 for angle-based cars
actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.
lac_log.active = True
lac_log.steeringAngleDeg = CS.steeringAngleDeg
lac_log.output = steer
lac_log.saturated = abs(steer) >= 0.9
# Check for difference between desired angle and angle for angle based control
angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
if angle_control_saturated and not CS.steeringPressed and self.active:
self.saturated_count += 1
else:
self.saturated_count = 0
# Send a "steering required alert" if saturation count has reached the limit
if (lac_log.saturated and not CS.steeringPressed) or \
(self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
if len(lat_plan.dPathPoints):
# Check if we deviated from the path
# TODO use desired vs actual curvature
left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.20
right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.20
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
# Ensure no NaNs/Infs
for p in ACTUATOR_FIELDS:
attr = getattr(actuators, p)
if not isinstance(attr, Number):
continue
if not math.isfinite(attr):
cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
setattr(actuators, p, 0.0)
return actuators, lac_log
def update_button_timers(self, buttonEvents):
# increment timer for buttons still pressed
for k in self.button_timers.keys():
if self.button_timers[k] > 0:
self.button_timers[k] += 1
for b in buttonEvents:
if b.type.raw in self.button_timers:
self.button_timers[b.type.raw] = 1 if b.pressed else 0
def publish_logs(self, CS, start_time, actuators, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.active = self.active
CC.actuators = actuators
orientation_value = self.sm['liveLocationKalman'].orientationNED.value
if len(orientation_value) > 2:
CC.roll = orientation_value[0]
CC.pitch = orientation_value[1]
CC.cruiseControl.cancel = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
if self.joystick_mode and self.sm.rcv_frame['testJoystick'] > 0 and self.sm['testJoystick'].buttons[0]:
CC.cruiseControl.cancel = True
hudControl = CC.hudControl
hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
hudControl.speedVisible = self.enabled
hudControl.lanesVisible = self.enabled
hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
hudControl.rightLaneVisible = True
hudControl.leftLaneVisible = True
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
model_v2 = self.sm['modelV2']
desire_prediction = model_v2.meta.desirePrediction
if len(desire_prediction) and ldw_allowed:
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
l_lane_change_prob = desire_prediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = desire_prediction[Desire.laneChangeRight - 1]
lane_lines = model_v2.laneLines
l_lane_close = left_lane_visible and (lane_lines[1].y[0] > -(1.08 + CAMERA_OFFSET))
r_lane_close = right_lane_visible and (lane_lines[2].y[0] < (1.08 - CAMERA_OFFSET))
hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if hudControl.rightLaneDepart or hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric, self.soft_disable_timer])
self.AM.add_many(self.sm.frame, alerts)
self.AM.process_alerts(self.sm.frame, clear_event)
hudControl.visualAlert = self.AM.visual_alert
if not self.read_only and self.initialized:
# send car controls over can
self.last_actuators, can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
CC.actuatorsOutput = self.last_actuators
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
# Curvature & Steering angle
params = self.sm['liveParameters']
steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetDeg)
curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo, params.roll)
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
controlsState.alertText1 = self.AM.alert_text_1
controlsState.alertText2 = self.AM.alert_text_2
controlsState.alertSize = self.AM.alert_size
controlsState.alertStatus = self.AM.alert_status
controlsState.alertBlinkingRate = self.AM.alert_rate
controlsState.alertType = self.AM.alert_type
controlsState.alertSound = self.AM.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = curvature
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_rcv_error_counter
lat_tuning = self.CP.lateralTuning.which()
if self.joystick_mode:
controlsState.lateralControlState.debugState = lac_log
elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
controlsState.lateralControlState.angleState = lac_log
elif lat_tuning == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif lat_tuning == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif lat_tuning == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only and self.initialized:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, lac_log)
self.prof.checkpoint("Sent")
self.update_button_timers(CS.buttonEvents)
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
if __name__ == "__main__":
main()
|
with open('log.txt') as f:
text = f.read().lower()
if ('python' in text):
print ('Yes, Python is present')
else:
print ('No, Python is not present')
|
from PIL import Image, ImageTk
import random as rand
import turtle as trtl
# import required module
from playsound import playsound
import tkinter.messagebox
from pynput.keyboard import Key, Listener
import glob
#import prgm_image
# prgm_image_list = []
# for filename in glob.glob('/Users/khaase/Desktop/VroomFold/prgm_gif/'): #assuming gif
# im=prgm_image.open(filename)
# prgm_image_list.append(im)
wn = trtl.Screen()
#si = tk.Tk()
si = trtl.Turtle()
caller = trtl.Turtle()
st = trtl.Turtle()
rSt = trtl.Turtle()
user = trtl.Turtle()
point = trtl.Turtle()
atmpt = trtl.Turtle()
frac = trtl.Turtle()
#score = trtl.Turtle()
count = 0
rP = False #flag set when the restart btn is pressed
alpha = False
digit = False
caller_list = ['abrupt stop', 'speed bump','right','left','go']
caller_txt = []
#Message ="Abrupt stop = DOWN speed bump = SHIFT right = RIGHT left = LEFT go =UP"
#tkinter.messagebox.showinfo('Directions','Press "Go" to start\nabrupt stop = DOWN\nspeed bump = Return\n right = RIGHT\n left = LEFT\n go =UP\n')
#playsound('vvvcopy.wav', False)
attempt = 0
frac.ht()
atmpt.ht()
user_txt = ''
lead = ''
#wn = tk.Tk()
#wn.screensize("400x400")
# --- Window Creator ---
wn.title("Vroom Vroom: BTS EditionT")
#wn.window_height(150)
wn.setup(height=500,width=500)
wn.bgpic('prgm_image/runbtsback.gif')
wn_msg = trtl.textinput('Directions','Press "Go" to start\nabrupt stop = DOWN\nspeed bump = Return\n right = RIGHT\n left = LEFT\n go =UP\nEnter your Name for Leaderboard')
for m in wn_msg:
if m.isalpha():
alpha = True
if m.isdigit():
digit = True
# re-prompt until the player enters a usable (non-empty, non-numeric) name for the leaderboard
while not wn_msg or wn_msg.isdigit():
    wn_msg = trtl.textinput('Directions','Press "Go" to start\nabrupt stop = DOWN\nspeed bump = Return\n right = RIGHT\n left = LEFT\n go =UP\nEnter your Name for Leaderboard')
msg_input = str(wn_msg)
#caller_img ="huh_resize.gif"
#user_label = Label(wn,prgm_image=caller_img)
# ---prgm_images ---
as_img = 'prgm_image/as_resize.gif'
wn.addshape(as_img)
sb_img = "prgm_image/vSb_resize_tsp.gif"
wn.addshape(sb_img)
r_img = "prgm_image/right_resize.gif"
wn.addshape(r_img)
l_img = "prgm_image/vL_resize_tsp.gif"
wn.addshape(l_img)
go_img = "prgm_image/go_resize.gif"
wn.addshape(go_img)
caller_img = "prgm_image/huh_resize.gif"
wn.addshape(caller_img)
restart_pic = "prgm_image/restart_resize_tsp.gif"
wn.addshape(restart_pic)
start_pic = "prgm_image/st_resized.gif"
wn.addshape(start_pic)
user_pic = "prgm_image/plyr_resize_tsp.gif"
wn.addshape(user_pic)
point.ht()
#caller.ht()
# --- Functions ---
x = -191
y = 180
caller.pu()
caller.goto(x,y)
si.pu()
si.ht()
si.goto(-120,150)
rSt.shape(restart_pic)
rSt.pu()
rSt.goto(0,180)
rSt.ht()
st.shape(start_pic)
st.pu()
st.goto(0,180)
user.shape(user_pic)
user.pu()
user.goto(0,-50)
point.pu()
point.goto(145,176)
atmpt.pu()
atmpt.goto(160,160)
frac.pu()
frac.goto(165,188)
frac.pd()
frac.goto(150,168)
def startPress(x, y):
global attempt, count
global rP
point.clear()
caller.shape(caller_img);user.shape(user_pic)
#caller.st()
st.ht()
rSt.st()
#print('playing sound using native player')
#playsound('vvvcopy.wav')
attempt = 0
count = 0
wn.delay(100)
si.clear()
rP = False
# callerChoose()
gameMain()
# callerSoundOs()
def rStPress(x, y):
global point
global count
global attempt, rP, wn_msg, user
#print("restart cliicked")
caller.shape(caller_img)
rP = True
rSt.ht()
rst_trace(rP)
# st.st()
si.clear();point.clear();atmpt.clear()
#attempt = 0; count = 0
user.shape(user_pic)
wn_msg = ''
wn_msg = trtl.textinput('Leaderboard','Enter your Name to add to the Leaderboard')
si.clear()
wn.delay(500)
# while rP == True:
# break
# if rP == False:
# print("end")
gameMain()
# callerChoose()
st.ht()
rSt.st()
# gameMain()
def callerChoose():
global point
global caller_txt
si.ht()
caller_txt = rand.choice(caller_list)
si.write(caller_txt,font=("Arial",15))
print(caller_txt)
callerSoundOs()
wn.delay(10)
playsound('audio/vvvSoftfade.wav', False)
wn.delay(100)
whilePoint()
point.clear()
point.ht()
print ('point: ', count)
#wn.delay(10)
#si.ht()
def usL():
global user_txt
user_txt = 'left'
user.shape(l_img)
def usR():
global user_txt
user_txt = 'right'
user.shape(r_img)
def usAS():
global user_txt
user_txt = 'abrupt stop'
user.shape(as_img)
def usSB():
global user_txt
user_txt = 'speed bump'
user.shape(sb_img)
def usGo():
global user_txt
user_txt = 'go'
user.shape(go_img)
def callerSoundOs():
global caller_txt
#print("cSOs")
#caller_pic = "huh_resize.gif"
if caller_txt == caller_list[0]:
#print("ab")
cAs(),playsound('audio/vDa_ASSoft.wav')
elif caller_txt == caller_list[1]:
#print("sb")
cSb(),playsound('audio/vS_sbsoft.wav')
elif caller_txt == caller_list[2]:
#print("r")
cR(),playsound('audio/vRsoft.wav')
elif caller_txt == caller_list[3]:
#print("l")
cL(),playsound('audio/vLsoft.wav')
#vroomVroom_wn.addshape(caller_pic)
#caller.shape(caller_pic)
elif caller_txt == caller_list[4]:
#print("g")
cGo(),playsound('audio/vUp_goSoft.wav')
#user change
def abruptStop():
global user_txt
user.shape(as_img)
print('user',user_txt)
usAS()
def speedBump():
global user_txt
user.shape(sb_img)
print('user',user_txt)
usSB()
def rightTurn():
global user_txt
user.shape(r_img)
print('user',user_txt)
usR()
def leftTurn():
global user_txt
user.shape(l_img)
print('user',user_txt)
usL()
def goFD():
global user_txt
user.shape(go_img)
print('user',user_txt)
usGo()
#caller change
def cAs():
caller.shape(as_img)
print('caller',caller_txt)
def cSb():
caller.shape(sb_img)
print('caller',caller_txt)
def cR():
caller.shape(r_img)
print('caller',caller_txt)
def cL():
caller.shape(l_img)
print('caller',caller_txt)
def cGo():
caller.shape(go_img)
print('caller',caller_txt)
def whilePoint():
global count, attempt
global user_txt, caller_txt
# wn.delay(200)
# print(user_txt,"wP")
# print(caller_txt,"wP")
point.ht()
if user_txt == caller_txt:
wn.delay(150)
attempt += 1
count = int(count)
count += 1
point.clear(); atmpt.clear()
atmpt.write(str(attempt),font=("Arial",15)),point.write(str(count),font=("Arial",15))
wn.delay(50)
user.shape(user_pic)
else:
wn.delay(150)
attempt += 1
count = int(count)
count -= 1
ah = str(count)
point.clear(); atmpt.clear()
atmpt.write(str(attempt),font=("Arial",15)),point.write(ah,font=("Arial",15))
wn.delay(50)
user.shape(user_pic)
#atmpt.clear()
#attempt +=1;#atmpt.write(str(attempt),font=("Arial",15))
#print('attempt',attempt)
si.clear()
point.ht()
gameMain()
def rst_trace(rP):
global lead, count, attempt
if rP:
name = wn_msg
name += ' '
#print("rst_btn pressed")
print(rP)
count = str(count)
attempt = str(attempt)
lead = name + str( '' + count + '/' + attempt + '\n')
print(lead)
f = open("Leaderboard.txt","a") #'password' always have to be a str
f.write(lead) #checks to see if code is a completed string consisting of letters and numbers
f.close()
count = 0
attempt = 0
gameMain()
rSt.st()
# wn.onkeypress(rightTurn,'Right')
# wn.onkeypress(leftTurn,'Left')
# wn.onkeypress(goFD,'Up')
# wn.onkeypress(abruptStop,'Down')
# rP = False
def gameMain():
global count, attempt
caller.shape(caller_img);user.shape(user_pic)
# caller.st()
si.ht()
wn.onkeypress(rightTurn,'Right')
wn.onkeypress(leftTurn,'Left')
wn.onkeypress(goFD,'Up')
wn.onkeypress(abruptStop,'Down')
#point.st()
#caller.shapesize(10)
# for t in attempt:
if attempt < 100:
wn.delay(200)
point.ht()
callerChoose()
point.clear()
atmpt.clear()
else:
print("You've reached 100 attempts: ")
exit()
# break
# count = 0
# attempt = 0
# user.shape(user_pic)
# caller.shape(caller_img)
# point.clear()
# atmpt.clear()
# wn.delay(200)
# callerChoose()
# gameMain()
st.onclick(startPress)
rSt.onclick(rStPress)
wn.onkeypress(speedBump,'Return')
#wn.onkeypress(gameMain,'BackSpace')
wn.listen()
wn.mainloop()
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import shutil
import tempfile
import typing as t
from contextlib import contextmanager
import apache_beam.metrics as metrics
import netCDF4 as nc
import pygrib
from apache_beam.io.filesystems import FileSystems
from .file_name_utils import OutFileInfo
logger = logging.getLogger(__name__)
class SplitKey(t.NamedTuple):
levelType: str
shortname: str
def __str__(self):
if not self.levelType:
return f'field {self.shortname}'
return f'{self.levelType} - field {self.shortname}'
class FileSplitter(abc.ABC):
"""Base class for weather file splitters."""
def __init__(self, input_path: str, output_info: OutFileInfo,
force_split: bool = False, level: int = logging.INFO):
self.input_path = input_path
self.output_info = output_info
self.force_split = force_split
self.logger = logging.getLogger(f'{__name__}.{type(self).__name__}')
self.logger.setLevel(level)
self.logger.debug('Splitter for path=%s, output base=%s',
self.input_path, self.output_info)
@abc.abstractmethod
def split_data(self) -> None:
raise NotImplementedError()
@contextmanager
def _copy_to_local_file(self) -> t.Iterator[t.IO]:
with FileSystems().open(self.input_path) as source_file:
with tempfile.NamedTemporaryFile() as dest_file:
shutil.copyfileobj(source_file, dest_file)
dest_file.flush()
yield dest_file
def _copy_dataset_to_storage(self, src_file: t.IO, target: str):
with FileSystems().create(target) as dest_file:
shutil.copyfileobj(src_file, dest_file)
def _get_output_file_path(self, key: SplitKey) -> str:
split_keys = key._asdict()
if self.output_info.output_dir and key.levelType:
split_keys['levelType'] = f'{key.levelType}_'
return self.output_info.file_name_template.format(**split_keys)
def should_skip(self):
"""Skip splitting if the data was already split."""
if self.force_split:
return False
for match in FileSystems().match([
self._get_output_file_path(SplitKey('', '**')),
self._get_output_file_path(SplitKey('**', '**')),
]):
if len(match.metadata_list) > 0:
return True
return False
class GribSplitter(FileSplitter):
def split_data(self) -> None:
outputs = dict()
if self.should_skip():
metrics.Metrics.counter('file_splitters', 'skipped').inc()
self.logger.info('Skipping %s, file already split.', repr(self.input_path))
return
with self._open_grib_locally() as grbs:
for grb in grbs:
key = SplitKey(grb.typeOfLevel, grb.shortName)
if key not in outputs:
metrics.Metrics.counter('file_splitters',
f'grib: {key}').inc()
outputs[key] = self._open_outfile(key)
outputs[key].write(grb.tostring())
outputs[key].flush()
for out in outputs.values():
out.close()
self.logger.info('split %s into %d files', self.input_path, len(outputs))
@contextmanager
def _open_grib_locally(self) -> t.Iterator[t.Iterator[pygrib.gribmessage]]:
with self._copy_to_local_file() as local_file:
yield pygrib.open(local_file.name)
def _open_outfile(self, key: SplitKey):
return FileSystems.create(self._get_output_file_path(key))
class NetCdfSplitter(FileSplitter):
def split_data(self) -> None:
if self.should_skip():
metrics.Metrics.counter('file_splitters', 'skipped').inc()
self.logger.info('Skipping %s, file already split.', repr(self.input_path))
return
with self._open_dataset_locally() as nc_data:
fields = [var for var in nc_data.variables.keys() if
var not in nc_data.dimensions.keys()]
for field in fields:
self._create_netcdf_dataset_for_variable(nc_data, field)
self.logger.info('split %s into %d files', self.input_path, len(fields))
@contextmanager
def _open_dataset_locally(self) -> t.Iterator[nc.Dataset]:
with self._copy_to_local_file() as local_file:
yield nc.Dataset(local_file.name, 'r')
def _create_netcdf_dataset_for_variable(self, dataset: nc.Dataset,
variable: str) -> None:
metrics.Metrics.counter('file_splitters',
f'netcdf output for {variable}').inc()
with tempfile.NamedTemporaryFile() as temp_file:
with nc.Dataset(temp_file.name, 'w',
format=dataset.file_format) as dest:
dest.setncatts(dataset.__dict__)
for name, dim in dataset.dimensions.items():
dest.createDimension(
name,
(len(dim) if not dim.isunlimited() else None))
include = [var for var in dataset.dimensions.keys()]
include.append(variable)
for name, var in dataset.variables.items():
if name in include:
var = dataset.variables[name]
dest.createVariable(name, var.datatype, var.dimensions)
# copy variable attributes all at once via dictionary
dest[name].setncatts(dataset[name].__dict__)
dest[name][:] = dataset[name][:]
temp_file.flush()
self._copy_dataset_to_storage(temp_file,
self._get_output_file_path(
SplitKey('', variable)))
class DrySplitter(FileSplitter):
def split_data(self) -> None:
self.logger.info('input file: %s - output scheme: %s',
self.input_path, self._get_output_file_path(SplitKey('level', 'shortname')))
def get_splitter(file_path: str, output_info: OutFileInfo, dry_run: bool, force_split: bool = False) -> FileSplitter:
if dry_run:
return DrySplitter(file_path, output_info)
with FileSystems.open(file_path) as f:
header = f.read(4)
if b'GRIB' in header:
metrics.Metrics.counter('get_splitter', 'grib').inc()
return GribSplitter(file_path, output_info, force_split)
# See the NetCDF Spec docs:
# https://docs.unidata.ucar.edu/netcdf-c/current/faq.html#How-can-I-tell-which-format-a-netCDF-file-uses
if b'CDF' in header or b'HDF' in header:
metrics.Metrics.counter('get_splitter', 'netcdf').inc()
return NetCdfSplitter(file_path, output_info, force_split)
raise ValueError(f'cannot determine if file {file_path!r} is Grib or NetCDF.')
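# Usage sketch (illustrative only): OutFileInfo is defined in file_name_utils and
# its exact constructor is not shown here, so `out_info` below is assumed to carry
# a file_name_template with '{levelType}' and '{shortname}' placeholders. Format
# detection relies on the magic bytes checked above (b'GRIB' for GRIB,
# b'CDF'/b'HDF' for NetCDF).
#
#   splitter = get_splitter('gs://bucket/era5_sample.grib', out_info, dry_run=False)
#   splitter.split_data()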
|
"""
Contains various data structures used by Bionic's infrastructure.
"""
import attr
from .utils.misc import ImmutableSequence, ImmutableMapping
@attr.s(frozen=True)
class EntityDefinition:
"""
Describes the immutable properties of an entity. These properties generally have
to do with the entity's "contract": the assumptions other parts of the system can
make about its value. However, this does *not* include the way the entity's value
is determined; this is configured separately and can be changed more easily.
Attributes
----------
name: string
The name of the entity.
protocol: Protocol
The protocol to use when serializing and deserializing entity values on disk.
doc: string
A human-readable description of the entity.
optional_should_memoize: boolean or None
Whether the entity should be memoized, or None if the global default should be
used.
optional_should_persist: boolean or None
Whether the entity should be persisted, or None if the global default should be
        used.
needs_caching: boolean
Indicates that some kind of caching needs to be enabled for this entity (either
persistence or memoization).
"""
name = attr.ib()
protocol = attr.ib()
doc = attr.ib()
optional_should_memoize = attr.ib()
optional_should_persist = attr.ib()
needs_caching = attr.ib(default=False)
@attr.s(frozen=True)
class DescriptorMetadata:
"""
Holds extra data we might need when working with a descriptor.
Similar to an EntityDefinition, but can apply to non-entity descriptors, and also
incorporates information from the global configuration. (For example,
EntityDefinition has an `optional_should_memoize` field which describes the
user's memoization preferences, if any; this class has a `should_memoize` field
which describes what we'll actually do, based on both user preferences and the
global configuration.)
Attributes
----------
protocol: Protocol
The protocol to use when serializing and deserializing descriptor values on
disk.
doc: string
A human-readable description of the descriptor.
should_memoize: boolean
Whether the value should be memoized for the lifetime of its Flow instance.
should_memoize_for_query: boolean
Whether the value should be memoized for the lifetime of a Flow.get() call.
(Only relevant if ``should_memoize`` is False.)
should_persist: boolean
Whether the value should be persisted.
is_composite: boolean
Whether the value contains other descriptor values. (If so, it's desirable to
get it out of memory quickly.)
"""
protocol = attr.ib()
doc = attr.ib()
should_memoize = attr.ib(default=False)
should_memoize_for_query = attr.ib(default=False)
should_persist = attr.ib(default=False)
is_composite = attr.ib(default=True)
@attr.s(frozen=True)
class TaskKey:
"""
A unique identifier for a Task.
"""
dnode = attr.ib()
case_key = attr.ib()
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
def __str__(self):
args_str = ", ".join(f"{name}={value}" for name, value in self.case_key.items())
return f"{self.dnode.to_descriptor(near_commas=True)}({args_str})"
@attr.s(frozen=True)
class Task:
"""
A unit of work. Can have dependencies, which are referred to via their
TaskKeys.
Attributes
----------
key: TaskKey
Key corresponding to the output value computed by this task.
dep_keys: list of TaskKeys
Keys corresponding to the input values required by this task.
compute_func: function taking a single ``dep_values`` argument
Generates output values based on the passed input values.
is_simple_lookup: boolean
Whether this task consists of simply looking up the fixed value of an entity;
used to determine what message to log when this task is computed.
"""
key = attr.ib()
dep_keys = attr.ib(converter=tuple)
compute_func = attr.ib()
is_simple_lookup = attr.ib(default=False)
def compute(self, dep_values):
return self.compute_func(dep_values)
@property
def can_be_serialized(self):
return not self.is_simple_lookup
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
def __repr__(self):
return f"Task({self.key!r}, {self.dep_keys!r})"
@attr.s(frozen=True)
class Result:
"""
Represents one value for one entity.
"""
task_key = attr.ib()
value = attr.ib()
local_artifact = attr.ib()
value_is_missing = attr.ib(default=False)
def __repr__(self):
return f"Result({self.task_key!r}, {self.value!r})"
@attr.s(frozen=True)
class Artifact:
"""
Represents a serialized, file-like artifact, either on a local filesystem or in a
cloud object store.
"""
url: str = attr.ib()
content_hash: str = attr.ib()
def evolve(self, **kwargs):
return attr.evolve(self, **kwargs)
class CaseKeySpace(ImmutableSequence):
"""
A set of CaseKey names (without values) -- represents a space of possible
CaseKeys.
"""
def __init__(self, names=None):
if names is None:
names = []
super(CaseKeySpace, self).__init__(sorted(names))
def union(self, other):
return CaseKeySpace(set(self).union(other))
def intersection(self, other):
return CaseKeySpace(name for name in self if name in other)
def difference(self, other):
return CaseKeySpace(name for name in self if name not in other)
def select(self, case_key):
return case_key.project(self)
@classmethod
def union_all(cls, spaces):
if not spaces:
return CaseKeySpace([])
names = set()
for space in spaces:
names = names.union(space)
return CaseKeySpace(names)
@classmethod
def intersection_all(cls, spaces):
if not spaces:
raise ValueError("Can't take the intersection of zero spaces")
names = None
for space in spaces:
if names is None:
                names = set(space)
else:
names = names.intersection(space)
return CaseKeySpace(names)
def __repr__(self):
return f'CaseKeySpace({", ".join(repr(name) for name in self)})'
class CaseKey(ImmutableMapping):
"""
A collection of name-token pairs that uniquely identifies a case.
"""
def __init__(self, name_token_pairs):
tokens_by_name = {name: token for name, token in name_token_pairs}
super(CaseKey, self).__init__(tokens_by_name)
self._name_token_pairs = name_token_pairs
self.tokens = tokens_by_name
self.space = CaseKeySpace(list(tokens_by_name.keys()))
self.missing_names = [
# None is a sentinel value used to indicate that no value is available.
# Normally I would prefer to represent missing-ness out-of-band by making the
# `missing_names` field the source of truth here, but the relational methods like
# `project` are cleaner when we use a sentinel value.
name
for name, token in name_token_pairs
if token is None
]
self.has_missing_values = len(self.missing_names) > 0
def project(self, key_space):
return CaseKey(
[
(name, token)
for name, token in self._name_token_pairs
if name in key_space
]
)
def drop(self, key_space):
return CaseKey(
[
(name, token)
for name, token in self._name_token_pairs
if name not in key_space
]
)
def merge(self, other):
tokens_by_name = {name: token for name, token in self._name_token_pairs}
for name, token in other._name_token_pairs:
if name in tokens_by_name:
assert token == tokens_by_name[name]
else:
tokens_by_name[name] = token
return CaseKey([(name, token) for name, token in tokens_by_name.items()])
def __repr__(self):
args_str = ", ".join(f"{name}={token}" for name, token in self.items())
return f"CaseKey({args_str})"
class ResultGroup(ImmutableSequence):
"""
Represents a collection of Results, distinguished by their CaseKeys. Each
CaseKey should have the same set of names.
"""
def __init__(self, results, key_space):
super(ResultGroup, self).__init__(results)
self.key_space = key_space
def __repr__(self):
return f"ResultGroup({list(self)!r})"
def str_from_version_value(value):
if value is None:
return "0"
elif isinstance(value, int):
return str(value)
elif isinstance(value, str):
return value
else:
raise ValueError(f"Version values must be str, int, or None: got {value!r}")
# The CodeVersion and CodeFingerprint classes are used (indirectly) by
# persistence.ArtifactMetadataRecord and can be serialized to YAML and stored in the
# persistent cache. That means if we add new fields to them, we also need to update
# persistence.CACHE_SCHEMA_VERSION.
# TODO Should we just move these classes to persistence.py as well?
@attr.s(frozen=True)
class CodeVersion:
"""
Contains the user-designated version of a piece of code, consisting of a
major and a minor version string, and a boolean that indicates whether it
includes the bytecode. The convention is that changing the major version
indicates a functional change, while changing the minor version indicates a
nonfunctional change. If ``includes_bytecode`` is True, then the major version
is understood to implicitly include the bytecode of the code as well.
"""
major: str = attr.ib(converter=str_from_version_value)
minor: str = attr.ib(converter=str_from_version_value)
includes_bytecode: bool = attr.ib(converter=attr.converters.default_if_none(True))
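# Illustrative example (comments only, not executed): the converters above
# normalize version values, so CodeVersion(major=1, minor=None, includes_bytecode=None)
# yields major="1", minor="0", includes_bytecode=True, because
# str_from_version_value maps None to "0" and ints to their string form, and
# default_if_none(True) substitutes True for a missing includes_bytecode flag.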
@attr.s(frozen=True)
class CodeVersioningPolicy:
"""
Contains the version of the user entity function with any additional settings
related to the version. For now, we only have one setting that affects the
analysis-time behavior of the version.
"""
version: CodeVersion = attr.ib()
suppress_bytecode_warnings: bool = attr.ib(
converter=attr.converters.default_if_none(False)
)
@attr.s(frozen=True)
class CodeFingerprint:
"""
A collection of characteristics attempting to uniquely identify a function.
Attributes
----------
version: CodeVersion
A version identifier provided by the user.
bytecode_hash: str
A hash of the function's Python bytecode.
orig_flow_name: str
The name of the flow in which this function was originally defined.
is_identity: bool
If True, indicates that this function is equivalent to the identity function:
it takes one argument and returns it unchanged.
"""
version: CodeVersion = attr.ib()
bytecode_hash: str = attr.ib()
orig_flow_name: str = attr.ib()
is_identity: bool = attr.ib(default=False)
@attr.s(frozen=True)
class VersioningPolicy:
"""
Encodes the versioning rules to use when computing entity values.
"""
check_for_bytecode_errors = attr.ib()
treat_bytecode_as_functional = attr.ib()
ignore_bytecode_exceptions = attr.ib()
@attr.s(frozen=True)
class FunctionAttributes:
"""
Describes properties of a Python function.
"""
code_fingerprint = attr.ib()
code_versioning_policy = attr.ib()
changes_per_run = attr.ib()
aip_task_config = attr.ib()
|
import paramiko
from time import localtime, strftime
# Create a ssh client
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('ocare', username='taiwanet', password='qwe7891')
file_name = strftime("/home/taiwanet/capture_image/%Y%m%d_%H%M%S.jpg", localtime())
stdin, stdout, stderr = client.exec_command('fswebcam --no-banner -d /dev/video1 -s brightness=112 -s Contrast=37 -s Gamma=50% -s Saturation=51 -s Sharpness=20 -p YUYV -S 5 -D 1 -r 1280x960 --jpeg 100 '+ file_name)
print('Capture Image \''+file_name+'\' successful! ')
for line in stderr:
print(line)
for line in stdout:
print(line)
client.close()
|
# HotSpotMap: A python based temperature (thermal) map generation
# tool for HotSpot-6.0 (http://lava.cs.virginia.edu/HotSpot/)
# This tool uses python's turtle library
#
# Author: Gaurav Kothari (gkothar1@binghamton.edu) Copyright 2021
#
# This tool generates:
# 1) Floor-plan image (using floor-plan file)
# 2) Thermal map (using floor-plan file and steady temperature file)
# 3) Fine grained thermal map (using floor-plan file and grid steady temperature file)
#
# Supports 2D and 3D stacked systems
# Supports output formats: '.eps' and '.pdf'
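#
# Example invocations (illustrative; the script and input file names are
# placeholders, flags are defined in parse_command_line() below):
#   python hotspotmap.py -a flp -f cpu.flp -o cpu
#   python hotspotmap.py -a steady -f cpu.flp -t cpu.steady -o cpu
#   python hotspotmap.py -a grid-steady -f cpu.flp -t cpu.grid.steady -r 64 -c 64 -o cpu
#   python hotspotmap.py -3D -a grid-steady -f stack.lcf -t stack.grid.steady -r 64 -c 64 -o stack -concat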
import os
import time
import subprocess
import tkinter
import turtle
import tempfile
import numpy as np
import matplotlib
from matplotlib import cm
from matplotlib.colors import LinearSegmentedColormap
import argparse
from sys import argv
# To represent each floor-plan unit
class FloorplanUnit():
def __init__(self, name, width, height, xpos, ypos, temp=0):
self.name = name
self.width = width
self.height = height
self.xpos = xpos
self.ypos = ypos
self.temp = temp # temperature
msg_prefix = " HotSpotMap:"
# Home co-ordinates for drawing the chip floor-plan
# Note: turtle's default home co-ordinates are (0,0)
# For drawing the floor-plan, we will start from (-w/2,-h/2), where
# w = width of the chip, h = height of the chip
chip_home_xpos = 0
chip_home_ypos = 0
# Inspired by HotSpot 6.0
def get_chip_width(flp_units):
min_x = flp_units[0].xpos
max_x = flp_units[0].xpos + flp_units[0].width
for i in range(1, len(flp_units)):
if flp_units[i].xpos < min_x:
min_x = flp_units[i].xpos
if (flp_units[i].xpos + flp_units[i].width) > max_x:
max_x = flp_units[i].xpos + flp_units[i].width
return (max_x - min_x) * 1e3
# Inspired by HotSpot 6.0
def get_chip_height(flp_units):
min_y = flp_units[0].ypos
max_y = flp_units[0].ypos + flp_units[0].height
for i in range(1, len(flp_units)):
if flp_units[i].ypos < min_y:
min_y = flp_units[i].ypos
if (flp_units[i].ypos + flp_units[i].height) > max_y:
max_y = flp_units[i].ypos + flp_units[i].height
return (max_y - min_y) * 1e3
def get_pos_from_chip_home(xpos, ypos):
return (chip_home_xpos + xpos, chip_home_ypos + ypos)
# Only for 3D systems, collect all the output files
# (for every layer) to combine them later as a single PDF
output_3d_files = []
#
# Functions related to Turtle
#
def turtle_setup(config):
# setup screen
ts = turtle.Screen()
cw = (config.chip_width * 1e-3 * config.zoom_by)
ch = (config.chip_height * 1e-3 * config.zoom_by)
ts.reset()
ts.colormode(255)
ts.tracer(0, 0)
global chip_home_xpos
chip_home_xpos = -(cw / 2)
global chip_home_ypos
chip_home_ypos = -(ch / 2)
# create turtle cursor
t = turtle.Turtle()
t.pen(shown=False)
t.pensize(0.5)
t.hideturtle()
t.penup()
t.setpos(chip_home_xpos, chip_home_ypos)
return t
def turtle_save_image(config):
ts = turtle.getscreen()
eps_file = os.path.join(
config.output_dir, "{f}-{a}.eps".format(f=config.output_file,
a=config.action))
pdf_file = os.path.join(
config.output_dir, "{f}-{a}.pdf".format(f=config.output_file,
a=config.action))
canvas = ts.getcanvas()
canvas.config(width=config.chip_width * 1e-3 * config.zoom_by,
height=config.chip_height * 1e-3 * config.zoom_by)
canvas.postscript(file=eps_file)
print("{p} Generated eps file: {f}".format(p=msg_prefix, f=eps_file))
cmd = "ps2pdf {i} {o}".format(i=eps_file, o=pdf_file)
process = subprocess.Popen(cmd, shell=True)
process.wait()
print("{p} Generated pdf file: {f}".format(p=msg_prefix, f=pdf_file))
if config.model_3d:
output_3d_files.append(pdf_file)
def turtle_draw_unit(t,
xpos,
ypos,
width,
height,
config,
name,
border_color="",
fill_color="",
hide_names=True):
xpos *= config.zoom_by
ypos *= config.zoom_by
pos = get_pos_from_chip_home(xpos, ypos)
xpos = pos[0]
ypos = pos[1]
width *= config.zoom_by
height *= config.zoom_by
t.penup()
t.setpos(xpos, ypos)
t.color(border_color, fill_color)
if fill_color:
t.begin_fill()
t.pendown()
t.forward(width)
t.left(90)
t.forward(height)
t.left(90)
t.forward(width)
t.left(90)
t.forward(height)
t.left(90)
if fill_color:
t.end_fill()
t.penup()
if name and (hide_names == False):
t.setpos(xpos + (width / 2), ypos + (height / 2))
t.pendown()
t.color("black")
print_name = name
if config.print_area:
area = (width / config.zoom_by) * (height /
config.zoom_by) * 1e6 # mm2
area = round(area, 3)
print_name += " ({a})".format(a=area)
t.write(print_name,
align="center",
font=(config.font, config.font_size, config.font_weight))
t.penup()
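# Note on units, derived from turtle_draw_unit above: floor-plan coordinates and
# sizes arrive in meters, are scaled by config.zoom_by into screen units, and are
# offset so the chip's lower-left corner sits at the chip "home" position set in
# turtle_setup. For example, a 16 mm wide chip drawn with the default zoom of
# 75000 spans 0.016 * 75000 = 1200 screen units.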
def draw_chip_dimensions(t, config):
# draw height scale on left of the floor-plan
arrow_height = 15
xpos = -30
ypos = 0
t.penup()
t.color("black")
t.setpos(get_pos_from_chip_home(xpos, ypos))
t.left(90)
t.pendown()
t.forward(config.chip_height * 1e-3 * config.zoom_by)
temp = t.pos()
t.left(135)
t.forward(arrow_height)
t.setpos(temp)
t.right(270)
t.forward(arrow_height)
t.penup()
t.setpos(get_pos_from_chip_home(xpos, ypos))
t.pendown()
t.left(90)
t.forward(arrow_height)
t.penup()
t.setpos(get_pos_from_chip_home(xpos, ypos))
t.right(270)
t.pendown()
t.forward(arrow_height)
t.right(135) # reset
t.penup()
canvas = turtle.getcanvas()
xpos = -45
ypos = (config.chip_height * 1e-3 * config.zoom_by) / 2
pos = get_pos_from_chip_home(xpos, ypos)
canvas.create_text(pos[0],
pos[1],
text="Height {h} mm".format(h=config.chip_height),
angle=90,
font=(config.font, config.font_size,
config.font_weight))
# draw width scale on top of the floor-plan
xpos = 0
ypos = (config.chip_height * 1e-3 * config.zoom_by) + 30
t.penup()
t.setpos(get_pos_from_chip_home(xpos, ypos))
t.pendown()
t.forward(config.chip_width * 1e-3 * config.zoom_by)
temp = t.pos()
t.left(135)
t.forward(arrow_height)
t.setpos(temp)
t.right(270)
t.forward(arrow_height)
t.penup()
t.setpos(get_pos_from_chip_home(xpos, ypos))
t.pendown()
t.left(90)
t.forward(arrow_height)
t.penup()
t.setpos(get_pos_from_chip_home(xpos, ypos))
t.right(270)
t.pendown()
t.forward(arrow_height)
t.penup()
canvas = turtle.getcanvas()
xpos = (config.chip_width * 1e-3 * config.zoom_by) / 2
ypos = -45
pos = get_pos_from_chip_home(xpos, ypos)
canvas.create_text(pos[0],
pos[1],
text="Width {w} mm".format(w=config.chip_width),
angle=0,
font=(config.font, config.font_size,
config.font_weight))
#
# Function related to temperature color bar
#
# Colors used for temperature map
colors = [
"#ff0000",
"#ff3300",
"#ff6600",
"#ff9900",
"#ffcc00",
"#ffff00",
"#ccff00",
"#99ff00",
"#66ff00",
"#33ff00",
"#00ff00",
"#00ff33",
"#00ff66",
"#00ff99",
"#00ffcc",
"#00ffff",
"#00ccff",
"#0099ff",
"#0066ff",
"#0033ff",
"#0000ff",
]
# Color map for temperatures
def get_chip_temp_cmap():
global colors
colors.reverse()
cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
"chipTemp", colors)
return cmap
def draw_color_bar(t, config, colors, temp_min, temp_max):
xpos = ((config.chip_width + 0.05) * 1e-3)
ypos = 0
color_bar_max_height = config.chip_height * 1e-3
color_cell_width = color_bar_max_height / len(colors)
color_cell_height = color_cell_width
temp_cell_width = color_cell_width * 3
temp_cell_height = color_cell_height
interval = len(colors)
temp_values = np.linspace(temp_min,
temp_max,
num=int(interval),
endpoint=True)
temp_values = [round(val, 2) for val in temp_values]
i = 0
for color in colors:
# draw the temperature value
turtle_draw_unit(t,
xpos,
ypos,
temp_cell_width,
temp_cell_height,
config,
name="{f}K".format(f=temp_values[i]),
border_color="",
fill_color="",
hide_names=False)
# color cell
turtle_draw_unit(t,
xpos + temp_cell_width,
ypos,
color_cell_width,
color_cell_height,
config,
name="",
border_color="black",
fill_color=color)
ypos += color_cell_height
i += 1
#
# Functions related to drawing chip floor-plan
#
# Checks if floor-plan has duplicated units
def check_duplicated_flp_units(flp_units_names):
flp_units_namesSet = set(flp_units_names)
if len(flp_units_namesSet) != len(flp_units_names):
print("{p} warning! duplicated floor-plan units detected".format(
p=msg_prefix))
def draw_floorplan(config, t):
start = time.time()
file = open(config.floor_plan, "r")
flp = file.readlines()
flp_units = []
flp_units_names = []
for line in flp:
if "#" in line or line == "\n" or not line:
continue
line = line.split("\t")
flp_units_names.append(line[0])
flp_units.append(
FloorplanUnit(line[0], float(line[1]), float(line[2]),
float(line[3]), float(line[4])))
check_duplicated_flp_units(flp_units_names)
print("{p} Drawing floor-plan".format(p=msg_prefix))
print(
"{p} Reading floor-plan file {f}: found {u} units, {w} mm chip-width, {h} mm chip-height"
.format(f=config.floor_plan,
p=msg_prefix,
u=len(flp_units),
w=config.chip_width,
h=config.chip_height))
file.close()
for unit in flp_units:
        turtle_draw_unit(t,
unit.xpos,
unit.ypos,
unit.width,
unit.height,
config,
name=unit.name,
border_color="black",
fill_color="",
hide_names=config.hide_names)
end = time.time()
print("{p} Finished drawing floor-plan in {t} seconds".format(
p=msg_prefix, t=round((end - start), 2)))
#
# Functions related to draw the temperature maps
#
# This parses the given temperature file and extracts
# min and max temperatures (for steady and grid steady file)
def get_temperature_file_config(temperature_file, grid_steady_file_3d=""):
file = open(temperature_file, "r")
lines = file.readlines()
temperatures = []
for line in lines:
if line == "\n" or not line:
continue
line = line.split("\t")
if len(line) == 1:
continue # for 3D grid steady file, skip layer header
temperatures.append(float(line[1]))
file.close()
grid_steady_config = []
grid_steady_config.append(str(min(temperatures)))
grid_steady_config.append(str(max(temperatures)))
return grid_steady_config
def draw_grid_steady_thermal_map(config, turtle, grid_steady_file_3d=""):
start = time.time()
temperature_limit_file = config.temperature_file
if config.model_3d:
# for 3D systems, use the original grid-steady file containing
# the temperature data for all the layers to extract min and max
# temperatures, because all the layers must use the same color range
temperature_limit_file = grid_steady_file_3d
# find min and max temperatures reported in grid steady file
grid_steady_config = get_temperature_file_config(temperature_limit_file)
rows = config.grid_rows
cols = config.grid_cols
temp_min = float(grid_steady_config[0])
temp_max = float(grid_steady_config[1])
print(
"{p} Reading grid steady file {f}, with {r} rows, {c} cols, {min} min-temp, {max} max-temp"
.format(p=msg_prefix,
f=config.temperature_file,
r=rows,
c=cols,
min=temp_min,
max=temp_max))
# normalize temperature range between 0 and 1, which will be used to fetch color from color map
norm_temp_range = matplotlib.colors.Normalize(vmin=temp_min, vmax=temp_max)
# generate color map
cmap = get_chip_temp_cmap()
global colors
draw_color_bar(turtle, config, colors, temp_min, temp_max)
grid_cell_width = (config.chip_width * 1e-3) / cols
grid_cell_height = (config.chip_height * 1e-3) / rows
file = open(config.temperature_file, "r")
lines = file.readlines()
xpos = 0
ypos = (config.chip_height * 1e-3) - grid_cell_height
print("{p} Drawing temperature grid".format(p=msg_prefix))
next_col = 0
for line in lines:
if line == "\n" or not line:
continue
else:
line = line.split("\t")
col = line[0] # column number
temp = float(
line[1]) # temperature of the cell at current row and column
color = matplotlib.colors.rgb2hex(cmap(norm_temp_range(temp)))
turtle_draw_unit(turtle,
xpos,
ypos,
grid_cell_width,
grid_cell_height,
config,
name="",
border_color=color,
fill_color=color)
xpos += grid_cell_width
next_col += 1
if next_col == config.grid_cols:
# one complete row is finished
xpos = 0
next_col = 0
ypos -= grid_cell_height
file.close()
end = time.time()
print("{p} Finished drawing temperature grid in {t} seconds".format(
p=msg_prefix, t=round((end - start), 2)))
def draw_steady_thermal_map(config, turtle):
start = time.time()
# find min and max temperatures reported in steady file
steady_config = get_temperature_file_config(config.temperature_file)
temp_min = float(steady_config[0])
temp_max = float(steady_config[1])
print("{p} Reading steady file {f}, found {min} min-temp, {max} max-temp".
format(p=msg_prefix,
f=config.temperature_file,
min=temp_min,
max=temp_max))
# normalize temperature range between 0 and 1, which will be used to fetch color from color map
norm_temp_range = matplotlib.colors.Normalize(vmin=temp_min, vmax=temp_max)
# generate color map
cmap = get_chip_temp_cmap()
draw_color_bar(turtle, config, colors, temp_min, temp_max)
# read all the floor-plan units
file = open(config.floor_plan, "r")
flp = file.readlines()
flp_units = []
for line in flp:
if "#" in line or line == "\n":
continue
line = line.split("\t")
flp_units.append(
FloorplanUnit(line[0], float(line[1]), float(line[2]),
float(line[3]), float(line[4])))
file.close()
file = open(config.temperature_file, "r")
lines = file.readlines()
for line in lines:
line = line.split("\t")
name = line[0]
temp = float(line[1])
# for 3D steady temperature file, each unit is appended with prefix layer_<layer>_
# we need to remove that prefix
if config.model_3d and "layer_" in name:
name = name[name.find("_") + 1:]
name = name[name.find("_") + 1:]
for unit in flp_units:
if unit.name == name:
color = matplotlib.colors.rgb2hex(cmap(norm_temp_range(temp)))
turtle_draw_unit(turtle,
unit.xpos,
unit.ypos,
unit.width,
unit.height,
config,
name=unit.name,
border_color="black",
fill_color=color,
hide_names=config.hide_names)
file.close()
end = time.time()
print("{p} Finished steady temperature map in {t} seconds".format(
p=msg_prefix, t=round((end - start), 2)))
#
# Function related to parse file for 3D system (such as LCF and grid-steady file)
#
# Parse HotSpot's layer configuration file (lcf) for 3D systems
# For 3D systems, config.floor_plan is the LCF
def read_lcf(config):
file = open(config.floor_plan, "r")
lines = file.readlines()
config_lines = [
] # To store lcf after removing all the comments and blank lines
for line in lines:
if "#" in line or not line or line == "\n":
continue
config_lines.append(line)
file.close()
layer_num_pos = 0 # pos of layer number for the corresponding layer
has_power_pos = 2 # pos of power dissipation flag for the corresponding layer
floor_plan_file_pos = 6 # pos of floor plan file for the corresponding layer
current_line = 0
current_layer = []
lcf_home_dir = os.path.dirname(config.floor_plan)
lcf_breakdown_list = []
    while current_line < len(config_lines):
        current_layer.append(config_lines[current_line])
        current_line += 1
        # a complete layer spans 7 configuration lines; record it once all 7
        # have been read (this also captures the final layer in the file)
        if (current_line % 7) == 0:
            temp = []
            temp.append(current_layer[layer_num_pos].rstrip())
            temp.append(current_layer[has_power_pos].rstrip())
            temp.append(
                os.path.join(lcf_home_dir,
                             current_layer[floor_plan_file_pos].rstrip()))
            lcf_breakdown_list.append(temp)
            current_layer.clear()
print("{p} Finished reading lcf file: {f}, found {flp} floor-plan files".
format(p=msg_prefix,
f=config.floor_plan,
flp=len(lcf_breakdown_list)))
return lcf_breakdown_list
def extract_grid_temperatures_for_layer(config, temperature_file, layer):
file = open(temperature_file, "r")
lines = file.readlines()
file.close()
# remove all the empty lines
cleaned_lines = []
for line in lines:
if line == "\n" or not line:
continue
cleaned_lines.append(line)
line_num = 0
look_for_layer = "layer_{l}".format(l=layer)
while cleaned_lines[line_num].rstrip() != look_for_layer:
line_num += 1
print(
"{p} Grid temperature data for layer {l} starts at line {n} in file: {f}"
.format(p=msg_prefix, l=layer, n=line_num, f=temperature_file))
# grid temperatures for current layer start at line_num
line_num += 1 # skip the header line for this layer
file = open("temp.grid.steady", "w")
# we will read grid_rows x grid_cols line from this line onwards
lines_read = line_num
lines_to_read = line_num + (config.grid_rows * config.grid_cols)
while lines_read < lines_to_read:
current_line = cleaned_lines[lines_read]
file.write("{l}\n".format(l=current_line.rstrip()))
lines_read += 1
file.close()
# For 2D systems
def main_2d(config):
turtle = turtle_setup(config)
if config.action == "flp":
draw_floorplan(config, turtle)
else:
if config.action == "grid-steady":
draw_grid_steady_thermal_map(config, turtle)
draw_floorplan(
config, turtle
) # This will superimpose floor-plan onto temperature grid
else:
draw_steady_thermal_map(config, turtle)
if config.print_chip_dim:
draw_chip_dimensions(turtle, config)
turtle_save_image(config)
# For 3D stacked systems
def main_3d(config):
lcf_breakdown_list = read_lcf(config)
output_file_bkp = config.output_file
temperature_file_bkp = config.temperature_file
for lcf_layer in lcf_breakdown_list:
layer = int(lcf_layer[0]) # layer number
# override the config parameters
config.floor_plan = lcf_layer[2]
config.output_file = output_file_bkp
config.output_file += "-layer-{l}".format(l=layer)
turtle = turtle_setup(config)
print("{s} Processing layer {l} with floor-plan: {f}".format(
s=msg_prefix, l=layer, f=config.floor_plan))
if config.action == "flp":
draw_floorplan(config, turtle)
else:
if config.action == "grid-steady":
extract_grid_temperatures_for_layer(config,
temperature_file_bkp,
layer)
# this file has extracted grid temperatures for current layer
config.temperature_file = "temp.grid.steady"
draw_grid_steady_thermal_map(config, turtle,
temperature_file_bkp)
draw_floorplan(
config, turtle
) # this will superimpose floor-plan onto temperature grid
os.remove("temp.grid.steady")
else:
draw_steady_thermal_map(config, turtle)
if config.print_chip_dim:
draw_chip_dimensions(turtle, config)
turtle_save_image(config)
print("")
if config.concat:
# this code block combines all the files
# generated for each layer into a single PDF
output_file_list_str = ""
for file in output_3d_files:
output_file_list_str += "{f} ".format(f=file)
final_concat_output = os.path.join(
config.output_dir, "{p}-{a}-concat.pdf".format(p=output_file_bkp,a=config.action))
pdfjam = "pdfjam --nup {n}x1 --landscape {files} -o {output}".format(
n=len(output_3d_files),
files=output_file_list_str,
output=final_concat_output)
print("{p} Executing {c}".format(p=msg_prefix, c=pdfjam))
        process = subprocess.Popen(pdfjam, shell=True,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
if stdout:
print(stdout)
if stderr:
print(stderr)
def setup_chip_dimensions(config):
floor_plan_file = config.floor_plan
if config.model_3d:
lcf_breakdown_list = read_lcf(config)
# index 0 in lcf_breakdown_list is the 1st layer in 3D system
# index 2 in 1st layer is the floor-plan file for that layer
# for stacked 3D system, all layers must have equal dimensions, so pick any 1 layer
floor_plan_file = lcf_breakdown_list[0][2]
file = open(floor_plan_file, "r")
flp = file.readlines()
flp_units = []
file.close()
for line in flp:
if "#" in line or line == "\n" or not line:
continue
line = line.split("\t")
flp_units.append(
FloorplanUnit(line[0], float(line[1]), float(line[2]),
float(line[3]), float(line[4])))
config.chip_height = round(get_chip_height(flp_units), 5)
config.chip_width = round(get_chip_width(flp_units), 5)
print("{p} Calculated chip's width as {w} mm and chip's height as {h} mm".
format(p=msg_prefix, w=config.chip_width, h=config.chip_height))
def parse_command_line():
version = 2.0
description = "A python based temperature (thermal) map generation tool for HotSpot-6.0 (http://lava.cs.virginia.edu/HotSpot/), Author: Gaurav Kothari (gkothar1@binghamton.edu) v{v}".format(
v=version)
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-a",
"--action",
action="store",
dest="action",
required=True,
choices=["flp", "steady", "grid-steady"],
help="Action type")
parser.add_argument("-3D",
"--model-3D",
action="store_true",
dest="model_3d",
required=False,
default=False,
help="To indicate a 3D system")
parser.add_argument("-f",
"--flp",
action="store",
dest="floor_plan",
required=True,
help="Floor-plan file")
parser.add_argument(
"-t",
"--temperature",
action="store",
dest="temperature_file",
required=("steady" in argv) or ("grid-steady" in argv),
help=
"Steady temperature file or Grid steady temperature file based on action"
)
parser.add_argument("-r",
"--row",
action="store",
dest="grid_rows",
type=int,
required=("grid-steady" in argv),
help="Number of rows in grid-steady model")
parser.add_argument("-c",
"--col",
action="store",
dest="grid_cols",
type=int,
required=("grid-steady" in argv),
help="Number of columns in grid-steady model")
parser.add_argument("-ft",
"--font",
action="store",
dest="font",
required=False,
default="Ubuntu",
help="Font family")
parser.add_argument("-fts",
"--font-size",
action="store",
dest="font_size",
required=False,
default=9,
type=int,
help="Font size")
parser.add_argument("-ftw",
"--font-weight",
action="store",
dest="font_weight",
required=False,
default="normal",
help="Font weight")
parser.add_argument("-o",
"--output-file",
action="store",
dest="output_file",
required=True,
help="Output file name prefix")
parser.add_argument("-d",
"--output-directory",
action="store",
dest="output_dir",
required=False,
default=os.getcwd(),
help="Output directory")
parser.add_argument("-hn",
"--hide-names",
action="store_true",
dest="hide_names",
required=False,
default=False,
help="Hide names on floor-plan")
parser.add_argument("-z",
"--zoom-by",
action="store",
dest="zoom_by",
type=int,
required=False,
default=75000,
help="Zoom factor")
parser.add_argument("-pcd",
"--print-chip-dim",
action="store_true",
dest="print_chip_dim",
required=False,
default=False,
help="Draw chip' width and height scale")
parser.add_argument("-concat",
"--concat-3D",
action="store_true",
dest="concat",
required=False,
default=False,
help="Combines the images generated for all layer into a single PDF")
parser.add_argument(
"-pa",
"--print-area",
action="store_true",
dest="print_area",
required=False,
default=False,
help=
"Print unit's area (mm2) alongside its name, rounded to three decimal places"
)
args = parser.parse_args()
print("{p} {d}".format(p=msg_prefix, d=description))
print("")
return args
def main():
config = parse_command_line()
# before we start drawing images, first quickly read floor-plan file
# and calculate the chip's width and height
setup_chip_dimensions(config)
if config.model_3d:
main_3d(config)
else:
main_2d(config)
if __name__ == "__main__":
main()
|
import os
from setuptools import setup, find_packages
def read(fn):
path = os.path.join(os.path.dirname(__file__), fn)
try:
file = open(path, encoding='utf-8')
except TypeError:
file = open(path)
return file.read()
setup(
name='litewql',
version=__import__('litewql').VERSION,
description='Lite web queries language',
long_description=read('README.md'),
long_description_content_type="text/markdown",
author='Vadim Sharay',
author_email='vadimsharay@gmail.com',
packages=find_packages(exclude=['tests']),
zip_safe=False,
include_package_data=True,
install_requires=[
"regex"
],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Unix',
'Programming Language :: Python',
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
"Programming Language :: Python :: Implementation :: PyPy3",
'Topic :: Software Development :: Libraries'
]
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import uuid
from jsonlint.compat import string_types, text_type
__all__ = (
'DataRequired', 'data_required', 'Email', 'email', 'EqualTo', 'equal_to',
'IPAddress', 'ip_address', 'InputRequired', 'input_required', 'Length',
'length', 'NumberRange', 'number_range', 'Optional', 'optional',
'Regexp', 'regexp', 'URL', 'url', 'AnyOf',
'any_of', 'NoneOf', 'none_of', 'MacAddress', 'mac_address', 'UUID'
)
class ValidationError(ValueError):
"""
Raised when a validator fails to validate its input.
"""
def __init__(self, message='', *args, **kwargs):
ValueError.__init__(self, message, *args, **kwargs)
class StopValidation(Exception):
"""
Causes the validation chain to stop.
If StopValidation is raised, no more validators in the validation chain are
called. If raised with a message, the message will be added to the errors
list.
"""
def __init__(self, message='', *args, **kwargs):
Exception.__init__(self, message, *args, **kwargs)
class EqualTo(object):
"""
Compares the values of two fields.
:param fieldname:
The name of the other field to compare to.
:param message:
Error message to raise in case of a validation error. Can be
interpolated with `%(other_label)s` and `%(other_name)s` to provide a
more helpful error.
"""
def __init__(self, fieldname, message=None):
self.fieldname = fieldname
self.message = message
def __call__(self, form, field):
try:
other = form[self.fieldname]
except KeyError:
raise ValidationError(field.gettext("Invalid field name '%s'.") % self.fieldname)
if field.data != other.data:
d = {
'other_label': hasattr(other, 'label') and other.label.text or self.fieldname,
'other_name': self.fieldname
}
message = self.message
if message is None:
message = field.gettext('Field must be equal to %(other_name)s.')
raise ValidationError(message % d)
class Length(object):
"""
Validates the length of a string.
:param min:
The minimum required length of the string. If not provided, minimum
length will not be checked.
:param max:
The maximum length of the string. If not provided, maximum length
will not be checked.
:param message:
Error message to raise in case of a validation error. Can be
interpolated using `%(min)d` and `%(max)d` if desired. Useful defaults
are provided depending on the existence of min and max.
"""
def __init__(self, min=-1, max=-1, message=None):
assert min != -1 or max != -1, 'At least one of `min` or `max` must be specified.'
assert max == -1 or min <= max, '`min` cannot be more than `max`.'
self.min = min
self.max = max
self.message = message
def __call__(self, form, field):
l = field.data and len(field.data) or 0
if l < self.min or self.max != -1 and l > self.max:
message = self.message
if message is None:
if self.max == -1:
message = field.ngettext('Field must be at least %(min)d character long.',
'Field must be at least %(min)d characters long.', self.min)
elif self.min == -1:
message = field.ngettext('Field cannot be longer than %(max)d character.',
'Field cannot be longer than %(max)d characters.', self.max)
else:
message = field.gettext('Field must be between %(min)d and %(max)d characters long.')
raise ValidationError(message % dict(min=self.min, max=self.max, length=l))
class NumberRange(object):
"""
Validates that a number is of a minimum and/or maximum value, inclusive.
This will work with any comparable number type, such as floats and
decimals, not just integers.
:param min:
The minimum required value of the number. If not provided, minimum
value will not be checked.
:param max:
The maximum value of the number. If not provided, maximum value
will not be checked.
:param message:
Error message to raise in case of a validation error. Can be
interpolated using `%(min)s` and `%(max)s` if desired. Useful defaults
are provided depending on the existence of min and max.
"""
def __init__(self, min=None, max=None, message=None):
self.min = min
self.max = max
self.message = message
def __call__(self, form, field):
data = field.data
if data is None or (self.min is not None and data < self.min) or \
(self.max is not None and data > self.max):
message = self.message
if message is None:
# we use %(min)s interpolation to support floats, None, and
# Decimals without throwing a formatting exception.
if self.max is None:
message = field.gettext('Number must be at least %(min)s.')
elif self.min is None:
message = field.gettext('Number must be at most %(max)s.')
else:
message = field.gettext('Number must be between %(min)s and %(max)s.')
raise ValidationError(message % dict(min=self.min, max=self.max))
class Optional(object):
"""
Allows empty input and stops the validation chain from continuing.
If input is empty, also removes prior errors (such as processing errors)
from the field.
:param strip_whitespace:
If True (the default) also stop the validation chain on input which
consists of only whitespace.
"""
field_flags = ('optional', )
def __init__(self, strip_whitespace=True):
if strip_whitespace:
self.string_check = lambda s: s.strip()
else:
self.string_check = lambda s: s
def __call__(self, form, field):
if not field.raw_data or isinstance(field.raw_data[0], string_types) and not self.string_check(field.raw_data[0]):
field.errors[:] = []
raise StopValidation()
class DataRequired(object):
"""
Checks the field's data is 'truthy' otherwise stops the validation chain.
This validator checks that the ``data`` attribute on the field is a 'true'
value (effectively, it does ``if field.data``.) Furthermore, if the data
is a string type, a string containing only whitespace characters is
considered false.
If the data is empty, also removes prior errors (such as processing errors)
from the field.
**NOTE** this validator used to be called `Required` but the way it behaved
(requiring coerced data, not input data) meant it functioned in a way
which was not symmetric to the `Optional` validator and furthermore caused
confusion with certain fields which coerced data to 'falsey' values like
``0``, ``Decimal(0)``, ``time(0)`` etc. Unless a very specific reason
exists, we recommend using the :class:`InputRequired` instead.
:param message:
Error message to raise in case of a validation error.
"""
field_flags = ('required', )
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
if not field.data or isinstance(field.data, string_types) and not field.data.strip():
if self.message is None:
message = field.gettext('This field is required.')
else:
message = self.message
field.errors[:] = []
raise StopValidation(message)
class InputRequired(object):
"""
Validates that input was provided for this field.
Note there is a distinction between this and DataRequired in that
InputRequired looks that form-input data was provided, and DataRequired
looks at the post-coercion data.
"""
field_flags = ('required', )
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
if not field.raw_data or not field.raw_data[0]:
if self.message is None:
message = field.gettext('This field is required.')
else:
message = self.message
field.errors[:] = []
raise StopValidation(message)
class Regexp(object):
"""
Validates the field against a user provided regexp.
:param regex:
The regular expression string to use. Can also be a compiled regular
expression pattern.
:param flags:
The regexp flags to use, for example re.IGNORECASE. Ignored if
`regex` is not a string.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, regex, flags=0, message=None):
if isinstance(regex, string_types):
regex = re.compile(regex, flags)
self.regex = regex
self.message = message
def __call__(self, form, field, message=None):
match = self.regex.match(field.data or '')
if not match:
if message is None:
if self.message is None:
message = field.gettext('Invalid input.')
else:
message = self.message
raise ValidationError(message)
return match
class Email(Regexp):
"""
Validates an email address. Note that this uses a very primitive regular
expression and should only be used in instances where you later verify by
other means, such as email activation or lookups.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message=None):
self.validate_hostname = HostnameValidation(
require_tld=True,
)
super(Email, self).__init__(r'^.+@([^.@][^@]+)$', re.IGNORECASE, message)
def __call__(self, form, field):
message = self.message
if message is None:
message = field.gettext('Invalid email address.')
match = super(Email, self).__call__(form, field, message)
if not self.validate_hostname(match.group(1)):
raise ValidationError(message)
class IPAddress(object):
"""
Validates an IP address.
:param ipv4:
If True, accept IPv4 addresses as valid (default True)
:param ipv6:
If True, accept IPv6 addresses as valid (default False)
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, ipv4=True, ipv6=False, message=None):
if not ipv4 and not ipv6:
raise ValueError('IP Address Validator must have at least one of ipv4 or ipv6 enabled.')
self.ipv4 = ipv4
self.ipv6 = ipv6
self.message = message
def __call__(self, form, field):
value = field.data
valid = False
if value:
valid = (self.ipv4 and self.check_ipv4(value)) or (self.ipv6 and self.check_ipv6(value))
if not valid:
message = self.message
if message is None:
message = field.gettext('Invalid IP address.')
raise ValidationError(message)
@classmethod
def check_ipv4(cls, value):
parts = value.split('.')
if len(parts) == 4 and all(x.isdigit() for x in parts):
numbers = list(int(x) for x in parts)
return all(num >= 0 and num < 256 for num in numbers)
return False
@classmethod
def check_ipv6(cls, value):
parts = value.split(':')
if len(parts) > 8:
return False
num_blank = 0
for part in parts:
if not part:
num_blank += 1
else:
try:
value = int(part, 16)
except ValueError:
return False
else:
if value < 0 or value >= 65536:
return False
if num_blank < 2:
return True
elif num_blank == 2 and not parts[0] and not parts[1]:
return True
return False
class MacAddress(Regexp):
"""
Validates a MAC address.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message=None):
pattern = r'^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$'
super(MacAddress, self).__init__(pattern, message=message)
def __call__(self, form, field):
message = self.message
if message is None:
message = field.gettext('Invalid Mac address.')
super(MacAddress, self).__call__(form, field, message)
class URL(Regexp):
"""
Simple regexp based url validation. Much like the email validator, you
probably want to validate the url later by other means if the url must
resolve.
:param require_tld:
If true, then the domain-name portion of the URL must contain a .tld
suffix. Set this to false if you want to allow domains like
`localhost`.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, require_tld=True, message=None):
regex = r'^[a-z]+://(?P<host>[^/:]+)(?P<port>:[0-9]+)?(?P<path>\/.*)?$'
super(URL, self).__init__(regex, re.IGNORECASE, message)
self.validate_hostname = HostnameValidation(
require_tld=require_tld,
allow_ip=True,
)
def __call__(self, form, field):
message = self.message
if message is None:
message = field.gettext('Invalid URL.')
match = super(URL, self).__call__(form, field, message)
if not self.validate_hostname(match.group('host')):
raise ValidationError(message)
class UUID(object):
"""
Validates a UUID.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
message = self.message
if message is None:
message = field.gettext('Invalid UUID.')
try:
uuid.UUID(field.data)
except ValueError:
raise ValidationError(message)
class AnyOf(object):
"""
Compares the incoming data to a sequence of valid inputs.
:param values:
A sequence of valid inputs.
:param message:
Error message to raise in case of a validation error. `%(values)s`
contains the list of values.
:param values_formatter:
Function used to format the list of values in the error message.
"""
def __init__(self, values, message=None, values_formatter=None):
self.values = values
self.message = message
if values_formatter is None:
values_formatter = self.default_values_formatter
self.values_formatter = values_formatter
def __call__(self, form, field):
if field.data not in self.values:
message = self.message
if message is None:
message = field.gettext('Invalid value, must be one of: %(values)s.')
raise ValidationError(message % dict(values=self.values_formatter(self.values)))
@staticmethod
def default_values_formatter(values):
return ', '.join(text_type(x) for x in values)
class NoneOf(object):
"""
Compares the incoming data to a sequence of invalid inputs.
:param values:
A sequence of invalid inputs.
:param message:
Error message to raise in case of a validation error. `%(values)s`
contains the list of values.
:param values_formatter:
Function used to format the list of values in the error message.
"""
def __init__(self, values, message=None, values_formatter=None):
self.values = values
self.message = message
if values_formatter is None:
values_formatter = self.default_values_formatter
self.values_formatter = values_formatter
def __call__(self, form, field):
if field.data in self.values:
message = self.message
if message is None:
message = field.gettext('Invalid value, can\'t be any of: %(values)s.')
raise ValidationError(message % dict(values=self.values_formatter(self.values)))
@staticmethod
def default_values_formatter(v):
return ', '.join(text_type(x) for x in v)
class HostnameValidation(object):
"""
Helper class for checking hostnames for validation.
This is not a validator in and of itself, and as such is not exported.
"""
hostname_part = re.compile(r'^(xn-|[a-z0-9]+)(-[a-z0-9]+)*$', re.IGNORECASE)
tld_part = re.compile(r'^([a-z]{2,20}|xn--([a-z0-9]+-)*[a-z0-9]+)$', re.IGNORECASE)
def __init__(self, require_tld=True, allow_ip=False):
self.require_tld = require_tld
self.allow_ip = allow_ip
def __call__(self, hostname):
if self.allow_ip:
if IPAddress.check_ipv4(hostname) or IPAddress.check_ipv6(hostname):
return True
# Encode out IDNA hostnames. This makes further validation easier.
hostname = hostname.encode('idna')
# Turn back into a string in Python 3x
if not isinstance(hostname, string_types):
hostname = hostname.decode('ascii')
# Check that all labels in the hostname are valid
parts = hostname.split('.')
for part in parts:
if not part or len(part) > 63:
return False
if not self.hostname_part.match(part):
return False
if self.require_tld:
if len(parts) < 2 or not self.tld_part.match(parts[-1]):
return False
return True
email = Email
equal_to = EqualTo
ip_address = IPAddress
mac_address = MacAddress
length = Length
number_range = NumberRange
optional = Optional
input_required = InputRequired
data_required = DataRequired
regexp = Regexp
url = URL
any_of = AnyOf
none_of = NoneOf
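# Illustrative usage (comments only, not executed): every validator is a callable
# taking (form, field); the `form` and `field` objects are assumed to expose the
# `data`, `raw_data`, `errors` and `gettext` attributes used above. For example:
#   NumberRange(min=1, max=10)(form, field)  # raises ValidationError if field.data is out of range
#   Length(max=64)(form, field)              # raises ValidationError if the string is too long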
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from __future__ import print_function
import functools
import hashlib
import os
from azure.core.exceptions import ResourceNotFoundError
from devtools_testutils import ResourceGroupPreparer, KeyVaultPreparer
from keys_preparer import VaultClientPreparer
from keys_test_case import KeyVaultTestCase
def print(*args):
assert all(arg is not None for arg in args)
def test_create_key_client():
vault_url = "vault_url"
# pylint:disable=unused-variable
# [START create_key_client]
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
# Create a KeyClient using default Azure credentials
credential = DefaultAzureCredential()
key_client = KeyClient(vault_url, credential)
# [END create_key_client]
class TestExamplesKeyVault(KeyVaultTestCase):
@ResourceGroupPreparer(random_name_enabled=True)
@KeyVaultPreparer(enable_soft_delete=True)
@VaultClientPreparer()
def test_example_key_crud_operations(self, vault_client, **kwargs):
from dateutil import parser as date_parse
key_client = vault_client.keys
# [START create_key]
from dateutil import parser as date_parse
expires_on = date_parse.parse("2050-02-02T08:00:00.000Z")
# create a key with optional arguments
key = key_client.create_key("key-name", "RSA-HSM", expires_on=expires_on)
print(key.name)
print(key.id)
print(key.key_type)
print(key.properties.expires_on)
# [END create_key]
# [START create_rsa_key]
key_size = 2048
key_ops = ["encrypt", "decrypt", "sign", "verify", "wrapKey", "unwrapKey"]
# create an rsa key with size specification
# RSA key can be created with default size of '2048'
key = key_client.create_rsa_key("key-name", hardware_protected=True, size=key_size, key_operations=key_ops)
print(key.id)
print(key.name)
print(key.key_type)
print(key.key_operations)
# [END create_rsa_key]
# [START create_ec_key]
key_curve = "P-256"
# create an EC (Elliptic curve) key with curve specification
# EC key can be created with default curve of 'P-256'
ec_key = key_client.create_ec_key("key-name", curve=key_curve)
print(ec_key.id)
print(ec_key.properties.version)
print(ec_key.key_type)
print(ec_key.key.crv)
# [END create_ec_key]
# [START get_key]
# get the latest version of a key
key = key_client.get_key("key-name")
# alternatively, specify a version
key_version = key.properties.version
key = key_client.get_key("key-name", key_version)
print(key.id)
print(key.name)
print(key.properties.version)
print(key.key_type)
print(key.properties.vault_url)
# [END get_key]
# [START update_key]
# update attributes of an existing key
expires_on = date_parse.parse("2050-01-02T08:00:00.000Z")
tags = {"foo": "updated tag"}
updated_key = key_client.update_key_properties(key.name, expires_on=expires_on, tags=tags)
print(updated_key.properties.version)
print(updated_key.properties.updated_on)
print(updated_key.properties.expires_on)
print(updated_key.properties.tags)
print(key.key_type)
# [END update_key]
# [START delete_key]
# delete a key
deleted_key_poller = key_client.begin_delete_key("key-name")
deleted_key = deleted_key_poller.result()
print(deleted_key.name)
# if the vault has soft-delete enabled, the key's deleted_date,
# scheduled purge date and recovery id are set
print(deleted_key.deleted_date)
print(deleted_key.scheduled_purge_date)
print(deleted_key.recovery_id)
# if you want to block until deletion is complete, call wait() on the poller
deleted_key_poller.wait()
# [END delete_key]
@ResourceGroupPreparer(random_name_enabled=True)
@KeyVaultPreparer(enable_soft_delete=True)
@VaultClientPreparer()
def test_example_key_list_operations(self, vault_client, **kwargs):
key_client = vault_client.keys
for i in range(4):
key_client.create_ec_key("key{}".format(i))
for i in range(4):
key_client.create_rsa_key("key{}".format(i))
# [START list_keys]
# get an iterator of keys
keys = key_client.list_properties_of_keys()
for key in keys:
print(key.id)
print(key.name)
# [END list_keys]
# [START list_properties_of_key_versions]
# get an iterator of a key's versions
key_versions = key_client.list_properties_of_key_versions("key-name")
for key in key_versions:
print(key.id)
print(key.name)
# [END list_properties_of_key_versions]
# [START list_deleted_keys]
# get an iterator of deleted keys (requires soft-delete enabled for the vault)
deleted_keys = key_client.list_deleted_keys()
for key in deleted_keys:
print(key.id)
print(key.name)
print(key.scheduled_purge_date)
print(key.recovery_id)
print(key.deleted_date)
# [END list_deleted_keys]
@ResourceGroupPreparer(random_name_enabled=True)
@KeyVaultPreparer()
@VaultClientPreparer()
def test_example_keys_backup_restore(self, vault_client, **kwargs):
key_client = vault_client.keys
created_key = key_client.create_key("keyrec", "RSA")
key_name = created_key.name
# [START backup_key]
# backup key
key_backup = key_client.backup_key(key_name)
# returns the raw bytes of the backed up key
print(key_backup)
# [END backup_key]
key_client.begin_delete_key(key_name).wait()
# [START restore_key_backup]
# restore a key backup
restored_key = key_client.restore_key_backup(key_backup)
print(restored_key.id)
print(restored_key.properties.version)
# [END restore_key_backup]
@ResourceGroupPreparer(random_name_enabled=True)
@KeyVaultPreparer(enable_soft_delete=True)
@VaultClientPreparer()
def test_example_keys_recover(self, vault_client, **kwargs):
key_client = vault_client.keys
created_key = key_client.create_key("key-name", "RSA")
key_client.begin_delete_key(created_key.name).wait()
# [START get_deleted_key]
# get a deleted key (requires soft-delete enabled for the vault)
deleted_key = key_client.get_deleted_key("key-name")
print(deleted_key.name)
# if the vault has soft-delete enabled, the key's deleted_date
# scheduled purge date and recovery id are set
print(deleted_key.deleted_date)
print(deleted_key.scheduled_purge_date)
print(deleted_key.recovery_id)
# [END get_deleted_key]
# [START recover_deleted_key]
# recover a deleted key to its latest version (requires soft-delete enabled for the vault)
recover_key_poller = key_client.begin_recover_deleted_key("key-name")
recovered_key = recover_key_poller.result()
print(recovered_key.id)
print(recovered_key.name)
# if you want to block until key is recovered server-side, call wait() on the poller
recover_key_poller.wait()
# [END recover_deleted_key]
|
"""
This module implements the class `truncated_chi2` which
performs (conditional) UMPU tests for Gaussians
restricted to a set of intervals.
"""
import numpy as np
import mpmath as mp
from scipy.stats import chi, chi2
from .base import truncated, find_root
class truncated_chi(truncated):
"""
>>> from intervals import intervals
>>> I = intervals.intersection(intervals((-1, 6)), \
intervals(( 0, 7)), \
~intervals((1, 4)))
>>> distr = truncated_chi(I, 3, 2.)
>>> print(distr.cdf(0))
0.0
>>> z = distr.quantile(distr.cdf(5.))
>>> np.abs(z - 5) < 1e-2
True
"""
def __init__(self, I, k, scale = 1.):
"""
Create a new object for a truncated_chi distribution
Parameters
----------
I : intervals
The intervals the distribution is truncated to
k : int
Number of degrees of freedom of the distribution
scale : float
The distribution is that of scale * chi_k
"""
self._k = k
self._scale = scale
truncated.__init__(self, I)
def _cdf_notTruncated(self, a, b, dps):
"""
Compute the probability of being in the interval (a, b)
for a variable with a chi distribution (not truncated)
Parameters
----------
a, b : float
Bounds of the interval. Can be infinite.
dps : int
Decimal precision (decimal places). Used in mpmath
Returns
-------
p : float
The probability of being in the interval (a, b),
P( a < X < b),
for a non-truncated variable
"""
scale = self._scale
k = self._k
dps_temp = mp.mp.dps
mp.mp.dps = dps
a = max(0, a)
b = max(0, b)
sf = mp.gammainc(1./2 * k,
1./2*((a/scale)**2),
1./2*((b/scale)**2),
regularized=True)
mp.mp.dps = dps_temp
return sf
def _pdf_notTruncated(self, z, dps):
scale = self._scale
k = self._k
return chi.pdf(z/scale, k)
def _quantile_notTruncated(self, q, tol=1.e-6):
"""
Compute the quantile for the non truncated distribution
Parameters
----------
q : float
quantile you want to compute. Between 0 and 1
tol : float
precision for the output
Returns
-------
x : float
x such that P(X < x) = q
"""
scale = self._scale
k = self._k
dps = self._dps
z_approx = scale * chi.ppf(q, k)
epsilon = scale * 0.001
lb = z_approx - epsilon
ub = z_approx + epsilon
f = lambda z: self._cdf_notTruncated(-np.inf, z, dps)
z = find_root(f, q, lb, ub, tol)
return z
class truncated_chi2(truncated):
"""
>>> from intervals import intervals
>>> I = intervals.intersection(intervals((-1, 6)), \
intervals(( 0, 7)), \
~intervals((1, 4)))
>>> distr = truncated_chi2(I, 3, 2.)
>>> print(distr.cdf(0))
0.0
>>> z = distr.quantile(distr.cdf(5.))
>>> np.abs(z - 5) < 1e-2
True
"""
def __init__(self, I, k, scale = 1.):
"""
Create a new object for a truncated_chi2 distribution
Parameters
----------
I : intervals
The intervals the distribution is truncated to
k : int
Number of degrees of freedom of the distribution
scale : float
The distribution is that of scale * chi2_k
"""
self._k = k
self._scale = scale
truncated.__init__(self, I)
def _cdf_notTruncated(self, a, b, dps):
"""
Compute the probability of being in the interval (a, b)
for a variable with a chi distribution (not truncated)
Parameters
----------
a, b : float
Bounds of the interval. Can be infinite.
dps : int
Decimal precision (decimal places). Used in mpmath
Returns
-------
p : float
The probability of being in the interval (a, b),
P( a < X < b),
for a non-truncated variable
"""
scale = self._scale
k = self._k
dps_temp = mp.mp.dps
mp.mp.dps = dps
a = max(0, a)
b = max(0, b)
cdf = mp.gammainc(1./2 * k,
1./2*(a/scale),
1./2*(b/scale),
regularized=True)
mp.mp.dps = dps_temp
return cdf
def _pdf_notTruncated(self, z, dps):
scale = self._scale
k = self._k
return chi2.pdf(z/scale, k)
def _quantile_notTruncated(self, q, tol=1.e-6):
"""
Compute the quantile for the non truncated distribution
Parameters
----------
q : float
quantile you want to compute. Between 0 and 1
tol : float
precision for the output
Returns
-------
x : float
x such that P(X < x) = q
"""
scale = self._scale
k = self._k
dps = self._dps
# scipy's chi2.ppf gives a starting point; find_root then refines the
# quantile of the scaled distribution within a small bracket around it.
z_approx = scale * chi2.ppf(q, k)
epsilon = scale * 0.001
lb = z_approx - epsilon
ub = z_approx + epsilon
f = lambda z: self._cdf_notTruncated(-np.inf, z, dps)
z = find_root(f, q, lb, ub, tol)
return z
if __name__ == "__main__":
import doctest
doctest.testmod()
|
#!/usr/bin/env python3
#
# This command requires HatSploit: https://hatsploit.netlify.app
# Current source: https://github.com/EntySec/HatSploit
#
from hatsploit.lib.command import Command
class HatSploitCommand(Command):
details = {
'Category': "transfer",
'Name': "upload",
'Authors': [
'Ivan Nikolsky (enty8080) - command developer'
],
'Description': "Upload local file.",
'Usage': "upload <local_file> <remote_path>",
'MinArgs': 2
}
def run(self, argc, argv):
self.session.upload(argv[1], argv[2])
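# Illustrative usage from a HatSploit session (paths are hypothetical):
#   upload /tmp/local_file.txt /data/local/tmp/local_file.txt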
|
import numpy as np
np.random.seed(1337)
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
model = Sequential()
model.add(Dense(units=50, input_dim=1, activation='relu'))
model.add(Dense(units=50, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))  # bounded intermediate output in (0, 1)
model.add(Dense(units=1, activation='linear'))  # final single-unit linear output
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
# uk corona
import json
url = 'https://api.covid19uk.live/historyfigures'
def read_url_to_json(url):
import urllib.request as request
webpage = request.urlopen(url)
get_data = webpage.read()
data = json.loads(get_data)
return data
read_data = read_url_to_json(url)
each_data = read_data['data']
uk_comfirmed_data = []
for each in each_data:
uk_comfirmed_data.append(each['confirmed'])
uk_date_length = len(uk_comfirmed_data)
uk_dates = list(range(1, uk_date_length + 1))
uk_comfirmed_data = np.array(uk_comfirmed_data)
uk_dates = np.array(uk_dates)
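# scale the cumulative confirmed counts by their most recent value so that
# the training targets lie in [0, 1]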
uk_absorb_amount = uk_comfirmed_data[uk_date_length-1]
uk_comfirmed_data_norm = uk_comfirmed_data / uk_absorb_amount
# fit model
model.fit(uk_dates, uk_comfirmed_data_norm, epochs=10000, shuffle=False)
uk_comfirmed_data_predict = model.predict(uk_dates)
uk_comfirmed_data_predict = uk_comfirmed_data_predict * uk_absorb_amount
fig2 = plt.figure(figsize=(7, 5))
plt.scatter(uk_dates, uk_comfirmed_data, label='Real Confirmed')
plt.plot(uk_dates, uk_comfirmed_data_predict, label='Predict Result')
plt.title('UK Confirmed VS Dates')
plt.xlabel('Dates')
plt.ylabel('Amount')
plt.legend()
plt.show()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from PyQt5.uic import loadUi
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
#from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar)
import matplotlib.image as mpimg
import sys
import radiomics_single as rs
qtCreatorFile = "design/diplom.ui" # Enter file here.
class MatplotlibWidget(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
loadUi(qtCreatorFile, self)
self.FlagLoaded = False
self.setWindowTitle("Texture Analysis for Diffuse Liver Diseases")
self.buttonLoader.clicked.connect(self.choose_file)
self.buttonAnalyze.clicked.connect(self.analyze)
#self.addToolBar(NavigationToolbar(self.MplWidget.canvas, self))
self.setWindowIcon(QIcon("app.ico"))
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu('File')
helpMenu = mainMenu.addMenu('Help')
buttonLoaderMenu = QAction('Download', self)
buttonLoaderMenu.setShortcut('Ctrl+D')
buttonLoaderMenu.setStatusTip('Download the region of interest')
buttonLoaderMenu.triggered.connect(self.choose_file)
fileMenu.addAction(buttonLoaderMenu)
buttonAnalyzeMenu = QAction('Analysis', self)
buttonAnalyzeMenu.setShortcut('Ctrl+A')
buttonAnalyzeMenu.setStatusTip('Analyze the loaded region of interest')
buttonAnalyzeMenu.triggered.connect(self.analyze)
fileMenu.addAction(buttonAnalyzeMenu)
buttonExit = QAction('Quit', self)
buttonExit.setShortcut('Ctrl+Q')
buttonExit.setStatusTip('Quit out of application')
buttonExit.triggered.connect(sys.exit)
fileMenu.addAction(buttonExit)
buttonLaunch = QAction('How to run', self)
buttonLaunch.setStatusTip('Get info about how to run the application')
self.msgBox1 = QMessageBox(self)
self.msgBox1.setIcon(QMessageBox.Information)
self.msgBox1.setWindowTitle("How to run")
self.msgBox1.setText("To run the classifier:\n1) push the button <Choose an image>\n2) push the button <Analyse>")
buttonLaunch.triggered.connect(self.msgBox1.exec_)
helpMenu.addAction(buttonLaunch)
buttonInfo = QAction('Application', self)
buttonInfo.setStatusTip('Get info about the application')
self.msgBox2 = QMessageBox(self)
self.msgBox2.setIcon(QMessageBox.Information)
self.msgBox2.setWindowTitle("Application")
self.msgBox2.setText("This application give an ability to load ROI and predict a probable presence of diffuse liver diseases.")
buttonInfo.triggered.connect(self.msgBox2.exec_)
helpMenu.addAction(buttonInfo)
buttonInfo = QAction('Developer', self)
buttonInfo.setStatusTip('Get info about the developer')
self.msgBox3 = QMessageBox(self)
self.msgBox3.setIcon(QMessageBox.Information)
self.msgBox3.setWindowTitle("Developer")
self.msgBox3.setText("This application was developed by Illia Yankovyi, the student of the 4th year"
"\nNTUU Igor Sikorsky Kyiv Polytechnic Institute:"
"\nFaculty of Biomedical Engineering (FBME)\n"
"\nAcademic unit:BS-52 group\n"
"\nSupervisor: Nastenko I., M.D., Candidate of Engineering Sciences, Senior Research Fellow.")
buttonInfo.triggered.connect(self.msgBox3.exec_)
helpMenu.addAction(buttonInfo)
self.labelTitle.setText('Classifier of Diffuse Liver Diseases')
font = QFont()
font.setPointSize(20)
font.setBold(True)
self.labelTitle.setFont(font)
self.labelTitle.setAlignment(Qt.AlignCenter)
self.buttonAnalyze.setText('Analyze Image')
self.buttonLoader.setText('Download Image')
self.labelResult.setText('To get a prediction:\n\n1) Download the region of interest;\n2) Run the analysis.')
def analyze(self):
if (self.FlagLoaded):
self.labelResult.setText(rs.signle_prediction(self.path))
else:
self.labelResult.setText("Image was not chosen!\n\nPlease choose the image\nbefore running the Analysis")
self.msgBox4 = QMessageBox(self)
self.msgBox4.setIcon(QMessageBox.Warning)
self.msgBox4.setWindowTitle("Error! Image was not chosen.")
self.msgBox4.setText(
"Image was not chosen! Please choose the image before running the Analysis.")
self.msgBox4.exec_()
def choose_file(self):
options = QFileDialog.Options()
fileName, _ = QFileDialog.getOpenFileName(self, "Choose an image", "",
"Image (*.bmp *.png *.jpeg *.jpg)", options=options)
extensions = ['png', 'jpg', 'jpeg', 'bmp']
fileExtension = (fileName.split('.'))[-1].lower()
if fileName:
if fileExtension in extensions:
self.path = fileName
self.img = mpimg.imread(self.path)
self.MplWidget.canvas.axes.clear()
self.MplWidget.canvas.axes.imshow(self.img)
self.MplWidget.canvas.axes.set_title('Chosen image')
self.MplWidget.canvas.draw()
self.FlagLoaded = True
else:
self.labelResult.setText("Chosen filetype is not supported.\nSupported filetypes:\nBMP, PNG, JPEG, JPG")
self.msgBox5 = QMessageBox(self)
self.msgBox5.setIcon(QMessageBox.Warning)
self.msgBox5.setWindowTitle("Error! Chosen filetype is not supported.")
self.msgBox5.setText(
"Chosen filetype is not supported.\nSupported filetypes:\nBMP, PNG, JPEG, JPG.")
self.msgBox5.exec_()
if __name__ == "__main__":
app = QApplication([])
window = MatplotlibWidget()
window.show()
sys.exit(app.exec_())
|
input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 0 0
1 21 0 0
1 22 0 0
1 23 0 0
1 24 0 0
1 25 0 0
1 26 0 0
1 27 0 0
1 28 0 0
1 29 0 0
1 30 0 0
1 31 0 0
1 32 0 0
1 33 0 0
1 34 0 0
1 35 0 0
1 36 0 0
1 37 0 0
1 38 0 0
1 39 0 0
1 40 0 0
1 41 0 0
1 42 0 0
1 43 0 0
1 44 0 0
1 45 0 0
1 46 0 0
1 47 0 0
1 48 0 0
1 49 0 0
1 50 0 0
1 51 0 0
1 52 0 0
1 53 2 1 54 55
1 54 2 1 53 55
1 55 0 0
1 56 2 1 57 58
1 57 2 1 56 58
1 58 0 0
1 59 2 1 60 61
1 60 2 1 59 61
1 61 0 0
1 62 2 1 63 64
1 63 2 1 62 64
1 64 0 0
1 65 2 1 66 67
1 66 2 1 65 67
1 67 0 0
1 68 2 1 69 70
1 69 2 1 68 70
1 70 0 0
1 71 2 1 72 73
1 72 2 1 71 73
1 73 0 0
1 74 2 1 75 76
1 75 2 1 74 76
1 76 0 0
1 77 2 1 78 79
1 78 2 1 77 79
1 79 0 0
1 80 2 1 81 82
1 81 2 1 80 82
1 82 0 0
1 83 2 1 84 85
1 84 2 1 83 85
1 85 0 0
1 86 2 1 87 88
1 87 2 1 86 88
1 88 0 0
1 89 2 1 90 91
1 90 2 1 89 91
1 91 0 0
1 92 2 1 93 94
1 93 2 1 92 94
1 94 0 0
1 95 2 1 96 97
1 96 2 1 95 97
1 97 0 0
1 98 2 1 99 100
1 99 2 1 98 100
1 100 0 0
1 101 2 1 102 103
1 102 2 1 101 103
1 103 0 0
1 104 2 1 105 106
1 105 2 1 104 106
1 106 0 0
1 107 2 1 108 109
1 108 2 1 107 109
1 109 0 0
1 110 2 1 111 112
1 111 2 1 110 112
1 112 0 0
1 113 2 1 114 115
1 114 2 1 113 115
1 115 0 0
1 116 2 1 117 118
1 117 2 1 116 118
1 118 0 0
1 119 2 1 120 121
1 120 2 1 119 121
1 121 0 0
1 122 2 1 123 124
1 123 2 1 122 124
1 124 0 0
1 125 2 1 126 127
1 126 2 1 125 127
1 127 0 0
1 128 2 1 129 130
1 129 2 1 128 130
1 130 0 0
1 131 2 1 132 133
1 132 2 1 131 133
1 133 0 0
1 134 2 1 135 136
1 135 2 1 134 136
1 136 0 0
1 137 2 1 138 139
1 138 2 1 137 139
1 139 0 0
1 140 2 1 141 142
1 141 2 1 140 142
1 142 0 0
1 143 2 1 144 145
1 144 2 1 143 145
1 145 0 0
1 146 2 1 147 148
1 147 2 1 146 148
1 148 0 0
1 149 2 1 150 151
1 150 2 1 149 151
1 151 0 0
1 152 2 1 153 154
1 153 2 1 152 154
1 154 0 0
1 155 2 1 156 157
1 156 2 1 155 157
1 157 0 0
1 158 2 1 159 160
1 159 2 1 158 160
1 160 0 0
1 161 2 1 162 163
1 162 2 1 161 163
1 163 0 0
1 164 2 1 165 166
1 165 2 1 164 166
1 166 0 0
1 167 2 1 168 169
1 168 2 1 167 169
1 169 0 0
1 170 2 1 171 172
1 171 2 1 170 172
1 172 0 0
1 173 1 0 65
1 174 1 0 74
1 175 1 0 71
1 176 1 0 68
1 177 2 0 173 53
1 178 2 0 173 62
1 179 2 0 173 59
1 175 2 0 173 56
1 174 2 0 176 89
1 177 2 0 176 86
1 177 2 0 175 95
1 174 2 0 175 98
1 179 2 0 175 101
1 173 2 0 175 92
1 176 2 0 174 119
1 180 2 0 174 128
1 179 2 0 174 125
1 175 2 0 174 122
1 177 2 0 174 116
1 173 2 0 177 65
1 174 2 0 177 74
1 175 2 0 177 71
1 176 2 0 177 68
1 173 2 0 179 131
1 178 2 0 179 146
1 180 2 0 179 143
1 174 2 0 179 140
1 181 2 0 179 137
1 175 2 0 179 134
1 182 2 0 180 149
1 179 2 0 180 158
1 174 2 0 180 155
1 181 2 0 180 152
1 173 2 0 178 161
1 179 2 0 178 170
1 181 2 0 178 167
1 182 2 0 178 164
1 181 2 0 182 77
1 178 2 0 182 83
1 180 2 0 182 80
1 179 2 0 181 107
1 180 2 0 181 110
1 178 2 0 181 113
1 182 2 0 181 104
1 1 2 0 62 53
1 1 2 0 59 53
1 1 2 0 56 53
1 1 2 0 53 56
1 1 2 0 62 56
1 1 2 0 59 56
1 1 2 0 53 59
1 1 2 0 62 59
1 1 2 0 56 59
1 1 2 0 53 62
1 1 2 0 59 62
1 1 2 0 56 62
1 1 2 0 74 65
1 1 2 0 71 65
1 1 2 0 68 65
1 1 2 0 65 68
1 1 2 0 74 68
1 1 2 0 71 68
1 1 2 0 65 71
1 1 2 0 74 71
1 1 2 0 68 71
1 1 2 0 65 74
1 1 2 0 71 74
1 1 2 0 68 74
1 1 2 0 83 77
1 1 2 0 80 77
1 1 2 0 77 80
1 1 2 0 83 80
1 1 2 0 77 83
1 1 2 0 80 83
1 1 2 0 89 86
1 1 2 0 86 89
1 1 2 0 95 92
1 1 2 0 98 92
1 1 2 0 101 92
1 1 2 0 98 95
1 1 2 0 101 95
1 1 2 0 92 95
1 1 2 0 95 98
1 1 2 0 101 98
1 1 2 0 92 98
1 1 2 0 95 101
1 1 2 0 98 101
1 1 2 0 92 101
1 1 2 0 107 104
1 1 2 0 110 104
1 1 2 0 113 104
1 1 2 0 110 107
1 1 2 0 113 107
1 1 2 0 104 107
1 1 2 0 107 110
1 1 2 0 113 110
1 1 2 0 104 110
1 1 2 0 107 113
1 1 2 0 110 113
1 1 2 0 104 113
1 1 2 0 119 116
1 1 2 0 128 116
1 1 2 0 125 116
1 1 2 0 122 116
1 1 2 0 128 119
1 1 2 0 125 119
1 1 2 0 122 119
1 1 2 0 116 119
1 1 2 0 119 122
1 1 2 0 128 122
1 1 2 0 125 122
1 1 2 0 116 122
1 1 2 0 119 125
1 1 2 0 128 125
1 1 2 0 122 125
1 1 2 0 116 125
1 1 2 0 119 128
1 1 2 0 125 128
1 1 2 0 122 128
1 1 2 0 116 128
1 1 2 0 146 131
1 1 2 0 143 131
1 1 2 0 140 131
1 1 2 0 137 131
1 1 2 0 134 131
1 1 2 0 131 134
1 1 2 0 146 134
1 1 2 0 143 134
1 1 2 0 140 134
1 1 2 0 137 134
1 1 2 0 131 137
1 1 2 0 146 137
1 1 2 0 143 137
1 1 2 0 140 137
1 1 2 0 134 137
1 1 2 0 131 140
1 1 2 0 146 140
1 1 2 0 143 140
1 1 2 0 137 140
1 1 2 0 134 140
1 1 2 0 131 143
1 1 2 0 146 143
1 1 2 0 140 143
1 1 2 0 137 143
1 1 2 0 134 143
1 1 2 0 131 146
1 1 2 0 143 146
1 1 2 0 140 146
1 1 2 0 137 146
1 1 2 0 134 146
1 1 2 0 158 149
1 1 2 0 155 149
1 1 2 0 152 149
1 1 2 0 149 152
1 1 2 0 158 152
1 1 2 0 155 152
1 1 2 0 149 155
1 1 2 0 158 155
1 1 2 0 152 155
1 1 2 0 149 158
1 1 2 0 155 158
1 1 2 0 152 158
1 1 2 0 170 161
1 1 2 0 167 161
1 1 2 0 164 161
1 1 2 0 161 164
1 1 2 0 170 164
1 1 2 0 167 164
1 1 2 0 161 167
1 1 2 0 170 167
1 1 2 0 164 167
1 1 2 0 161 170
1 1 2 0 167 170
1 1 2 0 164 170
1 1 2 0 86 53
1 1 2 0 95 53
1 1 2 0 116 53
1 1 2 0 134 56
1 1 2 0 122 56
1 1 2 0 71 56
1 1 2 0 101 59
1 1 2 0 170 59
1 1 2 0 158 59
1 1 2 0 125 59
1 1 2 0 107 59
1 1 2 0 146 62
1 1 2 0 113 62
1 1 2 0 83 62
1 1 2 0 92 65
1 1 2 0 161 65
1 1 2 0 131 65
1 1 2 0 119 68
1 1 2 0 56 71
1 1 2 0 134 71
1 1 2 0 122 71
1 1 2 0 89 74
1 1 2 0 155 74
1 1 2 0 140 74
1 1 2 0 98 74
1 1 2 0 167 77
1 1 2 0 152 77
1 1 2 0 137 77
1 1 2 0 110 80
1 1 2 0 143 80
1 1 2 0 128 80
1 1 2 0 62 83
1 1 2 0 146 83
1 1 2 0 113 83
1 1 2 0 95 86
1 1 2 0 116 86
1 1 2 0 53 86
1 1 2 0 155 89
1 1 2 0 140 89
1 1 2 0 98 89
1 1 2 0 74 89
1 1 2 0 161 92
1 1 2 0 131 92
1 1 2 0 65 92
1 1 2 0 86 95
1 1 2 0 116 95
1 1 2 0 53 95
1 1 2 0 89 98
1 1 2 0 155 98
1 1 2 0 140 98
1 1 2 0 74 98
1 1 2 0 170 101
1 1 2 0 158 101
1 1 2 0 125 101
1 1 2 0 107 101
1 1 2 0 59 101
1 1 2 0 164 104
1 1 2 0 149 104
1 1 2 0 101 107
1 1 2 0 170 107
1 1 2 0 158 107
1 1 2 0 125 107
1 1 2 0 59 107
1 1 2 0 143 110
1 1 2 0 128 110
1 1 2 0 80 110
1 1 2 0 62 113
1 1 2 0 146 113
1 1 2 0 83 113
1 1 2 0 86 116
1 1 2 0 95 116
1 1 2 0 53 116
1 1 2 0 68 119
1 1 2 0 56 122
1 1 2 0 134 122
1 1 2 0 71 122
1 1 2 0 101 125
1 1 2 0 170 125
1 1 2 0 158 125
1 1 2 0 107 125
1 1 2 0 59 125
1 1 2 0 110 128
1 1 2 0 143 128
1 1 2 0 80 128
1 1 2 0 92 131
1 1 2 0 161 131
1 1 2 0 65 131
1 1 2 0 56 134
1 1 2 0 122 134
1 1 2 0 71 134
1 1 2 0 77 137
1 1 2 0 167 137
1 1 2 0 152 137
1 1 2 0 89 140
1 1 2 0 155 140
1 1 2 0 98 140
1 1 2 0 74 140
1 1 2 0 110 143
1 1 2 0 128 143
1 1 2 0 80 143
1 1 2 0 62 146
1 1 2 0 113 146
1 1 2 0 83 146
1 1 2 0 104 149
1 1 2 0 164 149
1 1 2 0 77 152
1 1 2 0 167 152
1 1 2 0 137 152
1 1 2 0 89 155
1 1 2 0 140 155
1 1 2 0 98 155
1 1 2 0 74 155
1 1 2 0 101 158
1 1 2 0 170 158
1 1 2 0 125 158
1 1 2 0 107 158
1 1 2 0 59 158
1 1 2 0 92 161
1 1 2 0 131 161
1 1 2 0 65 161
1 1 2 0 104 164
1 1 2 0 149 164
1 1 2 0 77 167
1 1 2 0 152 167
1 1 2 0 137 167
1 1 2 0 101 170
1 1 2 0 158 170
1 1 2 0 125 170
1 1 2 0 107 170
1 1 2 0 59 170
1 1 1 1 173
1 1 1 1 173
1 1 1 1 173
1 1 1 1 173
1 1 1 1 177
1 1 1 1 177
1 1 1 1 177
1 1 1 1 177
1 1 1 1 182
1 1 1 1 182
1 1 1 1 182
1 1 1 1 176
1 1 1 1 176
1 1 1 1 175
1 1 1 1 175
1 1 1 1 175
1 1 1 1 175
1 1 1 1 181
1 1 1 1 181
1 1 1 1 181
1 1 1 1 181
1 1 1 1 174
1 1 1 1 174
1 1 1 1 174
1 1 1 1 174
1 1 1 1 174
1 1 1 1 179
1 1 1 1 179
1 1 1 1 179
1 1 1 1 179
1 1 1 1 179
1 1 1 1 179
1 1 1 1 180
1 1 1 1 180
1 1 1 1 180
1 1 1 1 180
1 1 1 1 178
1 1 1 1 178
1 1 1 1 178
1 1 1 1 178
0
43 vertex(0)
44 vertex(1)
45 vertex(2)
46 vertex(3)
47 vertex(4)
48 vertex(5)
49 vertex(6)
50 vertex(7)
51 vertex(8)
52 vertex(9)
3 arc(0,1)
4 arc(0,4)
5 arc(0,7)
6 arc(0,9)
7 arc(1,0)
8 arc(1,3)
9 arc(1,4)
10 arc(1,6)
11 arc(2,5)
12 arc(2,8)
13 arc(2,9)
14 arc(3,1)
15 arc(3,6)
16 arc(4,0)
17 arc(4,1)
18 arc(4,6)
19 arc(4,7)
20 arc(5,2)
21 arc(5,7)
22 arc(5,8)
23 arc(5,9)
24 arc(6,1)
25 arc(6,3)
26 arc(6,4)
27 arc(6,7)
28 arc(6,8)
29 arc(7,0)
30 arc(7,4)
31 arc(7,5)
32 arc(7,6)
33 arc(7,8)
34 arc(7,9)
35 arc(8,2)
36 arc(8,5)
37 arc(8,6)
38 arc(8,7)
39 arc(9,0)
40 arc(9,2)
41 arc(9,5)
42 arc(9,7)
173 reached(0)
174 reached(6)
175 reached(4)
176 reached(3)
177 reached(1)
178 reached(9)
179 reached(7)
180 reached(8)
181 reached(5)
182 reached(2)
54 out_hm(0,1)
57 out_hm(0,4)
60 out_hm(0,7)
63 out_hm(0,9)
66 out_hm(1,0)
69 out_hm(1,3)
72 out_hm(1,4)
75 out_hm(1,6)
78 out_hm(2,5)
81 out_hm(2,8)
84 out_hm(2,9)
87 out_hm(3,1)
90 out_hm(3,6)
93 out_hm(4,0)
96 out_hm(4,1)
99 out_hm(4,6)
102 out_hm(4,7)
105 out_hm(5,2)
108 out_hm(5,7)
111 out_hm(5,8)
114 out_hm(5,9)
117 out_hm(6,1)
120 out_hm(6,3)
123 out_hm(6,4)
126 out_hm(6,7)
129 out_hm(6,8)
132 out_hm(7,0)
135 out_hm(7,4)
138 out_hm(7,5)
141 out_hm(7,6)
144 out_hm(7,8)
147 out_hm(7,9)
150 out_hm(8,2)
153 out_hm(8,5)
156 out_hm(8,6)
159 out_hm(8,7)
162 out_hm(9,0)
165 out_hm(9,2)
168 out_hm(9,5)
171 out_hm(9,7)
53 in_hm(0,1)
56 in_hm(0,4)
59 in_hm(0,7)
62 in_hm(0,9)
65 in_hm(1,0)
68 in_hm(1,3)
71 in_hm(1,4)
74 in_hm(1,6)
77 in_hm(2,5)
80 in_hm(2,8)
83 in_hm(2,9)
86 in_hm(3,1)
89 in_hm(3,6)
92 in_hm(4,0)
95 in_hm(4,1)
98 in_hm(4,6)
101 in_hm(4,7)
104 in_hm(5,2)
107 in_hm(5,7)
110 in_hm(5,8)
113 in_hm(5,9)
116 in_hm(6,1)
119 in_hm(6,3)
122 in_hm(6,4)
125 in_hm(6,7)
128 in_hm(6,8)
131 in_hm(7,0)
134 in_hm(7,4)
137 in_hm(7,5)
140 in_hm(7,6)
143 in_hm(7,8)
146 in_hm(7,9)
149 in_hm(8,2)
152 in_hm(8,5)
155 in_hm(8,6)
158 in_hm(8,7)
161 in_hm(9,0)
164 in_hm(9,2)
167 in_hm(9,5)
170 in_hm(9,7)
2 start(1)
0
B+
0
B-
1
0
1
"""
output = """
{start(1), arc(0,1), arc(0,4), arc(0,7), arc(0,9), arc(1,0), arc(1,3), arc(1,4), arc(1,6), arc(2,5), arc(2,8), arc(2,9), arc(3,1), arc(3,6), arc(4,0), arc(4,1), arc(4,6), arc(4,7), arc(5,2), arc(5,7), arc(5,8), arc(5,9), arc(6,1), arc(6,3), arc(6,4), arc(6,7), arc(6,8), arc(7,0), arc(7,4), arc(7,5), arc(7,6), arc(7,8), arc(7,9), arc(8,2), arc(8,5), arc(8,6), arc(8,7), arc(9,0), arc(9,2), arc(9,5), arc(9,7), vertex(0), vertex(1), vertex(2), vertex(3), vertex(4), vertex(5), vertex(6), vertex(7), vertex(8), vertex(9), reached(0), reached(6), reached(4), reached(3), reached(1), reached(9), reached(7), reached(8), reached(5), reached(2), out_hm(6,1), out_hm(1,6)}
"""
|