""" TensorMONK :: layers :: RoutingCapsule """
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..activations import Activations
class RoutingCapsule(nn.Module):
r""" Routing capsule from Dynamic Routing Between Capsules.
Implemented -- https://arxiv.org/pdf/1710.09829.pdf
Args:
tensor_size: 5D shape of tensor from PrimaryCapsule
(None/any integer >0, capsule_length, height, width, n_capsules)
        n_capsules (int, optional): number of capsules, usually the number of
            labels per the paper (default = 10)
        capsule_length (int, optional): length of each capsule (default = 32)
        iterations (int, optional): routing iterations (default = 3)
Return:
3D torch.Tensor of shape
(None/any integer >0, n_capsules, capsule_length)
"""
def __init__(self,
tensor_size,
n_capsules: int = 10,
capsule_length: int = 32,
iterations: int = 3,
*args, **kwargs):
super(RoutingCapsule, self).__init__()
self.iterations = iterations
# Ex from paper
# For tensor_size=(1,32,6,6,8), n_capsules=10 and capsule_length=16
# weight_size = (tensor_size[1]*tensor_size[2]*tensor_size[3], \
# tensor_size[4], n_capsules*capsule_length)
# = (32*6*6, 8 , 10*16)
weight_size = (int(np.prod(tensor_size[1:-1])), tensor_size[-1],
n_capsules*capsule_length)
self.weight = nn.Parameter(torch.randn(*weight_size).normal_(0., 0.1))
self.activation = Activations((None, int(np.prod(tensor_size[1:-1])),
tensor_size[-1]), "squash")
self.tensor_size = (6, n_capsules, capsule_length)
def forward(self, tensor):
batch_size, primary_capsule_length, h, w, n_primary_capsules = \
tensor.size()
# Initial squash
tensor = tensor.view(batch_size, -1, n_primary_capsules)
tensor = self.activation(tensor)
# from the given example:
# tensor is of size _ x 32 x 6 x 6 x 8
        # after matrix multiplication the size of u is _ x 32 x 6 x 6 x 10 x 16
        # essentially, each pixel from the 8 primary capsules is projected
        # to a dimension of n_capsules x capsule_length
u = tensor.view(batch_size, -1, 1,
n_primary_capsules).matmul(self.weight)
u = u.view(*((batch_size, primary_capsule_length, h, w) +
self.tensor_size[1:]))
bias = torch.zeros(batch_size, primary_capsule_length, h, w,
self.tensor_size[1])
if tensor.is_cuda:
bias = bias.to(tensor.device)
# routing
for i in range(self.iterations):
# softmax
# initial softmax gives equal probabilities (since bias is
# initialized with zeros), eventually, bias updates will change
# the probabilities
c = F.softmax(bias, 4) # size = _ x 32 x 6 x 6 x 10
            # could be done with a single sum after reorganizing the tensors;
            # however, retaining the dimensions makes the steps easier to follow
# s size without sum's = _ x 32 x 6 x 6 x 10 x 16
# s size = _ x 10 x 16
s = (c.unsqueeze(5)*u).sum(3).sum(2).sum(1)
# squash -- v size = _ x 10 x 16
v = self.activation(s)
# bias update -- size = _ x 32 x 6 x 6 x 10
if i < self.iterations-1:
bias = bias + (u * v.view(batch_size, 1, 1, 1,
self.tensor_size[1],
self.tensor_size[2])).sum(5)
return v
def flops(self):
# activations
flops = self.activation.flops() * (1 + self.iterations)
# matmul
flops += np.prod(self.weight.shape) * self.weight.shape[1]
# softmax
flops += (self.weight.shape[0] * self.tensor_size[1] * 3) * \
self.iterations
# s computation
flops += (self.weight.shape[0] * (self.weight.shape[2] + 1)) * \
self.iterations
# bias update _x32x6x6x10x16
flops += self.weight.shape[0] * (self.weight.shape[2] + 2)
return flops
# from tensormonk.activations import Activations
# x = torch.rand(3, 32, 10, 10, 8)
# test = RoutingCapsule((3, 32, 10, 10, 8), 10, 16, 3,)
# test(x).size()
# test.flops()
|
import os
import logging
import pandas as pd
from pathlib import Path
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
DIR_PATH = Path(os.path.dirname(os.path.abspath(__file__)))
SINCE_PATH = DIR_PATH / Path('data/since.txt')
ARTICLES_PATH = DIR_PATH / Path('data/articles.csv')
def record_data_pull_time(timestamp):
with open(SINCE_PATH, 'a+') as f:
f.write('{}\n'.format(timestamp))
def read_times_api_queried():
try:
with open(SINCE_PATH, 'r+') as f:
sinces = f.readlines()
return [s.split('\n')[0] for s in sinces]
except FileNotFoundError:
return []
def get_most_recent_since():
sinces = read_times_api_queried()
if len(sinces) == 0:
return None
return sinces[-1]
def save_articles(articles):
if articles is None:
        logger.info('no new articles found.')
return
logger.info(f'saving {len(articles)} articles.')
try:
articles_prev = pd.read_csv(ARTICLES_PATH)
articles = pd.concat([articles_prev, articles])
articles_deduped = articles.drop_duplicates(subset=['resolved_id'])
articles_deduped.to_csv(ARTICLES_PATH, index=False, encoding='utf8')
except FileNotFoundError:
articles.to_csv(ARTICLES_PATH, index=False, encoding='utf8')
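# A minimal usage sketch (assumes an `articles` DataFrame containing a
# 'resolved_id' column, e.g. as returned by some Pocket-style API client that
# is not part of this module):
# from datetime import datetime, timezone
# since = get_most_recent_since()          # None on the first run
# articles = pd.DataFrame([{'resolved_id': 1, 'title': 'example'}])
# save_articles(articles)                  # creates or dedupes data/articles.csv
# record_data_pull_time(int(datetime.now(timezone.utc).timestamp()))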
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Command: .react happy|thinking|waving|wtf|love|confused|dead|sad|dog """
from telethon import events
import random
import asyncio
@borg.on(events.NewMessage(pattern=r"\.react (.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
if input_str in "happy":
emoticons = [
"( ͡° ͜ʖ ͡°)",
"(ʘ‿ʘ)",
"(✿´‿`)",
"=͟͟͞͞٩(๑☉ᴗ☉)੭ु⁾⁾",
"(*⌒▽⌒*)θ~♪",
"°˖✧◝(⁰▿⁰)◜✧˖°",
"✌(-‿-)✌",
"⌒°(❛ᴗ❛)°⌒",
"(゚<|\(・ω・)/|>゚)",
"ヾ(o✪‿✪o)シ",
]
elif input_str in "thinking":
emoticons = [
"(҂⌣̀_⌣́)",
"(;¬_¬)",
"(-。-;",
"┌[ O ʖ̯ O ]┐",
"〳 ͡° Ĺ̯ ͡° 〵",
]
elif input_str in "waving":
emoticons = [
"(ノ^∇^)",
"(;-_-)/",
"@(o・ェ・)@ノ",
"ヾ(^-^)ノ",
"ヾ(◍’౪`◍)ノ゙♡",
"(ό‿ὸ)ノ",
"(ヾ(´・ω・`)",
]
elif input_str in "wtf":
emoticons = [
"༎ຶ‿༎ຶ",
"(‿ˠ‿)",
"╰U╯☜(◉ɷ◉ )",
"(;´༎ຶ益༎ຶ`)♡",
"╭∩╮(︶ε︶*)chu",
"( ^◡^)っ (‿|‿)",
]
elif input_str in "love":
emoticons = [
"乂❤‿❤乂",
"(。♥‿♥。)",
"( ͡~ ͜ʖ ͡°)",
"໒( ♥ ◡ ♥ )७",
"༼♥ل͜♥༽",
]
elif input_str in "confused":
emoticons = [
"(・_・ヾ",
"「(゚ペ)",
"﴾͡๏̯͡๏﴿",
"( ̄■ ̄;)!?",
"▐ ˵ ͠° (oo) °͠ ˵ ▐",
"(-_-)ゞ゛",
]
elif input_str in "dead":
emoticons = [
"(✖╭╮✖)",
"✖‿✖",
"(+_+)",
"(✖﹏✖)",
"∑(✘Д✘๑)",
]
elif input_str in "sad":
emoticons = [
"(@´_`@)",
"⊙︿⊙",
"(▰˘︹˘▰)",
"●︿●",
"( ´_ノ` )",
"彡(-_-;)彡",
]
elif input_str in "dog":
emoticons = [
"-ᄒᴥᄒ-",
"◖⚆ᴥ⚆◗",
]
else:
emoticons = [
"( ͡° ͜ʖ ͡°)",
"¯\_(ツ)_/¯",
"( ͡°( ͡° ͜ʖ( ͡° ͜ʖ ͡°)ʖ ͡°) ͡°)",
"ʕ•ᴥ•ʔ",
"(▀̿Ĺ̯▀̿ ̿)",
"(ง ͠° ͟ل͜ ͡°)ง",
"༼ つ ◕_◕ ༽つ",
"ಠ_ಠ",
"(☞ ͡° ͜ʖ ͡°)☞",
"¯\_༼ ି ~ ି ༽_/¯",
"c༼ ͡° ͜ʖ ͡° ༽⊃",
]
    output_str = random.choice(emoticons)
await event.edit(output_str)
|
import requests
from allauth.account.models import EmailAddress
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from allauth.socialaccount.models import SocialLogin, SocialAccount
from allauth.utils import get_user_model
from provider import GoogleProvider
User = get_user_model()
class GoogleOAuth2Adapter(OAuth2Adapter):
provider_id = GoogleProvider.id
access_token_url = 'https://accounts.google.com/o/oauth2/token'
authorize_url = 'https://accounts.google.com/o/oauth2/auth'
profile_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
def complete_login(self, request, app, token):
resp = requests.get(self.profile_url,
params={ 'access_token': token.token,
'alt': 'json' })
extra_data = resp.json()
# extra_data is something of the form:
#
# {u'family_name': u'Penners', u'name': u'Raymond Penners',
# u'picture': u'https://lh5.googleusercontent.com/-GOFYGBVOdBQ/AAAAAAAAAAI/AAAAAAAAAGM/WzRfPkv4xbo/photo.jpg',
# u'locale': u'nl', u'gender': u'male',
# u'email': u'raymond.penners@gmail.com',
# u'link': u'https://plus.google.com/108204268033311374519',
# u'given_name': u'Raymond', u'id': u'108204268033311374519',
# u'verified_email': True}
#
# TODO: We could use verified_email to bypass allauth email verification
uid = str(extra_data['id'])
user = User(email=extra_data.get('email', ''),
last_name=extra_data.get('family_name', ''),
first_name=extra_data.get('given_name', ''))
email_addresses = []
if user.email and extra_data.get('verified_email'):
email_addresses.append(EmailAddress(email=user.email,
verified=True,
primary=True))
account = SocialAccount(extra_data=extra_data,
uid=uid,
provider=self.provider_id,
user=user)
return SocialLogin(account,
email_addresses=email_addresses)
oauth2_login = OAuth2LoginView.adapter_view(GoogleOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(GoogleOAuth2Adapter)
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .rule_condition import RuleCondition
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class PathMatchCondition(RuleCondition):
"""
The path string and match condition to apply when evaluating an incoming URI for redirection.
"""
#: A constant which can be used with the operator property of a PathMatchCondition.
#: This constant has a value of "EXACT_MATCH"
OPERATOR_EXACT_MATCH = "EXACT_MATCH"
#: A constant which can be used with the operator property of a PathMatchCondition.
#: This constant has a value of "FORCE_LONGEST_PREFIX_MATCH"
OPERATOR_FORCE_LONGEST_PREFIX_MATCH = "FORCE_LONGEST_PREFIX_MATCH"
#: A constant which can be used with the operator property of a PathMatchCondition.
#: This constant has a value of "PREFIX_MATCH"
OPERATOR_PREFIX_MATCH = "PREFIX_MATCH"
#: A constant which can be used with the operator property of a PathMatchCondition.
#: This constant has a value of "SUFFIX_MATCH"
OPERATOR_SUFFIX_MATCH = "SUFFIX_MATCH"
def __init__(self, **kwargs):
"""
Initializes a new PathMatchCondition object with values from keyword arguments. The default value of the :py:attr:`~oci.load_balancer.models.PathMatchCondition.attribute_name` attribute
of this class is ``PATH`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param attribute_name:
The value to assign to the attribute_name property of this PathMatchCondition.
Allowed values for this property are: "SOURCE_IP_ADDRESS", "SOURCE_VCN_ID", "SOURCE_VCN_IP_ADDRESS", "PATH", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type attribute_name: str
:param attribute_value:
The value to assign to the attribute_value property of this PathMatchCondition.
:type attribute_value: str
:param operator:
The value to assign to the operator property of this PathMatchCondition.
Allowed values for this property are: "EXACT_MATCH", "FORCE_LONGEST_PREFIX_MATCH", "PREFIX_MATCH", "SUFFIX_MATCH", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type operator: str
"""
self.swagger_types = {
'attribute_name': 'str',
'attribute_value': 'str',
'operator': 'str'
}
self.attribute_map = {
'attribute_name': 'attributeName',
'attribute_value': 'attributeValue',
'operator': 'operator'
}
self._attribute_name = None
self._attribute_value = None
self._operator = None
self._attribute_name = 'PATH'
@property
def attribute_value(self):
"""
**[Required]** Gets the attribute_value of this PathMatchCondition.
The path string that the redirection rule applies to.
Example: `/example`
:return: The attribute_value of this PathMatchCondition.
:rtype: str
"""
return self._attribute_value
@attribute_value.setter
def attribute_value(self, attribute_value):
"""
Sets the attribute_value of this PathMatchCondition.
The path string that the redirection rule applies to.
Example: `/example`
:param attribute_value: The attribute_value of this PathMatchCondition.
:type: str
"""
self._attribute_value = attribute_value
@property
def operator(self):
"""
**[Required]** Gets the operator of this PathMatchCondition.
A string that specifies how to compare the PathMatchCondition object's `attributeValue` string to the
incoming URI.
* **EXACT_MATCH** - The incoming URI path must exactly and completely match the `attributeValue` string.
* **FORCE_LONGEST_PREFIX_MATCH** - The system looks for the `attributeValue` string with the best,
longest match of the beginning portion of the incoming URI path.
* **PREFIX_MATCH** - The beginning portion of the incoming URI path must exactly match the
`attributeValue` string.
* **SUFFIX_MATCH** - The ending portion of the incoming URI path must exactly match the `attributeValue`
string.
Allowed values for this property are: "EXACT_MATCH", "FORCE_LONGEST_PREFIX_MATCH", "PREFIX_MATCH", "SUFFIX_MATCH", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The operator of this PathMatchCondition.
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""
Sets the operator of this PathMatchCondition.
A string that specifies how to compare the PathMatchCondition object's `attributeValue` string to the
incoming URI.
* **EXACT_MATCH** - The incoming URI path must exactly and completely match the `attributeValue` string.
* **FORCE_LONGEST_PREFIX_MATCH** - The system looks for the `attributeValue` string with the best,
longest match of the beginning portion of the incoming URI path.
* **PREFIX_MATCH** - The beginning portion of the incoming URI path must exactly match the
`attributeValue` string.
* **SUFFIX_MATCH** - The ending portion of the incoming URI path must exactly match the `attributeValue`
string.
:param operator: The operator of this PathMatchCondition.
:type: str
"""
allowed_values = ["EXACT_MATCH", "FORCE_LONGEST_PREFIX_MATCH", "PREFIX_MATCH", "SUFFIX_MATCH"]
if not value_allowed_none_or_none_sentinel(operator, allowed_values):
operator = 'UNKNOWN_ENUM_VALUE'
self._operator = operator
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
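# A minimal usage sketch (not part of the generated SDK model; the path and
# operator values below are illustrative only):
# condition = PathMatchCondition()
# condition.attribute_value = '/example'
# condition.operator = 'PREFIX_MATCH'   # unrecognized values map to 'UNKNOWN_ENUM_VALUE'
# print(condition)                      # __repr__ renders via formatted_flat_dict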
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import hpsModel as HPS
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/sax-phrase-short.wav'))
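# analysis parameters (following the sms-tools conventions, as I read them):
# analysis window, FFT size, magnitude threshold in dB, max number of harmonics,
# f0 search range in Hz, f0 error threshold, minimum sine duration in seconds,
# harmonic deviation slope, synthesis FFT size (hop = Ns//4) and the stochastic
# decimation factor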
w = np.blackman(601)
N = 1024
t = -100
nH = 100
minf0 = 350
maxf0 = 700
f0et = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns//4
stocf = .2
hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, hphase, mYst, Ns, H, fs)
maxplotfreq = 10000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(311)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.autoscale(tight=True)
plt.title('x (sax-phrase-short.wav)')
plt.subplot(312)
numFrames = int(mYst[:,0].size)
sizeEnv = int(mYst[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:int(sizeEnv*maxplotfreq/(.5*fs)+1)]))
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('harmonics + stochastic')
plt.subplot(313)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.autoscale(tight=True)
plt.title('y')
plt.tight_layout()
plt.savefig('hpsModel-sax-phrase.png')
UF.wavwrite(y, fs, 'sax-phrase-hps-synthesis.wav')
UF.wavwrite(yh, fs, 'sax-phrase-harmonic.wav')
UF.wavwrite(yst, fs, 'sax-phrase-stochastic.wav')
plt.show()
|
#!/usr/bin/env python3
from __future__ import unicode_literals
from builtins import bytes, dict, list, int, float, str
import argparse
import json
import sys
import unittest
from reflectrpc.client import RpcClient
from reflectrpc.testing import ServerRunner
server_program = None
class ConformanceTest(unittest.TestCase):
# Table driven conformance test that can also be run against
# implementations in other programming languages
def test_conformance(self):
global server_program
funcs_description = [{'description': 'Returns the message it was sent',
'name': 'echo',
'params': [{'description': 'The message we will send back',
'name': 'message',
'type': 'string'}],
'result_desc': 'The message previously received',
'result_type': 'string'},
{'description': 'Adds two numbers',
'name': 'add',
'params': [{'description': 'First number to add',
'name': 'a',
'type': 'int'},
{'description': 'Second number to add',
'name': 'b',
'type': 'int'}],
'result_desc': 'Sum of the two numbers',
'result_type': 'int'},
{'description': 'Subtracts one number from another',
'name': 'sub',
'params': [{'description': 'Number to subtract from',
'name': 'a',
'type': 'int'},
{'description': 'Number to subtract',
'name': 'b',
'type': 'int'}],
'result_desc': 'Difference of the two numbers',
'result_type': 'int'},
{'description': 'Multiplies two numbers',
'name': 'mul',
'params': [{'description': 'First factor',
'name': 'a',
'type': 'int'},
{'description': 'Second factor',
'name': 'b',
'type': 'int'}],
'result_desc': 'Product of the two numbers',
'result_type': 'int'},
{'description': 'Divide a number by another number',
'name': 'div',
'params': [{'description': 'Dividend',
'name': 'a',
'type': 'float'},
{'description': 'Divisor',
'name': 'b',
'type': 'float'}],
'result_desc': 'Ratio of the two numbers',
'result_type': 'float'},
{'description': 'Test the phone type enum',
'name': 'enum_echo',
'params': [{'description': 'Type of phone number',
'name': 'phone_type',
'type': 'PhoneType'}],
'result_desc': 'Phone type',
'result_type': 'int'},
{'description': 'Test the address hash type',
'name': 'hash_echo',
'params': [{'description': 'Address hash',
'name': 'address',
'type': 'Address'}],
'result_desc': 'Address hash',
'result_type': 'hash'},
{'description': 'Test function for notify requests',
'name': 'notify',
'params': [{'description': 'A value to print on the server side',
'name': 'value',
'type': 'string'}],
'result_desc': '',
'result_type': 'bool'},
{'description': 'Checks if we have an authenticated connection',
'name': 'is_authenticated',
'params': [],
'result_desc': 'The authentication status',
'result_type': 'bool'},
{'description': 'Gets the username of the logged in user',
'name': 'get_username',
'params': [],
'result_desc': 'The username of the logged in user',
'result_type': 'string'}]
types_description = [{'description': 'Type of a phone number',
'name': 'PhoneType',
'type': 'enum',
'values': [{'description': 'Home phone',
'intvalue': 0,
'name': 'HOME'},
{'description': 'Work phone',
'intvalue': 1,
'name': 'WORK'},
{'description': 'Mobile phone',
'intvalue': 2,
'name': 'MOBILE'},
{'description': 'FAX number',
'intvalue': 3,
'name': 'FAX'}]},
{'description': 'Street address',
'fields': [{'description': 'First name',
'name': 'firstname',
'type': 'string'},
{'description': 'Last name',
'name': 'lastname',
'type': 'string'},
{'description': 'First address line',
'name': 'street1',
'type': 'string'},
{'description': 'Second address line',
'name': 'street2',
'type': 'string'},
{'description': 'Zip code',
'name': 'zipcode',
'type': 'string'},
{'description': 'City',
'name': 'city',
'type': 'string'}],
'name': 'Address',
'type': 'hash'}]
tests = [
['{"method": "echo", "params": ["Hello Server"], "id": 1}',
'{"result": "Hello Server", "error": null, "id": 1}'],
['{"method": "add", "params": [5, 6], "id": 2}',
'{"result": 11, "error": null, "id": 2}'],
# test non-int IDs
['{"method": "echo", "params": ["Hello"], "id": "abcd1234"}',
'{"result": "Hello", "error": null, "id": "abcd1234"}'],
['{"method": "add", "params": [34, 67], "id": 3.14}',
'{"result": 101, "error": null, "id": 3.14}'],
# test descriptions
['{"method": "__describe_service", "params": [], "id": 3}',
'{"result": {"version": "1.0", "name": "Example RPC Service", "description": "This is an example service for ReflectRPC", "custom_fields": {}}, "error": null, "id": 3}'],
['{"method": "__describe_functions", "params": [], "id": 4}',
'{"result": %s, "error": null, "id": 4}' % (json.dumps(funcs_description))],
['{"method": "__describe_custom_types", "params": [], "id": 5}',
'{"result": %s, "error": null, "id": 5}' % (json.dumps(types_description))]
]
server = ServerRunner(server_program, 5500)
server.run()
client = RpcClient('localhost', 5500)
self.maxDiff = None
request = None
expected_result = None
result_str = None
i = 0
try:
for test in tests:
i += 1
request = test[0]
expected_result = json.loads(test[1])
result_str = client.rpc_call_raw(request)
result_dict = json.loads(result_str)
self.assertEqual(result_dict, expected_result)
except AssertionError as e:
print("Test number %d failed: " % (i))
print(request)
raise e
finally:
server.stop()
parser = argparse.ArgumentParser(
description="ReflectRPC conformance test to run against a server program that listens on localhost:5500")
parser.add_argument("server_program", metavar='SERVER', type=str,
help="Server program to run the test against")
args = parser.parse_args()
server_program = args.server_program
# reset argv so unittest.main() does not try to interpret our arguments
sys.argv = [sys.argv[0]]
if __name__ == '__main__':
unittest.main()
|
import os
# Read version from VERSION file
_version_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'VERSION')
with open(_version_path) as _version_file:
    __version__ = _version_file.read().rstrip()
|
class Car(object):
    def __init__(self, name, model, car_doors, car_wheels, speed=0):
if not name:
self.name = "General"
else:
self.name = name
if not model:
self.model = "Gm"
else:
self.model = model
if self.name == "Porshe" or self.name == "Koenigsegg":
self.car_doors = 2
else:
self.car_doors = 4
if self.model == "Trailer":
self.car_wheels = 8
else:
self.car_wheels = 4
self.speed = speed
def is_saloon(self):
if self.model == "saloon":
return True
else:
return False
def drive(self, moving_speed):
if self.model == "trailer":
self.speed = moving_speed * 11
else:
self.speed = 10 ** moving_speed
return self.speed
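# A minimal usage sketch (assumes the __init__ typo fix above; car_doors and
# car_wheels must still be passed positionally even though __init__ overrides them):
# car = Car("Porshe", "saloon", 2, 4)
# car.is_saloon()   # True: model matches "saloon" exactly
# car.drive(2)      # 100, i.e. 10 ** 2 for non-trailer models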
|
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .models import Requirement#, CreateRequirement
from django.forms.models import model_to_dict
# Create your views here.
class RequirementIndex(generic.ListView):
model = Requirement
template_name = 'requirements/index.html'
context_object_name = 'requirement_list'
paginate_by = 10
def get_queryset(self):
return Requirement.objects.all()
class RequirementDetail(generic.DetailView):
model = Requirement
template_name = 'requirements/detail.html'
# Add a dictionary containing the model information to the context when
# rendering the view.
#def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# requirement_object_dictionary = Requirement.objects.filter(id=context['requirement'].id).values()[0]
# context['requirement_object'] = requirement_object_dictionary
# return context
class RequirementUpdate(generic.UpdateView):
model = Requirement
template_name = 'requirements/edit.html'
fields = [
'description',
'parent',
'is_constraint',
'min_measure_of_effectiveness',
'target_measure_of_effectiveness',
'rationale',
'remarks',
'acceptance_criteria_type',
'priority',
'status'
]
class RequirementCreate(generic.CreateView):
model = Requirement
template_name = 'requirements/create.html'
fields = [
'description',
'parent',
'is_constraint',
'min_measure_of_effectiveness',
'target_measure_of_effectiveness',
'rationale',
'remarks',
'acceptance_criteria_type',
'priority',
'status'
]
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opencvFaceRec.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
'''
(c) University of Liverpool 2020
All rights reserved.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=no-member
# pylint: disable=wrong-import-order
from rdkit import Chem
import scipy.sparse
from gae.tf import train_single
import numpy as np
import pandas as pd
def _load_data(filename):
'''Load data.'''
df = pd.read_csv(filename)
smiles = df['smiles'][0]
adj, features = _get_data(smiles)
return adj, features
def _get_data(smiles):
'''Get data from SMILES.'''
mol = Chem.MolFromSmiles(smiles)
adj = scipy.sparse.lil_matrix(
(mol.GetNumAtoms(), mol.GetNumAtoms()), dtype=int)
for bond in mol.GetBonds():
adj[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()] = 1
features = np.array([[atom.GetAtomicNum(),
atom.GetMass(),
atom.GetExplicitValence(),
atom.GetFormalCharge()]
for atom in mol.GetAtoms()])
return scipy.sparse.csr_matrix(adj), scipy.sparse.lil_matrix(features)
def main():
'''main method.'''
# Load data:
filename = 'data/spectra.csv'
adj, features = _load_data(filename)
# Train:
train_single.train(adj, features, epochs=10000)
if __name__ == '__main__':
main()
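# A minimal sketch of what _get_data returns (the SMILES string below is
# illustrative and unrelated to data/spectra.csv):
# adj, features = _get_data('CCO')   # ethanol: 3 heavy atoms, 2 bonds
# adj.shape                          # (3, 3) sparse matrix, one entry per bond
# features.shape                     # (3, 4): atomic number, mass, valence, charge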
|
import math
import rospy
import tf2_ros as tf2
from geometry_msgs.msg import PointStamped
from bitbots_head_behavior.actions.look_at import AbstractLookAt
class SearchRecentBall(AbstractLookAt):
"""
This action looks at the last position the ball has been seen
and starts searching it from this position on.
"""
def __init__(self, dsd, blackboard, parameters=None):
super(SearchRecentBall, self).__init__(dsd, blackboard, parameters)
self._config = self.blackboard.config['search_recent_ball']
self._pan_speed = self._config['pan_speed']
self._tilt_speed = self._config['tilt_speed']
self._ball_time_out = rospy.Duration.from_sec(self._config['ball_search_time'])
self._offset_pattern = self._config['offset_pattern']
self._threshold = self.blackboard.config['position_reached_threshold']
        # Get the corresponding motor goals for the ball position
self._recent_ball_motor_goals = self._get_head_goals_for_recent_ball()
self.first_perform = True
# Init pattern index
self.index = 0
def _get_head_goals_for_recent_ball(self):
"""
Returns the head motor goals to look at the most recent ball position.
        :returns tuple(head_pan, head_tilt): The head motor goals
"""
# Check if Ball has been seen
if not self.blackboard.world_model.ball_seen:
return
# Get last ball position
point = self.blackboard.world_model.get_ball_stamped()
        # Transform the point's reference frame to be the head
try:
point = self.blackboard.head_capsule.tf_buffer.transform(point, self.head_tf_frame, timeout=rospy.Duration(0.9))
except tf2.LookupException as e:
rospy.logwarn('The frame {} is not being published (LookupException)'.format(self.head_tf_frame))
return
except tf2.ConnectivityException as e:
rospy.logwarn('The transforms {} and {} are not connected in the TF Tree (ConnectivityException)'.format(point.header.frame_id, self.head_tf_frame))
return
except tf2.ExtrapolationException as e:
rospy.logwarn('The transform {} is currently not available (ExtrapolationException)'.format(self.head_tf_frame))
return
motor_goals = self.get_motor_goals_from_point(point.point)
return motor_goals
def perform(self, reevaluate=False):
"""
Call look_at to look at the point which our world-model determines to be the ball
:param reevaluate: No effect here
"""
# Exit action if pattern is finished
if self.index >= len(self._offset_pattern):
return self.pop()
# Check if a ball exists
if self._recent_ball_motor_goals is None:
rospy.loginfo("No ball seen. So we are not able to search for it.", logger_name="search_recent_ball")
return self.pop()
# Check if the ball is too old
if rospy.Time.now() - self.blackboard.world_model.ball_last_seen() > self._ball_time_out and self.first_perform:
rospy.loginfo("Ball is too old to search for it. Let's forget it.", logger_name="search_recent_ball")
return self.pop()
current_head_pan, current_head_tilt = self.blackboard.head_capsule.get_head_position()
# Add offset pattern to last ball position
head_motor_goal_pan = self._recent_ball_motor_goals[0] + math.radians(self._offset_pattern[self.index][0])
head_motor_goal_tilt = self._recent_ball_motor_goals[1] + math.radians(self._offset_pattern[self.index][1])
        # Clip the motor goal so that, even if the commanded goal gets clipped, the position can still be reached
head_motor_goal_pan, head_motor_goal_tilt = \
self.blackboard.head_capsule.pre_clip(head_motor_goal_pan, head_motor_goal_tilt)
self.blackboard.head_capsule.send_motor_goals(
head_motor_goal_pan,
head_motor_goal_tilt,
pan_speed=self._pan_speed,
tilt_speed=self._tilt_speed)
# Distance between the current and the goal position
distance = math.sqrt(
(current_head_pan - head_motor_goal_pan) ** 2 +
(current_head_tilt - head_motor_goal_tilt) ** 2)
# Increment index when position is reached
if distance < math.radians(self._threshold):
self.index += 1
self.first_perform = False
|
import tkinter as tk
from tkinter import ttk
import re
import os
import wikipedia
import time
import webbrowser
import json
import requests
import ctypes
import youtube_dl
import random
import urllib
import ssl
from bs4 import BeautifulSoup
from urllib.request import urlopen
import speech_recognition as sr
import requests
import pyttsx3
import sys
import threading
from datetime import datetime
import errno
import subprocess
requests.packages.urllib3.disable_warnings()
try:
_create_unverified_https_context=ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context=_create_unverified_https_context
headers = {'user-agent': 'Chrome/53.0.2785.143'}
#speak=wicl.Dispatch("SAPI.SpVoice")
#reminder settings
reminder_mode = 0
reminder_dirloc = '/home/arib/'
reminder_filedir = reminder_dirloc+'.B.E.N.J.I.'
reminder_filename = reminder_filedir + '/reminders.txt'
reminder = str()
# Creating the graphical user interface
speak = pyttsx3.init()
def events(frame, put,link):
identity_keywords = ["who are you", "who r u", "what is your name"]
youtube_keywords = ["play ", "stream ", "queue "]
launch_keywords = ["open ", "launch "]
search_keywords = ["search ",]
wikipedia_keywords = ["wikipedia ", "wiki "]
download_music=["download","download music"]
reminder_keywords = ["set a reminder"]
calculator_keywords=["calculator","calc"]
youtube = ("play","stream","queue")
download = ("download","download music")
global reminder_mode
if reminder_mode or any(word in put for word in reminder_keywords) :
try :
if reminder_mode == 0 :
try :
os.makedirs(reminder_filedir)
os.chmod(reminder_dirloc, 0o777)
except OSError as e :
if e.errno != errno.EEXIST :
raise
speak.say("Reminder of what?")
speak.runAndWait()
reminder_mode = 1
elif reminder_mode == 1 :
subject = ' '.join(link)
global reminder
reminder = subject + '\t'
speak.say("When to remind you?")
speak.runAndWait()
reminder_mode = 2
elif reminder_mode == 2 :
reminder_mode = 0
date_as_string = ' '.join(link)
date = datetime.strptime(date_as_string, '%d %b %Y %I %M %p')
# global reminder
reminder = reminder + date_as_string
file_hand = open(reminder_filename, 'a')
file_hand.write(reminder)
file_hand.write('\n')
file_hand.close()
speak.say("Reminder Added")
speak.runAndWait()
except :
frame.displayText("Cannot set reminder")
#Play song on Youtube
elif put.startswith(youtube):
try:
link = '+'.join(link[1:])
# print(link)
say = link.replace('+', ' ')
url = 'https://www.youtube.com/results?search_query='+link
# webbrowser.open('https://www.youtube.com'+link)
fhand=urllib.request.urlopen(url).read()
soup = BeautifulSoup(fhand, "html.parser")
songs = soup.findAll('div', {'class': 'yt-lockup-video'})
hit = songs[0].find('a')['href']
# print(hit)
speak.say("playing "+say)
speak.runAndWait()
webbrowser.open('https://www.youtube.com'+hit)
except:
frame.displayText('Sorry Ethan. Looks like its not working!')
elif put.startswith(download):
link = '+'.join(link[1:])
# print(link)
say = link.replace('+', ' ')
url = 'https://www.youtube.com/results?search_query='+link
# webbrowser.open('https://www.youtube.com'+link)
fhand=urllib.request.urlopen(url).read()
soup = BeautifulSoup(fhand, "html.parser")
songs = soup.findAll('div', {'class': 'yt-lockup-video'})
hit = songs[0].find('a')['href']
# print(hit)
speak.say("downloading "+say)
speak.runAndWait()
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'quiet': True,
'restrictfilenames': True,
'outtmpl': os.environ['HOME']+'/Desktop/%(title)s.%(ext)s'
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
ydl.download(['https://www.youtube.com'+hit])
speak.say("download completed.Check your desktop for the song")
speak.runAndWait()
#Calculator
elif any(word in put for word in calculator_keywords):
try:
speak.say("Opening Calaculator")
subprocess.run("gnome-calculator",shell=True,check=True)
speak.runAndWait()
except:
frame.displayText('Care to try again?')
#BENJI Intro
elif any(word in put for word in identity_keywords):
try:
speak.say("I am BENJI, a digital assistant declassified for civilian use. Previously I was used by the Impossible Missions Force")
speak.runAndWait()
except:
frame.displayText('Error. Try reading the ReadMe to know about me!')
#Open a webpage
elif any(word in put for word in launch_keywords):
try:
link = '+'.join(link[1:])
speak.say("opening "+link)
speak.runAndWait()
webbrowser.open('http://www.'+ link)
except:
frame.displayText('Sorry Ethan,unable to access it. Cannot hack either-IMF protocol!')
#Google search
elif any(word in put for word in search_keywords):
try:
link='+'.join(link[1:])
say=link.replace('+',' ')
speak.say("searching google for "+say)
speak.runAndWait()
webbrowser.open('https://www.google.com/search?q='+link)
except:
print('Nope, this is not working.')
#Google Images
elif put.startswith("images of "):
try:
link='+'.join(link[2:])
say=link.replace('+',' ')
speak.say("searching images of " + say)
speak.runAndWait()
webbrowser.open('https://www.google.co.in/search?q=' + link + '&source=lnms&tbm=isch')
except:
print('Could not search for images!')
#Gmail
elif put.startswith("gmail"):
try:
speak.say("Opening Gmail!")
speak.runAndWait()
webbrowser.open('https://www.google.com/gmail')
except:
print("Could not open Gmail!")
#Google Cloud Print
elif put.startswith("google cloud print"):
try:
speak.say("Opening google cloud print!")
speak.runAndWait()
webbrowser.open('https://www.google.com/cloudprint')
except:
print("Could not open Google Cloud Print!")
#Google Others
elif put.startswith("google "):
try:
say = link[1]
speak.say("Opening google " + say)
speak.runAndWait()
webbrowser.open('https://'+ say +'.google.com')
except:
print("Could not open Google " + say.capitalize() + "!")
#Blogger
elif put.startswith("blogger"):
try:
speak.say("Opening blogger!")
speak.runAndWait()
webbrowser.open('https://www.blogger.com')
except:
print("Could not open Blogger!")
#Wikipedia
elif any(word in put for word in wikipedia_keywords):
try:
link = '+'.join(link[1:])
say = link.replace('+', ' ')
wikisearch = wikipedia.page(say)
speak.say("Opening wikipedia page for" + say)
speak.runAndWait()
webbrowser.open(wikisearch.url)
except:
frame.displayText('Wikipedia could not either find the article or your Third-world connection is unstable')
#Lock the device
elif put.startswith('secure'):
try:
speak.say("locking the device")
speak.runAndWait()
subprocess.run("xdg-screensaver lock",shell=True,check=True)
except :
frame.displayText('Cannot lock device')
#News of various press agencies
elif put.startswith('al jazeera '):
try:
aljazeeraurl = ('https://newsapi.org/v1/articles?source=al-jazeera-english&sortBy=latest&apiKey=571863193daf421082a8666fe4b666f3')
newsresponce = requests.get(aljazeeraurl)
newsjson = newsresponce.json()
speak.say('Our agents from Al-Jazeera report this')
speak.runAndWait()
frame.displayText(' =====Al Jazeera===== \n')
i = 1
for item in newsjson['articles']:
frame.displayText(str(i) + '. ' + item['title'] + '\n')
frame.displayText(item['description'] + '\n')
i += 1
except:
frame.displayText('Qatari agents have refused to share this intel, Ethan')
elif put.startswith('bbc '):
try:
bbcurl = ('https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=571863193daf421082a8666fe4b666f3')
newsresponce = requests.get(bbcurl)
newsjson = newsresponce.json()
speak.say('Our agents from BBC report this')
speak.runAndWait()
frame.displayText(' =====BBC===== \n')
i = 1
for item in newsjson['articles']:
frame.displayText(str(i) + '. ' + item['title'] + '\n')
frame.displayText(item['description'] + '\n')
i += 1
except:
frame.displayText('MI6 is going crazy! Not allowing this!')
elif put.startswith('cricket '):
try:
cricketurl = ('https://newsapi.org/v1/articles?source=espn-cric-info&sortBy=latest&apiKey=571863193daf421082a8666fe4b666f3')
newsresponce = requests.get(cricketurl)
newsjson = newsresponce.json()
speak.say('Our agents from ESPN Cricket report this')
speak.runAndWait()
frame.displayText(' =====CRICKET NEWS===== \n')
i = 1
for item in newsjson['articles']:
frame.displayText(str(i) + '. ' + item['title'] + '\n')
frame.displayText(item['description'] + '\n')
i += 1
except:
frame.displayText('Connection not secure')
elif put.startswith('hindus '):
try:
hindusurl = ('https://newsapi.org/v1/articles?source=the-hindu&sortBy=latest&apiKey=571863193daf421082a8666fe4b666f3')
newsresponce = requests.get(hindusurl)
newsjson = newsresponce.json()
speak.say('Our agents from Hindu News report this')
speak.runAndWait()
frame.displayText(' =====HINDU NEWS===== \n')
i = 1
for item in newsjson['articles']:
frame.displayText(str(i) + '. ' + item['title'] + '\n')
frame.displayText(item['description'] + '\n')
i += 1
except:
frame.displayText('R&A W is blocking our reports, Ethan. Sorry! ')
# Finding files in pc
elif put.startswith('lookfor '):
try:
link1=put.split()
name=link1[1]
            rex = re.compile(name)
filepath=link1[2]
for root,dirs,files in os.walk(os.path.normpath(filepath)):
for f in files:
result = rex.search(f)
if result:
print (os.path.join(root, f))
except:
print("Error")
#A customized thread class for tracking reminders
class reminderThread(threading.Thread):
def __init__(self, frame):
threading.Thread.__init__(self)
self.event = threading.Event()
self.reminder_given_flag = False
self.frame = frame
def run(self):
while not self.event.is_set() :
upcoming_reminders = list()
self.removePastReminders()
try :
#reading the reminders from reminders.txt
file_hand = open(reminder_filename, 'r')
reminder_list = file_hand.readlines()
file_hand.close()
for line in reminder_list :
vals = line.split('\t')
date_time = datetime.strptime(vals[1].replace('\n',''), '%d %b %Y %I %M %p')
time_now = datetime.now()
#getting diff between time now and the reminder
time_diff = date_time - time_now
time_diff_hour = time_diff.days * 24 + time_diff.seconds // 3600
#if time diff less than 1 hour, add it to upcoming lists
if time_diff_hour < 1 :
upcoming_reminders.append(vals)
except :
pass
if not self.reminder_given_flag and len(upcoming_reminders) > 0 :
speak.say("You have " + str(len(upcoming_reminders))+" upcoming reminders")
speak.runAndWait()
for reminder in upcoming_reminders :
#wx.CallAfter(self.frame.displayText, reminder[0]+'\t\t'+reminder[1])
self.frame.displayText(reminder[0]+'\t\t'+reminder[1])
self.reminder_given_flag = True
time.sleep(1)
def removePastReminders(self):
try :
file_hand = open(reminder_filename, 'r')
reminder_list = file_hand.readlines()
file_hand.close()
new_list = list()
for reminder in reminder_list :
date_time = datetime.strptime(reminder.split('\t')[1].replace('\n',''), '%d %b %Y %I %M %p')
time_diff = date_time - datetime.now()
if time_diff.seconds >= 0 and time_diff.days >= 0 :
new_list.append(reminder)
file_hand = open(reminder_filename, 'w')
for line in new_list :
file_hand.write(line)
file_hand.close()
except FileNotFoundError :
pass
except :
self.frame.displayText("Error occured")
i=0
#A stdout class to redirect output to tkinter window
class StdRedirector(object):
def __init__(self, text_window):
self.text_window = text_window
def write(self, output):
self.text_window.insert(tk.END, output)
class MyFrame(tk.Frame):
def __init__(self,*args,**kwargs):
#new Thread to track reminders
global reminder_thread
reminder_thread = reminderThread(self)
tk.Frame.__init__(self,*args,**kwargs)
self.textBox = tk.Text(root,
height=1,width=30,
font=("Times", 16),
bg="#666", fg="#0f0",
spacing1=6, spacing3=6,
insertbackground="#0f0"
)
self.textBox.insert("1.0", "$>")
self.textBox.grid(row=1,column=1, padx=10, pady=10)
root.bind('<Return>', self.OnEnter)
root.bind('<Destroy>', self.onClose)
self.textBox.focus_set()
speak.say('''Hi Agent! BENJI at your service''')
speak.runAndWait()
self.photo1 = tk.PhotoImage(file="mic_icon.png")
self.btn = ttk.Button(root,command=self.OnClicked,
image=self.photo1, style="C.TButton")
self.btn.grid(row=1,column=2, padx=10, pady=20)
'''
self.output_window = tk.Toplevel()
output_text_window = tk.Text(self.output_window)
self.stddirec = StdRedirector(output_text_window)
sys.stdout = self.stddirec
output_text_window.pack()
self.output_window.withdraw()
'''
reminder_thread.start()
def OnEnter(self,event):
put=self.textBox.get("1.2","end-1c")
print(put)
self.textBox.delete('1.2',tk.END)
put=put.lower()
put = put.strip()
#put = re.sub(r'[?|$|.|!]', r'', put)
link=put.split()
events(self, put,link)
if put=='':
self.displayText('Reenter')
def OnClicked(self):
r = sr.Recognizer()
with sr.Microphone() as source:
speak.say('Hey I am Listening ')
speak.runAndWait()
audio = r.listen(source)
try:
put=r.recognize_google(audio)
self.displayText(put)
self.textBox.insert('1.2',put)
put=put.lower()
put = put.strip()
#put = re.sub(r'[?|$|.|!]', r'', put)
link=put.split()
events(self,put,link)
except sr.UnknownValueError:
self.displayText("Could not understand audio")
except sr.RequestError as e:
self.displayText("Could not request results; {0}".format(e))
def onClose(self, event):
global reminder_thread
reminder_thread.event.set()
#root.destroy()
def displayText(self, text):
try :
if not self.output_window.winfo_viewable() :
self.output_window.update()
self.output_window.deiconify()
except :
self.createOutputWindow()
print(text)
def createOutputWindow(self):
self.output_window = tk.Toplevel()
output_text_window = tk.Text(self.output_window)
self.stddirec = StdRedirector(output_text_window)
sys.stdout = self.stddirec
output_text_window.pack()
#Trigger the GUI. Light the fuse!
if __name__=="__main__":
root = tk.Tk()
view = MyFrame(root)
style = ttk.Style()
style.configure('C.TButton',
background='#555',
highlightthickness='0'
)
style.map("C.TButton",
background=[('pressed', '!disabled', '#333'), ('active', '#666')]
)
# root.geometry('{}x{}'.format(400, 100))
# view.pack(side="top",fill="both",expand=False)
root.iconphoto(True, tk.PhotoImage(file=os.path.join(sys.path[0],'benji_final.gif')))
root.title('B.E.N.J.I.')
root.configure(background="#444")
root.resizable(0,0)
root.mainloop()
|
import bpy
holeDepth=5
holeRadius=1.5
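# Builds what appears to be a motor-mount base plate: rectangular gear/wheel
# cut-outs and a set of screw holes are unioned into a single "holes" object,
# which is finally subtracted (boolean DIFFERENCE) from a 62-unit-radius
# cylindrical plate.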
bpy.ops.mesh.primitive_cube_add()
gearHole = bpy.context.selected_objects[0]
gearHole.name="GearHole"
bpy.ops.transform.resize(value=(9, 14, holeDepth/2.0))
gearHole.location = (0,0,2)
bpy.ops.mesh.primitive_cube_add()
rightWheel = bpy.context.selected_objects[0]
rightWheel.name="RightWheel"
bpy.ops.transform.resize(value=(14, 4, holeDepth/2.0))
rightWheel.location = (0,49,2)
#bpy.ops.object.select_all(action='DESELECT')
#bpy.context.scene.objects.active = bpy.data.objects['GearHole']
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = gearHole
bpy.context.object.modifiers["Boolean"].operation = 'UNION'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
newHoles = bpy.context.selected_objects[0]
newHoles.name="NewHoles"
# reselect the first cylinder and delete it (the only one worked!!!)
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.select_pattern(pattern = 'GearHole')
bpy.ops.object.delete()
bpy.ops.object.select_pattern(pattern = 'RightWheel')
bpy.ops.object.delete()
bpy.ops.mesh.primitive_cube_add()
leftWheel = bpy.context.selected_objects[0]
leftWheel.name="LeftWheel"
bpy.ops.transform.resize(value=(14, 4, holeDepth/2.0))
leftWheel.location = (0,-49,2)
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = newHoles
bpy.context.object.modifiers["Boolean"].operation = 'UNION'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
newHoles1 = bpy.context.selected_objects[0]
newHoles1.name = "NewHoles1"
bpy.ops.object.select_all(action='SELECT')
bpy.data.objects['NewHoles1'].select_set(state=False)
bpy.ops.object.delete()
bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=holeRadius, depth=holeDepth, location=(4.5, 20.0, 2.0))
screw1 = bpy.context.selected_objects[0]
screw1.name="Screw1"
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = newHoles1
bpy.context.object.modifiers["Boolean"].operation = 'UNION'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
newHoles2 = bpy.context.selected_objects[0]
newHoles2.name = "NewHoles2"
bpy.ops.object.select_all(action='SELECT')
bpy.data.objects['NewHoles2'].select_set(state=False)
bpy.ops.object.delete()
bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=holeRadius, depth=holeDepth, location=(4.5, 38, 2.0))
screw2 = bpy.context.selected_objects[0]
screw2.name="Screw2"
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = newHoles2
bpy.context.object.modifiers["Boolean"].operation = 'UNION'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
newHoles3 = bpy.context.selected_objects[0]
newHoles3.name = "NewHoles3"
bpy.ops.object.select_all(action='SELECT')
bpy.data.objects['NewHoles3'].select_set(state=False)
bpy.ops.object.delete()
bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=holeRadius, depth=holeDepth, location=(4.5, -20.0, 2.0))
screw3 = bpy.context.selected_objects[0]
screw3.name="Screw3"
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = newHoles3
bpy.context.object.modifiers["Boolean"].operation = 'UNION'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
newHoles4 = bpy.context.selected_objects[0]
newHoles4.name = "NewHoles4"
bpy.ops.object.select_all(action='SELECT')
bpy.data.objects['NewHoles4'].select_set(state=False)
bpy.ops.object.delete()
bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=holeRadius, depth=holeDepth, location=(4.5, -38, 2.0))
screw4 = bpy.context.selected_objects[0]
screw4.name="Screw4"
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = newHoles4
bpy.context.object.modifiers["Boolean"].operation = 'UNION'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
newHoles5 = bpy.context.selected_objects[0]
newHoles5.name = "NewHoles5"
bpy.ops.object.select_all(action='SELECT')
bpy.data.objects['NewHoles5'].select_set(state=False)
bpy.ops.object.delete()
bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=holeRadius, depth=holeDepth, location=(-37.5, 0, 2.0))
screw5 = bpy.context.selected_objects[0]
screw5.name="Screw5"
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = newHoles5
bpy.context.object.modifiers["Boolean"].operation = 'UNION'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
newHoles6 = bpy.context.selected_objects[0]
newHoles6.name = "NewHoles6"
bpy.ops.object.select_all(action='SELECT')
bpy.data.objects['NewHoles6'].select_set(state=False)
bpy.ops.object.delete()
bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=holeRadius, depth=holeDepth, location=(-12, 22.5, 2.0))
screw6 = bpy.context.selected_objects[0]
screw6.name="Screw6"
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = newHoles6
bpy.context.object.modifiers["Boolean"].operation = 'UNION'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
newHoles7 = bpy.context.selected_objects[0]
newHoles7.name = "NewHoles7"
bpy.ops.object.select_all(action='SELECT')
bpy.data.objects['NewHoles7'].select_set(state=False)
bpy.ops.object.delete()
bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=holeRadius, depth=holeDepth, location=(-12, -22.5, 2.0))
screw7 = bpy.context.selected_objects[0]
screw7.name="Screw7"
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = newHoles7
bpy.context.object.modifiers["Boolean"].operation = 'UNION'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
allTheHoles = bpy.context.selected_objects[0]
allTheHoles.name = "AllTheHoles"
bpy.ops.object.select_all(action='SELECT')
bpy.data.objects['AllTheHoles'].select_set(state=False)
bpy.ops.object.delete()
#bpy.data.objects['AllTheHoles'].select_set(state=False)
bpy.data.objects["AllTheHoles"].location.x += 12.0
bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=62, depth=2.0, location=(0,0, 1.0))
basePlate = bpy.context.selected_objects[0]
basePlate.name="BasePlate"
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].object = allTheHoles
bpy.context.object.modifiers["Boolean"].operation = 'DIFFERENCE'
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
bpy.ops.object.select_all(action='SELECT')
bpy.data.objects['BasePlate'].select_set(state=False)
bpy.ops.object.delete()
#bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=59, depth=5.0, location=(0,0, 1.25))
#bigHole = bpy.context.selected_objects[0]
#bigHole.name="BigHole"
#
# somewhere in here build out the rest of the next layer.
#
#bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=61, depth=3.0, location=(0,0, 1.5))
#nextLayer = bpy.context.selected_objects[0]
#nextLayer.name="NextLayer"
#bpy.ops.object.modifier_add(type='BOOLEAN')
#bpy.context.object.modifiers["Boolean"].object = bigHole
#bpy.context.object.modifiers["Boolean"].operation = 'DIFFERENCE'
#bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
#bpy.ops.object.select_all(action='DESELECT')
#bpy.ops.object.select_pattern(pattern = 'BigHole')
#bpy.ops.object.delete()
#bpy.data.objects["NextLayer"].location.z += 2.0
#bpy.ops.object.modifier_add(type='BOOLEAN')
#bpy.context.object.modifiers["Boolean"].object = basePlate
#bpy.context.object.modifiers["Boolean"].operation = 'UNION'
#bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
#bpy.ops.object.select_all(action='DESELECT')
#bpy.ops.object.select_pattern(pattern = 'BasePlate')
#bpy.ops.object.delete()
#newHoles8 = bpy.context.selected_objects[0]
#newHoles8.name = ""
##bpy.ops.object.select_all(action='DESELECT')
#bpy.context.scene.objects.active = bpy.data.objects['Cylinder']
# bpy.ops.object.select_pattern(pattern = 'tube')
#motormount mount hole 11,22 11,-22, 35,0
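# The screw holes above all repeat the same add-cylinder / boolean-union /
# cleanup sequence. A sketch of how that sequence could be factored into a
# helper (commented out; assumes the same Blender 2.8x API used above,
# including the deprecated apply_as argument of modifier_apply):
#
# def union_cylinder_into(holes_obj, new_name, location):
#     """Add a cylinder at `location`, union `holes_obj` into it, delete the rest."""
#     bpy.ops.mesh.primitive_cylinder_add(vertices=64, radius=holeRadius,
#                                         depth=holeDepth, location=location)
#     cyl = bpy.context.selected_objects[0]
#     cyl.name = new_name
#     bpy.ops.object.modifier_add(type='BOOLEAN')
#     bpy.context.object.modifiers["Boolean"].object = holes_obj
#     bpy.context.object.modifiers["Boolean"].operation = 'UNION'
#     bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
#     bpy.ops.object.select_all(action='SELECT')
#     cyl.select_set(state=False)
#     bpy.ops.object.delete()
#     return cyl
#
# holes = newHoles1
# for i, loc in enumerate([(4.5, 20.0, 2.0), (4.5, 38, 2.0), (4.5, -20.0, 2.0),
#                          (4.5, -38, 2.0), (-37.5, 0, 2.0), (-12, 22.5, 2.0),
#                          (-12, -22.5, 2.0)], start=2):
#     holes = union_cylinder_into(holes, "NewHoles%d" % i, loc)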
|
"""
This test module has tests relating to t-plots
All functions in /calculations/tplot.py are tested here.
The purposes are:
- testing the user-facing API function (tplot)
- testing individual low level functions against known results.
Functions are tested against pre-calculated values on real isotherms.
All pre-calculated data for characterisation can be found in the
/.conftest file together with the other isotherm parameters.
"""
import pytest
from matplotlib.testing.decorators import cleanup
from numpy import isclose
import pygaps.characterisation.t_plots as pt
import pygaps.parsing as pgp
import pygaps.utilities.exceptions as pgEx
from .conftest import DATA
from .conftest import DATA_N77_PATH
@pytest.mark.characterisation
class TestTPlot():
"""Tests t-plot calculations."""
def test_t_plot_checks(self, use_adsorbate, basic_pointisotherm):
"""Checks for built-in safeguards."""
# Will raise a "no suitable model exception"
with pytest.raises(pgEx.ParameterError):
pt.t_plot(basic_pointisotherm, thickness_model='random')
@pytest.mark.parametrize('sample', [sample for sample in DATA])
def test_t_plot(self, sample):
"""Test calculation with several model isotherms."""
sample = DATA[sample]
# exclude datasets where it is not applicable
if sample.get('t_area', None):
filepath = DATA_N77_PATH / sample['file']
isotherm = pgp.isotherm_from_json(filepath)
res = pt.t_plot(isotherm)
results = res.get('results')
err_relative = 0.1 # 10 percent
err_absolute_area = 0.1 # units
err_absolute_volume = 0.01 # units
assert isclose(
results[-1].get('adsorbed_volume'), sample['t_pore_volume'], err_relative,
err_absolute_area
)
assert isclose(
results[0].get('area'), sample['t_area'], err_relative, err_absolute_volume
)
def test_t_plot_choice(self):
"""Test choice of points."""
sample = DATA['MCM-41']
filepath = DATA_N77_PATH / sample['file']
isotherm = pgp.isotherm_from_json(filepath)
res = pt.t_plot(isotherm, t_limits=[0.7, 1.0])
results = res.get('results')
err_relative = 0.1 # 10 percent
err_absolute_area = 0.1 # units
err_absolute_volume = 0.01 # units
assert isclose(
results[-1].get('adsorbed_volume'), sample['t_pore_volume'], err_relative,
err_absolute_area
)
assert isclose(
results[-1].get('area'), sample['s_t_area'], err_relative, err_absolute_volume
)
@cleanup
def test_t_plot_output(self):
"""Test verbosity."""
sample = DATA['MCM-41']
filepath = DATA_N77_PATH / sample['file']
isotherm = pgp.isotherm_from_json(filepath)
pt.t_plot(isotherm, 'Halsey', verbose=True)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class VaswaniInverseSquareRootLRScheduleConfig(FairseqDataclass):
warmup_updates: int = field(
default=4000,
metadata={"help": "warmup the learning rate linearly for the first N updates"},
)
warmup_init_lr: float = field(
default=-1,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
encoder_embed_dim: float = field(
default=512,
metadata={
"help": "initial learning rate during warmup phase; default is cfg.lr"
},
)
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("vaswani_inverse_sqrt", dataclass=VaswaniInverseSquareRootLRScheduleConfig)
class VaswaniInverseSquareRootSchedule(FairseqLRScheduler):
"""Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
learning rate (``--lr``). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup::
lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates)
lr = lrs[update_num]
After warmup::
decay_factor = cfg.lr * sqrt(cfg.warmup_updates)
lr = decay_factor / sqrt(update_num)
"""
def __init__(self, cfg: VaswaniInverseSquareRootLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with inverse_sqrt."
" Consider --lr-scheduler=fixed instead."
)
self.warmup_end_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
# then, decay prop. to the inverse square root of the update number
num_updates = 0
self.warmup_updates = cfg.warmup_updates
self.multiplier = 10 * (cfg.encoder_embed_dim ** -0.5)
self.decay_factor = self.multiplier * min((num_updates + 1) * (self.warmup_updates ** -1.5), (num_updates + 1) ** -0.5)
self.lr = self.decay_factor * self.warmup_end_lr
print("Initial learning rate: {}".format(self.lr))
# initial learning rate
self.optimizer.set_lr(self.lr)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
self.decay_factor = self.multiplier * min((num_updates + 1) * (self.warmup_updates ** -1.5), (num_updates + 1) ** -0.5)
self.lr = self.decay_factor * self.warmup_end_lr
self.optimizer.set_lr(self.lr)
return self.lr
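# A small worked example of the schedule above (illustrative values, not from
# any particular fairseq config): with cfg.lr = 1.0, warmup_updates = 4000 and
# encoder_embed_dim = 512, multiplier = 10 * 512 ** -0.5 ≈ 0.442, so
#   step_update(0)     -> 0.442 * min(1 * 4000 ** -1.5, 1 ** -0.5)       ≈ 1.75e-6
#   step_update(3999)  -> 0.442 * min(4000 * 4000 ** -1.5, 4000 ** -0.5) ≈ 6.99e-3
#   step_update(39999) -> 0.442 * 40000 ** -0.5                          ≈ 2.21e-3
# i.e. an approximately linear ramp over the first warmup_updates steps followed
# by inverse square-root decay, scaled by cfg.lr.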
|
#!/usr/bin/python3
__version__ = '0.0.1' # Time-stamp: <2021-09-25T07:39:16Z>
## Language: Japanese/UTF-8
"""Simulation Buddhism Prototype No.3 x.1 - Death
死亡関連
"""
##
## Author:
##
## JRF ( http://jrf.cocolog-nifty.com/statuses/ (in Japanese))
##
## License:
##
## The author is a Japanese.
##
## I intended this program to be public-domain, but you can treat
## this program under the (new) BSD-License or under the Artistic
## License, if it is convenient for you.
##
## Within three months after the release of this program, I
## especially admit responsibility of efforts for rational requests
## of correction to this program.
##
## I often have bouts of schizophrenia, but I believe that my
## intention is legitimately fulfilled.
##
import math
import random
import simbdp3x1.base as base
from simbdp3x1.base import ARGS, Person0, Economy0
from simbdp3x1.common import Death, Tomb, np_clip
from simbdp3x1.inherit import calc_inheritance_share
class PersonDT (Person0):
def is_dead (self):
return self.death is not None
def die_relation (self, relation):
p = self
rel = relation
economy = self.economy
if p.age > 60:
p.a60_spouse_death = True
rel.end = economy.term
if rel.spouse != '' and economy.is_living(rel.spouse):
s = economy.people[rel.spouse]
if s.marriage is not None and s.marriage.spouse == p.id:
s.marriage.end = economy.term
s.trash.append(s.marriage)
s.marriage = None
            for a in [a for a in s.adulteries]:
if a.spouse == p.id:
a.end = economy.term
s.trash.append(a)
s.adulteries.remove(a)
def die_child (self, child_id):
p = self
economy = self.economy
ch = None
for x in p.children:
if x.id == child_id:
ch = x
if ch is None:
return
ch.death_term = economy.term
p.children.remove(ch)
p.trash.append(ch)
def die_supporting (self, new_supporter):
p = self
economy = self.economy
ns = None
if new_supporter is not None \
and new_supporter != '':
assert economy.is_living(new_supporter)
ns = economy.people[new_supporter]
assert new_supporter is None or new_supporter == ''\
or (ns is not None and ns.supported is None)
if new_supporter is None or new_supporter == '':
for x in [x for x in p.supporting]:
if x != '' and x in economy.people:
s = economy.people[x]
assert s.supported == p.id
if new_supporter is None:
s.remove_supported()
else:
s.supported = ''
else:
ns.add_supporting(p.supporting_non_nil())
p.supporting = []
def do_inheritance (self):
p = self
economy = self.economy
assert p.is_dead()
q = p.death.inheritance_share
a = p.prop + p.land * ARGS.prop_value_of_land
if q is None or a <= 0:
economy.cur_forfeit_prop += p.prop
economy.cur_forfeit_land += p.land
p.prop = 0
p.land = 0
return
land = p.land
prop = p.prop
for x, y in sorted(q.items(), key=lambda x: x[1], reverse=True):
a1 = a * y
l = math.floor(a1 / ARGS.prop_value_of_land)
if l > land:
l = land
land = 0
else:
land -= l
if x == '':
economy.cur_forfeit_land += l
economy.cur_forfeit_prop += a1 - l * ARGS.prop_value_of_land
prop -= a1 - l * ARGS.prop_value_of_land
else:
assert economy.is_living(x)
p1 = economy.people[x]
if l > 0:
p1.tmp_land_damage = \
(p1.tmp_land_damage * p1.land
+ p.tmp_land_damage * l) / (p1.land + l)
p1.land += l
p1.prop += a1 - l * ARGS.prop_value_of_land
prop -= a1 - l * ARGS.prop_value_of_land
p.land = 0
p.prop = 0
class EconomyDT (Economy0):
def is_living (self, id_or_person):
s = id_or_person
if type(id_or_person) is not str:
s = id_or_person.id
return s in self.people and self.people[s].death is None
def get_person (self, id1):
economy = self
if id1 in economy.people:
return economy.people[id1]
elif id1 in economy.tombs:
return economy.tombs[id1].person
return None
def die (self, persons):
economy = self
if isinstance(persons, base.Person):
persons = [persons]
for p in persons:
assert not p.is_dead()
dt = Death()
dt.term = economy.term
p.death = dt
tomb = Tomb()
tomb.death_term = economy.term
tomb.person = p
tomb.death_hating = p.hating.copy()
tomb.death_hating_unknown = p.hating_unknown
tomb.death_political_hating = p.political_hating
tomb.death_merchant_hating = p.merchant_hating
tomb.death_merchant_hated = p.merchant_hated
economy.tombs[p.id] = tomb
prs = [[] for dist in economy.nation.districts]
for p in economy.people.values():
if not p.is_dead() and p.in_priesthood():
prs[p.district].append(p.id)
for p in persons:
tomb = economy.tombs[p.id]
if prs[p.district]:
tomb.priest = random.choice(prs[p.district])
a = (p.prop + p.land * ARGS.prop_value_of_land) \
* ARGS.priest_share
if a > 0:
p.prop -= a
economy.nation.districts[p.district].priests_share += a
for p in persons:
if p.in_jail():
p.release_from_jail()
for p in persons:
if p.dominator_position is None:
continue
p.get_dominator().resign()
for p in persons:
if p.id in economy.dominator_parameters:
economy.dominator_parameters[p.id].economy = None
del economy.dominator_parameters[p.id]
for p in persons:
p.death.inheritance_share = calc_inheritance_share(economy, p.id)
for p in persons:
spouse = None
if p.marriage is not None \
and (p.marriage.spouse == ''
or economy.is_living(p.marriage.spouse)):
spouse = p.marriage.spouse
if p.marriage is not None:
p.die_relation(p.marriage)
for a in p.adulteries:
p.die_relation(a)
            # There is nothing to update for father/mother when someone dies,
            # but children affect the desired number of children, so the child
            # records must be updated.
if p.father != '' and economy.is_living(p.father):
economy.people[p.father].die_child(p.id)
if p.mother != '' and economy.is_living(p.mother):
economy.people[p.mother].die_child(p.id)
fst_heir = None
if p.death.inheritance_share is not None:
l1 = [(x, y) for x, y
in p.death.inheritance_share.items()
if x != '' and economy.is_living(x)
and x != spouse
and (economy.people[x].supported is None or
economy.people[x].supported == p.id)
and economy.people[x].age >= 18]
if l1:
u = max(l1, key=lambda x: x[1])[1]
l2 = [x for x, y in l1 if y == u]
fst_heir = max(l2, key=lambda x:
economy.people[x].asset_value())
if (fst_heir is None
or fst_heir not in [ch.id for ch in p.children]) \
and spouse is not None and spouse in p.supporting:
if spouse == '':
fst_heir = ''
p.remove_supporting_nil()
else:
s = economy.people[spouse]
if s.age >= 18 and s.age < 70:
fst_heir = spouse
s.remove_supported()
if fst_heir is not None and fst_heir != '' \
and fst_heir in p.supporting:
fh = economy.people[fst_heir]
fh.remove_supported()
if p.supporting:
if p.supported is not None \
and economy.is_living(p.supported):
p.die_supporting(p.supported)
elif fst_heir is None or p.death.inheritance_share is None:
p.die_supporting(None)
else:
p.die_supporting(fst_heir)
if p.supported is not None:
p.remove_supported()
if fst_heir is not None and fst_heir != '':
fh = economy.people[fst_heir]
fh.add_supporting(p)
for p in persons:
p.do_inheritance()
def update_death (economy):
print("\nDeath:...", flush=True)
l = []
for p in economy.people.values():
if not p.is_dead():
if random.random() < ARGS.general_death_rate:
l.append(p)
else:
threshold = 0
if p.age > 110:
threshold = 1
                elif p.age > 80 and p.age <= 110:
threshold = ARGS.a80_death_rate
elif p.age > 60 and p.age <= 80:
threshold = ARGS.a60_death_rate
elif p.age >= 0 and p.age <= 3:
threshold = ARGS.infant_death_rate
ij = np_clip(p.injured + p.tmp_injured, 0, 1)
threshold2 = ARGS.injured_death_rate * ij
if random.random() < max([threshold, threshold2]):
l.append(p)
economy.die(l)
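# A minimal sketch (not part of the simulation) of how update_death combines the
# age-based and injury-based thresholds above; the rates and the person below are
# hypothetical stand-ins for ARGS and a Person instance.
# a80_death_rate, injured_death_rate = 0.01, 0.1
# age, injured, tmp_injured = 85, 0.3, 0.1
# threshold = a80_death_rate if 80 < age <= 110 else 0.0
# threshold2 = injured_death_rate * np_clip(injured + tmp_injured, 0, 1)
# dies_this_term = random.random() < max(threshold, threshold2)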
|
import random
import pandas as pd
import numpy as np
df1 = pd.read_csv('train.csv')
df2 = pd.read_csv('train.csv')
df3 = pd.read_csv('train.csv')
df4 = pd.read_csv('train.csv')
df5 = pd.read_csv('train.csv')
for i in range(0,1000000):
for k in range (1,5):
x = 0
# Create Pre-Flop round
if k == 1:
df1.loc[i * 4 + k-1] = -1
df2.loc[i * 4 + k-1] = -1
df3.loc[i * 4 + k - 1] = -1
df4.loc[i * 4 + k - 1] = -1
df5.loc[i * 4 + k - 1] = -1
# Generate first card for P1
df1.iloc[i * 4 + k-1, 0] = random.randrange(1,5)
df1.iloc[i * 4 + k-1, 1] = random.randrange(2,15)
while x == 0:
print "Step 1"
# Generate 2nd card for P1
df1.iloc[i * 4 + k-1, 2] = random.randrange(1,5)
df1.iloc[i * 4 + k-1, 3] = random.randrange(2,15)
#Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k-1, 2] == df1.iloc[i * 4 + k-1, 0] and df1.iloc[i * 4 + k-1, 3] == df1.iloc[i * 4 + k-1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 2"
# Generate 2nd card for P2
df2.iloc[i * 4 + k - 1, 2] = random.randrange(1, 5)
df2.iloc[i * 4 + k - 1, 3] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df2.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 0] and df2.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df2.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 3] == \
df1.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 3"
# Generate 2nd card for P3
df3.iloc[i * 4 + k - 1, 2] = random.randrange(1, 5)
df3.iloc[i * 4 + k - 1, 3] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df3.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 0] and df3.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df3.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 3])\
or (df3.iloc[i * 4 + k - 1, 2] == df2.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 3] ==
df2.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 4"
# Generate 2nd card for P4
df4.iloc[i * 4 + k - 1, 2] = random.randrange(1, 5)
df4.iloc[i * 4 + k - 1, 3] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df4.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 0] and df4.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df4.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 3])\
or (df4.iloc[i * 4 + k - 1, 2] == df2.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 3] ==
                    df2.iloc[i * 4 + k - 1, 3])\
or (df4.iloc[i * 4 + k - 1, 2] == df3.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 3] ==
df3.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 5"
# Generate 2nd card for P5
df5.iloc[i * 4 + k - 1, 2] = random.randrange(1, 5)
df5.iloc[i * 4 + k - 1, 3] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df5.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df5.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 3])\
or (df5.iloc[i * 4 + k - 1, 2] == df2.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 3] ==
df2.iloc[i * 4 + k - 1, 3])\
or (df5.iloc[i * 4 + k - 1, 2] == df3.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 3] ==
df3.iloc[i * 4 + k - 1, 3])\
or (df5.iloc[i * 4 + k - 1, 2] == df4.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 3] ==
df4.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 6"
# Generate 1st card for P2
df2.iloc[i * 4 + k - 1, 0] = random.randrange(1, 5)
df2.iloc[i * 4 + k - 1, 1] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df2.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 0] and df2.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 1]) \
or (df2.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df2.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 3]) \
or (df2.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 3]) \
or (df2.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 3]) \
or (df2.iloc[i * 4 + k - 1, 0] == df5.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df5.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 7"
# Generate 1st card for P3
df3.iloc[i * 4 + k - 1, 0] = random.randrange(1, 5)
df3.iloc[i * 4 + k - 1, 1] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df3.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 0] and df3.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 1]) \
or (df3.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df3.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 3]) \
or (df3.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 3]) \
or (df3.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 3]) \
or (df3.iloc[i * 4 + k - 1, 0] == df5.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df5.iloc[i * 4 + k - 1, 3])\
or (df3.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 0] and df3.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 8"
# Generate 1st card for P4
df4.iloc[i * 4 + k - 1, 0] = random.randrange(1, 5)
df4.iloc[i * 4 + k - 1, 1] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df4.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 0] and df4.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 1]) \
or (df4.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df4.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 3]) \
or (df4.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 3]) \
or (df4.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 3]) \
or (df4.iloc[i * 4 + k - 1, 0] == df5.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df5.iloc[i * 4 + k - 1, 3])\
or (df4.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 0] and df4.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 1])\
or (df4.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 0] and df4.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 9"
# Generate 1st card for P5
df5.iloc[i * 4 + k - 1, 0] = random.randrange(1, 5)
df5.iloc[i * 4 + k - 1, 1] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df5.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 1]) \
or (df5.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df5.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 3]) \
or (df5.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 3]) \
or (df5.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 3]) \
or (df5.iloc[i * 4 + k - 1, 0] == df5.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df5.iloc[i * 4 + k - 1, 3])\
or (df5.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 1])\
or (df5.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 1]) \
or (df5.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
list = []
df1.iloc[i * 4 + k - 1, 14] = 0
df2.iloc[i * 4 + k - 1, 14] = 0
df3.iloc[i * 4 + k - 1, 14] = 0
df4.iloc[i * 4 + k - 1, 14] = 0
df5.iloc[i * 4 + k - 1, 14] = 0
#Pre-flop Hand evaluation
#Evaluate each player's hand for a pair
if df1.iloc[i * 4 + k - 1, 1] == df1.iloc[i * 4 + k - 1, 3]:
list.append(df1.iloc[i * 4 + k - 1, 1])
if df2.iloc[i * 4 + k - 1, 1] == df2.iloc[i * 4 + k - 1, 3]:
list.append(df2.iloc[i * 4 + k - 1, 1])
if df3.iloc[i * 4 + k - 1, 1] == df3.iloc[i * 4 + k - 1, 3]:
list.append(df3.iloc[i * 4 + k - 1, 1])
if df4.iloc[i * 4 + k - 1, 1] == df4.iloc[i * 4 + k - 1, 3]:
list.append(df4.iloc[i * 4 + k - 1, 1])
if df5.iloc[i * 4 + k - 1, 1] == df5.iloc[i * 4 + k - 1, 3]:
list.append(df5.iloc[i * 4 + k - 1, 1])
#Check if more than one player have a pair
if (len(list) > 1):
winner = max(list)
if df1.iloc[i * 4 + k - 1, 1] == winner and df1.iloc[i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner and df2.iloc[i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner and df3.iloc[i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner and df4.iloc[i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner and df5.iloc[i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
#Check if only one player has a pair
elif (len(list) == 1):
winner = max(list)
if df1.iloc[i * 4 + k - 1, 1] == winner and df1.iloc[i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
elif df2.iloc[i * 4 + k - 1, 1] == winner and df2.iloc[i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
elif df3.iloc[i * 4 + k - 1, 1] == winner and df3.iloc[i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
elif df4.iloc[i * 4 + k - 1, 1] == winner and df4.iloc[i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
elif df5.iloc[i * 4 + k - 1, 1] == winner and df5.iloc[i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
#Evaluate for the high card
else:
winner = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],)
if df1.iloc[i * 4 + k - 1, 1] == winner or df1.iloc[i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner or df2.iloc[i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner or df3.iloc[i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner or df4.iloc[i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner or df5.iloc[i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
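        # A minimal sketch (not part of this script) of the pre-flop comparison above:
        # a pair beats a high card, higher pairs beat lower pairs, and without any
        # pair the highest card wins. The hand dict is a hypothetical stand-in for
        # the dataframe columns used above.
        # hands = {1: (9, 9), 2: (14, 3), 3: (7, 7), 4: (2, 5), 5: (13, 12)}
        # pairs = {p: r1 for p, (r1, r2) in hands.items() if r1 == r2}
        # if pairs:
        #     best = max(pairs.values())
        #     winners = [p for p, r in pairs.items() if r == best]
        # else:
        #     best = max(max(h) for h in hands.values())
        #     winners = [p for p, h in hands.items() if max(h) == best]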
# Create Flop Round
if k == 2:
df1.loc[i * 4 + k-1] = df1.loc[i * 4 + k - 2]
df2.loc[i * 4 + k - 1] = df2.loc[i * 4 + k - 2]
df3.loc[i * 4 + k - 1] = df3.loc[i * 4 + k - 2]
df4.loc[i * 4 + k - 1] = df4.loc[i * 4 + k - 2]
df5.loc[i * 4 + k - 1] = df5.loc[i * 4 + k - 2]
while x == 0:
print "Step 10"
#Generate 1st community card
df1.iloc[i * 4 + k-1, 4] = random.randrange(1, 5)
df1.iloc[i * 4 + k-1, 5] = random.randrange(2, 15)
df2.iloc[i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df2.iloc[i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
df3.iloc[i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df3.iloc[i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
df4.iloc[i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df4.iloc[i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
df5.iloc[i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df5.iloc[i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k-1, 4] == df1.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df1.iloc[i * 4 + k-1, 3]) \
or (df1.iloc[i * 4 + k-1, 4] == df1.iloc[i * 4 + k-1, 0] and df1.iloc[i * 4 + k-1, 5] == df1.iloc[i * 4 + k-1, 1])\
or (df1.iloc[i * 4 + k-1, 4] == df2.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df2.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 4] == df3.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df3.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 4] == df4.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df4.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 4] == df5.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df5.iloc[i * 4 + k-1, 3]) \
or (df1.iloc[i * 4 + k - 1, 4] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 5] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 4] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 5] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 4] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 5] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 4] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 5] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 11"
# Generate 2nd community card
df1.iloc[i * 4 + k-1, 6] = random.randrange(1, 5)
df1.iloc[i * 4 + k-1, 7] = random.randrange(2, 15)
df2.iloc[i * 4 + k-1, 6] = df1.iloc[i * 4 + k-1, 6]
df2.iloc[i * 4 + k-1, 7] = df1.iloc[i * 4 + k-1, 7]
df3.iloc[i * 4 + k-1, 6] = df1.iloc[i * 4 + k-1, 6]
df3.iloc[i * 4 + k-1, 7] = df1.iloc[i * 4 + k-1, 7]
df4.iloc[i * 4 + k-1, 6] = df1.iloc[i * 4 + k-1, 6]
df4.iloc[i * 4 + k-1, 7] = df1.iloc[i * 4 + k-1, 7]
df5.iloc[i * 4 + k-1, 6] = df1.iloc[i * 4 + k-1, 6]
df5.iloc[i * 4 + k-1, 7] = df1.iloc[i * 4 + k-1, 7]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k-1, 6] == df1.iloc[i * 4 + k-1, 4] and df1.iloc[i * 4 + k-1, 7] == df1.iloc[i * 4 + k-1, 5]) \
or (df1.iloc[i * 4 + k-1, 6] == df1.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df1.iloc[i * 4 + k-1, 3]) \
or (df1.iloc[i * 4 + k-1, 6] == df1.iloc[i * 4 + k-1, 0] and df1.iloc[i * 4 + k-1, 7] == df1.iloc[i * 4 + k-1, 1])\
or (df1.iloc[i * 4 + k-1, 6] == df2.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df2.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 6] == df3.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df3.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 6] == df4.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df4.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 6] == df5.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df5.iloc[i * 4 + k-1, 3]) \
or (df1.iloc[i * 4 + k - 1, 6] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 7] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 6] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 7] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 6] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 7] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 6] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 7] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 12"
#Generate 3rd community card
df1.iloc[i * 4 + k-1, 8] = random.randrange(1, 5)
df1.iloc[i * 4 + k-1, 9] = random.randrange(2, 15)
df2.iloc[i * 4 + k-1, 8] = df1.iloc[i * 4 + k-1, 8]
df2.iloc[i * 4 + k-1, 9] = df1.iloc[i * 4 + k-1, 9]
df3.iloc[i * 4 + k-1, 8] = df1.iloc[i * 4 + k-1, 8]
df3.iloc[i * 4 + k-1, 9] = df1.iloc[i * 4 + k-1, 9]
df4.iloc[i * 4 + k-1, 8] = df1.iloc[i * 4 + k-1, 8]
df4.iloc[i * 4 + k-1, 9] = df1.iloc[i * 4 + k-1, 9]
df5.iloc[i * 4 + k-1, 8] = df1.iloc[i * 4 + k-1, 8]
df5.iloc[i * 4 + k-1, 9] = df1.iloc[i * 4 + k-1, 9]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k - 1, 8] == df1.iloc[i * 4 + k - 1, 6] and df1.iloc[i * 4 + k - 1, 9] == df1.iloc[
i * 4 + k - 1, 7]) \
or (df1.iloc[i * 4 + k - 1, 8] == df1.iloc[i * 4 + k - 1, 4] and df1.iloc[i * 4 + k - 1, 9] ==
df1.iloc[i * 4 + k - 1, 5]) \
or (df1.iloc[i * 4 + k - 1, 8] == df1.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 8] == df1.iloc[i * 4 + k - 1, 0] and df1.iloc[i * 4 + k - 1, 9] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df1.iloc[i * 4 + k - 1, 8] == df2.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df2.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 8] == df3.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df3.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 8] == df4.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df4.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 8] == df5.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df5.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 8] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 9] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 8] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 9] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 8] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 9] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 8] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 9] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
df2.iloc[i * 4 + k - 1, 4] = df3.iloc[i * 4 + k - 1, 4] = df4.iloc[i * 4 + k - 1, 4] = df5.iloc[
i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df2.iloc[i * 4 + k - 1, 5] = df3.iloc[i * 4 + k - 1, 5] = df4.iloc[i * 4 + k - 1, 5] = df5.iloc[
i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
df2.iloc[i * 4 + k - 1, 6] = df3.iloc[i * 4 + k - 1, 6] = df4.iloc[i * 4 + k - 1, 6] = df5.iloc[
i * 4 + k - 1, 6] = df1.iloc[i * 4 + k - 1, 6]
df2.iloc[i * 4 + k - 1, 7] = df3.iloc[i * 4 + k - 1, 7] = df4.iloc[i * 4 + k - 1, 7] = df5.iloc[
i * 4 + k - 1, 7] = df1.iloc[i * 4 + k - 1, 7]
df2.iloc[i * 4 + k - 1, 8] = df3.iloc[i * 4 + k - 1, 8] = df4.iloc[i * 4 + k - 1, 8] = df5.iloc[
i * 4 + k - 1, 8] = df1.iloc[i * 4 + k - 1, 8]
df2.iloc[i * 4 + k - 1, 9] = df3.iloc[i * 4 + k - 1, 9] = df4.iloc[i * 4 + k - 1, 9] = df5.iloc[
i * 4 + k - 1, 9] = df1.iloc[i * 4 + k - 1, 9]
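            # A minimal sketch (not part of this script) of an alternative way to deal
            # distinct cards without rejection loops: sample 13 unique (suit, rank)
            # pairs from a full deck, ten for the hole cards and three for the flop.
            # import itertools
            # deck = list(itertools.product(range(1, 5), range(2, 15)))
            # cards = random.sample(deck, 13)
            # hole_cards, flop = cards[:10], cards[10:]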
#Flop hand evaluation
x = 0
list = [-1,-1,-1,-1,-1]
df1.iloc[i * 4 + k - 1, 14] = 0
df2.iloc[i * 4 + k - 1, 14] = 0
df3.iloc[i * 4 + k - 1, 14] = 0
df4.iloc[i * 4 + k - 1, 14] = 0
df5.iloc[i * 4 + k - 1, 14] = 0
#Straight Flush Evaluation
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
#P1 Evaluation
#With Ace Low
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
for m in range (0,5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0]+1 == list[1] and list[1]+1 == list[2] and list[2]+1 == list[3] and list[3]+1 == list[4]:
a1 = max(list[0],list[1],list[2],list[3],list[4])
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a1 = 0
# With Ace High
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0]+1 == list[1] and list[1]+1 == list[2] and list[2]+1 == list[3] and list[3]+1 == list[4]:
a1 = max(list[0],list[1],list[2],list[3],list[4])
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a1 = 0
#P2 Evaluation
#With Ace Low
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a2 = 0
#With Ace High
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a2 = 0
# P3 Evaluation
# With Ace Low
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == \
list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a3 = 0
#With Ace High
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a3 = 0
# P4 Evaluation
# With Ace Low
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == \
list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a4 = 0
#With Ace High
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a4 = 0
# P5 Evaluation
# With Ace Low
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == \
list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a5 = 0
#With Ace High
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a5 = 0
#Check for Straight flush
if (SF > 0):
print "Straight Flush"
b = max(a1,a2,a3,a4,a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for four of a kind
FK = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
#Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a1 = list[0]
#Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a2 = list[0]
#Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a3 = list[0]
#Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a4 = list[0]
#Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a5 = list[0]
#Checking for Four of a kind
if(FK > 0):
print "Four of a kind"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Check for full house
FH = 0
a1i = 0
a1ii = 0
a2i = 0
a2ii = 0
a3i = 0
a3ii = 0
a4i = 0
a4ii = 0
a5i = 0
a5ii = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a1i = list[0]
a1ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a1i = list[0]
a1ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a1i = list[0]
a1ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a1i = list[0]
a1ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a1i = list[0]
a1ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a1i = list[3]
a1ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a1i = list[4]
a1ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a1i = list[0]
a1ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a1i = list[3]
a1ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a1i = list[3]
a1ii = list[1]
FH = FH + 1
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a2i = list[0]
a2ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a2i = list[0]
a2ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a2i = list[0]
a2ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a2i = list[0]
a2ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a2i = list[0]
a2ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a2i = list[3]
a2ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a2i = list[4]
a2ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a2i = list[0]
a2ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a2i = list[3]
a2ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a2i = list[3]
a2ii = list[1]
FH = FH + 1
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a3i = list[0]
a3ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a3i = list[0]
a3ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a3i = list[0]
a3ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a3i = list[0]
a3ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a3i = list[0]
a3ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a3i = list[3]
a3ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a3i = list[4]
a3ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a3i = list[0]
a3ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a3i = list[3]
a3ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a3i = list[3]
a3ii = list[1]
FH = FH + 1
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a4i = list[0]
a4ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a4i = list[0]
a4ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a4i = list[0]
a4ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a4i = list[0]
a4ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a4i = list[0]
a4ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a4i = list[3]
a4ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a4i = list[4]
a4ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a4i = list[0]
a4ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a4i = list[3]
a4ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a4i = list[3]
a4ii = list[1]
FH = FH + 1
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a5i = list[0]
a5ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a5i = list[0]
a5ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a5i = list[0]
a5ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a5i = list[0]
a5ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a5i = list[0]
a5ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a5i = list[3]
a5ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a5i = list[4]
a5ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a5i = list[0]
a5ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a5i = list[3]
a5ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a5i = list[3]
a5ii = list[1]
FH = FH + 1
#Evaluating for Full House
if (FH > 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
c = 0
                        if a1i == b:
                            c = c + 1
                        if a2i == b:
                            c = c + 1
                        if a3i == b:
                            c = c + 1
                        if a4i == b:
                            c = c + 1
                        if a5i == b:
                            c = c + 1
if c > 1:
print "Full House"
b = max(a1ii, a2ii, a3ii, a4ii, a5ii)
if a1ii == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2ii == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3ii == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4ii == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5ii == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
elif (FH == 1):
print "Full House"
b = max(a1i,a2i,a3i,a4i,a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Evaluate for Flush
F = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
#Evaluate P1
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
# Evaluate P2
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
# Evaluate P3
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == \
list[4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
# Evaluate P4
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[
3] == list[4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
# Evaluate P5
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and \
list[3] == list[4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
if F > 0:
print "Flush"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Check for Straight
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# P1 Evaluation
# With Ace Low
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# P2 Evaluation
# With Ace Low
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# P3 Evaluation
# With Ace Low
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# P4 Evaluation
# With Ace Low
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# P5 Evaluation
# With Ace Low
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# Check for Straight
if (SF > 0):
print "Straight"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Check for 3 of a kind
FH = 0
a1i = 0
a2i = 0
a3i = 0
a4i = 0
a5i = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a1i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a1i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a1i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a1i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a1i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a1i = list[3]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2]:
a1i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a1i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a1i = list[3]
FH = FH + 1
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a2i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a2i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a2i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a2i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a2i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a2i = list[3]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2]:
a2i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a2i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a2i = list[3]
FH = FH + 1
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a3i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a3i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a3i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a3i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a3i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a3i = list[3]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2]:
a3i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a3i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a3i = list[3]
FH = FH + 1
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a4i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a4i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a4i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a4i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a4i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a4i = list[3]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2]:
a4i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a4i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a4i = list[3]
FH = FH + 1
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a5i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a5i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a5i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a5i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a5i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a5i = list[3]
FH = FH + 1
                                elif list[4] == list[1] and list[1] == list[2]:
a5i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a5i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a5i = list[3]
FH = FH + 1
# Evaluating for 3 of a kind
if (FH > 0):
print "3 of a kind"
if a1i == a2i and a1i != 0:
b = max (df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],)
if b == df1.iloc[i * 4 + k - 1, 1] or b == df1.iloc[i * 4 + k - 1, 3]:
df1.iloc[i * 4 + k - 1, 14] = 1
else:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a1i == a3i and a1i != 0:
b = max (df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],)
if b == df1.iloc[i * 4 + k - 1, 1] or b == df1.iloc[i * 4 + k - 1, 3]:
df1.iloc[i * 4 + k - 1, 14] = 1
else:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a1i == a4i and a1i != 0:
b = max (df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],)
if b == df1.iloc[i * 4 + k - 1, 1] or b == df1.iloc[i * 4 + k - 1, 3]:
df1.iloc[i * 4 + k - 1, 14] = 1
else:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a1i == a5i and a1i != 0:
b = max (df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],)
if b == df1.iloc[i * 4 + k - 1, 1] or b == df1.iloc[i * 4 + k - 1, 3]:
df1.iloc[i * 4 + k - 1, 14] = 1
else:
df5.iloc[i * 4 + k - 1, 14] = 1
elif a2i == a3i and a2i != 0:
b = max (df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],)
if b == df2.iloc[i * 4 + k - 1, 1] or b == df2.iloc[i * 4 + k - 1, 3]:
df2.iloc[i * 4 + k - 1, 14] = 1
else:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a2i == a4i and a2i != 0:
b = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3], )
if b == df2.iloc[i * 4 + k - 1, 1] or b == df2.iloc[i * 4 + k - 1, 3]:
df2.iloc[i * 4 + k - 1, 14] = 1
else:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a2i == a5i and a2i != 0:
b = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], )
if b == df2.iloc[i * 4 + k - 1, 1] or b == df2.iloc[i * 4 + k - 1, 3]:
df2.iloc[i * 4 + k - 1, 14] = 1
else:
df5.iloc[i * 4 + k - 1, 14] = 1
elif a3i == a4i and a3i != 0:
b = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3], )
if b == df3.iloc[i * 4 + k - 1, 1] or b == df3.iloc[i * 4 + k - 1, 3]:
df3.iloc[i * 4 + k - 1, 14] = 1
else:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a3i == a5i and a3i != 0:
b = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], )
if b == df3.iloc[i * 4 + k - 1, 1] or b == df3.iloc[i * 4 + k - 1, 3]:
df3.iloc[i * 4 + k - 1, 14] = 1
else:
df5.iloc[i * 4 + k - 1, 14] = 1
elif a4i == a5i and a4i != 0:
b = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], )
if b == df4.iloc[i * 4 + k - 1, 1] or b == df4.iloc[i * 4 + k - 1, 3]:
df4.iloc[i * 4 + k - 1, 14] = 1
else:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Evaluate for two pair and one pair
f1 = [0]
f2 = [0]
f3 = [0]
f4 = [0]
f5 = [0]
a1 = [0]
a2 = [0]
a3 = [0]
a4 = [0]
a5 = [0]
Fin = 0
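# Two pair / one pair for the flop: TP1..TP5 count how many of a player's hole
# cards are paired with the board or with each other, and f1..f5 collect the
# paired ranks. A player with two or more pairs pushes the top two ranks into
# a1..a5 and increments Fin; the winner is then the highest two pair, else the
# highest single pair, else the high card.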
# Evaluate P1
TP1 = 0
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] == list[3] or list[
0] == list[4]):
TP1 = TP1 + 1
f1.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4]):
TP1 = TP1 + 1
f1.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f1.append(list[1])
if TP1 > 1:
f1 = np.sort(f1)[::-1].tolist()
a1.append(f1[0])
a1.append(f1[1])
Fin = Fin + 1
# Evaluate P2
TP2 = 0
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] == list[3] or
list[0] == list[4]):
TP2 = TP2 + 1
f2.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4]):
TP2 = TP2 + 1
f2.append(list[1])
if (list[0] == list[1]):
TP2 = TP2 + 1
f2.append(list[1])
if TP2 > 1:
f2 = np.sort(f2)[::-1].tolist()
a2.append(f2[0])
a2.append(f2[1])
Fin = Fin + 1
# Evaluate P3
TP3 = 0
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4]):
TP3 = TP3 + 1
f3.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4]):
TP3 = TP3 + 1
f3.append(list[1])
if (list[0] == list[1]):
TP3 = TP3 + 1
f3.append(list[1])
if TP3 > 1:
f3 = np.sort(f3)[::-1].tolist()
a3.append(f3[0])
a3.append(f3[1])
Fin = Fin + 1
# Evaluate P4
TP4 = 0
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4]):
TP4 = TP4 + 1
f4.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[
4]):
TP4 = TP4 + 1
f4.append(list[1])
if (list[0] == list[1]):
TP4 = TP4 + 1
f4.append(list[1])
if TP4 > 1:
f4 = np.sort(f4)[::-1].tolist()
a4.append(f4[0])
a4.append(f4[1])
Fin = Fin + 1
# Evaluate P5
TP5 = 0
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] ==
list[3] or list[0] == list[4]):
TP5 = TP5 + 1
f5.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] ==
list[4]):
TP5 = TP5 + 1
f5.append(list[1])
if (list[0] == list[1]):
TP5 = TP5 + 1
f5.append(list[1])
if TP5 > 1:
f5 = np.sort(f5)[::-1].tolist()
a5.append(f5[0])
a5.append(f5[1])
Fin = Fin + 1
#Check for two pair
if Fin > 0:
print "Two pair"
b = max(max(a1),max(a2),max(a3),max(a4),max(a5))
if max(a1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif max(a2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif max(a3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif max(a4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif max(a5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
#Check for one pair
elif TP1+TP2+TP3+TP4+TP5 > 0:
print "One pair"
b = max(max(f1),max(f2),max(f3),max(f4),max(f5))
if max(f1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if max(f2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if max(f3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if max(f4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if max(f5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Find the high card
print "High Card"
winner = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], )
if df1.iloc[i * 4 + k - 1, 1] == winner or df1.iloc[
i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner or df2.iloc[
i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner or df3.iloc[
i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner or df4.iloc[
i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner or df5.iloc[
i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
# Create Turn Round
if k == 3:
df1.loc[i * 4 + k - 1] = df1.loc[i * 4 + k - 2]
df2.loc[i * 4 + k - 1] = df2.loc[i * 4 + k - 2]
df3.loc[i * 4 + k - 1] = df3.loc[i * 4 + k - 2]
df4.loc[i * 4 + k - 1] = df4.loc[i * 4 + k - 2]
df5.loc[i * 4 + k - 1] = df5.loc[i * 4 + k - 2]
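# The turn row was seeded from the flop row above; the loop below draws the
# fourth community card (column 10 appears to hold the suit, column 11 the rank)
# and redraws until it differs from every card already dealt in this game.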
while x == 0:
print "Step 13"
#Generate 4th community card or the turn
df1.iloc[i * 4 + k - 1][10] = random.randrange(1, 5)
df1.iloc[i * 4 + k - 1][11] = random.randrange(2, 15)
df2.iloc[i * 4 + k - 1][10] = df1.iloc[i * 4 + k - 1][10]
df2.iloc[i * 4 + k - 1][11] = df1.iloc[i * 4 + k - 1][11]
df3.iloc[i * 4 + k - 1][10] = df1.iloc[i * 4 + k - 1][10]
df3.iloc[i * 4 + k - 1][11] = df1.iloc[i * 4 + k - 1][11]
df4.iloc[i * 4 + k - 1][10] = df1.iloc[i * 4 + k - 1][10]
df4.iloc[i * 4 + k - 1][11] = df1.iloc[i * 4 + k - 1][11]
df5.iloc[i * 4 + k - 1][10] = df1.iloc[i * 4 + k - 1][10]
df5.iloc[i * 4 + k - 1][11] = df1.iloc[i * 4 + k - 1][11]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 8] and df1.iloc[i * 4 + k - 1, 11] ==
df1.iloc[i * 4 + k - 1, 9]) \
or (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 6] and df1.iloc[
i * 4 + k - 1, 11] == df1.iloc[i * 4 + k - 1, 7]) \
or (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 4] and df1.iloc[
i * 4 + k - 1, 11] == df1.iloc[i * 4 + k - 1, 5]) \
or (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df1.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df1.iloc[i * 4 + k - 1, 1])\
or (df1.iloc[i * 4 + k - 1, 10] == df2.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df2.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 10] == df3.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df3.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 10] == df4.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df4.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 10] == df5.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df5.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 10] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 10] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 10] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 10] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
#Evaluate turn round
list = [[-1,-1],
[-1, -1],
[-1, -1],
[-1,-1],
[-1, -1],
[-1, -1]]
df1.iloc[i * 4 + k - 1, 14] = 0
df2.iloc[i * 4 + k - 1, 14] = 0
df3.iloc[i * 4 + k - 1, 14] = 0
df4.iloc[i * 4 + k - 1, 14] = 0
df5.iloc[i * 4 + k - 1, 14] = 0
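# Turn evaluation: the winner flag in column 14 is cleared for every player, then
# the hands are ranked in the usual order (straight flush, four of a kind, full
# house, flush, straight, three of a kind, two pair, one pair, high card). Each
# branch below only runs when every stronger hand has missed, and the winning
# player's column 14 is set to 1.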
# Straight Flush Evaluation
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
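# Straight flush: each card is kept as a [rank, suit] pair, the six cards are
# sorted by rank, and the two possible runs of five consecutive cards are tested
# for matching suits, once with the ace counted low (14 mapped to 1) and once
# with it high. a1..a5 record the top card of any run found.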
# P1 Evaluation
#With Ace Low
list[0] = [df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 0]]
list[1] = [df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 2]]
list[2] = [df1.iloc[i * 4 + k - 1, 5], df1.iloc[i * 4 + k - 1, 4]]
list[3] = [df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 6]]
list[4] = [df1.iloc[i * 4 + k - 1, 9], df1.iloc[i * 4 + k - 1, 8]]
list[5] = [df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a1 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a1 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 0]]
list[1] = [df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 2]]
list[2] = [df1.iloc[i * 4 + k - 1, 5], df1.iloc[i * 4 + k - 1, 4]]
list[3] = [df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 6]]
list[4] = [df1.iloc[i * 4 + k - 1, 9], df1.iloc[i * 4 + k - 1, 8]]
list[5] = [df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a1 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a1 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P2 Evaluation
#With Ace Low
list[0] = [df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 0]]
list[1] = [df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 2]]
list[2] = [df2.iloc[i * 4 + k - 1, 5], df2.iloc[i * 4 + k - 1, 4]]
list[3] = [df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 6]]
list[4] = [df2.iloc[i * 4 + k - 1, 9], df2.iloc[i * 4 + k - 1, 8]]
list[5] = [df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a2 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a2 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 0]]
list[1] = [df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 2]]
list[2] = [df2.iloc[i * 4 + k - 1, 5], df2.iloc[i * 4 + k - 1, 4]]
list[3] = [df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 6]]
list[4] = [df2.iloc[i * 4 + k - 1, 9], df2.iloc[i * 4 + k - 1, 8]]
list[5] = [df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a2 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a2 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P3 Evaluation
#With Ace Low
list[0] = [df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 0]]
list[1] = [df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 2]]
list[2] = [df3.iloc[i * 4 + k - 1, 5], df3.iloc[i * 4 + k - 1, 4]]
list[3] = [df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 6]]
list[4] = [df3.iloc[i * 4 + k - 1, 9], df3.iloc[i * 4 + k - 1, 8]]
list[5] = [df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a3 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a3 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 0]]
list[1] = [df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 2]]
list[2] = [df3.iloc[i * 4 + k - 1, 5], df3.iloc[i * 4 + k - 1, 4]]
list[3] = [df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 6]]
list[4] = [df3.iloc[i * 4 + k - 1, 9], df3.iloc[i * 4 + k - 1, 8]]
list[5] = [df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a3 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a3 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P4 Evaluation
#With Ace Low
list[0] = [df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 0]]
list[1] = [df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 2]]
list[2] = [df4.iloc[i * 4 + k - 1, 5], df4.iloc[i * 4 + k - 1, 4]]
list[3] = [df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 6]]
list[4] = [df4.iloc[i * 4 + k - 1, 9], df4.iloc[i * 4 + k - 1, 8]]
list[5] = [df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a4 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a4 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 0]]
list[1] = [df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 2]]
list[2] = [df4.iloc[i * 4 + k - 1, 5], df4.iloc[i * 4 + k - 1, 4]]
list[3] = [df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 6]]
list[4] = [df4.iloc[i * 4 + k - 1, 9], df4.iloc[i * 4 + k - 1, 8]]
list[5] = [df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a4 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a4 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P5 Evaluation
#With Ace Low
list[0] = [df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 0]]
list[1] = [df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 2]]
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a5 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a5 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 0]]
list[1] = [df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 2]]
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a5 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a5 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# Check for Straight flush
if (SF > 0):
print "Straight Flush"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for four of a kind
FK = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
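# Four of a kind: each hole-card rank is counted against all six visible ranks;
# a count of four marks quads and a1..a5 keep the quad rank for the tie-break.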
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a1 = m
break
if count == 4:
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a2 = m
break
if count == 4:
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a3 = m
break
if count == 4:
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a4 = m
break
if count == 4:
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a5 = m
break
if count == 4:
break
# Checking for Four of a kind
if (FK > 0):
print "Four of a kind"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for full house
FH = 0
a1i = 0
a1ii = 0
a2i = 0
a2ii = 0
a3i = 0
a3ii = 0
a4i = 0
a4ii = 0
a5i = 0
a5ii = 0
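# Full house: a hole-card rank appearing three times among the six cards gives the
# trips (a1i..a5i); the other hole card must then appear twice, giving the pair
# (a1ii..a5ii). When more than one player holds the top trips rank, the pair rank
# breaks the tie below.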
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a1i = m
a1ii = n
break
if count == 2:
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a2i = m
a2ii = n
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a3i = m
a3ii = n
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a4i = m
a4ii = n
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a5i = m
a5ii = n
break
# Evaluating for Full House
if (FH > 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
c = 0
if a1i == b:
c = c + 1
if a2i == b:
c = c + 1
if a3i == b:
c = c + 1
if a4i == b:
c = c + 1
if a5i == b:
c = c + 1
if c > 1:
b = max(a1ii, a2ii, a3ii, a4ii, a5ii)
if a1ii == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2ii == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3ii == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4ii == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5ii == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
elif (FH == 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Evaluate for Flush
F = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
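# Flush: the suit columns (even indices 0, 2, ..., 10) are compared for each of
# the six ways of choosing five cards out of six; on a match, a1..a5 hold the
# highest rank among those five cards.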
# Evaluate P1
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
list[5] = df1.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 11],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 11],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[5]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 11])
# Evaluate P2
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
list[5] = df2.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 11],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 11],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
5]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 11])
# Evaluate P3
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
list[5] = df3.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 11],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[
4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 11],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[
4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
5]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 11])
# Evaluate P4
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
list[5] = df4.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 11],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[
4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 11],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[
4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
5]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 11])
# Evaluate P5
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
list[5] = df5.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 11],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[
4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 11],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[
4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
5]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 11])
if F > 0:
print "Flush"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for Straight
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
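# Straight: the six ranks are sorted and the two runs of five consecutive cards
# are checked, with the ace tried both low (14 mapped to 1) and high; a1..a5 hold
# the top card of any straight found.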
# P1 Evaluation
# With Ace Low
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
for m in range(0, 6):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == list[5]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == list[
5]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P2 Evaluation
# With Ace Low
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
for m in range(0, 6):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P3 Evaluation
# With Ace Low
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
for m in range(0, 6):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P4 Evaluation
# With Ace Low
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
for m in range(0, 6):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P5 Evaluation
# With Ace Low
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
for m in range(0, 6):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# Check for Straight
if (SF > 0):
print "Straight"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for 3 of a kind
FH = 0
a1i = 0
a2i = 0
a3i = 0
a4i = 0
a5i = 0
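# Three of a kind: as in the four-of-a-kind check, each hole-card rank is counted
# against the six visible ranks; a count of three stores the trips rank in
# a1i..a5i and the highest trips wins.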
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a1i = m
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a2i = m
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a3i = m
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a4i = m
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a5i = m
break
# Evaluating for 3 of a kind
if (FH > 0):
print "3 of a kind"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Evaluate for two pair and one pair
f1 = [0]
f2 = [0]
f3 = [0]
f4 = [0]
f5 = [0]
a1 = [0]
a2 = [0]
a3 = [0]
a4 = [0]
a5 = [0]
Fin = 0
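# Same two pair / one pair logic as the flop round, now with the turn card
# (column 11) included among the board ranks.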
# Evaluate P1
TP1 = 0
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] == list[3] or list[
0] == list[4] or list[0] == list[5]):
TP1 = TP1 + 1
f1.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5]):
TP1 = TP1 + 1
f1.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f1.append(list[1])
if TP1 > 1:
f1 = np.sort(f1)[::-1].tolist()
a1.append(f1[0])
a1.append(f1[1])
Fin = Fin + 1
# Evaluate P2
TP2 = 0
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] == list[3] or
list[0] == list[4] or list[0] == list[5]):
TP2 = TP2 + 1
f2.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5]):
TP2 = TP2 + 1
f2.append(list[1])
if (list[0] == list[1]):
TP2 = TP2 + 1
f2.append(list[1])
if TP2 > 1:
f2 = np.sort(f2)[::-1].tolist()
a2.append(f2[0])
a2.append(f2[1])
Fin = Fin + 1
# Evaluate P3
TP3 = 0
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4] or list[0] == list[5]):
TP3 = TP3 + 1
f3.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5]):
TP3 = TP3 + 1
f3.append(list[1])
if (list[0] == list[1]):
TP3 = TP3 + 1
f3.append(list[1])
if TP3 > 1:
f3 = np.sort(f3)[::-1].tolist()
a3.append(f3[0])
a3.append(f3[1])
Fin = Fin + 1
# Evaluate P4
TP4 = 0
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4] or list[0] == list[5]):
TP4 = TP4 + 1
f4.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[
4] or list[1] == list[5]):
TP4 = TP4 + 1
f4.append(list[1])
if (list[0] == list[1]):
TP4 = TP4 + 1
f4.append(list[1])
if TP4 > 1:
f4 = np.sort(f4)[::-1].tolist()
a4.append(f4[0])
a4.append(f4[1])
Fin = Fin + 1
# Evaluate P5
TP5 = 0
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] ==
list[3] or list[0] == list[4] or list[0] == list[5]):
TP5 = TP5 + 1
f5.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] ==
list[4] or list[1] == list[5]):
TP5 = TP5 + 1
f5.append(list[1])
if (list[0] == list[1]):
TP5 = TP5 + 1
f5.append(list[1])
if TP5 > 1:
f5 = np.sort(f5)[::-1].tolist()
a5.append(f5[0])
a5.append(f5[1])
Fin = Fin + 1
#Check for two pair
if Fin > 0:
print "Two pair"
b = max(max(a1),max(a2),max(a3),max(a4),max(a5))
if max(a1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif max(a2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif max(a3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif max(a4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif max(a5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
#Check for one pair
elif TP1+TP2+TP3+TP4+TP5 > 0:
print "One pair"
b = max(max(f1),max(f2),max(f3),max(f4),max(f5))
if max(f1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if max(f2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if max(f3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if max(f4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if max(f5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Find the high card
print "High Card"
winner = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3] )
if df1.iloc[i * 4 + k - 1, 1] == winner or df1.iloc[
i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner or df2.iloc[
i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner or df3.iloc[
i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner or df4.iloc[
i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner or df5.iloc[
i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
if k == 4:
#Create the River
df1.loc[i * 4 + k-1] = df1.loc[i * 4 + k - 2]
df2.loc[i * 4 + k - 1] = df2.loc[i * 4 + k - 2]
df3.loc[i * 4 + k - 1] = df3.loc[i * 4 + k - 2]
df4.loc[i * 4 + k - 1] = df4.loc[i * 4 + k - 2]
df5.loc[i * 4 + k - 1] = df5.loc[i * 4 + k - 2]
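# The river row was seeded from the turn row above; the loop below draws the
# fifth community card (column 12 appears to hold the suit, column 13 the rank)
# and redraws until it differs from every card already dealt in this game.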
while x == 0:
print "Step 14"
# Generate 5th community card or the river
df1.iloc[i * 4 + k-1][12] = random.randrange(1, 5)
df1.iloc[i * 4 + k-1][13] = random.randrange(2, 15)
df2.iloc[i * 4 + k - 1][12] = df1.iloc[i * 4 + k - 1][12]
df2.iloc[i * 4 + k - 1][13] = df1.iloc[i * 4 + k - 1][13]
df3.iloc[i * 4 + k - 1][12] = df1.iloc[i * 4 + k - 1][12]
df3.iloc[i * 4 + k - 1][13] = df1.iloc[i * 4 + k - 1][13]
df4.iloc[i * 4 + k - 1][12] = df1.iloc[i * 4 + k - 1][12]
df4.iloc[i * 4 + k - 1][13] = df1.iloc[i * 4 + k - 1][13]
df5.iloc[i * 4 + k - 1][12] = df1.iloc[i * 4 + k - 1][12]
df5.iloc[i * 4 + k - 1][13] = df1.iloc[i * 4 + k - 1][13]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 10] and df1.iloc[i * 4 + k - 1, 13] ==
df1.iloc[i * 4 + k - 1, 11]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 8] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 9]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 6] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 7]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 4] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 5]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 1])\
or (df1.iloc[i * 4 + k - 1, 12] == df2.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df2.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 12] == df3.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df3.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 12] == df4.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df4.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 12] == df5.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df5.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 12] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 12] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 12] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 12] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
#Evaluate river round
list = [[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1]]
df1.iloc[i * 4 + k - 1, 14] = 0
df2.iloc[i * 4 + k - 1, 14] = 0
df3.iloc[i * 4 + k - 1, 14] = 0
df4.iloc[i * 4 + k - 1, 14] = 0
df5.iloc[i * 4 + k - 1, 14] = 0
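# River evaluation mirrors the turn, but over seven [rank, suit] cards, so the
# straight-flush check below tests three sliding five-card windows instead of two.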
# Straight Flush Evaluation
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# P1 Evaluation
#With Ace Low
list[0] = [df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 0]]
list[1] = [df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 2]]
list[2] = [df1.iloc[i * 4 + k - 1, 5], df1.iloc[i * 4 + k - 1, 4]]
list[3] = [df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 6]]
list[4] = [df1.iloc[i * 4 + k - 1, 9], df1.iloc[i * 4 + k - 1, 8]]
list[5] = [df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 10]]
list[6] = [df1.iloc[i * 4 + k - 1, 13], df1.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a1 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a1 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][0] and list[5][0]+1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and list[5][1] == list[6][1]:
a1 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 0]]
list[1] = [df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 2]]
list[2] = [df1.iloc[i * 4 + k - 1, 5], df1.iloc[i * 4 + k - 1, 4]]
list[3] = [df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 6]]
list[4] = [df1.iloc[i * 4 + k - 1, 9], df1.iloc[i * 4 + k - 1, 8]]
list[5] = [df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 10]]
list[6] = [df1.iloc[i * 4 + k - 1, 13], df1.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a1 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a1 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][0] and list[5][0]+1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and list[5][1] == list[6][1]:
a1 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P2 Evaluation
# With Ace Low
list[0] = [df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 0]]
list[1] = [df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 2]]
list[2] = [df2.iloc[i * 4 + k - 1, 5], df2.iloc[i * 4 + k - 1, 4]]
list[3] = [df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 6]]
list[4] = [df2.iloc[i * 4 + k - 1, 9], df2.iloc[i * 4 + k - 1, 8]]
list[5] = [df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 10]]
list[6] = [df2.iloc[i * 4 + k - 1, 13], df2.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a2 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a2 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a2 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 0]]
list[1] = [df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 2]]
list[2] = [df2.iloc[i * 4 + k - 1, 5], df2.iloc[i * 4 + k - 1, 4]]
list[3] = [df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 6]]
list[4] = [df2.iloc[i * 4 + k - 1, 9], df2.iloc[i * 4 + k - 1, 8]]
list[5] = [df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 10]]
list[6] = [df2.iloc[i * 4 + k - 1, 13], df2.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a2 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a2 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a2 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P3 Evaluation
# With Ace Low
list[0] = [df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 0]]
list[1] = [df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 2]]
list[2] = [df3.iloc[i * 4 + k - 1, 5], df3.iloc[i * 4 + k - 1, 4]]
list[3] = [df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 6]]
list[4] = [df3.iloc[i * 4 + k - 1, 9], df3.iloc[i * 4 + k - 1, 8]]
list[5] = [df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 10]]
list[6] = [df3.iloc[i * 4 + k - 1, 13], df3.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a3 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a3 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a3 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 0]]
list[1] = [df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 2]]
list[2] = [df3.iloc[i * 4 + k - 1, 5], df3.iloc[i * 4 + k - 1, 4]]
list[3] = [df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 6]]
list[4] = [df3.iloc[i * 4 + k - 1, 9], df3.iloc[i * 4 + k - 1, 8]]
list[5] = [df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 10]]
list[6] = [df3.iloc[i * 4 + k - 1, 13], df3.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a3 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a3 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a3 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P4 Evaluation
# With Ace Low
list[0] = [df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 0]]
list[1] = [df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 2]]
list[2] = [df4.iloc[i * 4 + k - 1, 5], df4.iloc[i * 4 + k - 1, 4]]
list[3] = [df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 6]]
list[4] = [df4.iloc[i * 4 + k - 1, 9], df4.iloc[i * 4 + k - 1, 8]]
list[5] = [df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 10]]
list[6] = [df4.iloc[i * 4 + k - 1, 13], df4.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a4 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a4 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a4 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 0]]
list[1] = [df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 2]]
list[2] = [df4.iloc[i * 4 + k - 1, 5], df4.iloc[i * 4 + k - 1, 4]]
list[3] = [df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 6]]
list[4] = [df4.iloc[i * 4 + k - 1, 9], df4.iloc[i * 4 + k - 1, 8]]
list[5] = [df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 10]]
list[6] = [df4.iloc[i * 4 + k - 1, 13], df4.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a4 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a4 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a4 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P5 Evaluation
# With Ace Low
list[0] = [df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 0]]
list[1] = [df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 2]]
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list[6] = [df5.iloc[i * 4 + k - 1, 13], df5.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a5 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a5 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a5 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 0]]
list[1] = [df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 2]]
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list[6] = [df5.iloc[i * 4 + k - 1, 13], df5.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a5 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a5 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a5 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# Straight Flush Evaluation in community cards
master = 0
# With Ace Low
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list[6] = [df5.iloc[i * 4 + k - 1, 13], df5.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
master = 1
# With Ace High
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list[6] = [df5.iloc[i * 4 + k - 1, 13], df5.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
master = 1
# Check for Straight flush
if master == 1:
print "Royal Flush in community cards"
elif (SF > 0):
print "Straight Flush"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for four of a kind
FK = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a1 = m
break
if count == 4:
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a2 = m
break
if count == 4:
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a3 = m
break
if count == 4:
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a4 = m
break
if count == 4:
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a5 = m
break
if count == 4:
break
# Checking for Four of a kind
if (FK > 0):
print "Four of a kind"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for full house
FH = 0
a1i = 0
a1ii = 0
a2i = 0
a2ii = 0
a3i = 0
a3ii = 0
a4i = 0
a4ii = 0
a5i = 0
a5ii = 0
next = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a1i = m
a1ii = n
break
if count == 2:
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a2i = m
a2ii = n
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a3i = m
a3ii = n
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a4i = m
a4ii = n
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
                        list[6] = df5.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a5i = m
a5ii = n
break
# Evaluating for Full House
if (FH > 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
c = 0
if a1i == b:
c = c + 1
                            if a2i == b:
                                c = c + 1
                            if a3i == b:
                                c = c + 1
                            if a4i == b:
                                c = c + 1
                            if a5i == b:
c = c + 1
if c > 1:
b = max(a1ii, a2ii, a3ii, a4ii, a5ii)
if a1ii == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2ii == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3ii == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4ii == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5ii == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
elif (FH == 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Evaluate for Flush
F = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# Evaluate P1
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
list[5] = df1.iloc[i * 4 + k - 1, 10]
                            list[6] = df1.iloc[i * 4 + k - 1, 12]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a1 = m
break
# Evaluate P2
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
list[5] = df2.iloc[i * 4 + k - 1, 10]
                            list[6] = df2.iloc[i * 4 + k - 1, 12]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a2 = m
break
# Evaluate P3
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
list[5] = df3.iloc[i * 4 + k - 1, 10]
                            list[6] = df3.iloc[i * 4 + k - 1, 12]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a3 = m
break
# Evaluate P4
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
list[5] = df4.iloc[i * 4 + k - 1, 10]
                            list[6] = df4.iloc[i * 4 + k - 1, 12]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a4 = m
break
# Evaluate P5
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
list[5] = df5.iloc[i * 4 + k - 1, 10]
                            list[6] = df5.iloc[i * 4 + k - 1, 12]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a5 = m
break
if F > 0:
print "Flush"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for Straight
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# P1 Evaluation
# With Ace Low
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == list[5]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == list[6]:
                                    a1 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# With Ace High
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == list[
5]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
                                    a1 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# P2 Evaluation
# With Ace Low
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
                                for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
                                    a2 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# With Ace High
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
                                list[6] = df2.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
                                    a2 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# P3 Evaluation
# With Ace Low
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
                                for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
                                    a3 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# With Ace High
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
                                    a3 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# P4 Evaluation
# With Ace Low
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
                                for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
                                    a4 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# With Ace High
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
                                    a4 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# P5 Evaluation
# With Ace Low
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
                                for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
                                    a5 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# With Ace High
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
                                    a5 = max(list[2], list[3], list[4], list[5], list[6])
SF = SF + 1
# Check for Straight
if (SF > 0):
print "Straight"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for 3 of a kind
FH = 0
a1i = 0
a2i = 0
a3i = 0
a4i = 0
a5i = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a1i = m
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a2i = m
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a3i = m
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a4i = m
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a5i = m
break
# Evaluating for 3 of a kind
if (FH > 0):
print "3 of a kind"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Evaluate for two pair and one pair
f1 = [0]
f2 = [0]
f3 = [0]
f4 = [0]
f5 = [0]
a1 = [0]
a2 = [0]
a3 = [0]
a4 = [0]
a5 = [0]
Fin = 0
# Evaluate P1
TP1 = 0
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] == list[3] or list[
0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP1 = TP1 + 1
f1.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5] or list[1] == list[6]):
TP1 = TP1 + 1
f1.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f1.append(list[1])
if TP1 > 1:
                                            f1 = np.sort(f1)[::-1].tolist()
a1.append(f1[0])
a1.append(f1[1])
Fin = Fin + 1
# Evaluate P2
TP2 = 0
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] == list[3] or
list[0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP2 = TP2 + 1
f2.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5] or list[1] == list[6]):
TP2 = TP2 + 1
f2.append(list[1])
if (list[0] == list[1]):
                                            TP2 = TP2 + 1
f2.append(list[1])
if TP2 > 1:
                                            f2 = np.sort(f2)[::-1].tolist()
a2.append(f2[0])
a2.append(f2[1])
Fin = Fin + 1
# Evaluate P3
TP3 = 0
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP3 = TP3 + 1
f3.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5] or list[1] == list[6]):
TP3 = TP3 + 1
f3.append(list[1])
if (list[0] == list[1]):
                                            TP3 = TP3 + 1
f3.append(list[1])
if TP3 > 1:
                                            f3 = np.sort(f3)[::-1].tolist()
a3.append(f3[0])
a3.append(f3[1])
Fin = Fin + 1
# Evaluate P4
TP4 = 0
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] == list[3] or list[0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP4 = TP4 + 1
f4.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[
4] or list[1] == list[5] or list[1] == list[6]):
TP4 = TP4 + 1
f4.append(list[1])
if (list[0] == list[1]):
                                            TP4 = TP4 + 1
f4.append(list[1])
if TP4 > 1:
                                            f4 = np.sort(f4)[::-1].tolist()
a4.append(f4[0])
a4.append(f4[1])
Fin = Fin + 1
# Evaluate P5
TP5 = 0
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] ==
list[3] or list[0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP5 = TP5 + 1
f5.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] ==
list[4] or list[1] == list[5] or list[1] == list[6]):
TP5 = TP5 + 1
f5.append(list[1])
if (list[0] == list[1]):
                                            TP5 = TP5 + 1
f5.append(list[1])
if TP5 > 1:
                                            f5 = np.sort(f5)[::-1].tolist()
a5.append(f5[0])
a5.append(f5[1])
Fin = Fin + 1
#Check for two pair
if Fin > 0:
print "Two pair"
b = max(max(a1),max(a2),max(a3),max(a4),max(a5))
if max(a1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif max(a2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif max(a3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif max(a4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif max(a5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
#Check for one pair
elif TP1+TP2+TP3+TP4+TP5 > 0:
print "One pair"
b = max(max(f1),max(f2),max(f3),max(f4),max(f5))
if max(f1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if max(f2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if max(f3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if max(f4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if max(f5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Find the high card
print "High Card"
winner = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3])
if df1.iloc[i * 4 + k - 1, 1] == winner or df1.iloc[
i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner or df2.iloc[
i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner or df3.iloc[
i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner or df4.iloc[
i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner or df5.iloc[
i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
df1.to_csv('P1.csv', index=False)
df2.to_csv('P2.csv', index=False)
df3.to_csv('P3.csv', index=False)
df4.to_csv('P4.csv', index=False)
df5.to_csv('P5.csv', index=False)
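# ---------------------------------------------------------------------------
# Hedged sketch (not used by the script above, added for illustration only):
# the per-player checks repeated for df1..df5 can be expressed once with
# collections.Counter. It assumes a 7-card hand given as (rank, suit) tuples
# with ranks 2-14 (14 = Ace), matching the column layout read above, and it
# deliberately omits straight / straight-flush detection for brevity.
from collections import Counter


def classify_counts(cards):
    """Coarse hand label from rank/suit multiplicities only (sketch)."""
    rank_counts = sorted(Counter(rank for rank, _ in cards).values(), reverse=True)
    flush = max(Counter(suit for _, suit in cards).values()) >= 5
    if rank_counts[0] == 4:
        return "Four of a kind"
    if rank_counts[0] == 3 and rank_counts[1] >= 2:
        return "Full House"
    if flush:
        return "Flush"
    if rank_counts[0] == 3:
        return "3 of a kind"
    if rank_counts[0] == 2 and rank_counts[1] == 2:
        return "Two pair"
    if rank_counts[0] == 2:
        return "One pair"
    return "High Card"


# Example: three aces and a pair of nines form a full house.
# classify_counts([(14, 1), (14, 2), (14, 3), (9, 1), (9, 2), (5, 0), (2, 3)])
# -> "Full House"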
|
import frappe
from frappe import _
from chat.utils import validate_token, get_admin_name, get_chat_settings, get_user_settings
import json
@frappe.whitelist(allow_guest=True)
def settings(token):
"""Fetch and return the settings for a chat session
Args:
token (str): Guest token.
"""
config = {
'socketio_port': frappe.conf.socketio_port,
'user_email': frappe.session.user,
        'is_admin': 'user_type' in frappe.session.data,
'guest_title': ''.join(frappe.get_hooks('guest_title')),
}
config = {**config, **get_chat_settings()}
if config['is_admin']:
config['user'] = get_admin_name(config['user_email'])
config['user_settings'] = get_user_settings()
else:
config['user'] = 'Guest'
token_verify = validate_token(token)
if token_verify[0] is True:
config['room'] = token_verify[1]['room']
config['user_email'] = token_verify[1]['email']
config['is_verified'] = True
else:
config['is_verified'] = False
return config
@frappe.whitelist()
def user_settings(settings):
settings = json.loads(settings)
if not frappe.db.exists('Chat User Settings', frappe.session.user):
settings_doc = frappe.get_doc({
'doctype': 'Chat User Settings',
'user': frappe.session.user,
'enable_notifications': settings['enable_notifications'],
'enable_message_tone': settings['enable_message_tone'],
}).insert()
else:
settings_doc = frappe.get_doc(
'Chat User Settings', frappe.session.user)
settings_doc.enable_notifications = settings['enable_notifications']
settings_doc.enable_message_tone = settings['enable_message_tone']
settings_doc.save()
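# Hedged usage example (assumption, not taken from the source): the payload
# `user_settings` expects is a JSON string carrying exactly the two keys read
# above, e.g.
#
#   import json
#   user_settings(json.dumps({"enable_notifications": 1, "enable_message_tone": 0}))
#
# which creates or updates the "Chat User Settings" document for the session user.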
|
import sys
from taggedtree.repl import dispatch_subcommand
from os.path import expanduser
def main():
fname = expanduser("~/.tt.json")
cmds = tuple(sys.argv[1:])
dispatch_subcommand(fname, cmds)
if __name__ == "__main__":
main()
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('manual')
class ManualTask(object):
"""Only execute task when specified with --tasks"""
schema = {'type': 'boolean'}
@plugin.priority(255)
def on_task_start(self, task, config):
# Make sure we need to run
if not config:
return
# If --task hasn't been specified disable this plugin
if not task.options.tasks or task.name not in task.options.tasks:
log.debug('Disabling task %s' % task.name)
task.abort('manual task not specified in --tasks', silent=True)
@event('plugin.register')
def register_plugin():
plugin.register(ManualTask, 'manual', api_ver=2)
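# Hedged usage sketch (assumption, not taken from the source): with this plugin
# a task is skipped on scheduled runs and only executes when named explicitly,
# e.g. in the FlexGet config
#
#   tasks:
#     my-manual-task:
#       manual: yes
#       # ... other task plugins ...
#
# and then run on demand with:  flexget execute --tasks my-manual-task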
|
"""
McsPy
~~~~~
McsPy is a Python module/package to read, handle and operate on HDF5-based raw data
files converted from recordings of devices of the Multi Channel Systems MCS GmbH.
:copyright: (c) 2020 by Multi Channel Systems MCS GmbH
:license: see LICENSE for more details
"""
#print("McsPy init!")
version = "0.4.1"
#__all__ = ["CMOSData", "CMOSConvProxy", "RawData", "Recording", "Stream", "AnalogStream",
# "Info", "InfoSampledData", "ChannelInfo", "FrameStream", "FrameEntity", "Frame",
# "FrameEntityInfo", "EventStream", "EventEntity", "EventEntityInfo", "SegmentStream",
# "SegmentEntity", "AverageSegmentTuple", "AverageSegmentEntity", "SegmentEntityInfo",
# "TimeStampStream", "TimeStampEntity", "TimeStampEntityInfo"]
# Supported MCS-HDF5 protocol types and versions:
class McsHdf5Protocols:
"""
Class of supported MCS-HDF5 protocol types and version ranges
Entry: (Protocol Type Name => Tuple of supported version range from (including) the first version entry up to (including) the second version entry)
"""
SUPPORTED_PROTOCOLS = {"RawData" : (1, 3), # from first to second version number and including this versions
"CMOS_MEA" : (1, 1), #from first to first version
"InfoChannel" : (1, 1), # Info-Object Versions
"FrameEntityInfo" : (1, 1),
"EventEntityInfo" : (1, 1),
"SegmentEntityInfo" : (1, 4),
"TimeStampEntityInfo" : (1, 1),
"AnalogStreamInfoVersion" : (1, 1), # StreamInfo-Object Versions
"FrameStreamInfoVersion" : (1, 1),
"EventStreamInfoVersion" : (1, 1),
"SegmentStreamInfoVersion" : (1, 1),
"TimeStampStreamInfoVersion" : (1, 1)}
@classmethod
    def check_protocol_type_version(cls, protocol_type_name, version):
"""
Check if the given version of a protocol is supported by the implementation
:param protocol_type_name: name of the protocol that is tested
:param version: version number that should be checked
:returns: is true if the given protocol and version is supported
"""
if protocol_type_name in McsHdf5Protocols.SUPPORTED_PROTOCOLS:
supported_versions = McsHdf5Protocols.SUPPORTED_PROTOCOLS[protocol_type_name]
if (version < supported_versions[0]) or (supported_versions[1] < version):
raise IOError('Given HDF5 file contains \'%s\' type of version %s and supported are only all versions from %s up to %s' %
(protocol_type_name, version, supported_versions[0], supported_versions[1]))
else:
raise IOError("The given HDF5 contains a type \'%s\' that is unknown in this implementation!" % protocol_type_name)
return True
# Supported MCS-HDF5 file structure types and versions:
class McsHdf5Types:
"""
Class of supported MCS-HDF5 file structure types and version ranges
Entry: (Protocol TypeID => Tuple of supported version range from (including) the first version entry up to (including) the second version entry)
"""
SUPPORTED_TYPES = {"RawData" : (1, 3), # from first to second version number and including this versions
"cabb6cdd-47e0-417a-8e04-5664cbbc449b" : {"McsPyClass": "McsCMOSMEAData", "Tag": None}, #CMOSMEA file format, from first to first version
"650d88ce-9f24-4b20-ac2b-254defd12761" : {"McsPyClass": "Acquisition", "Tag": None}, #Acquisition group
"9217aeb4-59a0-4d7f-bdcd-0371c9fd66eb" : {"McsPyClass": "McsChannelStream", "Tag": "Channel Stream"}, #Analog Stream group (comprises analog and digital data)
"9e8ac9cd-5571-4ee5-bbfa-8e9d9c436daa" : {"McsPyClass": "McsInfo", "Tag": "Channel Stream Meta"}, #Analog Stream Meta Dataset
"5efe7932-dcfe-49ff-ba53-25accff5d622" : {"McsPyClass": "McsChannelEntity", "Tag": "Channel Stream Data"}, #Analog Stream Data Dataset
"09f288a5-6286-4bed-a05c-02859baea8e3" : {"McsPyClass": "McsEventStream", "Tag": "Event Stream"}, #Event Stream group
"8f58017a-1279-4d0f-80b0-78f2d80402b4" : {"McsPyClass": "McsInfo", "Tag": "Event Stream Meta"}, #Event Meta Dataset
"abca7b0c-b6ce-49fa-ad74-a20c352fe4a7" : {"McsPyClass": "McsDataset", "Tag": "Event Stream Data"}, #Event Data Dataset
"15e5a1fe-df2f-421b-8b60-23eeb2213c45" : {"McsPyClass": "McsSensorStream", "Tag": "Sensor Stream"}, #Sensor Stream group, FrameStream
"ab2aa189-2e72-4148-a2ef-978119223412" : {"McsPyClass": "McsInfo", "Tag": "Sensor Stream Meta"}, #Sensor Meta Dataset
"49da47df-f397-4121-b5da-35317a93e705" : {"McsPyClass": "McsSensorEntity", "Tag": "Sensor Stream Data"}, #Sensor Data Dataset
"35f15fa5-8427-4d07-8460-b77a7e9b7f8d" : {"McsPyClass": "SegmentStream", "Tag": "Segment Stream"}, #SegmentStream"
"425ce2e0-f1d6-4604-8ab4-6a2facbb2c3e" : {"McsPyClass": None, "Tag": "TimeStamp Stream"}, #TimeStampStream
"26efe891-c075-409b-94f8-eb3a7dd68c94" : {"McsPyClass": "McsSpikeStream", "Tag": "Spike Stream"}, #SpikeStream
"e1d7616f-621c-4a26-8f60-a7e63a9030b7" : {"McsPyClass": "McsInfo", "Tag": "Spike Stream Meta"}, #SpikeStream Meta Dataset
"3e8aaacc-268b-4057-b0bb-45d7dc9ec73b" : {"McsPyClass": "McsSpikeEntity", "Tag": "Spike Stream Data"}, #SpikeStream Data Dataset
"2f8c246f-9bab-4193-b09e-03aefe17ede0" : {"McsPyClass": "FilterTool", "Tag": None}, #Filter Tool group
"c632506d-c961-4a9f-b22b-ac7a56ce3552" : {"McsPyClass": None, "Tag": None}, #Pipe Tool group
"941c8edb-78b3-4275-a5b2-6876cbcdeffc" : {"McsPyClass": "NetworkExplorer", "Tag": None}, #STA Explorer group
"442b7514-fe3a-4c66-8ae9-4f249ef48f2f" : {"McsPyClass": None, "Tag": None}, #STA Entity Dataset
"a95db4a1-d124-4c52-8889-2264fcdb489b" : {"McsPyClass": None, "Tag": None}, #SettingsMapCreatorSpike and SettingsMapCreatorSta Dataset
"de316ac6-ad66-4d78-acc4-e3f29bd40991" : {"McsPyClass": None, "Tag": None}, #SettingsVideoControl Dataset
"44b29fba-ec5c-48b5-8e0e-02ad9b9ac83a" : {"McsPyClass": None, "Tag": None}, #SettingsStaExplorer Dataset
"935a1aa6-4082-482e-9d4d-1ad60d1b1680" : {"McsPyClass": None, "Tag": None}, #SettingsStaCreator Dataset
"c6a37148-fa9e-42f2-9d38-eea0434851e2" : {"McsPyClass": "SpikeExplorer", "Tag": None}, #Spike Explorer group
"58c92502-516e-46f6-ac50-44e6dd17a3ff" : {"McsPyClass": None, "Tag": None}, #SettingsSpikeDetector Dataset
"ef54ef3d-3619-43aa-87ba-dc5f57f7e861" : {"McsPyClass": None, "Tag": None}, #SettingsSpikeExplorer Dataset
"1b4e0b8b-6af1-4b55-a685-a6d28a922eb3" : {"McsPyClass": "McsSpikeEntity", "Tag": "Spike Data"}, #SpikeData Dataset
"f5dc873b-4aed-4a54-8c19-5743908684bb" : {"McsPyClass": None, "Tag": None}, #SpikePeakActivity Dataset
"7263d1b7-f57a-42de-8f51-5d6326d22f2a" : {"McsPyClass": "SpikeSorter", "Tag": None}, #Spike Sorter group
"0e5a97df-9de0-4a22-ab8c-54845c1ff3b9" : {"McsPyClass": "SpikeSorterUnitEntity","Tag": None}, #Spike Sorter Entity group
"3fa908a3-fac9-4a80-96a1-310d9bcdf617" : {"McsPyClass": None, "Tag": None}, #ProjectionMatrix Dataset
"3533aded-b369-4529-836d-9629eb1a27a8" : {"McsPyClass": None, "Tag": None}, #SettingsPeakDetection Dataset
"f20b653e-25fb-4f7a-ae8a-f35044f46720" : {"McsPyClass": None, "Tag": None}, #SettingsPostProcessing Dataset
"c7d23018-9006-45fe-942f-c5d0f9cde284" : {"McsPyClass": None, "Tag": None}, #SettingsRoiDetection Dataset
"713a9202-87e1-4bfe-ba80-b909a000aae5" : {"McsPyClass": None, "Tag": None}, #SettingsSorterComputing Dataset
"62bc7b9f-7eea-4a88-a438-c618067d49f4" : {"McsPyClass": None, "Tag": None}, #SettingsSorterGeneral
"9cdcea3f-88aa-40cf-89db-818315a2644a" : {"McsPyClass": "ActivitySummary", "Tag": None}, #Activity Summary group
}
@classmethod
    def get_mcs_class_name(cls, typeID):
"""
Returns the McsPy class name, that corresponds to a given Mcs HDF5 file structure type. The function also checks if the requested class supports
the Mcs HDF5 file structure type version
:param typeID: name of the type that is tested
:returns: a McsCMOSMEA class if the given type and version is supported
"""
if not typeID in McsHdf5Types.SUPPORTED_TYPES:
return None
class_name = McsHdf5Types.SUPPORTED_TYPES[typeID]['McsPyClass']
if class_name is None:
return None
return getattr(McsCMOSMEA, class_name)
from pint import UnitRegistry
ureg = UnitRegistry()
Q_ = ureg.Quantity
ureg.define('NoUnit = [quantity]')
from McsPy import McsCMOSMEA
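# Hedged self-check sketch (not part of the original module): exercises the two
# lookup helpers above with values taken from their own tables.
if __name__ == "__main__":
    # "RawData" versions 1..3 are declared as supported, so this returns True.
    print(McsHdf5Protocols.check_protocol_type_version("RawData", 2))
    # An out-of-range version raises IOError.
    try:
        McsHdf5Protocols.check_protocol_type_version("RawData", 99)
    except IOError as err:
        print(err)
    # Unknown type IDs simply map to no class.
    print(McsHdf5Types.get_mcs_class_name("not-a-known-type-id"))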
|
from pythoncalculator.JMuten_divide import divide
def test_divide():
assert divide(10, 2) == 5
|
import numpy as np
def build_local_integration_grid_circle(n_quad_points, r_c):
    # Gauss-Legendre quadrature on the unit disk (by KyoungJoong Kim and ManSuk Song)
if n_quad_points == 1:
w_1 = 3.141592653589793
x_1 = 0.0
quad_point_x = np.array([x_1]) * r_c
quad_point_y = np.array([x_1]) * r_c
quad_weight = np.array([w_1]) * r_c * r_c
elif n_quad_points == 4:
w_1 = 0.785398163397448
x_1 = 0.5
quad_point_x = np.array([x_1, -x_1, -x_1, x_1]) * r_c
quad_point_y = np.array([x_1, x_1, -x_1, -x_1]) * r_c
quad_weight = np.array([w_1, w_1, w_1, w_1]) * r_c * r_c
elif n_quad_points == 8:
w_1 = 0.732786462492640
w_2 = 0.052611700904808
x_1 = 0.650115167343736
x_2 = 0.888073833977115
quad_point_x = np.array([x_1, 0.0, -x_1, 0.0, x_2, -x_2, -x_2, x_2]) * r_c
quad_point_y = np.array([0.0, x_1, 0.0, -x_1, x_2, x_2, -x_2, -x_2]) * r_c
quad_weight = np.array([w_1, w_1, w_1, w_1, w_2, w_2, w_2, w_2]) * r_c * r_c
elif n_quad_points == 12:
w_1 = 0.232710566932577
w_2 = 0.387077796006226
w_3 = 0.165609800458645
x_1 = 0.866025403784439
x_2 = 0.322914992067400
x_3 = 0.644171310389465
quad_point_x = np.array([x_1, 0.0, -x_1, 0.0, x_2, -x_2, -x_2, x_2, x_3, -x_3, -x_3, x_3]) * r_c
quad_point_y = np.array([0.0, x_1, 0.0, -x_1, x_2, x_2, -x_2, -x_2, x_3, x_3, -x_3, -x_3]) * r_c
quad_weight = np.array([w_1, w_1, w_1, w_1, w_2, w_2, w_2, w_2, w_3, w_3, w_3, w_3]) * r_c * r_c
elif n_quad_points == 20:
w_1 = 0.071488826617391
w_2 = 0.327176874928167
w_3 = 0.005591341512851
w_4 = 0.190570560169519
x_1 = 0.952458896434417
x_2 = 0.415187657878755
x_3 = 0.834794942216211
x_4 = 0.740334457173511
y_4 = 0.379016937530835
quad_point_x = np.array(
[x_1, 0.0, -x_1, 0.0, x_2, 0.0, -x_2, 0.0, x_3, -x_3, -x_3, x_3, x_4, -x_4, -x_4, x_4, y_4, y_4, -y_4,
-y_4]) * r_c
quad_point_y = np.array(
[0.0, x_1, 0.0, -x_1, 0.0, x_2, 0.0, -x_2, x_3, x_3, -x_3, -x_3, y_4, y_4, -y_4, -y_4, x_4, -x_4, -x_4,
x_4]) * r_c
quad_weight = np.array(
[w_1, w_1, w_1, w_1, w_2, w_2, w_2, w_2, w_3, w_3, w_3, w_3, w_4, w_4, w_4, w_4, w_4, w_4, w_4,
w_4]) * r_c * r_c
elif n_quad_points == 44:
x_1 = 0.252863797091293
x_2 = 0.989746802511614
x_3 = 0.577728928444958
x_4 = 0.873836956645035
x_5 = 0.689299380791136
x_6 = 0.597614304667208
x_7 = 0.375416824626170
x_8 = 0.883097111318591
y_8 = 0.365790800400663
x_9 = 0.707438744960070
y_9 = 0.293030722710664
w_1 = 0.125290208564338
w_2 = 0.016712625496982
w_3 = 0.109500391126365
w_4 = 0.066237455796397
w_5 = 0.026102860184358
w_6 = 0.066000934661100
w_7 = 0.127428372681720
w_8 = 0.042523065826681
w_9 = 0.081539591616413
quad_point_x = np.array(
[x_1, 0.0, -x_1, 0.0, x_2, 0.0, -x_2, 0.0, x_3, 0.0, -x_3, 0.0, x_4, 0.0, -x_4, 0.0,
x_5, -x_5, -x_5, x_5, x_6, -x_6, -x_6, x_6, x_7, -x_7, -x_7, x_7,
x_8, -x_8, -x_8, x_8, y_8, y_8, -y_8, -y_8,
x_9, -x_9, -x_9, x_9, y_9, y_9, -y_9, -y_9]) * r_c
quad_point_y = np.array(
[0.0, x_1, 0.0, -x_1, 0.0, x_2, 0.0, -x_2, 0.0, x_3, 0.0, -x_3, 0.0, x_4, 0.0, -x_4,
x_5, x_5, -x_5, -x_5, x_6, x_6, -x_6, -x_6, x_7, x_7, -x_7, -x_7,
y_8, y_8, -y_8, -y_8, x_8, -x_8, -x_8, x_8,
y_9, y_9, -y_9, -y_9, x_9, -x_9, -x_9, x_9]) * r_c
quad_weight = np.array(
[w_1, w_1, w_1, w_1, w_2, w_2, w_2, w_2, w_3, w_3, w_3, w_3, w_4, w_4, w_4, w_4,
w_5, w_5, w_5, w_5, w_6, w_6, w_6, w_6, w_7, w_7, w_7, w_7,
w_8, w_8, w_8, w_8, w_8, w_8, w_8, w_8,
w_9, w_9, w_9, w_9, w_9, w_9, w_9, w_9]) * r_c * r_c
elif n_quad_points == 72:
w_1 = 0.082558858859169
x_1 = 0.204668989256100
w_2 = 0.009721593541193
x_2 = 0.992309839464756
w_3 = 0.061920685878045
x_3 = 0.740931035494388
w_4 = 0.079123279187043
x_4 = 0.477987648986077
w_5 = 0.087526733002317
x_5 = 0.306138805262459
w_6 = 0.057076811471306
x_6 = 0.524780156099700
w_7 = 0.020981864256888
x_7 = 0.921806074110042
y_7 = 0.310920075968188
w_8 = 0.015226392255721
x_8 = 0.790235832571934
y_8 = 0.579897645710646
w_9 = 0.033136884897617
x_9 = 0.725790566968788
y_9 = 0.525045580895713
w_10 = 0.044853730819348
x_10 = 0.788230650371813
y_10 = 0.290244481132460
w_11 = 0.065321481701811
x_11 = 0.584894890453686
y_11 = 0.264317463415838
w_12 = 0.024214746797802
x_12 = 0.909637445684200
y_12 = 0.09257113237088
quad_point_x = np.array(
[x_1, 0.0, -x_1, 0.0, x_2, 0.0, -x_2, 0.0, x_3, 0.0, -x_3, 0.0, x_4, 0.0, -x_4, 0.0,
x_5, -x_5, -x_5, x_5, x_6, -x_6, -x_6, x_6,
x_7, -x_7, -x_7, x_7, y_7, y_7, -y_7, -y_7,
x_8, -x_8, -x_8, x_8, y_8, y_8, -y_8, -y_8,
x_9, -x_9, -x_9, x_9, y_9, y_9, -y_9, -y_9,
x_10, -x_10, -x_10, x_10, y_10, y_10, -y_10, -y_10,
x_11, -x_11, -x_11, x_11, y_11, y_11, -y_11, -y_11,
x_12, -x_12, -x_12, x_12, y_12, y_12, -y_12, -y_12]) * r_c
quad_point_y = np.array(
[0.0, x_1, 0.0, -x_1, 0.0, x_2, 0.0, -x_2, 0.0, x_3, 0.0, -x_3, 0.0, x_4, 0.0, -x_4,
x_5, x_5, -x_5, -x_5, x_6, x_6, -x_6, -x_6,
y_7, y_7, -y_7, -y_7, x_7, -x_7, -x_7, x_7,
y_8, y_8, -y_8, -y_8, x_8, -x_8, -x_8, x_8,
y_9, y_9, -y_9, -y_9, x_9, -x_9, -x_9, x_9,
y_10, y_10, -y_10, -y_10, x_10, -x_10, -x_10, x_10,
y_11, y_11, -y_11, -y_11, x_11, -x_11, -x_11, x_11,
y_12, y_12, -y_12, -y_12, x_12, -x_12, -x_12, x_12]) * r_c
quad_weight = np.array(
[w_1, w_1, w_1, w_1, w_2, w_2, w_2, w_2, w_3, w_3, w_3, w_3, w_4, w_4, w_4, w_4,
w_5, w_5, w_5, w_5, w_6, w_6, w_6, w_6,
w_7, w_7, w_7, w_7, w_7, w_7, w_7, w_7,
w_8, w_8, w_8, w_8, w_8, w_8, w_8, w_8,
w_9, w_9, w_9, w_9, w_9, w_9, w_9, w_9,
w_10, w_10, w_10, w_10, w_10, w_10, w_10, w_10,
w_11, w_11, w_11, w_11, w_11, w_11, w_11, w_11,
w_12, w_12, w_12, w_12, w_12, w_12, w_12, w_12]) * r_c * r_c
else:
raise ValueError("No set of points/weights for the choice of " + str(n_quad_points) + " quadrature point!")
return quad_point_x, quad_point_y, quad_weight
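# Sanity check (not part of the original routine): every rule above should
# integrate low-order polynomials exactly over a disk of radius r_c, i.e. the
# weights sum to pi * r_c**2 and sum(w_i * x_i**2) equals pi * r_c**4 / 4.
# A minimal sketch using the 4-point rule values copied from above:
#
#     import numpy as np
#     r_c = 2.0
#     w = np.full(4, 0.785398163397448) * r_c * r_c
#     px = np.array([0.5, -0.5, -0.5, 0.5]) * r_c
#     py = np.array([0.5, 0.5, -0.5, -0.5]) * r_c
#     assert np.isclose(w.sum(), np.pi * r_c ** 2)                   # integral of 1
#     assert np.isclose((w * px ** 2).sum(), np.pi * r_c ** 4 / 4)   # integral of x^2
#     assert np.isclose((w * py ** 2).sum(), np.pi * r_c ** 4 / 4)   # integral of y^2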
|
#!/usr/bin/env python
"""
Copyright 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint:disable=missing-docstring
import logging
import unittest
import time
import os
from test.hardware.test_helper import Helper
import serial
import six
import mbed_lstools
from mbed_flasher.flash import Flash
from mbed_flasher.reset import Reset
from mbed_flasher.return_codes import EXIT_CODE_SUCCESS
def verify_output_per_device(serial_port, command, output):
# print 'Inspecting %s SERIAL device' % serial_port
ser = serial.Serial(
port=serial_port,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
if ser.isOpen():
time.sleep(0.2)
if six.PY2:
ser.write('%s\n\r' % command)
else:
new_command = '%s\n\r' % command
ser.write(new_command.encode('utf-8'))
out = ''
time.sleep(0.5)
while ser.inWaiting() > 0:
if six.PY2:
out += ser.read(1)
else:
out += ser.read(1).decode('utf-8', "replace")
if out.find(output) != -1:
ser.close()
return True
ser.close()
return False
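# Example usage (illustrative; the serial port name is hypothetical and depends
# on the attached board):
#     verify_output_per_device('/dev/ttyACM0', 'help', 'echo')
# returns True only when the command's response contains the expected substring.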
# this is not a const
# pylint: disable=invalid-name
mbed = mbed_lstools.create()
class FlashVerifyTestCase(unittest.TestCase):
"""
    Flash verification with hardware, a three-step verification for all attached devices:
    first, flash the helloworld binary to the device and verify that no response is seen;
    second, flash the example application binary to the device and verify that a response is seen;
    third, flash the helloworld binary again and verify that no response is seen.
"""
bin_path = os.path.join('test', 'helloworld.bin')
second_bin_path = os.path.join('test', 'example_app_K64F.bin')
def setUp(self):
logging.disable(logging.CRITICAL)
Helper(platform_name='K64F', allowed_files=['DETAILS.TXT', 'MBED.HTM']).clear()
def tearDown(self):
Helper(platform_name='K64F', allowed_files=['DETAILS.TXT', 'MBED.HTM']).clear()
def test_verify_hw_flash(self):
mbeds = mbed_lstools.create()
targets = mbeds.list_mbeds()
flasher = Flash()
target_id = None
serial_port = None
for target in targets:
if target['platform_name'] == 'K64F':
                if 'serial_port' in target and 'target_id' in target:
target_id = target['target_id']
serial_port = target['serial_port']
break
if target_id and serial_port:
ret = flasher.flash(build=self.bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple',
target_filename=self.bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
self.assertEqual(verify_output_per_device(serial_port, 'help', 'echo'), False)
ret = flasher.flash(build=self.second_bin_path,
target_id=target_id, platform_name='K64F',
device_mapping_table=False, method='simple',
target_filename=self.second_bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
if not verify_output_per_device(serial_port, 'help', 'echo'):
self.assertEqual(
verify_output_per_device(serial_port, 'help', 'echo'), True)
ret = flasher.flash(build=self.bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple',
target_filename=self.bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
self.assertEqual(verify_output_per_device(serial_port, 'help', 'echo'), False)
def test_verify_hw_flash_no_reset(self):
mbeds = mbed_lstools.create()
targets = mbeds.list_mbeds()
flasher = Flash()
resetter = Reset()
target_id = None
serial_port = None
for target in targets:
if target['platform_name'] == 'K64F':
                if 'serial_port' in target and 'target_id' in target:
target_id = target['target_id']
serial_port = target['serial_port']
break
if target_id and serial_port:
ret = flasher.flash(build=self.second_bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple')
self.assertEqual(ret, EXIT_CODE_SUCCESS)
if not verify_output_per_device(serial_port, 'help', 'echo'):
self.assertEqual(
verify_output_per_device(serial_port, 'help', 'echo'), True)
ret = flasher.flash(build=self.second_bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple',
no_reset=True,
target_filename=self.second_bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
self.assertEqual(verify_output_per_device(serial_port, 'help', 'echo'), False)
ret = resetter.reset(target_id=target_id, method='simple')
self.assertEqual(ret, EXIT_CODE_SUCCESS)
if not verify_output_per_device(serial_port, 'help', 'echo'):
self.assertEqual(
verify_output_per_device(serial_port, 'help', 'echo'), True)
ret = flasher.flash(build=self.bin_path,
target_id=target_id,
platform_name='K64F',
device_mapping_table=False,
method='simple',
target_filename=self.bin_path)
self.assertEqual(ret, EXIT_CODE_SUCCESS)
self.assertEqual(verify_output_per_device(serial_port, 'help', 'echo'), False)
if __name__ == '__main__':
unittest.main()
|
# Copyright 2013 Evan Hazlett and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, include, url
from tastypie.api import Api
from django.contrib import admin
admin.autodiscover()
from containers.api import ContainerResource
from applications.api import ApplicationResource
from hosts.api import HostResource
v1_api = Api(api_name='v1')
v1_api.register(ContainerResource())
v1_api.register(ApplicationResource())
v1_api.register(HostResource())
urlpatterns = patterns('',
url(r'^$', 'shipyard.views.index', name='index'),
url(r'^api/', include(v1_api.urls)),
url(r'^accounts/', include('accounts.urls')),
url(r'^applications/', include('applications.urls')),
url(r'^containers/', include('containers.urls')),
url(r'^images/', include('images.urls')),
url(r'^hosts/', include('hosts.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
import os
import codecs
from setuptools import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with codecs.open(os.path.join(*paths), 'r', 'utf-8') as f:
return f.read()
version = '0.6.1'
setup(
name='deezer-python',
version=version,
description='A friendly wrapper library for the Deezer API',
long_description=(read('README.rst') + '\n\n' +
read('HISTORY.rst') + '\n\n' +
read('AUTHORS.rst')),
author='Bruno Alla',
author_email='alla.brunoo@gmail.com',
url='https://github.com/browniebroke/deezer-python',
download_url='https://github.com/browniebroke/deezer-python/tarball/{0}'.format(version),
license='MIT',
packages=['deezer'],
install_requires=[
'tornado',
'six',
'requests',
],
tests_require=[
'requests-mock',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
import sys
from rpython.rtyper.lltypesystem.lltype import *
from rpython.translator.translator import TranslationContext
from rpython.translator.c.database import LowLevelDatabase
from rpython.flowspace.model import Constant, Variable, SpaceOperation
from rpython.flowspace.model import Block, Link, FunctionGraph
from rpython.rtyper.lltypesystem.lltype import getfunctionptr
from rpython.rtyper.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr
def dump_on_stdout(database):
print '/*********************************/'
structdeflist = database.getstructdeflist()
for node in structdeflist:
for line in node.definition():
print line
print
for node in database.globalcontainers():
for line in node.forward_declaration():
print line
for node in database.globalcontainers():
print
for line in node.implementation():
print line
def test_primitive():
db = LowLevelDatabase()
if is_emulated_long:
assert db.get(5) == '5LL'
else:
assert db.get(5) == '5L'
assert db.get(True) == '1'
def test_struct():
db = LowLevelDatabase()
pfx = db.namespace.global_prefix + 'g_'
S = GcStruct('test', ('x', Signed))
s = malloc(S)
s.x = 42
assert db.get(s).startswith('(&'+pfx)
assert db.containernodes.keys() == [s._obj]
assert db.structdefnodes.keys() == [S]
def test_inlined_struct():
db = LowLevelDatabase()
pfx = db.namespace.global_prefix + 'g_'
S = GcStruct('test', ('x', Struct('subtest', ('y', Signed))))
s = malloc(S)
s.x.y = 42
assert db.get(s).startswith('(&'+pfx)
assert db.containernodes.keys() == [s._obj]
db.complete()
assert len(db.structdefnodes) == 2
assert S in db.structdefnodes
assert S.x in db.structdefnodes
def test_complete():
db = LowLevelDatabase()
pfx = db.namespace.global_prefix + 'g_'
T = GcStruct('subtest', ('y', Signed))
S = GcStruct('test', ('x', Ptr(T)))
s = malloc(S)
s.x = malloc(T)
s.x.y = 42
assert db.get(s).startswith('(&'+pfx)
assert db.containernodes.keys() == [s._obj]
db.complete()
assert len(db.containernodes) == 2
assert s._obj in db.containernodes
assert s.x._obj in db.containernodes
assert len(db.structdefnodes) == 2
assert S in db.structdefnodes
assert S.x.TO in db.structdefnodes
def test_codegen():
db = LowLevelDatabase()
U = Struct('inlined', ('z', Signed))
T = Struct('subtest', ('y', Signed))
S = Struct('test', ('x', Ptr(T)), ('u', U), ('p', Ptr(U)))
s = malloc(S, immortal=True)
s.x = malloc(T, immortal=True)
s.x.y = 42
s.u.z = -100
s.p = s.u
db.get(s)
db.complete()
dump_on_stdout(db)
def test_codegen_2():
db = LowLevelDatabase()
A = GcArray(('x', Signed))
S = GcStruct('test', ('aptr', Ptr(A)))
a = malloc(A, 3)
a[0].x = 100
a[1].x = 101
a[2].x = 102
s = malloc(S)
s.aptr = a
db.get(s)
db.complete()
dump_on_stdout(db)
def test_codegen_3():
db = LowLevelDatabase()
A = Struct('varsizedstuff', ('x', Signed), ('y', Array(('i', Signed))))
S = Struct('test', ('aptr', Ptr(A)),
('anitem', Ptr(A.y.OF)),
('anarray', Ptr(A.y)))
a = malloc(A, 3, immortal=True)
a.x = 99
a.y[0].i = 100
a.y[1].i = 101
a.y[2].i = 102
s = malloc(S, immortal=True)
s.aptr = a
s.anitem = a.y[1]
s.anarray = a.y
db.get(s)
db.complete()
dump_on_stdout(db)
def test_func_simple():
# -------------------- flowgraph building --------------------
# def f(x):
# return x+1
x = Variable("x")
x.concretetype = Signed
result = Variable("result")
result.concretetype = Signed
one = Constant(1)
one.concretetype = Signed
op = SpaceOperation("int_add", [x, one], result)
block = Block([x])
graph = FunctionGraph("f", block)
block.operations.append(op)
block.closeblock(Link([result], graph.returnblock))
graph.getreturnvar().concretetype = Signed
# -------------------- end --------------------
F = FuncType([Signed], Signed)
f = functionptr(F, "f", graph=graph)
db = LowLevelDatabase()
db.get(f)
db.complete()
dump_on_stdout(db)
S = GcStruct('testing', ('fptr', Ptr(F)))
s = malloc(S)
s.fptr = f
db = LowLevelDatabase()
db.get(s)
db.complete()
dump_on_stdout(db)
# ____________________________________________________________
def makegraph(func, argtypes):
t = TranslationContext()
t.buildannotator().build_types(func, [int])
t.buildrtyper().specialize()
bk = t.annotator.bookkeeper
graph = bk.getdesc(func).getuniquegraph()
return t, graph
def test_function_call():
def g(x, y):
return x-y
def f(x):
return g(1, x)
t, graph = makegraph(f, [int])
F = FuncType([Signed], Signed)
f = functionptr(F, "f", graph=graph)
db = LowLevelDatabase(t, exctransformer=t.getexceptiontransformer())
db.get(f)
db.complete()
dump_on_stdout(db)
def test_malloc():
S = GcStruct('testing', ('x', Signed), ('y', Signed))
def ll_f(x):
p = malloc(S)
p.x = x
p.y = x+1
return p.x * p.y
t, graph = makegraph(ll_f, [int])
db = LowLevelDatabase(t, exctransformer=t.getexceptiontransformer())
db.get(getfunctionptr(graph))
db.complete()
dump_on_stdout(db)
def test_multiple_malloc():
S1 = GcStruct('testing1', ('x', Signed), ('y', Signed))
S = GcStruct('testing', ('ptr1', Ptr(S1)),
('ptr2', Ptr(S1)),
('z', Signed))
def ll_f(x):
ptr1 = malloc(S1)
ptr1.x = x
ptr2 = malloc(S1)
ptr2.x = x+1
s = malloc(S)
s.ptr1 = ptr1
s.ptr2 = ptr2
return s.ptr1.x * s.ptr2.x
t, graph = makegraph(ll_f, [int])
db = LowLevelDatabase(t, exctransformer=t.getexceptiontransformer())
db.get(getfunctionptr(graph))
db.complete()
dump_on_stdout(db)
def test_array_of_char():
A = GcArray(Char)
a = malloc(A, 11)
for i, c in zip(range(11), 'hello world'):
a[i] = c
db = LowLevelDatabase()
db.get(a)
db.complete()
dump_on_stdout(db)
def test_voidp():
A = VOIDP
db = LowLevelDatabase()
assert db.gettype(A) == "void *@"
def test_intlong_unique():
A = INT_real
B = Signed
db = LowLevelDatabase()
assert db.gettype(A) == "int @"
assert db.gettype(B) == "Signed @"
def test_recursive_struct():
S = GcForwardReference()
S.become(GcStruct('testing', ('p', Ptr(S))))
p = malloc(S)
p.p = p
db = LowLevelDatabase()
db.get(p)
db.complete()
dump_on_stdout(db)
def test_typedef():
A = Typedef(Signed, 'test4')
db = LowLevelDatabase()
assert db.gettype(A) == "test4 @"
PA = CArrayPtr(A)
assert db.gettype(PA) == "test4 *@"
F = FuncType((A,), A)
assert db.gettype(F) == "test4 (@)(test4)"
|
# from twilio.rest import Client
# # Your Account SID from twilio.com/console
# account_sid = "AC4100c72954a1f9949fc4700a8d0594bb"
# # Your Auth Token from twilio.com/console
# auth_token = "e1529115d0f1a57b6b8e6b17644f6087"
# client = Client(account_sid, auth_token)
# message = client.messages \
# .create(
# to="+919930035998",
# from_="+19496196487",
# body="Hello from Python!")
# print(message.sid)
def call1():
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'AC09c050b96951b1bbed41f71ab5f2f472'
auth_token = '8630c09dff748daf78bcc1436ba2e34a'
client = Client(account_sid, auth_token)
message = client.messages \
.create(
body="The user wants to connect",
from_='+14156896062',
to='+918655232275'
)
print(message.sid)
def call2():
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'AC09c050b96951b1bbed41f71ab5f2f472'
auth_token = '8630c09dff748daf78bcc1436ba2e34a'
client = Client(account_sid, auth_token)
message = client.messages \
.create(
body="The user is requesting a call",
from_='+14156896062',
to='+918879490461'
)
print(message.sid)
|
#
# Copyright 2017 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import charmhelpers.core as core
import charmhelpers.core.host as ch_host
import charmhelpers.core.hookenv as hookenv
import charmhelpers.core.unitdata as unitdata
import charmhelpers.contrib.openstack.templating as os_templating
import charmhelpers.contrib.openstack.utils as os_utils
import charms_openstack.charm
import charms_openstack.adapters
import os
import subprocess
from lxml import etree
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
# release detection is done via keystone package given that
# openstack-origin is not present in the subordinate charm
# see https://github.com/juju/charm-helpers/issues/83
from charms_openstack.charm.core import (
register_os_release_selector
)
OPENSTACK_RELEASE_KEY = 'charmers.openstack-release-version'
CONFIGS = (IDP_METADATA, SP_METADATA, SP_PRIVATE_KEY,
SP_LOCATION_CONFIG,) = [
os.path.join('/etc/apache2/mellon',
f.format(hookenv.service_name())) for f in [
'idp-meta.{}.xml',
'sp-meta.{}.xml',
'sp-pk.{}.pem',
'sp-location.{}.conf']]
@register_os_release_selector
def select_release():
"""Determine the release based on the keystone package version.
Note that this function caches the release after the first install so
that it doesn't need to keep going and getting it from the package
information.
"""
release_version = unitdata.kv().get(OPENSTACK_RELEASE_KEY, None)
if release_version is None:
release_version = os_utils.os_release('keystone')
unitdata.kv().set(OPENSTACK_RELEASE_KEY, release_version)
return release_version
class KeystoneSAMLMellonConfigurationAdapter(
charms_openstack.adapters.ConfigurationAdapter):
def __init__(self, charm_instance=None):
super().__init__(charm_instance=charm_instance)
self._idp_metadata = None
self._sp_private_key = None
self._sp_signing_keyinfo = None
self._validation_errors = {}
@property
def validation_errors(self):
return {k: v for k, v in
self._validation_errors.items() if v}
@property
def remote_id_attribute(self):
# Mellon module environment variables are prefixed with MELLON_
# and mod_auth_mellon has a default setting of: MellonIdP "IDP"
return "MELLON_IDP"
@property
def idp_metadata_file(self):
return IDP_METADATA
@property
def sp_metadata_file(self):
return SP_METADATA
@property
def sp_private_key_file(self):
return SP_PRIVATE_KEY
@property
def sp_location_config(self):
return SP_LOCATION_CONFIG
@property
def keystone_host(self):
return unitdata.kv().get('hostname')
@property
def keystone_port(self):
return unitdata.kv().get('port')
@property
def tls_enabled(self):
return unitdata.kv().get('tls-enabled')
@property
def keystone_base_url(self):
scheme = 'https' if self.tls_enabled else 'http'
return ('{}://{}:{}'.format(scheme, self.keystone_host,
self.keystone_port))
@property
def sp_idp_path(self):
return ('/v3/OS-FEDERATION/identity_providers/{}'
.format(self.idp_name))
@property
def sp_protocol_path(self):
return ('{}/protocols/{}'
.format(self.sp_idp_path, self.protocol_name))
@property
def sp_auth_path(self):
return '{}/auth'.format(self.sp_protocol_path)
@property
def mellon_endpoint_path(self):
return '{}/mellon'.format(self.sp_auth_path)
@property
def websso_auth_protocol_path(self):
return ('/v3/auth/OS-FEDERATION/websso/{}'
.format(self.protocol_name))
@property
def websso_auth_idp_protocol_path(self):
return ('/v3/auth/OS-FEDERATION/identity_providers'
'/{}/protocols/{}/websso'.format(
self.idp_name,
self.protocol_name
))
@property
def sp_post_response_path(self):
return '{}/postResponse'.format(self.mellon_endpoint_path)
@property
def sp_logout_path(self):
return '{}/logout'.format(self.mellon_endpoint_path)
@property
def sp_auth_url(self):
return '{}{}'.format(self.keystone_base_url,
self.sp_auth_path)
@property
def sp_logout_url(self):
return '{}{}'.format(self.keystone_base_url,
self.sp_logout_path)
@property
def sp_post_response_url(self):
return '{}{}'.format(self.keystone_base_url,
self.sp_post_response_path)
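    # Illustrative example (values are hypothetical): with idp_name='samltest',
    # protocol_name='mapped', a keystone host of 'ks.example.com' on port 5000
    # and TLS enabled, sp_post_response_url resolves to
    # https://ks.example.com:5000/v3/OS-FEDERATION/identity_providers/samltest/protocols/mapped/auth/mellon/postResponse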
@property
def mellon_subject_confirmation_data_address_check(self):
return ('On' if self.subject_confirmation_data_address_check
else 'Off')
@property
def supported_nameid_formats(self):
return self.nameid_formats.split(',')
IDP_METADATA_INVALID = ('idp-metadata resource is not a well-formed'
' xml file')
@property
def idp_metadata(self):
idp_metadata_path = hookenv.resource_get('idp-metadata')
if os.path.exists(idp_metadata_path) and not self._idp_metadata:
with open(idp_metadata_path) as f:
content = f.read()
try:
etree.fromstring(content)
self._idp_metadata = content
self._validation_errors['idp-metadata'] = None
except etree.XMLSyntaxError:
self._idp_metadata = ''
self._validation_errors['idp-metadata'] = (
self.IDP_METADATA_INVALID)
return self._idp_metadata
SP_SIGNING_KEYINFO_INVALID = ('sp-signing-keyinfo resource is not a'
' well-formed xml file')
@property
def sp_signing_keyinfo(self):
info_path = hookenv.resource_get('sp-signing-keyinfo')
if os.path.exists(info_path) and not self._sp_signing_keyinfo:
self._sp_signing_keyinfo = None
with open(info_path) as f:
content = f.read()
try:
etree.fromstring(content)
self._sp_signing_keyinfo = content
self._validation_errors['sp-signing-keyinfo'] = None
except etree.XMLSyntaxError:
self._sp_signing_keyinfo = ''
self._validation_errors['sp-signing-keyinfo'] = (
self.SP_SIGNING_KEYINFO_INVALID)
return self._sp_signing_keyinfo
SP_PRIVATE_KEY_INVALID = ('resource is not a well-formed'
' RFC 5958 (PKCS#8) key')
@property
def sp_private_key(self):
pk_path = hookenv.resource_get('sp-private-key')
if os.path.exists(pk_path) and not self._sp_private_key:
with open(pk_path) as f:
content = f.read()
try:
serialization.load_pem_private_key(
content.encode(),
password=None,
backend=default_backend()
)
self._sp_private_key = content
self._validation_errors['sp-private-key'] = None
except ValueError:
self._sp_private_key = ''
self._validation_errors['sp-private-key'] = (
self.SP_PRIVATE_KEY_INVALID)
return self._sp_private_key
class KeystoneSAMLMellonCharm(charms_openstack.charm.OpenStackCharm):
# Internal name of charm
service_name = name = 'keystone-saml-mellon'
# Package to derive application version from
version_package = 'keystone'
# First release supported
release = 'mitaka'
# List of packages to install for this charm
packages = ['libapache2-mod-auth-mellon']
configuration_class = KeystoneSAMLMellonConfigurationAdapter
# render idP metadata provided out of band to establish
# SP -> idP trust. A domain name config parameter is evaluated at
# class definition time but this happens every event execution,
    # including config-changed. Changing domain-name dynamically is not
    # a real use case anyway, and it should be defined at deployment time.
string_templates = {
IDP_METADATA: ('options', 'idp_metadata'),
SP_PRIVATE_KEY: ('options', 'sp_private_key'),
}
def configuration_complete(self):
"""Determine whether sufficient configuration has been provided
via charm config options and resources.
:returns: boolean indicating whether configuration is complete
"""
required_config = {
'idp-name': self.options.idp_name,
'protocol-name': self.options.protocol_name,
'user-facing-name': self.options.user_facing_name,
'idp-metadata': self.options.idp_metadata,
'sp-private-key': self.options.sp_private_key,
'sp-signing-keyinfo': self.options.sp_signing_keyinfo,
'nameid-formats': self.options.nameid_formats,
}
return all(required_config.values())
def assess_status(self):
"""Determine the current application status for the charm"""
hookenv.application_version_set(self.application_version)
if not self.configuration_complete():
errors = [
'{}: {}'.format(k, v)
for k, v in self.options.validation_errors.items() if v]
status_msg = 'Configuration is incomplete. {}'.format(
','.join(errors))
hookenv.status_set('blocked', status_msg)
else:
hookenv.status_set('active',
'Unit is ready')
def render_config(self):
"""
Render Service Provider configuration file to be used by Apache
and provided to idP out of band to establish mutual trust.
"""
owner = 'root'
group = 'www-data'
# group read and exec is needed for mellon to read the rendered
# files, otherwise it will fail in a cryptic way
dperms = 0o650
# file permissions are a bit more restrictive than defaults in
# charm-helpers but directory permissions are the main protection
# mechanism in this case
fileperms = 0o440
# ensure that a directory we need is there
ch_host.mkdir('/etc/apache2/mellon', perms=dperms, owner=owner,
group=group)
self.render_configs(self.string_templates.keys())
core.templating.render(
source='mellon-sp-metadata.xml',
template_loader=os_templating.get_loader(
'templates/', self.release),
target=self.options.sp_metadata_file,
context=self.adapters_instance,
owner=owner,
group=group,
perms=fileperms
)
core.templating.render(
source='apache-mellon-location.conf',
template_loader=os_templating.get_loader(
'templates/', self.release),
target=self.options.sp_location_config,
context=self.adapters_instance,
owner=owner,
group=group,
perms=fileperms
)
def remove_config(self):
for f in CONFIGS:
if os.path.exists(f):
os.unlink(f)
def enable_module(self):
subprocess.check_call(['a2enmod', 'auth_mellon'])
def disable_module(self):
subprocess.check_call(['a2dismod', 'auth_mellon'])
|
"""
.. module:: volume
:synopsis: Volume Indicators.
.. moduleauthor:: Dario Lopez Padial (Bukosabino)
"""
import numpy as np
import pandas as pd
from ta.utils import IndicatorMixin, ema
class AccDistIndexIndicator(IndicatorMixin):
"""Accumulation/Distribution Index (ADI)
    Acts as a leading indicator of price movements.
https://school.stockcharts.com/doku.php?id=technical_indicators:accumulation_distribution_line
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, close: pd.Series, volume: pd.Series, fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._volume = volume
self._fillna = fillna
self._run()
def _run(self):
clv = ((self._close - self._low) - (self._high - self._close)) / (self._high - self._low)
clv = clv.fillna(0.0) # float division by zero
ad = clv * self._volume
self._ad = ad.cumsum()
def acc_dist_index(self) -> pd.Series:
"""Accumulation/Distribution Index (ADI)
Returns:
pandas.Series: New feature generated.
"""
ad = self._check_fillna(self._ad, value=0)
return pd.Series(ad, name='adi')
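# Worked ADI example (illustrative, not part of the library): for a single bar
# with high=10.0, low=8.0, close=9.5 and volume=1000,
#     clv = ((9.5 - 8.0) - (10.0 - 9.5)) / (10.0 - 8.0) = 0.5
# so that bar contributes 0.5 * 1000 = 500 to the cumulative ADI line.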
class OnBalanceVolumeIndicator(IndicatorMixin):
"""On-balance volume (OBV)
It relates price and volume in the stock market. OBV is based on a
cumulative total volume.
https://en.wikipedia.org/wiki/On-balance_volume
Args:
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, volume: pd.Series, fillna: bool = False):
self._close = close
self._volume = volume
self._fillna = fillna
self._run()
def _run(self):
obv = np.where(self._close < self._close.shift(1), -self._volume, self._volume)
self._obv = pd.Series(obv, index=self._close.index).cumsum()
def on_balance_volume(self) -> pd.Series:
"""On-balance volume (OBV)
Returns:
pandas.Series: New feature generated.
"""
obv = self._check_fillna(self._obv, value=0)
return pd.Series(obv, name='obv')
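# Worked OBV example (illustrative): with closes [10, 11, 10.5, 12] and
# volumes [100, 150, 120, 90], np.where yields [+100, +150, -120, +90]
# (the first comparison against a shifted NaN is False, so the first volume is
# added), and the cumulative sum gives OBV = [100, 250, 130, 220].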
class ChaikinMoneyFlowIndicator(IndicatorMixin):
"""Chaikin Money Flow (CMF)
It measures the amount of Money Flow Volume over a specific period.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:chaikin_money_flow_cmf
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, close: pd.Series,
volume: pd.Series, n: int = 20, fillna: bool = False):
self._high = high
self._low = low
self._close = close
self._volume = volume
self._n = n
self._fillna = fillna
self._run()
def _run(self):
mfv = ((self._close - self._low) - (self._high - self._close)) / (self._high - self._low)
mfv = mfv.fillna(0.0) # float division by zero
mfv *= self._volume
self._cmf = mfv.rolling(self._n, min_periods=0).sum() / self._volume.rolling(self._n, min_periods=0).sum()
def chaikin_money_flow(self) -> pd.Series:
"""Chaikin Money Flow (CMF)
Returns:
pandas.Series: New feature generated.
"""
cmf = self._check_fillna(self._cmf, value=0)
return pd.Series(cmf, name='cmf')
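# Worked CMF example (illustrative) with n=2: bar 1 (high=10, low=8, close=9.5,
# volume=1000) gives mfv = 0.5 * 1000 = 500; bar 2 (high=11, low=9, close=9.2,
# volume=2000) gives mfv = ((0.2) - (1.8)) / 2 * 2000 = -1600, so
# cmf = (500 - 1600) / (1000 + 2000) ~= -0.367.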
class ForceIndexIndicator(IndicatorMixin):
"""Force Index (FI)
It illustrates how strong the actual buying or selling pressure is. High
positive values mean there is a strong rising trend, and low values signify
a strong downward trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:force_index
Args:
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, volume: pd.Series, n: int = 13, fillna: bool = False):
self._close = close
self._volume = volume
self._n = n
self._fillna = fillna
self._run()
def _run(self):
fi = (self._close - self._close.shift(1)) * self._volume
self._fi = ema(fi, self._n, fillna=self._fillna)
def force_index(self) -> pd.Series:
"""Force Index (FI)
Returns:
pandas.Series: New feature generated.
"""
fi = self._check_fillna(self._fi, value=0)
return pd.Series(fi, name=f'fi_{self._n}')
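# Worked FI example (illustrative): a close that moves from 100 to 102 on a
# volume of 1500 produces a raw force index of (102 - 100) * 1500 = 3000 for
# that bar; the reported FI is the n-period EMA (n=13 by default) of this
# raw series.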
class EaseOfMovementIndicator(IndicatorMixin):
"""Ease of movement (EoM, EMV)
    It relates an asset's price change to its volume and is particularly useful
for assessing the strength of a trend.
https://en.wikipedia.org/wiki/Ease_of_movement
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
volume(pandas.Series): dataset 'Volume' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, high: pd.Series, low: pd.Series, volume: pd.Series, n: int = 14, fillna: bool = False):
self._high = high
self._low = low
self._volume = volume
self._n = n
self._fillna = fillna
self._run()
def _run(self):
self._emv = (self._high.diff(1) + self._low.diff(1)) * (self._high - self._low) / (2 * self._volume)
self._emv *= 100000000
def ease_of_movement(self) -> pd.Series:
"""Ease of movement (EoM, EMV)
Returns:
pandas.Series: New feature generated.
"""
emv = self._check_fillna(self._emv, value=0)
return pd.Series(emv, name=f'eom_{self._n}')
def sma_ease_of_movement(self) -> pd.Series:
"""Signal Ease of movement (EoM, EMV)
Returns:
pandas.Series: New feature generated.
"""
emv = self._emv.rolling(self._n, min_periods=0).mean()
emv = self._check_fillna(emv, value=0)
return pd.Series(emv, name=f'sma_eom_{self._n}')
class VolumePriceTrendIndicator(IndicatorMixin):
"""Volume-price trend (VPT)
    It is based on a running cumulative volume that adds or subtracts a multiple
of the percentage change in share price trend and current volume, depending
upon the investment's upward or downward movements.
https://en.wikipedia.org/wiki/Volume%E2%80%93price_trend
Args:
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, volume: pd.Series, fillna: bool = False):
self._close = close
self._volume = volume
self._fillna = fillna
self._run()
def _run(self):
vpt = (self._volume * ((self._close - self._close.shift(1, fill_value=self._close.mean()))
/ self._close.shift(1, fill_value=self._close.mean())))
self._vpt = vpt.shift(1, fill_value=vpt.mean()) + vpt
def volume_price_trend(self) -> pd.Series:
"""Volume-price trend (VPT)
Returns:
pandas.Series: New feature generated.
"""
vpt = self._check_fillna(self._vpt, value=0)
return pd.Series(vpt, name='vpt')
class NegativeVolumeIndexIndicator(IndicatorMixin):
"""Negative Volume Index (NVI)
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:negative_volume_inde
Args:
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
fillna(bool): if True, fill nan values with 1000.
"""
def __init__(self, close: pd.Series, volume: pd.Series, fillna: bool = False):
self._close = close
self._volume = volume
self._fillna = fillna
self._run()
def _run(self):
price_change = self._close.pct_change()
vol_decrease = (self._volume.shift(1) > self._volume)
self._nvi = pd.Series(data=np.nan, index=self._close.index, dtype='float64', name='nvi')
self._nvi.iloc[0] = 1000
for i in range(1, len(self._nvi)):
if vol_decrease.iloc[i]:
self._nvi.iloc[i] = self._nvi.iloc[i - 1] * (1.0 + price_change.iloc[i])
else:
self._nvi.iloc[i] = self._nvi.iloc[i - 1]
def negative_volume_index(self) -> pd.Series:
"""Negative Volume Index (NVI)
Returns:
pandas.Series: New feature generated.
"""
# IDEA: There shouldn't be any na; might be better to throw exception
nvi = self._check_fillna(self._nvi, value=1000)
return pd.Series(nvi, name='nvi')
def acc_dist_index(high, low, close, volume, fillna=False):
"""Accumulation/Distribution Index (ADI)
    Acts as a leading indicator of price movements.
https://en.wikipedia.org/wiki/Accumulation/distribution_index
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
return AccDistIndexIndicator(high=high, low=low, close=close, volume=volume, fillna=fillna).acc_dist_index()
def on_balance_volume(close, volume, fillna=False):
"""On-balance volume (OBV)
It relates price and volume in the stock market. OBV is based on a
cumulative total volume.
https://en.wikipedia.org/wiki/On-balance_volume
Args:
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
return OnBalanceVolumeIndicator(close=close, volume=volume, fillna=fillna).on_balance_volume()
def chaikin_money_flow(high, low, close, volume, n=20, fillna=False):
"""Chaikin Money Flow (CMF)
It measures the amount of Money Flow Volume over a specific period.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:chaikin_money_flow_cmf
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
return ChaikinMoneyFlowIndicator(
high=high, low=low, close=close, volume=volume, n=n, fillna=fillna).chaikin_money_flow()
def force_index(close, volume, n=13, fillna=False):
"""Force Index (FI)
It illustrates how strong the actual buying or selling pressure is. High
positive values mean there is a strong rising trend, and low values signify
a strong downward trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:force_index
Args:
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
return ForceIndexIndicator(close=close, volume=volume, n=n, fillna=fillna).force_index()
def ease_of_movement(high, low, volume, n=14, fillna=False):
"""Ease of movement (EoM, EMV)
    It relates an asset's price change to its volume and is particularly useful
for assessing the strength of a trend.
https://en.wikipedia.org/wiki/Ease_of_movement
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
volume(pandas.Series): dataset 'Volume' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
return EaseOfMovementIndicator(
high=high, low=low, volume=volume, n=n, fillna=fillna).ease_of_movement()
def sma_ease_of_movement(high, low, volume, n=14, fillna=False):
"""Ease of movement (EoM, EMV)
    It relates an asset's price change to its volume and is particularly useful
for assessing the strength of a trend.
https://en.wikipedia.org/wiki/Ease_of_movement
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
volume(pandas.Series): dataset 'Volume' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
return EaseOfMovementIndicator(
high=high, low=low, volume=volume, n=n, fillna=fillna).sma_ease_of_movement()
def volume_price_trend(close, volume, fillna=False):
"""Volume-price trend (VPT)
    It is based on a running cumulative volume that adds or subtracts a multiple
of the percentage change in share price trend and current volume, depending
upon the investment's upward or downward movements.
https://en.wikipedia.org/wiki/Volume%E2%80%93price_trend
Args:
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
return VolumePriceTrendIndicator(close=close, volume=volume, fillna=fillna).volume_price_trend()
def negative_volume_index(close, volume, fillna=False):
"""Negative Volume Index (NVI)
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:negative_volume_inde
The Negative Volume Index (NVI) is a cumulative indicator that uses the
change in volume to decide when the smart money is active. Paul Dysart
first developed this indicator in the 1930s. [...] Dysart's Negative Volume
Index works under the assumption that the smart money is active on days
when volume decreases and the not-so-smart money is active on days when
volume increases.
The cumulative NVI line was unchanged when volume increased from one
period to the other. In other words, nothing was done. Norman Fosback, of
Stock Market Logic, adjusted the indicator by substituting the percentage
price change for Net Advances.
This implementation is the Fosback version.
If today's volume is less than yesterday's volume then:
nvi(t) = nvi(t-1) * ( 1 + (close(t) - close(t-1)) / close(t-1) )
Else
nvi(t) = nvi(t-1)
Please note: the "stockcharts.com" example calculation just adds the
    percentage change of price to previous NVI when volumes decline; other
sources indicate that the same percentage of the previous NVI value should
be added, which is what is implemented here.
Args:
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
fillna(bool): if True, fill nan values with 1000.
Returns:
pandas.Series: New feature generated.
See also:
https://en.wikipedia.org/wiki/Negative_volume_index
"""
return NegativeVolumeIndexIndicator(close=close, volume=volume, fillna=fillna).negative_volume_index()
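# Worked NVI example of the Fosback recurrence above (illustrative): starting
# from nvi = 1000, if volume falls on the next bar while close moves from 100
# to 102 (+2%), nvi becomes 1000 * (1 + 0.02) = 1020; if volume then rises on
# the following bar, nvi stays at 1020.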
# TODO
def put_call_ratio():
"""Put/Call ratio (PCR)
https://en.wikipedia.org/wiki/Put/call_ratio
"""
# TODO
pass
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Customer(models.Model):
"""create customer model based on the default user"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
name = models.CharField(max_length=255, null=True)
email = models.CharField(max_length=255, null=True)
def __str__(self):
return self.name
@receiver(post_save, sender=User)
def user_is_created(sender, instance, created, **kwargs):
"""send a signal to create a customer after creating user"""
if created:
Customer.objects.create(user=instance)
else:
instance.customer.save()
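# Example flow (illustrative): creating a user also creates its customer row.
#     user = User.objects.create(username='jane')  # post_save fires, created=True
#     user.customer                                # auto-created Customer instance
#     user.save()                                  # created=False -> customer re-saved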
class Product(models.Model):
"""create product & check if it digital or not"""
user = models.ForeignKey(User, on_delete=models.CASCADE,
null=True, blank=True)
name = models.CharField(max_length=255, null=True)
price = models.FloatField()
digital = models.BooleanField(default=False, null=True, blank=True)
    image = models.ImageField(null=True, blank=True, upload_to='images/products')
def __str__(self):
return self.name
@property
def imageURL(self):
"""this method check if product has image or not"""
try:
url = self.image.url
except:
url = ''
return url
class Order(models.Model):
"""add order with transaction id to follow it"""
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL,
blank=True, null=True)
date_ordered = models.DateTimeField(auto_now_add=True)
complete = models.BooleanField(default=False, null=True, blank=True)
transaction_id = models.CharField(max_length=255, null=True)
def __str__(self):
return str(self.id)
@property
def shipping(self):
"""check if the product is digital or not"""
shipping = False
orderitems = self.orderitem_set.all()
for i in orderitems:
if i.product.digital == False:
shipping = True
return shipping
@property
def get_cart_total(self):
"""calculate the total price int the whole cart"""
orderitems = self.orderitem_set.all()
total = sum([item.get_total for item in orderitems])
return total
@property
def get_cart_items(self):
"""claculate the total for specific item"""
orderitems = self.orderitem_set.all()
total = sum([item.quantity for item in orderitems])
return total
class OrderItem(models.Model):
"""add order items with it's detail"""
product = models.ForeignKey(Product, on_delete=models.SET_NULL,
blank=True, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL,
blank=True, null=True)
quantity = models.IntegerField(default=0, null=True, blank=True)
date_added = models.DateTimeField(auto_now_add=True)
@property
def get_total(self):
"""claculate the total for specific item"""
total = self.product.price * self.quantity
return total
def __str__(self):
return self.product.name
class ShippingAddress(models.Model):
"""add order information and address"""
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL,
blank=True, null=True)
order = models.ForeignKey(Order, on_delete=models.SET_NULL,
blank=True, null=True)
address = models.CharField(max_length=255, null=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=255, null=True)
zipcode = models.CharField(max_length=255, null=True)
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.address
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `bitmex_trio_websocket` package."""
from bitmex_trio_websocket.exceptions import BitMEXWebsocketApiError
import os
from random import random
import pytest
from async_generator import aclosing
import pendulum
from trio_websocket import ConnectionRejected, WebSocketConnection, ConnectionClosed
from bitmex_trio_websocket import open_bitmex_websocket, BitMEXWebsocket
from slurry import Pipeline
from slurry.sections import Group
async def test_auth_fail():
with pytest.raises(ConnectionRejected):
async with open_bitmex_websocket('testnet', 'abcd1234', 'efgh5678') as bws:
async with aclosing(bws.listen('position')) as aiter:
async for item in aiter:
assert False
# async def test_auth_success():
# bitmex_websocket = BitMEXWebsocket()
# try:
# async with bitmex_websocket._connect('testnet', os.getenv('TESTNET_API_KEY'), os.getenv('TESTNET_API_SECRET'), False):
# async with aclosing(bitmex_websocket._websocket_parser()) as agen:
# assert isinstance(bitmex_websocket._ws, WebSocketConnection)
# await bitmex_websocket._ws.send_message(ujson.dumps({'op': 'subscribe', 'args': ['margin', 'position', 'order', 'execution']}))
# async for msg in agen:
# assert isinstance(msg, dict)
# assert 'action' in msg
# await bitmex_websocket._ws.aclose()
# except ConnectionClosed as e:
# assert e.reason.code == 1000
# async def test_multisymbol():
# bitmex_websocket = BitMEXWebsocket()
# try:
# async with bitmex_websocket._connect('testnet', os.getenv('TESTNET_API_KEY'), os.getenv('TESTNET_API_SECRET'), False):
# count = 0
# async with aclosing(bitmex_websocket._websocket_parser()) as agen:
# await bitmex_websocket._ws.send_message(ujson.dumps({'op': 'subscribe', 'args': ['instrument:XBTUSD', 'instrument:ETHUSD']}))
# async for msg in agen:
# assert isinstance(msg, dict)
# count += 1
# if count >= 3:
# print(count)
# await bitmex_websocket._ws.aclose()
# except ConnectionClosed as e:
# assert e.reason.code == 1000
# async def test_context_manager():
# async with open_bitmex_websocket('testnet', os.getenv('TESTNET_API_KEY'), os.getenv('TESTNET_API_SECRET')) as bitmex_ws:
# count = 0
# async with aclosing(bitmex_ws.listen('instrument', 'XBTUSD')) as agen:
# async for msg in agen:
# count += 1
# if count >= 3:
# break
# assert True
async def test_orderbook():
async with open_bitmex_websocket('testnet') as bws:
async with aclosing(bws.listen('orderBookL2', 'XBTUSD')) as agen:
async for msg in agen:
assert len(msg) == 2
break
async def test_network_argument():
async with open_bitmex_websocket('mainnet') as s:
assert getattr(s, 'listen', None) is not None
async with open_bitmex_websocket('testnet') as s:
assert getattr(s, 'listen', None) is not None
with pytest.raises(ValueError):
async with open_bitmex_websocket('incorrect') as s:
assert False, 'BitMEXWebsocket.connect accepted erroneous network argument.'
async def test_funding():
async with open_bitmex_websocket('testnet') as ws:
async with Pipeline.create(
Group(2, ws.listen('funding'))
) as pipeline, pipeline.tap() as aiter:
async for bundle in aiter:
for funding in bundle:
funding['timestamp'] = pendulum.parse(funding['timestamp'])
funding['fundingInterval'] = pendulum.parse(funding['fundingInterval'])
assert isinstance(bundle, tuple)
assert len(bundle) > 1
return
assert False, 'This should not happen.'
async def test_spam_requests():
with pytest.raises(BitMEXWebsocketApiError):
async with open_bitmex_websocket('testnet') as ws:
async with Pipeline.create(
ws.listen('instrument', 'PAROTCOIN')
) as pipeline, pipeline.tap() as aiter:
async for bundle in aiter:
break
|
"""
Sopan Kurkute
University of Saskatchewan
plotwrf.py
Python 2.x
Python script to plot various WRF model output. Plots are saved as PNG.
example usage: plotwrf.py --infile filename.nc --sfc --tunit C --ppn --punit mm --td
Will plot a surface chart and dewpoint in Celsius and precipitation in mm.
Use plotwrf.py --help to list all options
Last modified: 05/05/16
Skew-T plotting with the pyMeteo package available at: https://github.com/cwebster2/pyMeteo
Credit to Casey Webster
Skew-t plotting with SHARPpy package available at: https://github.com/sharppy/SHARPpy
Credit to: Patrick Marsh (SPC), Kelton Halbert (OU School of Meteorology), Greg Blumberg (OU/CIMMS), Tim Supinie (OU School of Meteorology)
"""
import matplotlib
#matplotlib.use('Agg') # UNCOMMENT THIS ONLY WHEN INVOKING FROM CRON SCRIPT
from scipy.io import netcdf # USE SCIPY MODULE
#from netCDF4 import Dataset # UNCOMMENT TO USE NETCDF 4 MODULE
from mpl_toolkits.basemap import Basemap
from matplotlib import cm
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import numpy as np
import datetime
from optparse import OptionParser
import os.path
import sys
import conversions as conv
import calc_vars as calc
import plot_funcs as pltfuncs
import funcs
import colormaps as cmap
# option parser
usage="usage: %prog [options] \n example usage: plotwrf.py --infile filename.nc --sfc --tunit C --td --ppn --punit mm"
parser = OptionParser(usage=usage, version="%prog 6.0 by Sopan Kurkute")
parser.add_option("--sfc", dest="sfc",action="store_true",help="Plot surface chart with 2m temp, wind barbs and MSLP")
parser.add_option("--t2", dest="t2", action="store_true", help="Plot 2m temp and wind barbs only")
parser.add_option("--mslp", dest="mslp", action="store_true", help="Plot MSLP only")
parser.add_option("--ppnaccum", dest="ppnaccum", action="store_true", help="Plot total accumulated precipitation")
parser.add_option("--ppn", dest="ppn", action="store_true", help="Plot total precipitation")
parser.add_option("--convppn", dest="convppn", action="store_true", help="Plot convective precipitation")
parser.add_option("--td", dest="td", action="store_true", help="Plot 2m dew point temperature")
parser.add_option("--rh", dest="rh", action="store_true", help="Plot relative humidity")
parser.add_option("--snow", dest="snow", action="store_true", help="Plot snow accumulation")
parser.add_option("--hail", dest="hail", action="store_true", help="Plot hail accumulaton")
parser.add_option("--simdbz", dest="simdbz", action="store_true", help="Plot simulated reflectivity")
parser.add_option("--compdbz", dest="compdbz", action="store_true", help="Plot composite reflectivity")
parser.add_option("--lcl", dest="lcl", action="store_true", help="Plot LCL (lifted condensation level)")
parser.add_option("--thetae", dest="thetae", action="store_true", help="Plot Theta-e (equivalent potential temperature)")
parser.add_option("--ua", dest="ua", action="store_true", help="Plot geopotential height, temperature and wind barbs at given pressure levels (hPa), --lvl")
parser.add_option("--lvl", dest="lvl", help="Pressure levels to interpolate to for upper level charts option --ua, --vv. Comma seperated e.g 250,500", default="500")
parser.add_option("--run", dest="run", type="string", help="Model initialisation time", default="00")
parser.add_option("--indir", dest="indir", type="string", help="Directory of the NetCDF file", default="")
parser.add_option("--outdir", dest="outdir", type="string", help="Directory to save plots too", default="")
parser.add_option("--infile", dest="infile", type="string", help="NetCDF filename", default="")
parser.add_option("--thin", dest="thin", type="int", help="Thinning factor for wind barbs", default=5)
parser.add_option("--tunit", dest="tunit", type="string", help="Unit of temperature (C or F)", default="C")
parser.add_option("--punit", dest="punit", type="string", help="Unit of precipitation (mm or inches)", default="mm")
parser.add_option("--save", dest="save", action="store_true", help="Save plots as png files")
parser.add_option("--v", dest="verbose", action="store_true", help="Enable verbose")
parser.add_option("--auto", dest="auto", action="store_true", help="Enable auto file input for daily WRF runs")
parser.add_option("--barbsize", dest="barbsize", type="int", help="Set the length of the wind barbs", default=7)
parser.add_option("--75lr", dest="lr75", action="store_true", help="Plot the H7-H5 lapse rates")
parser.add_option("--vort500", dest="vort500", action="store_true", help="Plot the 500mb absolute vorticity")
parser.add_option("--shear06", dest="shear06", action="store_true", help="Plot the 0-6km shear")
parser.add_option("--vv", dest="vv", action="store_true", help="Plot vertical velocity at specified levels --lvl")
parser.add_option("--irtemp", dest="irtemp", action="store_true", help="Plot IR Brightness Temperature")
parser.add_option("--skewt", dest="skewt", action="store_true", help="Plot Skew-t for a location. Uses pyMeteo package.")
parser.add_option("--slat", dest="slat", type="int", help="Latitude for Skew-t")
parser.add_option("--slon", dest="slon", type="int", help="Longitude for Skew-t")
parser.add_option("--getij", dest="getij", action="store_true", help="Get i,j and nearest Lat/Lon for entered Lat/Lon")
parser.add_option("--skewt2", dest="skewt2", action="store_true", help="Plot Skew-t for a location using SHARPpy")
parser.add_option("--uh25", dest="uh25", action="store_true", help="Plot 2-5km Updraft Helicity")
(opt, arg) = parser.parse_args()
indir = opt.indir # dir of input file
filein = opt.infile
if opt.auto: # for auto file input for daily runs
run = opt.run # model init time
filein = 'wrfout_d01_'+datetime.datetime.utcnow().strftime('%Y-%m-%d')+'_'+run+':00:00' # auto filename for current days run
while os.path.isfile(indir+filein) is False and not opt.auto: #if file doesn't exist get filename
print "File", filein, "not found! in directory:", indir
indir = raw_input("Please enter a directory (blank for current dir): ")
filein = raw_input("Please enter a filename: ")
try: #check if file exists and read in
print "Reading in file: ", indir+filein
#nc = Dataset(indir+filein) # for netcdf 4
nc = netcdf.netcdf_file(indir+filein,'r') # for scipy
except: # quit if cant read file
print "Something went wrong reading in the file"
print "QUITTING"
sys.exit()
outdir = opt.outdir # output image dir
## BASEMAP STUFF
#thin factor for wind barbs
thin = opt.thin
#get lats and lons for map projection
cen_lat = float(nc.CEN_LAT)
cen_lon = float(nc.CEN_LON)
truelat1 = float(nc.TRUELAT1)
truelat2 = float(nc.TRUELAT2)
standlon = float(nc.STAND_LON)
xlat = nc.variables['XLAT']
xlong = nc.variables['XLONG']
map_proj = int(nc.MAP_PROJ)
# dimensions of domain
x_dim = len(xlat[0,0,:])
y_dim = len(xlong[0,:,0])
# Get dx and dy. Grid size
dx = float(nc.DX)
dy = float(nc.DY)
#calculate plot width and height from grid size * dimension. Domain size
width_meters = dx * (x_dim - 1)
height_meters = dy * (y_dim - 1)
# Define gridlines
parallels = np.arange(-90,90,10)
meridians = np.arange(0,360,10)
# find projection and create map. Only LCC tested.
if map_proj == 1: #lambert conformal.
proj = 'lcc'
projname = 'Lambert Conformal'
elif map_proj == 2: # polar stereographic
proj = 'npstere'
projname = 'Polar Stereographic'
elif map_proj == 3: # mercator
proj = 'merc'
projname = 'Mercator'
else: # not supported and quit
print "Projection ", map_proj, "unknown"
print "QUITTING"
sys.exit()
# make map
m = Basemap(resolution='i',projection=proj,width=width_meters,height=height_meters,lat_0=cen_lat,lon_0=cen_lon,lat_1=truelat1,lat_2=truelat2)
#m = Basemap(resolution='i',projection=proj,llcrnrlon=xlong[0,0,0],llcrnrlat=xlat[0,0,0],urcrnrlon=xlong[0,-1,-1],urcrnrlat=xlat[0,-1,-1],lat_0=cen_lat,lon_0=cen_lon)
#x, y = m(xlong[0,:,:],xlat[0,:,:])
# get lat/lons of ny by nx evenly space grid
# make lons, lats and x, y coordinates
lons, lats = m.makegrid(x_dim, y_dim)
x, y = m(lons, lats) # compute map proj coordinates.
print "Using map projection: ", projname
## GET THIS DATA FOR NOW
times = nc.variables['Times'] #each time output in wrf nc file
t2 = nc.variables['T2'] #temp at 2m / Kelvin
u10 = nc.variables['U10'] #u10 wind / ms/s
v10 = nc.variables['V10'] #v10 wind / ms/s
psfc = nc.variables['PSFC'] #surface pressure / Pascals
rainc = nc.variables['RAINC'] # accumulated total cumulus precip
rainnc = nc.variables['RAINNC'] # accumulated total grid scale precip
thgt = nc.variables['HGT'] #terrain height
# general info
init = str(''.join(times[0])).replace('_',' ') # model init time
alltimes = [] #list to hold all times
### BEGIN PLOT FUNCTIONS ###
# savefile and makeplot and the functions for putting data on maps may stay here for now #
def savefile(filename): #save plot image as png
print "Saving file: ", filename
#print filename
plt.savefig(outdir+filename)
def makeplot(data,title,cblabel,clevs,cbticks,ftitle): # function to make plots
fig = plt.gcf() #get current fig
ax = plt.gca() #get current axis
#ax = fig.add_axes([0.1,0.1,0.8,0.8])
# draw parallels and meridians
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)
# draw coastlines, state and country boundaries
m.drawcoastlines()
m.drawstates()
m.drawcountries()
# set plot title
#ax.set_title(title+currtime)
ax.text(0,1.01*height_meters,title+'\nValid:'+currtime,fontsize=14)
ax.text(0.65*width_meters,1.01*height_meters,'Init: '+init, fontsize=12)
#fig.suptitle('Init: '+init+'', fontsize=12) #init title
if clevs is False:
# No color bar
pass
else: #create color bar
cbar = m.colorbar(data,location='bottom',pad="5%")
cbar.set_label(cblabel)
if cbticks:
cbar.set_ticks(clevs)
cbar.ax.tick_params(labelsize=8)
if opt.save:
#create filename for image and save file
filename = ftitle+filetime+'.png'
#filename = ftitle+str(time)+'.png' #save file with number instead of date and time
savefile(filename) #save image file
else:
plt.show()
def t2wind(): # plot t2 and wind barbs
# create figure
plt.figure(figsize=(8,8))
temps = t2[time] # temps in K
if opt.tunit == 'F':
t2f = conv.k_to_f(temps) # convert to F
clevs = np.arange(-30,115,5) # levels / degF
cs = m.contourf(x,y,t2f,clevs,cmap=cm.get_cmap('gist_ncar'))
elif opt.tunit == 'C':
t2c = conv.k_to_c(temps) # convert to C
clevs = np.arange(-40,55,5) # levels / degC
cs = m.contourf(x,y,t2c,clevs,cmap=cm.get_cmap('gist_ncar'))
#make x and y grid points for barbs
#yy = np.arange(0, len(y), 8)
#xx = np.arange(0, len(x), 8)
#gp = np.meshgrid(yy, xx)
#print x[::thin,::thin].shape #check x co-ord thinning
#print u[time,::thin,::thin].shape #check u10 thinning
#x_th,y_th = m(xlong[0,::thin,::thin],xlat[0,::thin,::thin]) #another method to thin barbs
#convert wind to kts
u10kts = conv.ms_to_kts(u10[time])
v10kts = conv.ms_to_kts(v10[time])
m.barbs(x[::thin,::thin], y[::thin,::thin], u10kts[::thin,::thin], v10kts[::thin,::thin],length=opt.barbsize) #plot barbs
title = "2m Temperature and Wind Barbs (kts)"
ftitle = "t2-wind-"
if opt.tunit == 'C':
cblabel = r'$\degree$C'
elif opt.tunit == 'F':
cblabel = r'$\degree$F'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def mslponly(): # plot MSLP only
#create figure
plt.figure(figsize=(8,8))
x, y = m(lons, lats)
psfchpa = conv.pa_to_hpa(psfc[time]) #convert Pa to hPa
mslp = calc.calc_mslp(psfchpa, thgt[0], t2[time]) # get mslp
mslp = gaussian_filter(mslp, sigma=3) #smooth wiggles
#find local min and local max
local_min, local_max = funcs.extrema(mslp, mode='wrap', window=50)
clevs = np.arange(900,1055,2.)
cs = m.contour(x,y,mslp,clevs,colors='k',linewidths=2.)
plt.clabel(cs, inline=True, fmt='%1.0f', fontsize=12, colors='k')
xlows = x[local_min]; xhighs = x[local_max]
ylows = y[local_min]; yhighs = y[local_max]
lowvals = mslp[local_min]; highvals = mslp[local_max]
# plot lows as blue L's, with min pressure value underneath.
xyplotted = []
# don't plot if there is already a L or H within dmin meters.
yoffset = 0.022*(m.ymax-m.ymin)
dmin = yoffset
for x,y,p in zip(xlows, ylows, lowvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'L',fontsize=14,fontweight='bold', ha='center',va='center',color='b')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='b', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
# plot highs as red H's, with max pressure value underneath.
xyplotted = []
for x,y,p in zip(xhighs, yhighs, highvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'H',fontsize=14,fontweight='bold', ha='center',va='center',color='r')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='r', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
title = "MSLP (hPa)"
ftitle = 'mslp-'
cblabel = ''
clevs = False # no color bar levels
cbticks = False
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def precipaccum(): # plot total precip accumulation
# create figure
plt.figure(figsize=(8,8))
ppn = rainc[time]+rainnc[time] #ppn / mm
if opt.punit == 'mm':
clevs = [0.1,0.5,1,2,5,10,15,20,30,40,50,80,100,200,300,500] #levels / mm
elif opt.punit == 'in':
clevs = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, \
6.0, 8.0, 10., 20.0] # levels / in
ppn = conv.mm_to_in(ppn) # convert ppn to inches
norm = matplotlib.colors.BoundaryNorm(clevs, 15) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,ppn,clevs,norm=norm,cmap=cmap.precip_colormap) #plot total
title = "Precipitation Accumulation"
ftitle = 'ppnaccum-'
if opt.punit == 'mm':
cblabel = 'mm'
elif opt.punit == 'in':
cblabel = 'inches'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def precip(): # plot current precip at each time
# create figure
plt.figure(figsize=(8,8))
ppn = rainc[time]+rainnc[time] # total ppn / mm
currppn = np.zeros(ppn.shape) # preallocate array for the current-interval ppn
if time == 0: # initial amount
currppn = ppn
else: # current amount
prev = rainc[time-1]+rainnc[time-1]
currppn = ppn-prev
if opt.punit == 'mm':
clevs = [0.1,0.5,1,2,5,10,15,20,30,40,50,80,100,200,300,500] #levels / mm
elif opt.punit == 'in':
clevs = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, \
6.0, 8.0, 10., 20.0] # levels / in
currppn = conv.mm_to_in(currppn) # convert ppn to inches
norm = matplotlib.colors.BoundaryNorm(clevs, 15) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,currppn,clevs,norm=norm,cmap=cmap.precip_colormap) #plot total
title = "Precipitation"
ftitle = 'ppn-'
if opt.punit == 'mm':
cblabel = 'mm'
elif opt.punit == 'in':
cblabel = 'inches'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def convprecip(): # plot current convective precip at each time
# create figure
plt.figure(figsize=(8,8))
convppn = rainc[time] #ppn / mm
currppn = np.zeros(convppn.shape) # preallocate array for the current-interval convective ppn
if time == 0:
currppn = convppn
else:
prev = rainc[time-1]
currppn = convppn-prev
if opt.punit == 'mm':
clevs = [0.1,0.5,1,2,5,10,15,20,30,40,50,80,100,200,300,500] #levels / mm
elif opt.punit == 'in':
clevs = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, \
6.0, 8.0, 10., 20.0] # levels / in
currppn = conv.mm_to_in(currppn) # convert ppn to inches
norm = matplotlib.colors.BoundaryNorm(clevs, 15) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,currppn,clevs,norm=norm,cmap=cmap.precip_colormap) #plot total
title = "Convective Precipitation"
ftitle = 'convppn-'
if opt.punit == 'mm':
cblabel = 'mm'
elif opt.punit == 'in':
cblabel = 'inches'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def tdrh(): # plot td and rh
# create figure
plt.figure(figsize=(8,8))
q2 = nc.variables['Q2'][time] # water vapour mixing ratio at 2m
t2c = conv.k_to_c(t2[time]) #convert temp to celsius
psfchpa = conv.pa_to_hpa(psfc[time]) # pres to hPa
es = calc.calc_es(t2c) # calc es (t2c is already the 2D field at this time)
ws = calc.calc_ws(es, psfchpa) # calc ws
u10kts = conv.ms_to_kts(u10[time])
v10kts = conv.ms_to_kts(v10[time])
if opt.rh:
rh = calc.calc_rh(q2, ws) #calc rh
clevs = np.arange(0,105,5)
cs = m.contourf(x,y,rh,clevs,cmap=cm.get_cmap('jet')) #plot RH
cblabel='RH \ %'
title = "Relative Humidity \n Valid: "
ftitle = 'rh-'
cbticks = True
elif opt.td:
rh = calc.calc_rh(q2, ws) # calc rh
td = calc.calc_dewpoint(es, rh) # calc td (deg C)
title = "2m Dew Point"
ftitle = 'td-'
if opt.tunit == 'C':
clevs = np.arange(-30,65,5) # levels / degC
cblabel = r'$\degree$C'
elif opt.tunit == 'F':
clevs = np.arange(-20,125,5) # levels / degF
td = conv.c_to_f(td) #convert celsius to fahrenheit
cblabel = r'$\degree$F'
cs = m.contourf(x,y,td,clevs,cmap=cm.get_cmap('gist_ncar')) #plot Td
m.barbs(x[::thin,::thin], y[::thin,::thin], u10kts[::thin,::thin], v10kts[::thin,::thin],length=opt.barbsize) #plot barbs
cbticks=True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def upperair(): # plot upper air chart for given level. geopotential height, wind barbs and temp
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
U = nc.variables['U'][time] # U wind component
V = nc.variables['V'][time] # V wind component
Unew = funcs.unstagger(U,'U') # unstagger u
Vnew = funcs.unstagger(V,'V') # unstagger v
ph = nc.variables['PH'][time] #perturbation geopotential
phb = nc.variables['PHB'][time] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
theta = nc.variables['T'][time] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
totalTheta = theta + theta0 # total potential temp
totalT = conv.k_to_c(calc.theta_to_temp(totalTheta, totalp)) # calc temps in C
levels = opt.lvl.split(',') # get list of levels
for level in levels:
plt.figure(figsize=(8,8)) #create fig for each plot
level = int(level) # make it int
#interp data for level
gphgt = funcs.linear_interp(totalgp,totalp,level)
totalTfinal = funcs.linear_interp(totalT,totalp,level)
uinterp = funcs.linear_interp(Unew,totalp,level)
vinterp = funcs.linear_interp(Vnew,totalp,level)
Ufinal = conv.ms_to_kts(uinterp) #convert to kts
Vfinal = conv.ms_to_kts(vinterp)
#speed = calc.calc_wspeed(Ufinal, Vfinal)
gphgt = conv.gphgt_to_hgt(gphgt) # convert to height (m)
gphgt = gaussian_filter(gphgt, sigma=3) # smooth wiggles
totalTfinal = gaussian_filter(totalTfinal, sigma=2)
# set gpheight levels for common pressure levels
if level == 250:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),60)
elif level == 500:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),60)
elif level == 700:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
elif level == 850:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
elif level == 925:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
else: # use generic 30m spacing
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
#plot all this up
cs = m.contour(x,y,gphgt,gpclevs,colors='k',linewidths=2.)
plt.clabel(cs, inline=True, fmt='%1.0f', fontsize=12, colors='k')
tclevs = np.arange(np.min(totalTfinal),np.max(totalTfinal),4)
cs2 = m.contour(x,y,totalTfinal,tclevs,colors='r',linestyles='-',linewidths=2.)
plt.clabel(cs2,inline=True,fmt='%1.0f',fontsize=12,colors='r')
m.barbs(x[::thin,::thin], y[::thin,::thin], Ufinal[::thin,::thin], Vfinal[::thin,::thin],length=opt.barbsize) #plot barbs
level = str(level)
title = level+'mb Height (m), Temp (C), Wind Barbs (kts)'
ftitle = level+'mb-'
cblabel = 'kts'
clevs = False
cbticks = False
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def surface(): # plot surface chart. t2, wind barbs and mslp
# create figure
plt.figure(figsize=(8,8))
x, y = m(lons, lats)
t2c = conv.k_to_c(t2[time]) #convert temp to celsius
if opt.tunit == 'F':
t2f = conv.c_to_f(t2c) #convert celcius to fahrenheit
clevs = np.arange(-30,115,5) # levels / degF
cs = m.contourf(x,y,t2f,clevs,cmap=cm.get_cmap('gist_ncar'))
cblabel = r'$\degree$F'
elif opt.tunit == 'C':
clevs = np.arange(-40,55,5) # levels / degC
cs = m.contourf(x,y,t2c,clevs,cmap=cm.get_cmap('gist_ncar'))
cblabel = r'$\degree$C'
cbticks = True
psfchpa = conv.pa_to_hpa(psfc[time]) #convert Pa to hPa
mslp = calc.calc_mslp(psfchpa, thgt[0], t2[time]) # get mslp
mslp = gaussian_filter(mslp, sigma=3) # smooth wiggles
local_min, local_max = funcs.extrema(mslp, mode='wrap', window=50)
#make x and y grid points for barbs
#yy = np.arange(0, len(y), 8)
#xx = np.arange(0, len(x), 8)
#gp = np.meshgrid(yy, xx)
#print x[::thin,::thin].shape #check x co-ord thinning
#print u[time,::thin,::thin].shape #check u10 thinning
#x_th,y_th = m(xlong[0,::thin,::thin],xlat[0,::thin,::thin]) #another method to thin barbs
#convert wind to kts
u10kts = conv.ms_to_kts(u10[time])
v10kts = conv.ms_to_kts(v10[time])
m.barbs(x[::thin,::thin], y[::thin,::thin], u10kts[::thin,::thin], v10kts[::thin,::thin],length=opt.barbsize) #plot barbs
title = "2m Temp, Wind Barbs (kts), MSLP (hPa)"
ftitle = 'sfc-'
pclevs = np.arange(900,1055,2.)
pcs = m.contour(x,y,mslp,pclevs,colors='k',linewidths=2.)
plt.clabel(pcs, inline=True, fmt='%1.0f', fontsize=12, colors='k')
xlows = x[local_min]; xhighs = x[local_max]
ylows = y[local_min]; yhighs = y[local_max]
lowvals = mslp[local_min]; highvals = mslp[local_max]
# plot lows as blue L's, with min pressure value underneath.
xyplotted = []
# don't plot if there is already a L or H within dmin meters.
yoffset = 0.022*(m.ymax-m.ymin)
dmin = yoffset
for x,y,p in zip(xlows, ylows, lowvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'L',fontsize=14,fontweight='bold', ha='center',va='center',color='b')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='b', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
# plot highs as red H's, with max pressure value underneath.
xyplotted = []
for x,y,p in zip(xhighs, yhighs, highvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'H',fontsize=14,fontweight='bold',
ha='center',va='center',color='r')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='r', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def snowaccum(): # plot snow accumulation
# create figure
plt.figure(figsize=(8,8))
snow = nc.variables['SNOWNC'][time] # total accumulated grid scale snow and ice / mm at each time
if opt.punit == 'mm':
clevs = [0,0.5,1,2.5,3,4,5,8,10,15,20,30,40,50,80,100,150,200,250,500]
cblabel = 'mm'
elif opt.punit == 'in':
snow = conv.mm_to_in(snow) # convert to inches
clevs = [0.25,0.5,0.75,1,1.5,2,2.5,3,4,5,6,8,10,12,14,16,18,20,22,24]
cblabel = 'inches'
cbticks = True
norm = matplotlib.colors.BoundaryNorm(clevs, 19) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,snow,clevs,norm=norm,cmap=cmap.snow_colormap)
title = "Snow Accumulation"
ftitle = 'snow-'
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def hailaccum(): # plot hail accumulation
# create figure
plt.figure(figsize=(8,8))
hail = nc.variables['HAILNC'][time] # accumulated total grid scale hail / mm at each time
if opt.punit == 'mm':
clevs = [0.5,1.,1.5,2.,2.5,3.,4.,5.,6.,7.,8.,9.,10.,11.,12.]
cblabel = 'mm'
elif opt.punit == 'in':
hail = conv.mm_to_in(hail) # convert to inches
clevs = [0.01,0.02,0.04,0.06,0.08,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55]
cblabel = 'inches'
cbticks = True
norm = matplotlib.colors.BoundaryNorm(clevs, 14) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,hail,clevs,norm=norm,cmap=cmap.hail_colormap)
title = "Hail Accumulation"
ftitle = 'hail-'
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def simudbz(): # plot simulated reflectivity, mp_physics dependent
# create figure
plt.figure(figsize=(8,8))
qrain = nc.variables['QRAIN'] # rain water mixing ratio
t2c = conv.k_to_c(t2[time]) #convert temp to celsius
rhoa = calc.calc_rhoa(psfc[time], t2[time])
Qrain = qrain[time,1] # rain mixing ratio
Qrain = np.nan_to_num(Qrain) # change NaNs to zeros and infs to large finite numbers
try: #depends on MP scheme
Qsn = nc.variables['QSNOW'] # try to get snow mixing ratio
except:
Qsn = np.zeros(np.shape(qrain)) # else create zeros array same shape as qrain
Qsnow = Qsn[time,1] # snow mixing ratio
Qsnow = np.nan_to_num(Qsnow) # change NaN to zeros
dBZ = calc.calc_dbz(t2c, rhoa, Qrain, Qsnow)
clevs = np.arange(0,85,5)
norm = matplotlib.colors.BoundaryNorm(clevs, 17) # normalize levels
cs = m.contourf(x,y,dBZ,clevs,norm=norm,cmap=cmap.dbz_colormap)
title = "Simulated Reflectivity"
ftitle = 'simdbz-'
cblabel = 'dBZ'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def compodbz(): # plot composite reflectivity, mp_physics dependent
# create figure
plt.figure(figsize=(8,8))
try: #get refl from do_radar_ref=1
refl = nc.variables['REFL_10CM'][time]
dBZ = np.zeros(refl[0].shape) # preallocate 2D array (overwritten by the column max below)
dBZ = np.max(refl, axis=0)
#for i in range(len(refl[1,:,1])):
# for j in range(len(refl[1,1,:])):
# dBZ[i,j]=np.max(refl[:,i,j])
except: # calculate reflectivity
Qrainall = nc.variables['QRAIN'][time] # rain water mixing ratio at all levels
t2c = conv.k_to_c(t2[time]) #convert temp to celsius
rhoa = calc.calc_rhoa(psfc[time], t2[time])
try: # depends on MP scheme
Qsn = nc.variables['QSNOW'] # try to get snow mixing ratio
except:
Qsn = np.zeros(np.shape(Qrainall)) # else create zeros array same shape as qrain
Qsnowall = Qsn[time] # get all Qsnow values at all levels for each time
Qrainmax = np.max(Qrainall, axis=0) #max rain QV
Qsnowmax = np.max(Qsnowall, axis=0) #max snow QV
dBZ = calc.calc_dbz(t2c, rhoa, Qrainmax, Qsnowmax)
clevs = np.arange(0,85,5)
norm = matplotlib.colors.BoundaryNorm(clevs, 17) # normalize levels
cs = m.contourf(x,y,dBZ,clevs,norm=norm,cmap=cmap.dbz_colormap)
title = "Composite Reflectivity"
ftitle = 'compdbz-'
cblabel = 'dBZ'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def lclhgt(): # plot lcl height
# create figure
plt.figure(figsize=(8,8))
q2 = nc.variables['Q2'][time] # water vapour mixing ratio at 2m
t2c = conv.k_to_c(t2[time]) #convert temp to celsius
psfchpa = conv.pa_to_hpa(psfc[time])
es = calc.calc_es(t2c)
ws = calc.calc_ws(es, psfchpa)
rh = calc.calc_rh(q2, ws)
td = calc.calc_dewpoint(es, rh)
lcl = calc.calc_lcl(t2c, td)
clevs = np.arange(0,6000,500)
cs = m.contourf(x,y,lcl,clevs,cmap=cmap.lcl_colormap)
title = "LCL Height"
ftitle = 'lcl-'
cblabel = 'm'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def thetaE(): # plot theta-e
# create figure
plt.figure(figsize=(8,8))
theta = nc.variables['T'][time] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
theta = theta[0] + theta0 # total theta
psfchpa = conv.pa_to_hpa(psfc[time])
t2c = conv.k_to_c(t2[time]) #convert temp to celsius
es = calc.calc_es(t2c)
ws = calc.calc_ws(es, psfchpa)
thetae = calc.calc_thetae(theta, t2[time], ws)
clevs = np.arange(260,372,4) # set by max and min of data
cs = m.contourf(x,y,thetae,clevs,cmap=cm.get_cmap('gist_ncar'))
title = "Theta-e"
ftitle = 'thetae-'
cblabel = 'K'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def h75lr(): # 700-500mb lapse rates
# create figure
plt.figure(figsize=(8,8))
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
theta = nc.variables['T'][time] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
totalTheta = theta + theta0 # total potential temp
totalT= conv.k_to_c(calc.theta_to_temp(totalTheta, totalp)) # calc temp in deg C
# interp temps to levels
totalT700 = funcs.linear_interp(totalT,totalp,700)
totalT500 = funcs.linear_interp(totalT,totalp,500)
# calc h7-h5 lapse rates
lr = totalT700 - totalT500
clevs = np.arange(5,10.5,.5) # conditionally unstable levels
cs = m.contourf(x,y,lr,clevs,cmap=cm.get_cmap('gist_ncar'))
title = "H7-H5 Lapse Rates"
ftitle = 'h75lr-'
cblabel = r'$\degree$C'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def absvort500(): # plot 500mb absolute vorticity
# create figure
plt.figure(figsize=(8,8))
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
U = funcs.unstagger(nc.variables['U'][time],'U') # U wind component UNSTAGGERED
V = funcs.unstagger(nc.variables['V'][time],'V') # V wind component
fcoriolis = calc.calc_fcoriolis(xlat[0])
uinterp = funcs.linear_interp(U,totalp,500) #interp to 500mb
vinterp = funcs.linear_interp(V,totalp,500)
vertvort = calc.calc_vertvort(uinterp, vinterp, dx)
avort = vertvort + fcoriolis # absolute vorticity
avort = np.multiply(avort, 1e5) # scale up for levels
clevs = np.arange(-6, 52, 2)
cs = m.contourf(x,y,avort,clevs,cmap=cm.get_cmap('gist_ncar'))
title = '500mb Absolute Vorticity'
ftitle = '500absvort-'
cblabel = r'$10^{-5} s^{-1}$'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def shr06(): # plot the 0-6km shear vector
# create figure
plt.figure(figsize=(8,8))
ph = nc.variables['PH'][time] #perturbation geopotential
phb = nc.variables['PHB'][time] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
U = funcs.unstagger(nc.variables['U'][time],'U') # U wind component # UNSTAGGERED
V = funcs.unstagger(nc.variables['V'][time],'V') # V wind component
u10kts = conv.ms_to_kts(u10[time]) # sfc wind in kts
v10kts = conv.ms_to_kts(v10[time])
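# totalgp is geopotential (m^2 s^-2); dividing by g = 9.81 m s^-2 gives
# geopotential height in metres, which is used as the vertical coordinate
# for the 6 km interpolation below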
u6 = funcs.interp_generic(6000, (totalgp/9.81), U) # interp to 6km
v6 = funcs.interp_generic(6000, (totalgp/9.81), V)
u6kts = conv.ms_to_kts(u6) # convert 6km wind to kts
v6kts = conv.ms_to_kts(v6)
#using 10m wind as sfc wind
ushr = u6kts - u10kts # calc 0-6 shr in kts
vshr = v6kts - v10kts
speed = calc.calc_wspeed(ushr, vshr)
# plot data
clevs = np.arange(20,145,5)
cs = m.contourf(x, y, speed, clevs, cmap=cm.get_cmap('gist_ncar'))
m.barbs(x[::thin,::thin], y[::thin,::thin], ushr[::thin,::thin], vshr[::thin,::thin],length=opt.barbsize) #plot barbs
title = '0-6km Shear'
ftitle = 'shr06-'
cblabel = 'kts'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def vertvol(): # plot the vertical velocity at levels. NEEDS CORRECTING TO VERTICAL MOTION OMEGA EQUATION
W = funcs.unstagger(nc.variables['W'][time],'W') # unstaggered vertical velocity
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
levels = opt.lvl.split(',') # get list of levels
for level in levels:
plt.figure(figsize=(8,8)) #create fig for each plot
level = int(level) # make it int
Wfinal = funcs.linear_interp(W,totalp,level) # interpolate W to levels
clevs = np.arange(-2.0,2.2,0.2)
cs = m.contourf(x,y,Wfinal,clevs,cmap=cm.get_cmap('gist_ncar'))
level = str(level)
title = level+'mb Vertical Velocity'
ftitle = level+'mbvv-'
cblabel = r'$ms^{-1}$'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def olr_to_temp(): # convert OLR to IR temp
plt.figure(figsize=(8,8))
olr = nc.variables['OLR'][time]
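# invert the Stefan-Boltzmann law OLR = sigma * T^4 with sigma = 5.67e-8 W m^-2 K^-4,
# i.e. T = (OLR / sigma)^0.25, then convert from K to deg C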
olrtemp = np.power(olr / 5.67e-8, 0.25) - 273.15 # calc temp using Stefan-Boltzmann law and convert to deg C
clevs = np.arange(-80, 36 ,4)
cs = m.contourf(x,y,olrtemp,clevs,cmap=cmap.irsat_colormap)
title = 'IR Brightness Temp'
ftitle = 'irtemp-'
cblabel = r'$\degree$C'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def pymeteo_skewt(): # uses pyMeteo package (https://github.com/cwebster2/pyMeteo) to plot skew-t for lat/lon. Credit Casey Webster
import pymeteo.skewt as skewt
try:
skewt.plot_wrf(filein,opt.slat,opt.slon,time,'skewt'+str(time)+'.png')
except:
print "LAT/LON NOT IN DOMAIN. QUITTING"
sys.exit()
def plot_skewt(): # plot skew-t by writing data to file and use SHARPpy available at: https://github.com/sharppy/SHARPpy
i, j = funcs.latlon_ij(opt.slat, opt.slon, xlat, xlong)
inlat = xlat[0,i,j]
inlon = xlong[0,i,j]
pb = nc.variables['PB'][time,:,i,j] #base state pressure, Pa
p = nc.variables['P'][time,:,i,j] # perturbation pressure, Pa
totalp = p + pb # total pressure
ph = nc.variables['PH'][time,:,i,j] #perturbation geopotential
phb = nc.variables['PHB'][time,:,i,j] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
U = nc.variables['U'][time,:,i,j] # U wind component
V = nc.variables['V'][time,:,i,j] # V wind component
theta = nc.variables['T'][time,:,i,j] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
totaltheta = theta+theta0 # total potential temp
qvapor = nc.variables['QVAPOR'][time,:,i,j] #water vapor mixing ratio kg/kg
#need to calc these variables for skewt
level = conv.pa_to_hpa(totalp) # levels in hPa
height = conv.gphgt_to_hgt(totalgp) # heights in m
temps = calc.theta_to_temp(totaltheta, totalp) # temps in degK
tempc = conv.k_to_c(temps) # temps in degC
es = calc.calc_es(tempc) # calc es
ws = calc.calc_ws(es, level) # calc ws
rh = calc.calc_rh(qvapor, ws) # calc rh
dwpt = calc.calc_dewpoint(es, rh) # calc dewpoint in degC
winddir = calc.calc_wdir(U, V) # calc wind dir
wspd = conv.ms_to_kts(calc.calc_wspeed(U, V)) # calc wind spd
skewt_data = funcs.skewt_data(timestamp, level, height, tempc, dwpt, winddir, wspd, inlat, inlon) # write the data to SPC file format
pltfuncs.do_sharppy(skewt_data) # use SHARPpy to plot skew-t
def updraft_hel(): # plot the 2-5km updraft helicity
plt.figure(figsize=(8,8))
U = funcs.unstagger(nc.variables['U'][time],'U') # U wind component # UNSTAGGERED
V = funcs.unstagger(nc.variables['V'][time],'V') # V wind component
W = funcs.unstagger(nc.variables['W'][time],'W') # unstaggered vertical velocity
ph = nc.variables['PH'][time] #perturbation geopotential
phb = nc.variables['PHB'][time] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
heights = totalgp / 9.81
levels = 6 # no of levels in between bottom and top of a layer (add extra one to get to very top of layer)
depth = 1000 # depth of layer
dz = depth / (levels-1) # increment / m
#create arrays to hold all the values at each level
u2km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
v2km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
u3km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
v3km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
u4km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
v4km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
#u5km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
#v5km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
w2km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
w3km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
w4km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
#w5km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
zeta2km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
zeta3km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
zeta4km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
#zeta5km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
for i in range(0,levels): # loop through to interpolate to levels and store in array
print "Interpolating...doing loop ", i, "of ", (levels-1)
increment = i*dz
u2km[i] = funcs.interp_generic(2000+increment, heights, U)
v2km[i] = funcs.interp_generic(2000+increment, heights, V)
u3km[i] = funcs.interp_generic(3000+increment, heights, U)
v3km[i] = funcs.interp_generic(3000+increment, heights, V)
u4km[i] = funcs.interp_generic(4000+increment, heights, U)
v4km[i] = funcs.interp_generic(4000+increment, heights, V)
#u5km[i] = funcs.interp_generic(5000+increment, heights, U)
#v5km[i] = funcs.interp_generic(5000+increment, heights, V)
w2km[i] = funcs.interp_generic(2000+increment, heights, W)
w3km[i] = funcs.interp_generic(3000+increment, heights, W)
w4km[i] = funcs.interp_generic(4000+increment, heights, W)
#w5km[i] = funcs.interp_generic(2000+increment, heights, W)
zeta2km[i] = calc.calc_vertvort(u2km[i], v2km[i], dx)
zeta3km[i] = calc.calc_vertvort(u3km[i], v3km[i], dx)
zeta4km[i] = calc.calc_vertvort(u4km[i], v4km[i], dx)
#zeta5km[i] = calc.calc_vertvort(u5km[i], v5km[i], dx)
# calc the layer mean
w2to3 = np.mean(w2km, axis=0)
w3to4 = np.mean(w3km, axis=0)
w4to5 = np.mean(w4km, axis=0)
zeta2to3 = np.mean(zeta2km, axis=0)
zeta3to4 = np.mean(zeta3km, axis=0)
zeta4to5 = np.mean(zeta4km, axis=0)
# calc the 2-5km UH
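# UH approximates the integral of w * zeta over the 2-5 km layer:
# each term below is the mean w of a 1 km layer times the mean zeta of that layer,
# multiplied by the 1000 m layer depth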
UH = ( w2to3*zeta2to3 + w3to4*zeta3to4 + w4to5*zeta4to5 ) * 1000
#u2km = funcs.interp_generic(2000, heights, U)
#v2km = funcs.interp_generic(2000, heights, V)
#u3km = funcs.interp_generic(3000, heights, U)
#v3km = funcs.interp_generic(3000, heights, V)
#u4km = funcs.interp_generic(4000, heights, U)
#v4km = funcs.interp_generic(4000, heights, V)
#u5km = funcs.interp_generic(5000, heights, U)
#v5km = funcs.interp_generic(5000, heights, V)
#w2km = funcs.interp_generic(2000, heights, W)
#w3km = funcs.interp_generic(2000, heights, W)
#w4km = funcs.interp_generic(2000, heights, W)
#w5km = funcs.interp_generic(2000, heights, W)
#w2to3 = 0.5 * ( w2km + w3km )
#w3to4 = 0.5 * ( w3km + w4km )
#w4to5 = 0.5 * ( w4km + w5km )
#zeta2km = calc.calc_vertvort(u2km, v2km, dx)
#zeta3km = calc.calc_vertvort(u3km, v3km, dx)
#zeta4km = calc.calc_vertvort(u4km, v4km, dx)
#zeta5km = calc.calc_vertvort(u5km, v5km, dx)
#zeta2to3 = 0.5 * ( zeta2km + zeta3km )
#zeta3to4 = 0.5 * ( zeta3km + zeta4km )
#zeta4to5 = 0.5 * ( zeta4km + zeta5km )
#UH = ( w2to3*zeta2to3 + w3to4*zeta3to4 + w4to5*zeta4to5 ) * 1000
clevs = np.arange(0,210,10)
cs = m.contourf(x,y,UH,clevs,cmap=cmap.uh_colormap)
title = '2-5km Updraft Helicity'
ftitle = 'uh-'
cblabel = r'$m^{2}s^{-2}$'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
### END PLOT FUNCTIONS ###
flag = False # to check for plotting options
#### BEGIN TIME LOOP ####
for time in range(times.shape[0]):
currtime = str(''.join(times[time])).replace('_', ' ') #get current model time
filetime = currtime.translate(None, ':').replace(' ', '_') # time for filename
alltimes.append(currtime) # all times in output
timestamp = currtime[8:10]+currtime[5:7]+currtime[2:4]+'/'+currtime[11:13]+currtime[14:16]
if opt.t2: #plot 2m temp and wind barbs
print "Plotting Temperature and Wind Barbs for time: ", currtime
t2wind()
flag = True
if opt.mslp: #plot surface pressure only
print "Plotting MSLP for time: ", currtime
mslponly()
flag = True
if opt.ppnaccum: #plot total precipitation
print "Plotting Precipitation Accumulation for time: ", currtime
precipaccum()
flag = True
if opt.ppn: # plot current ppn
print "Plotting Precipitation for time: ", currtime
precip()
flag = True
if opt.convppn: # plot convective ppn
print "Plotting Convective Precipitation for time: ", currtime
convprecip()
flag = True
if opt.td or opt.rh: #plot dew point or RH
flag = True
if opt.td:
print "Plotting Dew Point for time: ", currtime
elif opt.rh:
print "Plotting RH for time: ", currtime
tdrh()
if opt.ua: #plot upper air charts
print "Plotting upper level chart for time: ", currtime
upperair()
flag = True
if opt.sfc: #plot surface chart. t2, wind and mslp
print "Plotting Surface Chart for time: ", currtime
surface()
flag = True
if opt.snow: #plot snow accumulation
print "Plotting Snow Accumulation for time: ", currtime
snowaccum()
flag = True
if opt.hail: #plot hail accumulation
print "Plotting Hail Accumulation for time: ", currtime
hailaccum()
flag = True
if opt.simdbz: #simulated reflectivity
print "Plotting Simulated Reflectivity for time: ", currtime
simudbz()
flag = True
if opt.compdbz: #composite reflectivity
print "Plotting Composite Reflectivity for time: ", currtime
compodbz()
flag = True
if opt.lcl: #plot LCL
print "Plotting LCL for time: ", currtime
lclhgt()
flag = True
if opt.thetae: #plot theta-e
print "Plotting Theta-e for time: ", currtime
thetaE()
flag= True
if opt.lr75: #plot h7-h5 lapse rates
print "Plotting H7-H5 lapse rates for time: ", currtime
h75lr()
flag = True
if opt.vort500: # plot 500mb absolute vorticity
print "Plotting 500mb absolute vorticity for time: ", currtime
absvort500()
flag = True
if opt.shear06:
print "Plotting 0-6km Shear for time: ", currtime
shr06()
flag = True
if opt.vv:
print "Plotting vertical velocity for time: ", currtime
vertvol()
flag = True
if opt.irtemp:
print "Plotting IR Brightness Temp for time: ", currtime
olr_to_temp()
flag = True
if opt.skewt:
print "Plotting Skew-t for time: ", currtime
pymeteo_skewt()
flag = True
if opt.getij:
print "Getting i, j for lat=",opt.slat, ', lon=',opt.slon
funcs.latlon_ij(opt.slat, opt.slon, xlat, xlong)
#print "A less accurate method:"
#funcs.latlon_ij2(opt.slat, opt.slon, xlat, xlong)
flag = True
sys.exit()
if opt.skewt2:
print "Plotting Skew-t for time: ", currtime
plot_skewt()
flag = True
if opt.uh25:
print "Plotting 2-5km Updraft Helicity for time: ", currtime
updraft_hel()
flag = True
if flag is False: # do this when no options given
print "Please provide options to plot. Use plotwrf.py --help"
print "QUITTING"
sys.exit()
#pass
#### END TIME LOOP ####
if opt.verbose: #verbose output
print "\n*VERBOSE OUTPUT*"
print "\nindir= ", indir
print "infile= ", filein
print "outdir=", outdir
print "Model initialisation time: ", init
print "Timestep: ", nc.variables['ITIMESTEP'][1]
print "Times in file: ", alltimes
print "west_east: ", x_dim
print "south_north: ", y_dim
print "Model dimentions (metres): ", width_meters, height_meters
print "dx, dy: ", dx, dy
print "Center lat: ", cen_lat
print "Center lon: ", cen_lon
print "Model top: ", nc.variables['P_TOP'][0]
print "Map projection: ", proj, '-' , projname
nc.close() # close netcdf file
|
# Copyright 2017 Hugh Salimbeni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import gpflow
from gpflow.params import Parameter, Parameterized
from gpflow.conditionals import conditional, Kuu
from gpflow.features import InducingPoints
from gpflow.kullback_leiblers import gauss_kl
from gpflow.priors import Gaussian as Gaussian_prior
from gpflow import transforms
from gpflow import settings
from gpflow.models.gplvm import BayesianGPLVM
from gpflow.expectations import expectation
from gpflow.probability_distributions import DiagonalGaussian
from gpflow.logdensities import multivariate_normal
from gpflow import conditionals
from doubly_stochastic_dgp.utils import reparameterize
class Layer(Parameterized):
def __init__(self, input_prop_dim=None, **kwargs):
"""
A base class for GP layers. Basic functionality for multisample conditional, and input propagation
:param input_prop_dim: the first dimensions of X to propagate. If None (or zero) then no input prop
:param kwargs:
"""
Parameterized.__init__(self, **kwargs)
self.input_prop_dim = input_prop_dim
def conditional_ND(self, X, full_cov=False):
raise NotImplementedError
def KL(self):
return tf.cast(0., dtype=settings.float_type)
def conditional_SND(self, X, full_cov=False):
"""
A multisample conditional, where X is shape (S,N,D_in), independent over samples S
if full_cov is True
mean is (S,N,D_out), var is (S,N,N,D_out)
if full_cov is False
mean and var are both (S,N,D_out)
:param X: The input locations (S,N,D_in)
:param full_cov: Whether to calculate full covariance or just diagonal
:return: mean (S,N,D_out), var (S,N,D_out or S,N,N,D_out)
"""
if full_cov is True:
f = lambda a: self.conditional_ND(a, full_cov=full_cov)
mean, var = tf.map_fn(f, X, dtype=(settings.float_type, settings.float_type))
return tf.stack(mean), tf.stack(var)
else:
X_shape = tf.shape(X)
S, N, D = X_shape[0], X_shape[1], X_shape[2]
X_flat = tf.reshape(X, [S * N, D])
mean, var = self.conditional_ND(X_flat)
return [tf.reshape(m, [S, N, self.num_outputs]) for m in [mean, var]]
def sample_from_conditional(self, X, z=None, full_cov=False):
"""
Calculates self.conditional and also draws a sample, adding input propagation if necessary
If z=None then the tensorflow random_normal function is used to generate the
N(0, 1) samples, otherwise z are used for the whitened sample points
:param X: Input locations (S,N,D_in)
:param full_cov: Whether to compute correlations between outputs
:param z: None, or the sampled points in whitened representation
:return: mean (S,N,D), var (S,N,N,D or S,N,D), samples (S,N,D)
"""
mean, var = self.conditional_SND(X, full_cov=full_cov)
# set shapes
S = tf.shape(X)[0]
N = tf.shape(X)[1]
D = self.num_outputs
mean = tf.reshape(mean, (S, N, D))
if full_cov:
var = tf.reshape(var, (S, N, N, D))
else:
var = tf.reshape(var, (S, N, D))
if z is None:
z = tf.random_normal(tf.shape(mean), dtype=settings.float_type)
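# the reparameterize helper applies the reparameterization trick: roughly
# mean + sqrt(var) * z elementwise when full_cov=False, or mean + chol(var) z
# per output when full_cov=True, so samples stay differentiable w.r.t. the
# variational parameters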
samples = reparameterize(mean, var, z, full_cov=full_cov)
if self.input_prop_dim:
shape = [tf.shape(X)[0], tf.shape(X)[1], self.input_prop_dim]
X_prop = tf.reshape(X[:, :, :self.input_prop_dim], shape)
samples = tf.concat([X_prop, samples], 2)
mean = tf.concat([X_prop, mean], 2)
if full_cov:
shape = (tf.shape(X)[0], tf.shape(X)[1], tf.shape(X)[1], tf.shape(var)[3])
zeros = tf.zeros(shape, dtype=settings.float_type)
var = tf.concat([zeros, var], 3)
else:
var = tf.concat([tf.zeros_like(X_prop), var], 2)
return samples, mean, var
class SVGP_Layer(Layer):
def __init__(self, kern, num_outputs, mean_function,
Z=None,
feature=None,
white=False, input_prop_dim=None,
q_mu=None,
q_sqrt=None, **kwargs):
"""
A sparse variational GP layer in whitened representation. This layer holds the kernel,
variational parameters, inducing points and mean function.
The underlying model at inputs X is
f = Lv + mean_function(X), where v \sim N(0, I) and LL^T = kern.K(X)
The variational distribution over the inducing points is
q(v) = N(q_mu, q_sqrt q_sqrt^T)
The layer holds D_out independent GPs with the same kernel and inducing points.
:param kern: The kernel for the layer (input_dim = D_in)
:param Z: Inducing points (M, D_in)
:param num_outputs: The number of GP outputs (q_mu is shape (M, num_outputs))
:param mean_function: The mean function
:return:
"""
Layer.__init__(self, input_prop_dim, **kwargs)
if feature is None:
feature = InducingPoints(Z)
self.num_inducing = len(feature)
self.feature = feature
self.kern = kern
self.mean_function = mean_function
self.num_outputs = num_outputs
self.white = white
if q_mu is None:
q_mu = np.zeros((self.num_inducing, num_outputs), dtype=settings.float_type)
self.q_mu = Parameter(q_mu)
if q_sqrt is None:
if not self.white: # initialize to prior
with gpflow.params_as_tensors_for(feature):
Ku = conditionals.Kuu(feature, self.kern, jitter=settings.jitter)
Lu = tf.linalg.cholesky(Ku)
Lu = self.enquire_session().run(Lu)
q_sqrt = np.tile(Lu[None, :, :], [num_outputs, 1, 1])
else:
q_sqrt = np.tile(np.eye(self.num_inducing, dtype=settings.float_type)[None, :, :], [num_outputs, 1, 1])
transform = transforms.LowerTriangular(self.num_inducing, num_matrices=num_outputs)
self.q_sqrt = Parameter(q_sqrt, transform=transform)
self.needs_build_cholesky = True
def build_cholesky_if_needed(self):
# make sure we only compute this once
if self.needs_build_cholesky:
self.Ku = conditionals.Kuu(self.feature, self.kern, jitter=settings.jitter)
self.Lu = tf.cholesky(self.Ku)
self.Ku_tiled = tf.tile(self.Ku[None, :, :], [self.num_outputs, 1, 1])
self.Lu_tiled = tf.tile(self.Lu[None, :, :], [self.num_outputs, 1, 1])
self.needs_build_cholesky = False
def conditional_ND(self, X, full_cov=False):
self.build_cholesky_if_needed()
Kuf = conditionals.Kuf(self.feature, self.kern, X)
A = tf.matrix_triangular_solve(self.Lu, Kuf, lower=True)
if not self.white:
A = tf.matrix_triangular_solve(tf.transpose(self.Lu), A, lower=False)
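# at this point A = Lu^{-1} Kuf in the whitened case, or Ku^{-1} Kuf otherwise;
# the predictive mean is A^T q_mu (the mean function is added on return)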
mean = tf.matmul(A, self.q_mu, transpose_a=True)
A_tiled = tf.tile(A[None, :, :], [self.num_outputs, 1, 1])
I = tf.eye(self.num_inducing, dtype=settings.float_type)[None, :, :]
if self.white:
SK = -I
else:
SK = -self.Ku_tiled
if self.q_sqrt is not None:
SK += tf.matmul(self.q_sqrt, self.q_sqrt, transpose_b=True)
B = tf.matmul(SK, A_tiled)
if full_cov:
# (num_latent, num_X, num_X)
delta_cov = tf.matmul(A_tiled, B, transpose_a=True)
Kff = self.kern.K(X)
else:
# (num_latent, num_X)
delta_cov = tf.reduce_sum(A_tiled * B, 1)
Kff = self.kern.Kdiag(X)
# either (1, num_X) + (num_latent, num_X) or (1, num_X, num_X) + (num_latent, num_X, num_X)
var = tf.expand_dims(Kff, 0) + delta_cov
var = tf.transpose(var)
return mean + self.mean_function(X), var
def KL(self):
"""
The KL divergence from the variational distribution to the prior
:return: KL divergence from N(q_mu, q_sqrt) to N(0, I), independently for each GP
"""
# if self.white:
# return gauss_kl(self.q_mu, self.q_sqrt)
# else:
# return gauss_kl(self.q_mu, self.q_sqrt, self.Ku)
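# closed-form KL between q(u) = N(q_mu, S), S = q_sqrt q_sqrt^T, and the prior
# p(u) = N(0, Ku) (Ku = I in the whitened case), summed over the num_outputs GPs:
#   KL = 0.5 * ( tr(Ku^{-1} S) + q_mu^T Ku^{-1} q_mu - M + log|Ku| - log|S| )
# the lines below accumulate exactly these terms using the cached Cholesky Lu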
self.build_cholesky_if_needed()
KL = -0.5 * self.num_outputs * self.num_inducing
KL -= 0.5 * tf.reduce_sum(tf.log(tf.matrix_diag_part(self.q_sqrt) ** 2))
if not self.white:
KL += tf.reduce_sum(tf.log(tf.matrix_diag_part(self.Lu))) * self.num_outputs
KL += 0.5 * tf.reduce_sum(tf.square(tf.matrix_triangular_solve(self.Lu_tiled, self.q_sqrt, lower=True)))
Kinv_m = tf.cholesky_solve(self.Lu, self.q_mu)
KL += 0.5 * tf.reduce_sum(self.q_mu * Kinv_m)
else:
KL += 0.5 * tf.reduce_sum(tf.square(self.q_sqrt))
KL += 0.5 * tf.reduce_sum(self.q_mu**2)
return KL
class SGPMC_Layer(SVGP_Layer):
def __init__(self, *args, **kwargs):
"""
A sparse layer for sampling over the inducing point values
"""
SVGP_Layer.__init__(self, *args, **kwargs)
self.q_mu.prior = Gaussian_prior(0., 1.)
del self.q_sqrt
self.q_sqrt = None
def KL(self):
return tf.cast(0., dtype=settings.float_type)
class GPMC_Layer(Layer):
def __init__(self, kern, X, num_outputs, mean_function, input_prop_dim=None, **kwargs):
"""
A dense GP layer with fixed inputs. NB X is fixed here and must be the full set of training inputs; minibatches are not possible
"""
Layer.__init__(self, input_prop_dim, **kwargs)
self.num_data = X.shape[0]
q_mu = np.zeros((self.num_data, num_outputs))
self.q_mu = Parameter(q_mu)
self.q_mu.prior = Gaussian_prior(0., 1.)
self.kern = kern
self.mean_function = mean_function
self.num_outputs = num_outputs
Ku = self.kern.compute_K_symm(X) + np.eye(self.num_data) * settings.jitter
self.Lu = tf.constant(np.linalg.cholesky(Ku))
self.X = tf.constant(X)
def build_latents(self):
f = tf.matmul(self.Lu, self.q_mu)
f += self.mean_function(self.X)
if self.input_prop_dim:
f = tf.concat([self.X[:, :self.input_prop_dim], f], 1)
return f
def conditional_ND(self, Xnew, full_cov=False):
mu, var = conditional(Xnew, self.X, self.kern, self.q_mu,
full_cov=full_cov,
q_sqrt=None, white=True)
return mu + self.mean_function(Xnew), var
class Collapsed_Layer(Layer):
"""
Extra functions for a collapsed layer
"""
def set_data(self, X_mean, X_var, Y, lik_variance):
self._X_mean = X_mean
self._X_var = X_var
self._Y = Y
self._lik_variance = lik_variance
def build_likelihood(self):
raise NotImplementedError
class GPR_Layer(Collapsed_Layer):
def __init__(self, kern, mean_function, num_outputs, **kwargs):
"""
A dense GP layer with a Gaussian likelihood, where the GP is integrated out
"""
Collapsed_Layer.__init__(self, **kwargs)
self.kern = kern
self.mean_function = mean_function
self.num_outputs = num_outputs
def conditional_ND(self, Xnew, full_cov=False):
## modified from GPR
Kx = self.kern.K(self._X_mean, Xnew)
K = self.kern.K(self._X_mean) + tf.eye(tf.shape(self._X_mean)[0], dtype=settings.float_type) * self._lik_variance
L = tf.cholesky(K)
A = tf.matrix_triangular_solve(L, Kx, lower=True)
V = tf.matrix_triangular_solve(L, self._Y - self.mean_function(self._X_mean))
fmean = tf.matmul(A, V, transpose_a=True) + self.mean_function(Xnew)
if full_cov:
fvar = self.kern.K(Xnew) - tf.matmul(A, A, transpose_a=True)
shape = tf.stack([1, 1, tf.shape(self._Y)[1]])
fvar = tf.tile(tf.expand_dims(fvar, 2), shape)
else:
fvar = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
fvar = tf.tile(tf.reshape(fvar, (-1, 1)), [1, tf.shape(self._Y)[1]])
return fmean, fvar
def build_likelihood(self):
## modified from GPR
K = self.kern.K(self._X_mean) + tf.eye(tf.shape(self._X_mean)[0], dtype=settings.float_type) * self._lik_variance
L = tf.cholesky(K)
m = self.mean_function(self._X_mean)
return tf.reduce_sum(multivariate_normal(self._Y, m, L))
class SGPR_Layer(Collapsed_Layer):
def __init__(self, kern, Z, num_outputs, mean_function, **kwargs):
"""
A sparse variational GP layer with a Gaussian likelihood, where the
GP is integrated out
:param kern: The kernel for the layer (input_dim = D_in)
:param Z: Inducing points (M, D_in)
:param mean_function: The mean function
:return:
"""
Collapsed_Layer.__init__(self, **kwargs)
self.feature = InducingPoints(Z)
self.kern = kern
self.mean_function = mean_function
self.num_outputs = num_outputs
def conditional_ND(self, Xnew, full_cov=False):
return gplvm_build_predict(self, Xnew, self._X_mean, self._X_var, self._Y, self._lik_variance, full_cov=full_cov)
def build_likelihood(self):
return gplvm_build_likelihood(self, self._X_mean, self._X_var, self._Y, self._lik_variance)
################## From gpflow (with KL removed)
def gplvm_build_likelihood(self, X_mean, X_var, Y, variance):
if X_var is None:
# SGPR
num_inducing = len(self.feature)
num_data = tf.cast(tf.shape(Y)[0], settings.float_type)
output_dim = tf.cast(tf.shape(Y)[1], settings.float_type)
err = Y - self.mean_function(X_mean)
Kdiag = self.kern.Kdiag(X_mean)
Kuf = conditionals.Kuf(self.feature, self.kern, X_mean)
Kuu = conditionals.Kuu(self.feature, self.kern, jitter=settings.numerics.jitter_level)
L = tf.cholesky(Kuu)
sigma = tf.sqrt(variance)
# Compute intermediate matrices
A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
AAT = tf.matmul(A, A, transpose_b=True)
B = AAT + tf.eye(num_inducing, dtype=settings.float_type)
LB = tf.cholesky(B)
Aerr = tf.matmul(A, err)
c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
# compute log marginal bound
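# this is the collapsed SGPR bound (Titsias, 2009):
# log N(y | mean_function(X), Qnn + variance*I) - 0.5*output_dim/variance * (tr(Knn) - tr(Qnn)),
# with Qnn = Kfu Kuu^{-1} Kuf evaluated via the Cholesky factors above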
bound = -0.5 * num_data * output_dim * np.log(2 * np.pi)
bound += tf.negative(output_dim) * tf.reduce_sum(tf.log(tf.matrix_diag_part(LB)))
bound -= 0.5 * num_data * output_dim * tf.log(variance)
bound += -0.5 * tf.reduce_sum(tf.square(err)) / variance
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += -0.5 * output_dim * tf.reduce_sum(Kdiag) / variance
bound += 0.5 * output_dim * tf.reduce_sum(tf.matrix_diag_part(AAT))
return bound
else:
X_cov = tf.matrix_diag(X_var)
pX = DiagonalGaussian(X_mean, X_var)
num_inducing = len(self.feature)
if hasattr(self.kern, 'X_input_dim'):
psi0 = tf.reduce_sum(self.kern.eKdiag(X_mean, X_cov))
psi1 = self.kern.eKxz(self.feature.Z, X_mean, X_cov)
psi2 = tf.reduce_sum(self.kern.eKzxKxz(self.feature.Z, X_mean, X_cov), 0)
else:
psi0 = tf.reduce_sum(expectation(pX, self.kern))
psi1 = expectation(pX, (self.kern, self.feature))
psi2 = tf.reduce_sum(expectation(pX, (self.kern, self.feature), (self.kern, self.feature)), axis=0)
Kuu = conditionals.Kuu(self.feature, self.kern, jitter=settings.numerics.jitter_level)
L = tf.cholesky(Kuu)
sigma2 = variance
sigma = tf.sqrt(sigma2)
# Compute intermediate matrices
A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
tmp = tf.matrix_triangular_solve(L, psi2, lower=True)
AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=settings.float_type)
LB = tf.cholesky(B)
log_det_B = 2. * tf.reduce_sum(tf.log(tf.matrix_diag_part(LB)))
c = tf.matrix_triangular_solve(LB, tf.matmul(A, Y), lower=True) / sigma
# KL[q(x) || p(x)]
# dX_var = self.X_var if len(self.X_var.get_shape()) == 2 else tf.matrix_diag_part(self.X_var)
# NQ = tf.cast(tf.size(self.X_mean), settings.float_type)
D = tf.cast(tf.shape(Y)[1], settings.float_type)
# KL = -0.5 * tf.reduce_sum(tf.log(dX_var)) \
# + 0.5 * tf.reduce_sum(tf.log(self.X_prior_var)) \
# - 0.5 * NQ \
# + 0.5 * tf.reduce_sum((tf.square(self.X_mean - self.X_prior_mean) + dX_var) / self.X_prior_var)
# compute log marginal bound
ND = tf.cast(tf.size(Y), settings.float_type)
bound = -0.5 * ND * tf.log(2 * np.pi * sigma2)
bound += -0.5 * D * log_det_B
bound += -0.5 * tf.reduce_sum(tf.square(Y)) / sigma2
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += -0.5 * D * (tf.reduce_sum(psi0) / sigma2 -
tf.reduce_sum(tf.matrix_diag_part(AAT)))
# bound -= KL # don't need this term
return bound
############# Exactly from gpflow
def gplvm_build_predict(self, Xnew, X_mean, X_var, Y, variance, full_cov=False):
if X_var is None:
# SGPR
num_inducing = len(self.feature)
err = Y - self.mean_function(X_mean)
Kuf = conditionals.Kuf(self.feature, self.kern, X_mean)
Kuu = conditionals.Kuu(self.feature, self.kern, jitter=settings.numerics.jitter_level)
Kus = conditionals.Kuf(self.feature, self.kern, Xnew)
sigma = tf.sqrt(variance)
L = tf.cholesky(Kuu)
A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
B = tf.matmul(A, A, transpose_b=True) + tf.eye(num_inducing, dtype=settings.float_type)
LB = tf.cholesky(B)
Aerr = tf.matmul(A, err)
c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
mean = tf.matmul(tmp2, c, transpose_a=True)
if full_cov:
var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \
- tf.matmul(tmp1, tmp1, transpose_a=True)
shape = tf.stack([1, 1, tf.shape(Y)[1]])
var = tf.tile(tf.expand_dims(var, 2), shape)
else:
var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
- tf.reduce_sum(tf.square(tmp1), 0)
shape = tf.stack([1, tf.shape(Y)[1]])
var = tf.tile(tf.expand_dims(var, 1), shape)
return mean + self.mean_function(Xnew), var
else:
# gplvm
pX = DiagonalGaussian(X_mean, X_var)
num_inducing = len(self.feature)
X_cov = tf.matrix_diag(X_var)
if hasattr(self.kern, 'X_input_dim'):
psi1 = self.kern.eKxz(self.feature.Z, X_mean, X_cov)
psi2 = tf.reduce_sum(self.kern.eKzxKxz(self.feature.Z, X_mean, X_cov), 0)
else:
psi1 = expectation(pX, (self.kern, self.feature))
psi2 = tf.reduce_sum(expectation(pX, (self.kern, self.feature), (self.kern, self.feature)), axis=0)
Kuu = conditionals.Kuu(self.feature, self.kern, jitter=settings.numerics.jitter_level)
Kus = conditionals.Kuf(self.feature, self.kern, Xnew)
sigma2 = variance
sigma = tf.sqrt(sigma2)
L = tf.cholesky(Kuu)
A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
tmp = tf.matrix_triangular_solve(L, psi2, lower=True)
AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=settings.float_type)
LB = tf.cholesky(B)
c = tf.matrix_triangular_solve(LB, tf.matmul(A, Y), lower=True) / sigma
tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
mean = tf.matmul(tmp2, c, transpose_a=True)
if full_cov:
var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \
- tf.matmul(tmp1, tmp1, transpose_a=True)
shape = tf.stack([1, 1, tf.shape(Y)[1]])
var = tf.tile(tf.expand_dims(var, 2), shape)
else:
var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
- tf.reduce_sum(tf.square(tmp1), 0)
shape = tf.stack([1, tf.shape(Y)[1]])
var = tf.tile(tf.expand_dims(var, 1), shape)
return mean + self.mean_function(Xnew), var
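# Minimal usage sketch (commented out; assumes GPflow 1.x and a TF session,
# and the names below are illustrative, not part of the original file):
# import numpy as np
# Z = np.random.randn(20, 1)
# layer = SVGP_Layer(gpflow.kernels.RBF(1), num_outputs=1,
#                    mean_function=gpflow.mean_functions.Zero(), Z=Z)
# X = tf.random_normal([5, 100, 1], dtype=settings.float_type)  # (S, N, D_in)
# samples, mean, var = layer.sample_from_conditional(X)
# kl = layer.KL()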
|
from .. import loader
import logging, random
logger = logging.getLogger(__name__)
def register(cb):
cb(InsultMod())
class InsultMod(loader.Module):
"""Shouts at people"""
def __init__(self):
self.commands = {'insult':self.insultcmd}
self.config = {}
self.name = "Insulter"
async def insultcmd(self, message):
adjectives_start = ["salty", "fat", "fucking", "shitty", "stupid", "retarded", "gay","self conscious","tiny"]
adjectives_mid = ["little", "vitamin D deficient", "idiotic", "incredibly stupid"]
nouns = ["cunt", "pig", "pedophile", "beta male","bottom" "retard", "ass licker", "cunt nugget", "PENIS", "dickhead", "flute","idiot","motherfucker",
"loner"]
starts = ["You're a", "You", "Fuck off you","Actually die you", "Listen up you", "What the fuck is wrong with you, you"]
ends = ["!!!!", "!", ""]
start = random.choice(starts)
adjective_start = random.choice(adjectives_start)
adjective_mid = random.choice(adjectives_mid)
noun = random.choice(nouns)
end = random.choice(ends)
insult = start + " " + adjective_start + " " + adjective_mid + (" " if adjective_mid else "") + noun + end
logger.debug(insult)
await message.edit(insult)
|
from . import mongo_status
from . import mongo_connection
__all__ = [
'mongo_status',
'mongo_connection'
]
|
#!/bin/env python2.7
## SCCwatcher 2.0 ##
## ##
## sccwatcher.py ##
## ##
## Everything starts here ##
############################
import sys
import re
from settings_ui import *
from PyQt4 import QtGui, QtCore
#This is required to override the closeEvent
class SCCMainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(SCCMainWindow, self).__init__(parent)
self._user_accept_close = False
self.setAcceptDrops(True)
self.ui = None
def closeEvent(self, event):
#We first emit the closing signal, then we actually close
self.emit(QtCore.SIGNAL("appClosing"))
if self._user_accept_close is True:
super(SCCMainWindow, self).closeEvent(event)
else:
event.ignore()
def dropEvent(self, event):
#Got a file drop!
filepath = str(event.mimeData().urls()[0].path())
#Check if we have a windows path and remove the prepended forward slash if necessary
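#e.g. on Windows a drop of C:\some\file.txt arrives from QUrl.path() as "/C:/some/file.txt"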
if re.search("^/[a-zA-Z]:", filepath):
#Technically, because of the regex we should already know index 0 is a forward slash, but meh can't hurt.
if filepath[0] == "/":
filepath = filepath[1:]
#Now we emit a signal so our main app can handle it
self.emit(QtCore.SIGNAL("gotFileDrop"), filepath)
def dragEnterEvent(self, event):
#We don't really need to do any checks here for file type since the loader function does it all for us.
event.acceptProposedAction()
def main():
app = QtGui.QApplication(sys.argv)
Window = SCCMainWindow()
#Window.setAcceptDrops(True)
ui = Ui_sccw_SettingsUI()
Window.ui = ui
ui.setupUi(Window)
Window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
class NodeDisconnectException(Exception):
"""This exception is thrown when Protocoin detects a
disconnection from the node it is connected."""
pass
|
# -*- coding: utf-8 -*-
'''
File name: code\cyclical_figurate_numbers\sol_61.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #61 :: Cyclical figurate numbers
#
# For more information see:
# https://projecteuler.net/problem=61
# Problem Statement
'''
Triangle, square, pentagonal, hexagonal, heptagonal, and octagonal numbers are all figurate (polygonal) numbers and are generated by the following formulae:
Triangle
P3,n=n(n+1)/2
1, 3, 6, 10, 15, ...
Square
P4,n=n^2
1, 4, 9, 16, 25, ...
Pentagonal
P5,n=n(3n−1)/2
1, 5, 12, 22, 35, ...
Hexagonal
P6,n=n(2n−1)
1, 6, 15, 28, 45, ...
Heptagonal
P7,n=n(5n−3)/2
1, 7, 18, 34, 55, ...
Octagonal
P8,n=n(3n−2)
1, 8, 21, 40, 65, ...
The ordered set of three 4-digit numbers: 8128, 2882, 8281, has three interesting properties.
The set is cyclic, in that the last two digits of each number is the first two digits of the next number (including the last number with the first).
Each polygonal type: triangle (P3,127=8128), square (P4,91=8281), and pentagonal (P5,44=2882), is represented by a different number in the set.
This is the only set of 4-digit numbers with this property.
Find the sum of the only ordered set of six cyclic 4-digit numbers for which each polygonal type: triangle, square, pentagonal, hexagonal, heptagonal, and octagonal, is represented by a different number in the set.
'''
# Solution
# Solution Approach
'''
'''
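# A possible approach (added sketch, not the original author's solution):
# generate every 4-digit polygonal number for s = 3..8, then depth-first search
# for a six-number chain that uses each polygonal type exactly once, where the
# last two digits of each number are the first two of the next and the chain
# closes back onto its first number.
def _polygonal_numbers(s):
    formula = {3: lambda n: n * (n + 1) // 2,
               4: lambda n: n * n,
               5: lambda n: n * (3 * n - 1) // 2,
               6: lambda n: n * (2 * n - 1),
               7: lambda n: n * (5 * n - 3) // 2,
               8: lambda n: n * (3 * n - 2)}[s]
    nums, n = [], 1
    while formula(n) < 10000:
        if formula(n) >= 1000:
            nums.append(formula(n))
        n += 1
    return nums

def _extend(chain, remaining, sets):
    if not remaining:  # all six types used: check that the cycle closes
        return chain[-1] % 100 == chain[0] // 100
    for s in list(remaining):
        for v in sets[s]:
            if v // 100 == chain[-1] % 100 and v not in chain:
                chain.append(v)
                if _extend(chain, remaining - {s}, sets):
                    return True
                chain.pop()
    return False

def solve():
    sets = {s: _polygonal_numbers(s) for s in range(3, 9)}
    # the set is cyclic, so assume without loss of generality that the
    # triangle number comes first
    for start in sets[3]:
        chain = [start]
        if _extend(chain, {4, 5, 6, 7, 8}, sets):
            return sum(chain)

if __name__ == '__main__':
    print(solve())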
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Link(Base):
"""This object holds the LACP link configuration.
The Link class encapsulates a list of link resources that are managed by the user.
A list of resources can be retrieved from the server using the Link.find() method.
The list can be managed by using the Link.add() and Link.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'link'
_SDM_ATT_MAP = {
'ActorKey': 'actorKey',
'ActorPortNumber': 'actorPortNumber',
'ActorPortPriority': 'actorPortPriority',
'ActorSystemId': 'actorSystemId',
'ActorSystemPriority': 'actorSystemPriority',
'AdministrativeKey': 'administrativeKey',
'AggregationFlagState': 'aggregationFlagState',
'AutoPickPortMac': 'autoPickPortMac',
'CollectingFlag': 'collectingFlag',
'CollectorMaxDelay': 'collectorMaxDelay',
'DistributingFlag': 'distributingFlag',
'Enabled': 'enabled',
'InterMarkerPduDelay': 'interMarkerPduDelay',
'LacpActivity': 'lacpActivity',
'LacpTimeout': 'lacpTimeout',
'LacpduPeriodicTimeInterval': 'lacpduPeriodicTimeInterval',
'MarkerRequestMode': 'markerRequestMode',
'MarkerResponseWaitTime': 'markerResponseWaitTime',
'PortMac': 'portMac',
'SendMarkerRequestOnLagChange': 'sendMarkerRequestOnLagChange',
'SendPeriodicMarkerRequest': 'sendPeriodicMarkerRequest',
'SupportRespondingToMarker': 'supportRespondingToMarker',
'SyncFlag': 'syncFlag',
'UpdateRequired': 'updateRequired',
}
def __init__(self, parent):
super(Link, self).__init__(parent)
@property
def ActorKey(self):
"""
Returns
-------
- number: The operational Key value assigned to the port by the Actor. This is a 2 byte field with a default of 1. Minimum value is 0, maximum value is 65535.
"""
return self._get_attribute(self._SDM_ATT_MAP['ActorKey'])
@ActorKey.setter
def ActorKey(self, value):
self._set_attribute(self._SDM_ATT_MAP['ActorKey'], value)
@property
def ActorPortNumber(self):
"""
Returns
-------
- number: The port number assigned to the port by the Actor (the System sending the PDU). It is a 2 byte field with a default of 1. Min: 0, Max: 65535.
"""
return self._get_attribute(self._SDM_ATT_MAP['ActorPortNumber'])
@ActorPortNumber.setter
def ActorPortNumber(self, value):
self._set_attribute(self._SDM_ATT_MAP['ActorPortNumber'], value)
@property
def ActorPortPriority(self):
"""
Returns
-------
        - number: This field specifies the port priority of the link Actor. It is a 2 byte field, with a default of 1. Min: 0, Max: 65535.
"""
return self._get_attribute(self._SDM_ATT_MAP['ActorPortPriority'])
@ActorPortPriority.setter
def ActorPortPriority(self, value):
self._set_attribute(self._SDM_ATT_MAP['ActorPortPriority'], value)
@property
def ActorSystemId(self):
"""
Returns
-------
- str: This field specifies the system identifier for the link Actor. It is a 6 byte field, with a default of 00-00-00-00-00-01. Min: 00-00-00-00-00-00, Max: FF-FF-FF-FF-FF-FF.
"""
return self._get_attribute(self._SDM_ATT_MAP['ActorSystemId'])
@ActorSystemId.setter
def ActorSystemId(self, value):
self._set_attribute(self._SDM_ATT_MAP['ActorSystemId'], value)
@property
def ActorSystemPriority(self):
"""
Returns
-------
        - number: This field specifies the system priority of the link Actor. It is a 2 byte field, with a default of 1. Min: 0, Max: 65535.
"""
return self._get_attribute(self._SDM_ATT_MAP['ActorSystemPriority'])
@ActorSystemPriority.setter
def ActorSystemPriority(self, value):
self._set_attribute(self._SDM_ATT_MAP['ActorSystemPriority'], value)
@property
def AdministrativeKey(self):
"""
Returns
-------
- number: This field controls the aggregation of ports of the same system with similar Actor Key.
"""
return self._get_attribute(self._SDM_ATT_MAP['AdministrativeKey'])
@AdministrativeKey.setter
def AdministrativeKey(self, value):
self._set_attribute(self._SDM_ATT_MAP['AdministrativeKey'], value)
@property
def AggregationFlagState(self):
"""
Returns
-------
- str(disable | auto): If enabled, sets the port status to automatically allow aggregation.
"""
return self._get_attribute(self._SDM_ATT_MAP['AggregationFlagState'])
@AggregationFlagState.setter
def AggregationFlagState(self, value):
self._set_attribute(self._SDM_ATT_MAP['AggregationFlagState'], value)
@property
def AutoPickPortMac(self):
"""
Returns
-------
- bool: If true the source MAC is the interface MAC address.
"""
return self._get_attribute(self._SDM_ATT_MAP['AutoPickPortMac'])
@AutoPickPortMac.setter
def AutoPickPortMac(self, value):
self._set_attribute(self._SDM_ATT_MAP['AutoPickPortMac'], value)
@property
def CollectingFlag(self):
"""
Returns
-------
- bool: If true, the actor port state Collecting is set to true based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent
"""
return self._get_attribute(self._SDM_ATT_MAP['CollectingFlag'])
@CollectingFlag.setter
def CollectingFlag(self, value):
self._set_attribute(self._SDM_ATT_MAP['CollectingFlag'], value)
@property
def CollectorMaxDelay(self):
"""
Returns
-------
- number: The maximum time in microseconds that the Frame Collector may delay the delivery of a frame received from an Aggregator to its MAC client. This is a 2 byte field with a default 0. Min: 0, Max: 65535.
"""
return self._get_attribute(self._SDM_ATT_MAP['CollectorMaxDelay'])
@CollectorMaxDelay.setter
def CollectorMaxDelay(self, value):
self._set_attribute(self._SDM_ATT_MAP['CollectorMaxDelay'], value)
@property
def DistributingFlag(self):
"""
Returns
-------
- bool: If true, the actor port state Distributing is set to true based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent.
"""
return self._get_attribute(self._SDM_ATT_MAP['DistributingFlag'])
@DistributingFlag.setter
def DistributingFlag(self, value):
self._set_attribute(self._SDM_ATT_MAP['DistributingFlag'], value)
@property
def Enabled(self):
"""
Returns
-------
- bool: If true, the link is enabled.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def InterMarkerPduDelay(self):
"""
Returns
-------
- str: The time gap in seconds between two consecutive Marker PDUs when transmitted periodically.
"""
return self._get_attribute(self._SDM_ATT_MAP['InterMarkerPduDelay'])
@InterMarkerPduDelay.setter
def InterMarkerPduDelay(self, value):
self._set_attribute(self._SDM_ATT_MAP['InterMarkerPduDelay'], value)
@property
def LacpActivity(self):
"""
Returns
-------
        - str(active | passive): Sets the value of LACP's Actor activity, either passive or active.
"""
return self._get_attribute(self._SDM_ATT_MAP['LacpActivity'])
@LacpActivity.setter
def LacpActivity(self, value):
self._set_attribute(self._SDM_ATT_MAP['LacpActivity'], value)
@property
def LacpTimeout(self):
"""
Returns
-------
- number: This timer is used to detect whether received protocol information has expired. The user can provide a custom value from 1 to 65535.
"""
return self._get_attribute(self._SDM_ATT_MAP['LacpTimeout'])
@LacpTimeout.setter
def LacpTimeout(self, value):
self._set_attribute(self._SDM_ATT_MAP['LacpTimeout'], value)
@property
def LacpduPeriodicTimeInterval(self):
"""
Returns
-------
        - number: This field defines how frequently LACPDUs are sent to the link partner. The user can provide a custom value from 1 to 65535, in seconds.
"""
return self._get_attribute(self._SDM_ATT_MAP['LacpduPeriodicTimeInterval'])
@LacpduPeriodicTimeInterval.setter
def LacpduPeriodicTimeInterval(self, value):
self._set_attribute(self._SDM_ATT_MAP['LacpduPeriodicTimeInterval'], value)
@property
def MarkerRequestMode(self):
"""
Returns
-------
        - str(fixed | random): Sets the marker request mode for the Actor link. In either case, the mode parameters are specified in Marker Request Frequency.
"""
return self._get_attribute(self._SDM_ATT_MAP['MarkerRequestMode'])
@MarkerRequestMode.setter
def MarkerRequestMode(self, value):
self._set_attribute(self._SDM_ATT_MAP['MarkerRequestMode'], value)
@property
def MarkerResponseWaitTime(self):
"""
Returns
-------
        - number: The number of seconds to wait for a Marker Response after sending a Marker Request. After this time, the Marker Response Timeout Count is incremented. If a Marker Response arrives for the request after this timeout, it is not considered a legitimate response.
"""
return self._get_attribute(self._SDM_ATT_MAP['MarkerResponseWaitTime'])
@MarkerResponseWaitTime.setter
def MarkerResponseWaitTime(self, value):
self._set_attribute(self._SDM_ATT_MAP['MarkerResponseWaitTime'], value)
@property
def PortMac(self):
"""
Returns
-------
        - str: Specifies the port MAC address.
"""
return self._get_attribute(self._SDM_ATT_MAP['PortMac'])
@PortMac.setter
def PortMac(self, value):
self._set_attribute(self._SDM_ATT_MAP['PortMac'], value)
@property
def SendMarkerRequestOnLagChange(self):
"""
Returns
-------
- bool: If true, this checkbox causes LACP to send a Marker PDU on the following situations: 1) System Priority has been modified; 2) System Id has been modified; 3) Actor Key has been modified; 4) Port Number/Port Priority has been modified while we are in Individual mode.
"""
return self._get_attribute(self._SDM_ATT_MAP['SendMarkerRequestOnLagChange'])
@SendMarkerRequestOnLagChange.setter
def SendMarkerRequestOnLagChange(self, value):
self._set_attribute(self._SDM_ATT_MAP['SendMarkerRequestOnLagChange'], value)
@property
def SendPeriodicMarkerRequest(self):
"""
Returns
-------
        - bool: If true, Marker Request PDUs are sent periodically after both actor and partner are IN SYNC and our state is aggregated. The moment we come out of this state, the periodic sending of Marker Requests is stopped.
"""
return self._get_attribute(self._SDM_ATT_MAP['SendPeriodicMarkerRequest'])
@SendPeriodicMarkerRequest.setter
def SendPeriodicMarkerRequest(self, value):
self._set_attribute(self._SDM_ATT_MAP['SendPeriodicMarkerRequest'], value)
@property
def SupportRespondingToMarker(self):
"""
Returns
-------
- bool: If true, LACP doesn't respond to MARKER request PDUs from the partner.
"""
return self._get_attribute(self._SDM_ATT_MAP['SupportRespondingToMarker'])
@SupportRespondingToMarker.setter
def SupportRespondingToMarker(self, value):
self._set_attribute(self._SDM_ATT_MAP['SupportRespondingToMarker'], value)
@property
def SyncFlag(self):
"""
Returns
-------
- str(disable | auto): If enabled, the actor port state is set to True based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent.
"""
return self._get_attribute(self._SDM_ATT_MAP['SyncFlag'])
@SyncFlag.setter
def SyncFlag(self, value):
self._set_attribute(self._SDM_ATT_MAP['SyncFlag'], value)
@property
def UpdateRequired(self):
"""
Returns
-------
        - bool: (read only) If true, an update LACPDU is required for the link.
"""
return self._get_attribute(self._SDM_ATT_MAP['UpdateRequired'])
def update(self, ActorKey=None, ActorPortNumber=None, ActorPortPriority=None, ActorSystemId=None, ActorSystemPriority=None, AdministrativeKey=None, AggregationFlagState=None, AutoPickPortMac=None, CollectingFlag=None, CollectorMaxDelay=None, DistributingFlag=None, Enabled=None, InterMarkerPduDelay=None, LacpActivity=None, LacpTimeout=None, LacpduPeriodicTimeInterval=None, MarkerRequestMode=None, MarkerResponseWaitTime=None, PortMac=None, SendMarkerRequestOnLagChange=None, SendPeriodicMarkerRequest=None, SupportRespondingToMarker=None, SyncFlag=None):
"""Updates link resource on the server.
Args
----
- ActorKey (number): The operational Key value assigned to the port by the Actor. This is a 2 byte field with a default of 1. Minimum value is 0, maximum value is 65535.
- ActorPortNumber (number): The port number assigned to the port by the Actor (the System sending the PDU). It is a 2 byte field with a default of 1. Min: 0, Max: 65535.
        - ActorPortPriority (number): This field specifies the port priority of the link Actor. It is a 2 byte field, with a default of 1. Min: 0, Max: 65535.
        - ActorSystemId (str): This field specifies the system identifier for the link Actor. It is a 6 byte field, with a default of 00-00-00-00-00-01. Min: 00-00-00-00-00-00, Max: FF-FF-FF-FF-FF-FF.
        - ActorSystemPriority (number): This field specifies the system priority of the link Actor. It is a 2 byte field, with a default of 1. Min: 0, Max: 65535.
- AdministrativeKey (number): This field controls the aggregation of ports of the same system with similar Actor Key.
- AggregationFlagState (str(disable | auto)): If enabled, sets the port status to automatically allow aggregation.
- AutoPickPortMac (bool): If true the source MAC is the interface MAC address.
- CollectingFlag (bool): If true, the actor port state Collecting is set to true based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent
- CollectorMaxDelay (number): The maximum time in microseconds that the Frame Collector may delay the delivery of a frame received from an Aggregator to its MAC client. This is a 2 byte field with a default 0. Min: 0, Max: 65535.
- DistributingFlag (bool): If true, the actor port state Distributing is set to true based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent.
- Enabled (bool): If true, the link is enabled.
- InterMarkerPduDelay (str): The time gap in seconds between two consecutive Marker PDUs when transmitted periodically.
        - LacpActivity (str(active | passive)): Sets the value of LACP's Actor activity, either passive or active.
        - LacpTimeout (number): This timer is used to detect whether received protocol information has expired. The user can provide a custom value from 1 to 65535.
        - LacpduPeriodicTimeInterval (number): This field defines how frequently LACPDUs are sent to the link partner. The user can provide a custom value from 1 to 65535, in seconds.
        - MarkerRequestMode (str(fixed | random)): Sets the marker request mode for the Actor link. In either case, the mode parameters are specified in Marker Request Frequency.
        - MarkerResponseWaitTime (number): The number of seconds to wait for a Marker Response after sending a Marker Request. After this time, the Marker Response Timeout Count is incremented. If a Marker Response arrives for the request after this timeout, it is not considered a legitimate response.
        - PortMac (str): Specifies the port MAC address.
        - SendMarkerRequestOnLagChange (bool): If true, this checkbox causes LACP to send a Marker PDU on the following situations: 1) System Priority has been modified; 2) System Id has been modified; 3) Actor Key has been modified; 4) Port Number/Port Priority has been modified while we are in Individual mode.
        - SendPeriodicMarkerRequest (bool): If true, Marker Request PDUs are sent periodically after both actor and partner are IN SYNC and our state is aggregated. The moment we come out of this state, the periodic sending of Marker Requests is stopped.
- SupportRespondingToMarker (bool): If true, LACP doesn't respond to MARKER request PDUs from the partner.
- SyncFlag (str(disable | auto)): If enabled, the actor port state is set to True based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ActorKey=None, ActorPortNumber=None, ActorPortPriority=None, ActorSystemId=None, ActorSystemPriority=None, AdministrativeKey=None, AggregationFlagState=None, AutoPickPortMac=None, CollectingFlag=None, CollectorMaxDelay=None, DistributingFlag=None, Enabled=None, InterMarkerPduDelay=None, LacpActivity=None, LacpTimeout=None, LacpduPeriodicTimeInterval=None, MarkerRequestMode=None, MarkerResponseWaitTime=None, PortMac=None, SendMarkerRequestOnLagChange=None, SendPeriodicMarkerRequest=None, SupportRespondingToMarker=None, SyncFlag=None):
"""Adds a new link resource on the server and adds it to the container.
Args
----
- ActorKey (number): The operational Key value assigned to the port by the Actor. This is a 2 byte field with a default of 1. Minimum value is 0, maximum value is 65535.
- ActorPortNumber (number): The port number assigned to the port by the Actor (the System sending the PDU). It is a 2 byte field with a default of 1. Min: 0, Max: 65535.
        - ActorPortPriority (number): This field specifies the port priority of the link Actor. It is a 2 byte field, with a default of 1. Min: 0, Max: 65535.
        - ActorSystemId (str): This field specifies the system identifier for the link Actor. It is a 6 byte field, with a default of 00-00-00-00-00-01. Min: 00-00-00-00-00-00, Max: FF-FF-FF-FF-FF-FF.
        - ActorSystemPriority (number): This field specifies the system priority of the link Actor. It is a 2 byte field, with a default of 1. Min: 0, Max: 65535.
- AdministrativeKey (number): This field controls the aggregation of ports of the same system with similar Actor Key.
- AggregationFlagState (str(disable | auto)): If enabled, sets the port status to automatically allow aggregation.
- AutoPickPortMac (bool): If true the source MAC is the interface MAC address.
- CollectingFlag (bool): If true, the actor port state Collecting is set to true based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent
- CollectorMaxDelay (number): The maximum time in microseconds that the Frame Collector may delay the delivery of a frame received from an Aggregator to its MAC client. This is a 2 byte field with a default 0. Min: 0, Max: 65535.
- DistributingFlag (bool): If true, the actor port state Distributing is set to true based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent.
- Enabled (bool): If true, the link is enabled.
- InterMarkerPduDelay (str): The time gap in seconds between two consecutive Marker PDUs when transmitted periodically.
        - LacpActivity (str(active | passive)): Sets the value of LACP's Actor activity, either passive or active.
        - LacpTimeout (number): This timer is used to detect whether received protocol information has expired. The user can provide a custom value from 1 to 65535.
        - LacpduPeriodicTimeInterval (number): This field defines how frequently LACPDUs are sent to the link partner. The user can provide a custom value from 1 to 65535, in seconds.
        - MarkerRequestMode (str(fixed | random)): Sets the marker request mode for the Actor link. In either case, the mode parameters are specified in Marker Request Frequency.
        - MarkerResponseWaitTime (number): The number of seconds to wait for a Marker Response after sending a Marker Request. After this time, the Marker Response Timeout Count is incremented. If a Marker Response arrives for the request after this timeout, it is not considered a legitimate response.
        - PortMac (str): Specifies the port MAC address.
        - SendMarkerRequestOnLagChange (bool): If true, this checkbox causes LACP to send a Marker PDU on the following situations: 1) System Priority has been modified; 2) System Id has been modified; 3) Actor Key has been modified; 4) Port Number/Port Priority has been modified while we are in Individual mode.
        - SendPeriodicMarkerRequest (bool): If true, Marker Request PDUs are sent periodically after both actor and partner are IN SYNC and our state is aggregated. The moment we come out of this state, the periodic sending of Marker Requests is stopped.
- SupportRespondingToMarker (bool): If true, LACP doesn't respond to MARKER request PDUs from the partner.
- SyncFlag (str(disable | auto)): If enabled, the actor port state is set to True based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent.
Returns
-------
- self: This instance with all currently retrieved link resources using find and the newly added link resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained link resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ActorKey=None, ActorPortNumber=None, ActorPortPriority=None, ActorSystemId=None, ActorSystemPriority=None, AdministrativeKey=None, AggregationFlagState=None, AutoPickPortMac=None, CollectingFlag=None, CollectorMaxDelay=None, DistributingFlag=None, Enabled=None, InterMarkerPduDelay=None, LacpActivity=None, LacpTimeout=None, LacpduPeriodicTimeInterval=None, MarkerRequestMode=None, MarkerResponseWaitTime=None, PortMac=None, SendMarkerRequestOnLagChange=None, SendPeriodicMarkerRequest=None, SupportRespondingToMarker=None, SyncFlag=None, UpdateRequired=None):
"""Finds and retrieves link resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve link resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all link resources from the server.
Args
----
- ActorKey (number): The operational Key value assigned to the port by the Actor. This is a 2 byte field with a default of 1. Minimum value is 0, maximum value is 65535.
- ActorPortNumber (number): The port number assigned to the port by the Actor (the System sending the PDU). It is a 2 byte field with a default of 1. Min: 0, Max: 65535.
        - ActorPortPriority (number): This field specifies the port priority of the link Actor. It is a 2 byte field, with a default of 1. Min: 0, Max: 65535.
        - ActorSystemId (str): This field specifies the system identifier for the link Actor. It is a 6 byte field, with a default of 00-00-00-00-00-01. Min: 00-00-00-00-00-00, Max: FF-FF-FF-FF-FF-FF.
        - ActorSystemPriority (number): This field specifies the system priority of the link Actor. It is a 2 byte field, with a default of 1. Min: 0, Max: 65535.
- AdministrativeKey (number): This field controls the aggregation of ports of the same system with similar Actor Key.
- AggregationFlagState (str(disable | auto)): If enabled, sets the port status to automatically allow aggregation.
- AutoPickPortMac (bool): If true the source MAC is the interface MAC address.
- CollectingFlag (bool): If true, the actor port state Collecting is set to true based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent
- CollectorMaxDelay (number): The maximum time in microseconds that the Frame Collector may delay the delivery of a frame received from an Aggregator to its MAC client. This is a 2 byte field with a default 0. Min: 0, Max: 65535.
- DistributingFlag (bool): If true, the actor port state Distributing is set to true based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent.
- Enabled (bool): If true, the link is enabled.
- InterMarkerPduDelay (str): The time gap in seconds between two consecutive Marker PDUs when transmitted periodically.
        - LacpActivity (str(active | passive)): Sets the value of LACP's Actor activity, either passive or active.
        - LacpTimeout (number): This timer is used to detect whether received protocol information has expired. The user can provide a custom value from 1 to 65535.
        - LacpduPeriodicTimeInterval (number): This field defines how frequently LACPDUs are sent to the link partner. The user can provide a custom value from 1 to 65535, in seconds.
        - MarkerRequestMode (str(fixed | random)): Sets the marker request mode for the Actor link. In either case, the mode parameters are specified in Marker Request Frequency.
        - MarkerResponseWaitTime (number): The number of seconds to wait for a Marker Response after sending a Marker Request. After this time, the Marker Response Timeout Count is incremented. If a Marker Response arrives for the request after this timeout, it is not considered a legitimate response.
        - PortMac (str): Specifies the port MAC address.
        - SendMarkerRequestOnLagChange (bool): If true, this checkbox causes LACP to send a Marker PDU on the following situations: 1) System Priority has been modified; 2) System Id has been modified; 3) Actor Key has been modified; 4) Port Number/Port Priority has been modified while we are in Individual mode.
        - SendPeriodicMarkerRequest (bool): If true, Marker Request PDUs are sent periodically after both actor and partner are IN SYNC and our state is aggregated. The moment we come out of this state, the periodic sending of Marker Requests is stopped.
- SupportRespondingToMarker (bool): If true, LACP doesn't respond to MARKER request PDUs from the partner.
- SyncFlag (str(disable | auto)): If enabled, the actor port state is set to True based on Tx and Rx state machines. Otherwise, the flag in LACPDU remains reset for all packets sent.
        - UpdateRequired (bool): (read only) If true, an update LACPDU is required for the link.
Returns
-------
- self: This instance with matching link resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of link data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the link resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
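# Example usage (sketch): the exact parent object depends on how the session's
# protocol hierarchy is built, so `lacp` below is a hypothetical LACP node; the
# find()/update() calls are the methods defined on this class.
# links = Link(lacp).find()
# links.update(LacpActivity='active', LacpTimeout=30)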
|
__author__ = 'renhao.cui'
import utilities
from sklearn import cross_validation
import combinedMapping as cm
import modelUtility
def alchemyTrainInfer(alchemy_train, alchemy_test, label_train, label_test, trainProbFlag):
# model from A to B: model[A] = {B: score}
(model, cand, candProb) = cm.mappingTrainer4(alchemy_train, label_train)
predictions = utilities.outputMappingResult3_fullList(model, cand, candProb, alchemy_test)
predictionsTrain = {}
if trainProbFlag:
predictionsTrain = utilities.outputMappingResult3_fullList(model, cand, candProb, alchemy_train)
correct = 0.0
total = 0.0
for index, label in enumerate(label_test):
pred = predictions[index][1].keys()[0]
if pred == label:
correct += 1.0
total += 1.0
return correct/total, predictions, predictionsTrain
def run():
brandList = ['Elmers', 'Chilis', 'BathAndBodyWorks', 'Dominos', 'Triclosan']
outputFile = open('results/alchemy.result', 'w')
for brand in brandList:
print brand
topics, alchemyOutput = modelUtility.readData2('HybridData/Original/' + brand + '.keyword', 'HybridData/Original/' + brand + '.alchemy')
accuracySum = 0.0
for i in range(5):
alchemy_train, alchemy_test, label_train, label_test = cross_validation.train_test_split(alchemyOutput, topics, test_size=0.2, random_state=0)
accuracy, testOutput, trainOutput = alchemyTrainInfer(alchemy_train, alchemy_test, label_train, label_test, True)
accuracySum += accuracy
print accuracySum / 5
outputFile.write(brand+'\t'+str(accuracySum/5)+'\n')
outputFile.close()
|
"""Tests for perfkitbenchmarker.providers.aws.aws_dynamodb."""
import json
import unittest
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
from perfkitbenchmarker import errors
from perfkitbenchmarker.providers.aws import aws_dynamodb
from perfkitbenchmarker.providers.aws import util
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
_DESCRIBE_TABLE_OUTPUT = """
{
"Table": {
"AttributeDefinitions": [
{
"AttributeName": "test",
"AttributeType": "S"
}
],
"TableName": "test",
"KeySchema": [
{
"AttributeName": "test",
"KeyType": "HASH"
}
],
"TableStatus": "ACTIVE",
"CreationDateTime": 1611605356.518,
"ProvisionedThroughput": {
"NumberOfDecreasesToday": 0,
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 0
},
"TableSizeBytes": 0,
"ItemCount": 0,
"TableArn": "arn:aws:dynamodb:us-east-2:835761027970:table/test",
"TableId": "ecf0a60a-f18d-4666-affc-525ca6e1d207"
}
}
"""
@flagsaver.flagsaver
def GetTestDynamoDBInstance(table_name='test_table'):
FLAGS.zone = ['us-east-1a']
return aws_dynamodb.AwsDynamoDBInstance(table_name)
class AwsDynamodbTest(pkb_common_test_case.PkbCommonTestCase):
def assertArgumentInCommand(self, mock_cmd, arg):
"""Given an AWS command, checks that the argument is present."""
command = ' '.join(mock_cmd.call_args[0][0])
self.assertIn(arg, command)
@flagsaver.flagsaver
def testInitTableName(self):
test_instance = GetTestDynamoDBInstance('dynamo_test_table')
self.assertEqual(test_instance.table_name, 'dynamo_test_table')
@flagsaver.flagsaver
def testInitLocation(self):
FLAGS.zone = ['us-east-1a']
test_instance = aws_dynamodb.AwsDynamoDBInstance('test_table')
self.assertEqual(test_instance.zone, 'us-east-1a')
self.assertEqual(test_instance.region, 'us-east-1')
@flagsaver.flagsaver
def testInitKeysAndAttributes(self):
FLAGS.aws_dynamodb_primarykey = 'test_primary_key'
FLAGS.aws_dynamodb_sortkey = 'test_sort_key'
FLAGS.aws_dynamodb_attributetype = 'test_attribute_type'
test_instance = GetTestDynamoDBInstance()
self.assertEqual(test_instance.primary_key,
'{"AttributeName": "test_primary_key","KeyType": "HASH"}')
self.assertEqual(test_instance.sort_key,
'{"AttributeName": "test_sort_key","KeyType": "RANGE"}')
self.assertEqual(
test_instance.part_attributes,
'{"AttributeName": "test_primary_key","AttributeType": "test_attribute_type"}'
)
self.assertEqual(
test_instance.sort_attributes,
'{"AttributeName": "test_sort_key","AttributeType": "test_attribute_type"}'
)
@flagsaver.flagsaver
def testInitThroughput(self):
FLAGS.aws_dynamodb_read_capacity = 1
FLAGS.aws_dynamodb_write_capacity = 2
test_instance = GetTestDynamoDBInstance()
self.assertEqual(test_instance.throughput,
'ReadCapacityUnits=1,WriteCapacityUnits=2')
@flagsaver.flagsaver
def testGetResourceMetadata(self):
FLAGS.zone = ['us-east-1a']
FLAGS.aws_dynamodb_primarykey = 'test_primary_key'
FLAGS.aws_dynamodb_use_sort = 'test_use_sort'
FLAGS.aws_dynamodb_sortkey = 'test_sortkey'
FLAGS.aws_dynamodb_attributetype = 'test_attribute_type'
FLAGS.aws_dynamodb_read_capacity = 1
FLAGS.aws_dynamodb_write_capacity = 2
FLAGS.aws_dynamodb_lsi_count = 3
FLAGS.aws_dynamodb_gsi_count = 4
FLAGS.aws_dynamodb_ycsb_consistentReads = 5
FLAGS.aws_dynamodb_connectMax = 6
test_instance = aws_dynamodb.AwsDynamoDBInstance('test_table')
actual_metadata = test_instance.GetResourceMetadata()
expected_metadata = {
'aws_dynamodb_primarykey': 'test_primary_key',
'aws_dynamodb_use_sort': 'test_use_sort',
'aws_dynamodb_sortkey': 'test_sortkey',
'aws_dynamodb_attributetype': 'test_attribute_type',
'aws_dynamodb_read_capacity': 1,
'aws_dynamodb_write_capacity': 2,
'aws_dynamodb_lsi_count': 3,
'aws_dynamodb_gsi_count': 4,
'aws_dynamodb_consistentReads': 5,
'aws_dynamodb_connectMax': 6,
}
self.assertEqual(actual_metadata, expected_metadata)
@parameterized.named_parameters({
'testcase_name': 'ValidOutput',
'output': json.loads(_DESCRIBE_TABLE_OUTPUT)['Table'],
'expected': True
}, {
'testcase_name': 'EmptyOutput',
'output': {},
'expected': False
})
def testExists(self, output, expected):
test_instance = GetTestDynamoDBInstance()
self.enter_context(
mock.patch.object(
test_instance,
'_DescribeTable',
return_value=output))
actual = test_instance._Exists()
self.assertEqual(actual, expected)
def testSetThroughput(self):
test_instance = GetTestDynamoDBInstance(table_name='throughput_table')
cmd = self.enter_context(
mock.patch.object(
util,
'IssueRetryableCommand'))
self.enter_context(mock.patch.object(test_instance, '_IsReady'))
test_instance.SetThroughput(5, 5)
self.assertArgumentInCommand(cmd, '--table-name throughput_table')
self.assertArgumentInCommand(cmd, '--region us-east-1')
self.assertArgumentInCommand(
cmd,
'--provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5')
def testGetThroughput(self):
test_instance = GetTestDynamoDBInstance()
output = json.loads(_DESCRIBE_TABLE_OUTPUT)['Table']
self.enter_context(
mock.patch.object(
test_instance,
'_DescribeTable',
return_value=output))
actual_rcu, actual_wcu = test_instance._GetThroughput()
self.assertEqual(actual_rcu, 5)
self.assertEqual(actual_wcu, 0)
def testTagResourceFailsWithNonExistentResource(self):
test_instance = GetTestDynamoDBInstance()
# Mark instance as non-existing.
self.enter_context(
mock.patch.object(test_instance, '_Exists', return_value=False))
with self.assertRaises(errors.Resource.CreationError):
test_instance._GetTagResourceCommand(['test', 'tag'])
def testUpdateWithDefaultTags(self):
test_instance = GetTestDynamoDBInstance()
test_instance.resource_arn = 'test_arn'
cmd = self.enter_context(mock.patch.object(util, 'IssueRetryableCommand'))
# Mark instance as existing.
self.enter_context(
mock.patch.object(test_instance, '_Exists', return_value=True))
test_instance.UpdateWithDefaultTags()
self.assertArgumentInCommand(cmd, '--region us-east-1')
self.assertArgumentInCommand(cmd, '--resource-arn test_arn')
def testUpdateTimeout(self):
test_instance = GetTestDynamoDBInstance()
test_instance.resource_arn = 'test_arn'
# Mock the aws util tags function.
self.enter_context(
mock.patch.object(
util,
'MakeDefaultTags',
autospec=True,
return_value={'timeout_utc': 60}))
# Mock the actual call to the CLI
cmd = self.enter_context(mock.patch.object(util, 'IssueRetryableCommand'))
# Mark instance as existing.
self.enter_context(
mock.patch.object(test_instance, '_Exists', return_value=True))
test_instance.UpdateTimeout(timeout_minutes=60)
self.assertArgumentInCommand(cmd, '--tags Key=timeout_utc,Value=60')
@parameterized.named_parameters(
{
'testcase_name': 'OnlyRcu',
'rcu': 5,
'wcu': 500,
}, {
'testcase_name': 'OnlyWcu',
'rcu': 500,
'wcu': 5,
}, {
'testcase_name': 'Both',
'rcu': 500,
'wcu': 500,
})
def testFreezeLowersThroughputToFreeTier(self, rcu, wcu):
test_instance = GetTestDynamoDBInstance()
self.enter_context(
mock.patch.object(
test_instance, '_GetThroughput', return_value=(rcu, wcu)))
mock_set_throughput = self.enter_context(
mock.patch.object(test_instance, 'SetThroughput', autospec=True))
test_instance._Freeze()
mock_set_throughput.assert_called_once_with(
rcu=aws_dynamodb._FREE_TIER_RCU, wcu=aws_dynamodb._FREE_TIER_WCU)
def testFreezeDoesNotLowerThroughputIfAlreadyAtFreeTier(self):
test_instance = GetTestDynamoDBInstance()
self.enter_context(
mock.patch.object(test_instance, '_GetThroughput', return_value=(5, 5)))
mock_set_throughput = self.enter_context(
mock.patch.object(test_instance, 'SetThroughput', autospec=True))
test_instance._Freeze()
mock_set_throughput.assert_not_called()
def testRestoreSetsThroughputBackToOriginalLevels(self):
test_instance = GetTestDynamoDBInstance()
test_instance.rcu = 5000
test_instance.wcu = 1000
mock_set_throughput = self.enter_context(
mock.patch.object(test_instance, 'SetThroughput', autospec=True))
test_instance._Restore()
mock_set_throughput.assert_called_once_with(
rcu=5000, wcu=1000)
if __name__ == '__main__':
unittest.main()
|
# coding: utf-8
from __future__ import absolute_import
# import models into model package
from huaweicloudsdkcloudpipeline.v2.model.batch_show_pipelines_status_request import BatchShowPipelinesStatusRequest
from huaweicloudsdkcloudpipeline.v2.model.batch_show_pipelines_status_response import BatchShowPipelinesStatusResponse
from huaweicloudsdkcloudpipeline.v2.model.constraint import Constraint
from huaweicloudsdkcloudpipeline.v2.model.create_pipeline_by_template_request import CreatePipelineByTemplateRequest
from huaweicloudsdkcloudpipeline.v2.model.create_pipeline_by_template_response import CreatePipelineByTemplateResponse
from huaweicloudsdkcloudpipeline.v2.model.extended_props import ExtendedProps
from huaweicloudsdkcloudpipeline.v2.model.flow_item import FlowItem
from huaweicloudsdkcloudpipeline.v2.model.list_templates_request import ListTemplatesRequest
from huaweicloudsdkcloudpipeline.v2.model.list_templates_response import ListTemplatesResponse
from huaweicloudsdkcloudpipeline.v2.model.param_type_limits import ParamTypeLimits
from huaweicloudsdkcloudpipeline.v2.model.pipeline_param import PipelineParam
from huaweicloudsdkcloudpipeline.v2.model.pipeline_parameter import PipelineParameter
from huaweicloudsdkcloudpipeline.v2.model.pipeline_state_status import PipelineStateStatus
from huaweicloudsdkcloudpipeline.v2.model.register_agent_request import RegisterAgentRequest
from huaweicloudsdkcloudpipeline.v2.model.register_agent_response import RegisterAgentResponse
from huaweicloudsdkcloudpipeline.v2.model.remove_pipeline_request import RemovePipelineRequest
from huaweicloudsdkcloudpipeline.v2.model.remove_pipeline_response import RemovePipelineResponse
from huaweicloudsdkcloudpipeline.v2.model.show_agent_status_request import ShowAgentStatusRequest
from huaweicloudsdkcloudpipeline.v2.model.show_agent_status_response import ShowAgentStatusResponse
from huaweicloudsdkcloudpipeline.v2.model.show_instance_status_request import ShowInstanceStatusRequest
from huaweicloudsdkcloudpipeline.v2.model.show_instance_status_response import ShowInstanceStatusResponse
from huaweicloudsdkcloudpipeline.v2.model.show_pipleine_status_request import ShowPipleineStatusRequest
from huaweicloudsdkcloudpipeline.v2.model.show_pipleine_status_response import ShowPipleineStatusResponse
from huaweicloudsdkcloudpipeline.v2.model.show_template_detail_request import ShowTemplateDetailRequest
from huaweicloudsdkcloudpipeline.v2.model.show_template_detail_response import ShowTemplateDetailResponse
from huaweicloudsdkcloudpipeline.v2.model.slave_register import SlaveRegister
from huaweicloudsdkcloudpipeline.v2.model.source import Source
from huaweicloudsdkcloudpipeline.v2.model.stages import Stages
from huaweicloudsdkcloudpipeline.v2.model.start_new_pipeline_request import StartNewPipelineRequest
from huaweicloudsdkcloudpipeline.v2.model.start_new_pipeline_response import StartNewPipelineResponse
from huaweicloudsdkcloudpipeline.v2.model.start_pipeline_build_params import StartPipelineBuildParams
from huaweicloudsdkcloudpipeline.v2.model.start_pipeline_parameters import StartPipelineParameters
from huaweicloudsdkcloudpipeline.v2.model.start_pipeline_request import StartPipelineRequest
from huaweicloudsdkcloudpipeline.v2.model.start_pipeline_response import StartPipelineResponse
from huaweicloudsdkcloudpipeline.v2.model.state_item import StateItem
from huaweicloudsdkcloudpipeline.v2.model.stop_pipeline_request import StopPipelineRequest
from huaweicloudsdkcloudpipeline.v2.model.stop_pipeline_response import StopPipelineResponse
from huaweicloudsdkcloudpipeline.v2.model.template_cddl import TemplateCddl
from huaweicloudsdkcloudpipeline.v2.model.template_param import TemplateParam
from huaweicloudsdkcloudpipeline.v2.model.template_state import TemplateState
from huaweicloudsdkcloudpipeline.v2.model.template_view import TemplateView
from huaweicloudsdkcloudpipeline.v2.model.workflow import Workflow
|
import pytest
import os
import sys
import json
from click.testing import CliRunner
from ...cli.main import cli
from ...core.project import Project
remotetest = pytest.mark.skipif('TEST_DSBFILE' not in os.environ,
reason="Environment variable 'TEST_DSBFILE' is required")
def get_test_project():
dsbfile = os.environ['TEST_DSBFILE']
return Project.from_file(dsbfile)
def invoke(*args):
dsbfile = os.environ['TEST_DSBFILE']
args = list(args)
args.extend(['--file', dsbfile])
runner = CliRunner()
return runner.invoke(cli, args, catch_exceptions=False, input=sys.stdin)
def check_all_true(salt_output, none_is_ok=False):
minions = []
for minion_output in salt_output.split('\n'):
minions.append(json.loads(minion_output))
for minion in minions:
minion_values = minion.values()[0]
for id_, value in minion_values.items():
if none_is_ok:
assert value['result'] is not False, (id_, value)
else:
assert value['result'] is True, (id_, value)
def check_all_cmd_retcode0(salt_output):
minions = []
for minion_output in salt_output.split('\n'):
minions.append(json.loads(minion_output))
for minion in minions:
minion_output = minion.values()[0]
assert minion_output['retcode'] == 0, (minion_output)
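# Note (sketch): check_all_true() and check_all_cmd_retcode0() expect salt output
# as newline-separated JSON objects, one per minion, each keyed by the minion id,
# e.g. {"minion1": {"some_state_id": {"result": true}}}.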
|
import unittest
from queue import Queue
from modi.module.input_module.ir import Ir
class TestIr(unittest.TestCase):
"""Tests for 'Ir' package."""
def setUp(self):
"""Set up test fixtures, if any."""
self.send_q = Queue()
mock_args = (-1, -1, self.send_q)
self.ir = Ir(*mock_args)
def tearDown(self):
"""Tear down test fixtures, if any."""
del self.ir
def test_get_proximity(self):
"""Test get_proximity method."""
_ = self.ir.proximity
self.assertEqual(
self.send_q.get(),
Ir.request_property(-1, Ir.PropertyType.PROXIMITY))
if __name__ == "__main__":
unittest.main()
|
from datetime import timedelta
import logging
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth import get_user_model
from django.db import transaction
from django.utils import timezone
from django.utils.module_loading import import_string
from keycloak.exceptions import KeycloakClientError
from django_keycloak.services.exceptions import TokensExpired
from django_keycloak.remote_user import KeycloakRemoteUser
import django_keycloak.services.realm
logger = logging.getLogger(__name__)
def get_openid_connect_profile_model():
"""
Return the OpenIdConnectProfile model that is active in this project.
"""
try:
return django_apps.get_model(settings.KEYCLOAK_OIDC_PROFILE_MODEL,
require_ready=False)
except ValueError:
raise ImproperlyConfigured(
"KEYCLOAK_OIDC_PROFILE_MODEL must be of the form "
"'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"KEYCLOAK_OIDC_PROFILE_MODEL refers to model '%s' that has not "
"been installed" % settings.KEYCLOAK_OIDC_PROFILE_MODEL)
def get_remote_user_model():
"""
Return the User model that is active in this project.
"""
if not hasattr(settings, 'KEYCLOAK_REMOTE_USER_MODEL'):
# By default return the standard KeycloakRemoteUser model
return KeycloakRemoteUser
try:
return import_string(settings.KEYCLOAK_REMOTE_USER_MODEL)
except ImportError:
raise ImproperlyConfigured(
"KEYCLOAK_REMOTE_USER_MODEL refers to non-existing class"
)
def get_or_create_from_id_token(client, id_token):
"""
Get or create OpenID Connect profile from given id_token.
:param django_keycloak.models.Client client:
:param str id_token:
:rtype: django_keycloak.models.OpenIdConnectProfile
"""
issuer = django_keycloak.services.realm.get_issuer(client.realm)
id_token_object = client.openid_api_client.decode_token(
token=id_token,
key=client.realm.certs,
algorithms=client.openid_api_client.well_known[
'id_token_signing_alg_values_supported'],
issuer=issuer
)
return update_or_create_user_and_oidc_profile(
client=client, id_token_object=id_token_object)
def update_or_create_user_and_oidc_profile(client, id_token_object):
"""
    Update or create the Django user and OpenID Connect profile described by the
    decoded ID token.
    :param django_keycloak.models.Client client:
    :param dict id_token_object: decoded ID token claims
    :rtype: django_keycloak.models.OpenIdConnectProfile
"""
OpenIdConnectProfileModel = get_openid_connect_profile_model()
if OpenIdConnectProfileModel.is_remote:
oidc_profile, _ = OpenIdConnectProfileModel.objects.\
update_or_create(
sub=id_token_object['sub'],
defaults={
'realm': client.realm
}
)
UserModel = get_remote_user_model()
oidc_profile.user = UserModel(id_token_object)
return oidc_profile
with transaction.atomic():
UserModel = get_user_model()
email_field_name = UserModel.get_email_field_name()
user, _ = UserModel.objects.update_or_create(
username=id_token_object['sub'],
defaults={
email_field_name: id_token_object.get('email', ''),
'first_name': id_token_object.get('given_name', ''),
'last_name': id_token_object.get('family_name', '')
}
)
oidc_profile, _ = OpenIdConnectProfileModel.objects.update_or_create(
sub=id_token_object['sub'],
defaults={
'realm': client.realm,
'user': user
}
)
return oidc_profile
def get_remote_user_from_profile(oidc_profile):
"""
    Fetch userinfo for the profile's access token and build a remote user object
    from it.
    :param django_keycloak.models.OpenIdConnectProfile oidc_profile:
    :return: remote user instance, or None when the userinfo request fails
"""
try:
userinfo = oidc_profile.realm.client.openid_api_client.userinfo(
token=oidc_profile.access_token
)
except KeycloakClientError:
return None
# Get the user from the KEYCLOAK_REMOTE_USER_MODEL in the settings
UserModel = get_remote_user_model()
    # Create the object of type UserModel from the constructor of its class
# as the included details can vary per model
user = UserModel(userinfo)
return user
def update_or_create_from_code(code, client, redirect_uri):
"""
    Update or create a user based on an authentication code.
Response as specified in:
https://tools.ietf.org/html/rfc6749#section-4.1.4
:param django_keycloak.models.Client client:
:param str code: authentication code
    :param str redirect_uri:
:rtype: django_keycloak.models.OpenIdConnectProfile
"""
# Define "initiate_time" before getting the access token to calculate
# before which time it expires.
initiate_time = timezone.now()
token_response = client.openid_api_client.authorization_code(
code=code, redirect_uri=redirect_uri)
return _update_or_create(client=client, token_response=token_response,
initiate_time=initiate_time)
def update_or_create_from_password_credentials(username, password, client):
"""
    Update or create a user based on username and password.
Response as specified in:
https://tools.ietf.org/html/rfc6749#section-4.3.3
:param str username: the username to authenticate with
:param str password: the password to authenticate with
:param django_keycloak.models.Client client:
:rtype: django_keycloak.models.OpenIdConnectProfile
"""
# Define "initiate_time" before getting the access token to calculate
# before which time it expires.
initiate_time = timezone.now()
token_response = client.openid_api_client.password_credentials(
username=username, password=password)
return _update_or_create(client=client, token_response=token_response,
initiate_time=initiate_time)
def _update_or_create(client, token_response, initiate_time):
"""
    Update or create a user based on a token response.
`token_response` contains the items returned by the OpenIDConnect Token API
end-point:
- id_token
- access_token
- expires_in
- refresh_token
- refresh_expires_in
:param django_keycloak.models.Client client:
:param dict token_response:
:param datetime.datetime initiate_time:
:rtype: django_keycloak.models.OpenIdConnectProfile
"""
issuer = django_keycloak.services.realm.get_issuer(client.realm)
token_response_key = 'id_token' if 'id_token' in token_response \
else 'access_token'
token_object = client.openid_api_client.decode_token(
token=token_response[token_response_key],
key=client.realm.certs,
algorithms=client.openid_api_client.well_known[
'id_token_signing_alg_values_supported'],
issuer=issuer,
        access_token=token_response['access_token']  # TODO: review the implications of this change
)
oidc_profile = update_or_create_user_and_oidc_profile(
client=client,
id_token_object=token_object)
return update_tokens(token_model=oidc_profile,
token_response=token_response,
initiate_time=initiate_time)
def update_tokens(token_model, token_response, initiate_time):
"""
Update tokens on the OpenID Connect profile
:param django_keycloak.models.TokenModelAbstract token_model:
:param dict token_response: response from OIDC token API end-point
:param datetime.datetime initiate_time: timestamp before the token request
:rtype: django_keycloak.models.OpenIdConnectProfile
"""
expires_before = initiate_time + timedelta(
seconds=token_response['expires_in'])
refresh_expires_before = initiate_time + timedelta(
seconds=token_response['refresh_expires_in'])
token_model.access_token = token_response['access_token']
token_model.expires_before = expires_before
token_model.refresh_token = token_response['refresh_token']
token_model.refresh_expires_before = refresh_expires_before
token_model.save(update_fields=['access_token',
'expires_before',
'refresh_token',
'refresh_expires_before'])
return token_model
def get_active_access_token(oidc_profile):
"""
    Return the access token, refreshing it first when it has expired.
    :param django_keycloak.models.KeycloakOpenIDProfile oidc_profile:
:rtype: string
:raise: django_keycloak.services.exceptions.TokensExpired
"""
initiate_time = timezone.now()
if oidc_profile.refresh_expires_before is None \
or initiate_time > oidc_profile.refresh_expires_before:
raise TokensExpired()
if initiate_time > oidc_profile.expires_before:
# Refresh token
token_response = oidc_profile.realm.client.openid_api_client\
.refresh_token(refresh_token=oidc_profile.refresh_token)
oidc_profile = update_tokens(token_model=oidc_profile,
token_response=token_response,
initiate_time=initiate_time)
return oidc_profile.access_token
def get_entitlement(oidc_profile):
"""
Get entitlement.
http://www.keycloak.org/docs/latest/authorization_services/index.html#_service_entitlement_api
:param django_keycloak.models.KeycloakOpenIDProfile oidc_profile:
:rtype: dict
:return: Decoded RPT
"""
access_token = get_active_access_token(oidc_profile=oidc_profile)
rpt = oidc_profile.realm.client.authz_api_client.entitlement(
token=access_token)
rpt_decoded = oidc_profile.realm.client.openid_api_client.decode_token(
token=rpt['rpt'],
key=oidc_profile.realm.certs,
options={
'verify_signature': True,
'exp': True,
'iat': True,
'aud': True
})
return rpt_decoded
def get_decoded_jwt(oidc_profile):
"""
:param django_keycloak.models.KeycloakOpenIDProfile oidc_profile:
:rtype dict
"""
client = oidc_profile.realm.client
active_access_token = get_active_access_token(oidc_profile=oidc_profile)
return client.openid_api_client.decode_token(
token=active_access_token,
key=client.realm.certs,
algorithms=client.openid_api_client.well_known[
'id_token_signing_alg_values_supported']
)
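# Example usage (sketch): exchanging an authorization code and later reusing the
# stored tokens; `client`, `code` and `redirect_uri` are assumed to come from the
# calling view.
# profile = update_or_create_from_code(code=code, client=client,
#                                      redirect_uri=redirect_uri)
# access_token = get_active_access_token(oidc_profile=profile)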
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RestartNodeDescription(Model):
"""Describes the parameters to restart a Service Fabric node.
All required parameters must be populated in order to send to Azure.
:param node_instance_id: Required. The instance ID of the target node. If
instance ID is specified the node is restarted only if it matches with the
current instance of the node. A default value of "0" would match any
instance ID. The instance ID can be obtained using get node query. Default
value: "0" .
:type node_instance_id: str
:param create_fabric_dump: Specify True to create a dump of the fabric
node process. This is case sensitive. Possible values include: 'False',
'True'. Default value: "False" .
:type create_fabric_dump: str or
~azure.servicefabric.models.CreateFabricDump
"""
_validation = {
'node_instance_id': {'required': True},
}
_attribute_map = {
'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'},
'create_fabric_dump': {'key': 'CreateFabricDump', 'type': 'str'},
}
def __init__(self, *, node_instance_id: str="0", create_fabric_dump="False", **kwargs) -> None:
super(RestartNodeDescription, self).__init__(**kwargs)
self.node_instance_id = node_instance_id
self.create_fabric_dump = create_fabric_dump
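# Example usage (sketch): request a restart of whichever instance currently owns
# the node (instance ID "0" matches any) and collect a fabric dump; the values
# mirror the defaults documented above.
# description = RestartNodeDescription(node_instance_id="0", create_fabric_dump="True")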
|
# orm/strategies.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from __future__ import absolute_import
import collections
import itertools
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import loading
from . import path_registry
from . import properties
from . import query
from . import relationships
from . import unitofwork
from . import util as orm_util
from .base import _DEFER_FOR_STATE
from .base import _RAISE_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .context import _column_descriptions
from .context import ORMCompileState
from .context import ORMSelectCompileState
from .context import QueryContext
from .interfaces import LoaderStrategy
from .interfaces import StrategizedProperty
from .session import _state_session
from .state import InstanceState
from .util import _none_set
from .util import aliased
from .. import event
from .. import exc as sa_exc
from .. import inspect
from .. import log
from .. import sql
from .. import util
from ..sql import util as sql_util
from ..sql import visitors
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from ..sql.selectable import Select
def _register_attribute(
prop,
mapper,
useobject,
compare_function=None,
typecallable=None,
callable_=None,
proxy_property=None,
active_history=False,
impl_class=None,
**kw
):
listen_hooks = []
uselist = useobject and prop.uselist
if useobject and prop.single_parent:
listen_hooks.append(single_parent_validator)
if prop.key in prop.parent.validators:
fn, opts = prop.parent.validators[prop.key]
listen_hooks.append(
lambda desc, prop: orm_util._validator_events(
desc, prop.key, fn, **opts
)
)
if useobject:
listen_hooks.append(unitofwork.track_cascade_events)
# need to assemble backref listeners
# after the singleparentvalidator, mapper validator
if useobject:
backref = prop.back_populates
if backref and prop._effective_sync_backref:
listen_hooks.append(
lambda desc, prop: attributes.backref_listeners(
desc, backref, uselist
)
)
# a single MapperProperty is shared down a class inheritance
# hierarchy, so we set up attribute instrumentation and backref event
# for each mapper down the hierarchy.
# typically, "mapper" is the same as prop.parent, due to the way
# the configure_mappers() process runs, however this is not strongly
# enforced, and in the case of a second configure_mappers() run the
# mapper here might not be prop.parent; also, a subclass mapper may
# be called here before a superclass mapper. That is, can't depend
# on mappers not already being set up so we have to check each one.
for m in mapper.self_and_descendants:
if prop is m._props.get(
prop.key
) and not m.class_manager._attr_has_impl(prop.key):
desc = attributes.register_attribute_impl(
m.class_,
prop.key,
parent_token=prop,
uselist=uselist,
compare_function=compare_function,
useobject=useobject,
trackparent=useobject
and (
prop.single_parent
or prop.direction is interfaces.ONETOMANY
),
typecallable=typecallable,
callable_=callable_,
active_history=active_history,
impl_class=impl_class,
send_modified_events=not useobject or not prop.viewonly,
doc=prop.doc,
**kw
)
for hook in listen_hooks:
hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
"""Represent a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
__slots__ = ("columns",)
def __init__(self, parent, strategy_key):
super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
**kwargs
):
for c in self.columns:
if adapter:
c = adapter.columns[c]
compile_state._append_dedupe_col_collection(c, column_collection)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
pass
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
__slots__ = "columns", "is_composite"
def __init__(self, parent, strategy_key):
super(ColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, "composite_class")
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
check_for_adapt=False,
**kwargs
):
for c in self.columns:
if adapter:
if check_for_adapt:
c = adapter.adapt_check_present(c)
if c is None:
return
else:
c = adapter.columns[c]
compile_state._append_dedupe_col_collection(c, column_collection)
fetch = self.columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns ? check for foreign key as well?
active_history = (
self.parent_property.active_history
or self.columns[0].primary_key
or (
mapper.version_id_col is not None
and mapper._columntoproperty.get(mapper.version_id_col, None)
is self.parent_property
)
)
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=coltype.compare_values,
active_history=active_history,
)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
@log.class_logger
@properties.ColumnProperty.strategy_for(query_expression=True)
class ExpressionColumnLoader(ColumnLoader):
def __init__(self, parent, strategy_key):
super(ExpressionColumnLoader, self).__init__(parent, strategy_key)
# compare to the "default" expression that is mapped in
# the column. If it's sql.null, we don't need to render
# unless an expr is passed in the options.
null = sql.null().label(None)
self._have_default_expression = any(
not c.compare(null) for c in self.parent_property.columns
)
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kwargs
):
columns = None
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
elif self._have_default_expression:
columns = self.parent_property.columns
if columns is None:
return
for c in columns:
if adapter:
c = adapter.columns[c]
compile_state._append_dedupe_col_collection(c, column_collection)
fetch = columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
# look through list of columns represented here
# to see which, if any, is present in the row.
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
for col in columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
accepts_scalar_loader=False,
)
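# Illustrative sketch (hypothetical model names, assuming the usual
# sqlalchemy / sqlalchemy.orm imports): ExpressionColumnLoader backs
# query_expression() attributes, which render only when a default expression
# is configured or an ad-hoc expression is supplied via with_expression().
#
#     class Author(Base):
#         __tablename__ = "author"
#         id = Column(Integer, primary_key=True)
#         book_count = query_expression()
#
#     authors = session.query(Author).options(
#         with_expression(
#             Author.book_count,
#             select(func.count(Book.id))
#             .where(Book.author_id == Author.id)
#             .scalar_subquery(),
#         )
#     )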
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
@properties.ColumnProperty.strategy_for(
deferred=True, instrument=True, raiseload=True
)
@properties.ColumnProperty.strategy_for(do_nothing=True)
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
__slots__ = "columns", "group", "raiseload"
def __init__(self, parent, strategy_key):
super(DeferredColumnLoader, self).__init__(parent, strategy_key)
if hasattr(self.parent_property, "composite_class"):
raise NotImplementedError(
"Deferred loading for composite " "types not implemented yet"
)
self.raiseload = self.strategy_opts.get("raiseload", False)
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
# for a DeferredColumnLoader, this method is only used during a
# "row processor only" query; see test_deferred.py ->
# tests with "rowproc_only" in their name. As of the 1.0 series,
# loading._instance_processor doesn't use a "row processing" function
# to populate columns, instead it uses data in the "populators"
# dictionary. Normally, the DeferredColumnLoader.setup_query()
# sets up that data in the "memoized_populators" dictionary
# and "create_row_processor()" here is never invoked.
if (
context.refresh_state
and context.query._compile_options._only_load_props
and self.key in context.query._compile_options._only_load_props
):
self.parent_property._get_strategy(
(("deferred", False), ("instrument", True))
).create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
elif not self.is_class_level:
if self.raiseload:
set_deferred_for_local_state = (
self.parent_property._raise_column_loader
)
else:
set_deferred_for_local_state = (
self.parent_property._deferred_column_loader
)
populators["new"].append((self.key, set_deferred_for_local_state))
else:
populators["expire"].append((self.key, False))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
callable_=self._load_for_state,
load_on_unexpire=False,
)
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
only_load_props=None,
**kw
):
if (
(
compile_state.compile_options._render_for_subquery
and self.parent_property._renders_in_subqueries
)
or (
loadopt
and "undefer_pks" in loadopt.local_opts
and set(self.columns).intersection(
self.parent._should_undefer_in_wildcard
)
)
or (
loadopt
and self.group
and loadopt.local_opts.get(
"undefer_group_%s" % self.group, False
)
)
or (only_load_props and self.key in only_load_props)
):
self.parent_property._get_strategy(
(("deferred", False), ("instrument", True))
).setup_query(
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kw
)
elif self.is_class_level:
memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
elif not self.raiseload:
memoized_populators[self.parent_property] = _DEFER_FOR_STATE
else:
memoized_populators[self.parent_property] = _RAISE_FOR_STATE
def _load_for_state(self, state, passive):
if not state.key:
return attributes.ATTR_EMPTY
if not passive & attributes.SQL_OK:
return attributes.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key
for p in localparent.iterate_properties
if isinstance(p, StrategizedProperty)
and isinstance(p.strategy, DeferredColumnLoader)
and p.group == self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = _state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
if self.raiseload:
self._invoke_raise_load(state, passive, "raise")
if (
loading.load_on_ident(
session,
sql.select(localparent).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
state.key,
only_load_props=group,
refresh_state=state,
)
is None
):
raise orm_exc.ObjectDeletedError(state)
return attributes.ATTR_WAS_SET
def _invoke_raise_load(self, state, passive, lazy):
raise sa_exc.InvalidRequestError(
"'%s' is not available due to raiseload=True" % (self,)
)
class LoadDeferredColumns(object):
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, key, raiseload=False):
self.key = key
self.raiseload = raiseload
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
localparent = state.manager.mapper
prop = localparent._props[key]
if self.raiseload:
strategy_key = (
("deferred", True),
("instrument", True),
("raiseload", True),
)
else:
strategy_key = (("deferred", True), ("instrument", True))
strategy = prop._get_strategy(strategy_key)
return strategy._load_for_state(state, passive)
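# Illustrative sketch (hypothetical model names): DeferredColumnLoader and
# LoadDeferredColumns implement deferred() columns, which are omitted from
# the initial SELECT and loaded on first access, optionally as a group or
# undeferred per query.
#
#     class Document(Base):
#         __tablename__ = "document"
#         id = Column(Integer, primary_key=True)
#         title = Column(String)
#         body = deferred(Column(Text), group="content")
#
#     doc = session.query(Document).first()    # SELECT omits document.body
#     doc.body                                  # emits a second SELECT
#
#     session.query(Document).options(undefer(Document.body))
#     session.query(Document).options(undefer_group("content"))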
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
__slots__ = "mapper", "target", "uselist", "entity"
def __init__(self, parent, strategy_key):
super(AbstractRelationshipLoader, self).__init__(parent, strategy_key)
self.mapper = self.parent_property.mapper
self.entity = self.parent_property.entity
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
@log.class_logger
@relationships.RelationshipProperty.strategy_for(do_nothing=True)
class DoNothingLoader(LoaderStrategy):
"""Relationship loader that makes no change to the object's state.
Compared to NoLoader, this loader does not initialize the
collection/attribute to empty/none; the usual default LazyLoader will
take effect.
"""
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy="noload")
@relationships.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=None".
"""
__slots__ = ()
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=True,
typecallable=self.parent_property.collection_class,
)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
def invoke_no_load(state, dict_, row):
if self.uselist:
attributes.init_state_collection(state, dict_, self.key)
else:
dict_[self.key] = None
populators["new"].append((self.key, invoke_no_load))
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy=True)
@relationships.RelationshipProperty.strategy_for(lazy="select")
@relationships.RelationshipProperty.strategy_for(lazy="raise")
@relationships.RelationshipProperty.strategy_for(lazy="raise_on_sql")
@relationships.RelationshipProperty.strategy_for(lazy="baked_select")
class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
__slots__ = (
"_lazywhere",
"_rev_lazywhere",
"_lazyload_reverse_option",
"_order_by",
"use_get",
"is_aliased_class",
"_bind_to_col",
"_equated_columns",
"_rev_bind_to_col",
"_rev_equated_columns",
"_simple_lazy_clause",
"_raise_always",
"_raise_on_sql",
)
def __init__(self, parent, strategy_key):
super(LazyLoader, self).__init__(parent, strategy_key)
self._raise_always = self.strategy_opts["lazy"] == "raise"
self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql"
self.is_aliased_class = inspect(self.entity).is_aliased_class
join_condition = self.parent_property._join_condition
(
self._lazywhere,
self._bind_to_col,
self._equated_columns,
) = join_condition.create_lazy_clause()
(
self._rev_lazywhere,
self._rev_bind_to_col,
self._rev_equated_columns,
) = join_condition.create_lazy_clause(reverse_direction=True)
if self.parent_property.order_by:
self._order_by = [
sql_util._deep_annotate(elem, {"_orm_adapt": True})
for elem in util.to_list(self.parent_property.order_by)
]
else:
self._order_by = None
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
#
# TODO: the "not self.uselist" can be taken out entirely; a m2o
# load that populates for a list (very unusual, but is possible with
# the API) can still set for "None" and the attribute system will
# populate as an empty list.
self.use_get = (
not self.is_aliased_class
and not self.uselist
and self.entity._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
compare_keys=False,
equivalents=self.mapper._equivalent_columns,
)
)
if self.use_get:
for col in list(self._equated_columns):
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info(
"%s will use Session.get() to " "optimize instance loads", self
)
def init_class_attribute(self, mapper):
self.is_class_level = True
_legacy_inactive_history_style = (
self.parent_property._legacy_inactive_history_style
)
if self.parent_property.active_history:
active_history = True
_deferred_history = False
elif (
self.parent_property.direction is not interfaces.MANYTOONE
or not self.use_get
):
if _legacy_inactive_history_style:
active_history = True
_deferred_history = False
else:
active_history = False
_deferred_history = True
else:
active_history = _deferred_history = False
_register_attribute(
self.parent_property,
mapper,
useobject=True,
callable_=self._load_for_state,
typecallable=self.parent_property.collection_class,
active_history=active_history,
_deferred_history=_deferred_history,
)
def _memoized_attr__simple_lazy_clause(self):
lazywhere = sql_util._deep_annotate(
self._lazywhere, {"_orm_adapt": True}
)
criterion, bind_to_col = (lazywhere, self._bind_to_col)
params = []
def visit_bindparam(bindparam):
bindparam.unique = False
visitors.traverse(criterion, {}, {"bindparam": visit_bindparam})
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
params.append(
(
bindparam.key,
bind_to_col[bindparam._identifying_key],
None,
)
)
elif bindparam.callable is None:
params.append((bindparam.key, None, bindparam.value))
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
return criterion, params
def _generate_lazy_clause(self, state, passive):
criterion, param_keys = self._simple_lazy_clause
if state is None:
return sql_util.adapt_criterion_to_null(
criterion, [key for key, ident, value in param_keys]
)
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
if passive & attributes.INIT_OK:
passive ^= attributes.INIT_OK
params = {}
for key, ident, value in param_keys:
if ident is not None:
if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
value = mapper._get_committed_state_attr_by_column(
state, dict_, ident, passive
)
else:
value = mapper._get_state_attr_by_column(
state, dict_, ident, passive
)
params[key] = value
return criterion, params
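    # Illustrative note (hypothetical column names): for a relationship joined
    # on parent.id == child.parent_id, the criterion produced above resembles
    # "child.parent_id = :param", with the parameter value pulled from the
    # parent state's "id" attribute (or its committed value when
    # LOAD_AGAINST_COMMITTED is set); when state is None, the bound parameters
    # are instead adapted to NULL comparisons.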
def _invoke_raise_load(self, state, passive, lazy):
raise sa_exc.InvalidRequestError(
"'%s' is not available due to lazy='%s'" % (self, lazy)
)
def _load_for_state(self, state, passive, loadopt=None, extra_criteria=()):
if not state.key and (
(
not self.parent_property.load_on_pending
and not state._load_pending
)
or not state.session_id
):
return attributes.ATTR_EMPTY
pending = not state.key
primary_key_identity = None
use_get = self.use_get and (not loadopt or not loadopt._extra_criteria)
if (not passive & attributes.SQL_OK and not use_get) or (
not passive & attributes.NON_PERSISTENT_OK and pending
):
return attributes.PASSIVE_NO_RESULT
if (
# we were given lazy="raise"
self._raise_always
# the no_raise history-related flag was not passed
and not passive & attributes.NO_RAISE
and (
# if we are use_get and related_object_ok is disabled,
# which means we are at most looking in the identity map
# for history purposes or otherwise returning
# PASSIVE_NO_RESULT, don't raise. This is also a
# history-related flag
not use_get
or passive & attributes.RELATED_OBJECT_OK
)
):
self._invoke_raise_load(state, passive, "raise")
session = _state_session(state)
if not session:
if passive & attributes.NO_RAISE:
return attributes.PASSIVE_NO_RESULT
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if use_get:
primary_key_identity = self._get_ident_for_use_get(
session, state, passive
)
if attributes.PASSIVE_NO_RESULT in primary_key_identity:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in primary_key_identity:
return attributes.NEVER_SET
if _none_set.issuperset(primary_key_identity):
return None
if (
self.key in state.dict
and not passive & attributes.DEFERRED_HISTORY_LOAD
):
return attributes.ATTR_WAS_SET
# look for this identity in the identity map. Delegate to the
# Query class in use, as it may have special rules for how it
# does this, including how it decides what the correct
# identity_token would be for this identity.
instance = session._identity_lookup(
self.entity,
primary_key_identity,
passive=passive,
lazy_loaded_from=state,
)
if instance is not None:
if instance is attributes.PASSIVE_CLASS_MISMATCH:
return None
else:
return instance
elif (
not passive & attributes.SQL_OK
or not passive & attributes.RELATED_OBJECT_OK
):
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(
session,
state,
primary_key_identity,
passive,
loadopt,
extra_criteria,
)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if passive & attributes.LOAD_AGAINST_COMMITTED:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
return [
get_attr(state, dict_, self._equated_columns[pk], passive=passive)
for pk in self.mapper.primary_key
]
@util.preload_module("sqlalchemy.orm.strategy_options")
def _emit_lazyload(
self,
session,
state,
primary_key_identity,
passive,
loadopt,
extra_criteria,
):
strategy_options = util.preloaded.orm_strategy_options
clauseelement = self.entity.__clause_element__()
stmt = Select._create_raw_select(
_raw_columns=[clauseelement],
_propagate_attrs=clauseelement._propagate_attrs,
_label_style=LABEL_STYLE_TABLENAME_PLUS_COL,
_compile_options=ORMCompileState.default_compile_options,
)
load_options = QueryContext.default_load_options
load_options += {
"_invoke_all_eagers": False,
"_lazy_loaded_from": state,
}
if self.parent_property.secondary is not None:
stmt = stmt.select_from(
self.mapper, self.parent_property.secondary
)
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
stmt._execution_options = util.immutabledict({"autoflush": False})
use_get = self.use_get
if state.load_options or (loadopt and loadopt._extra_criteria):
effective_path = state.load_path[self.parent_property]
opts = tuple(state.load_options)
if loadopt and loadopt._extra_criteria:
use_get = False
opts += (
orm_util.LoaderCriteriaOption(self.entity, extra_criteria),
)
stmt._with_options = opts
else:
# this path is used if there are not already any options
# in the query, but an event may want to add them
effective_path = state.mapper._path_registry[self.parent_property]
stmt._compile_options += {"_current_path": effective_path}
if use_get:
if self._raise_on_sql and not passive & attributes.NO_RAISE:
self._invoke_raise_load(state, passive, "raise_on_sql")
return loading.load_on_pk_identity(
session, stmt, primary_key_identity, load_options=load_options
)
if self._order_by:
stmt._order_by_clauses = self._order_by
def _lazyload_reverse(compile_context):
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if (
rev.direction is interfaces.MANYTOONE
and rev._use_get
and not isinstance(rev.strategy, LazyLoader)
):
strategy_options.Load.for_existing_path(
compile_context.compile_options._current_path[
rev.parent
]
).lazyload(rev).process_compile_state(compile_context)
stmt._with_context_options += (
(_lazyload_reverse, self.parent_property),
)
lazy_clause, params = self._generate_lazy_clause(state, passive)
execution_options = {
"_sa_orm_load_options": load_options,
}
if (
self.key in state.dict
and not passive & attributes.DEFERRED_HISTORY_LOAD
):
return attributes.ATTR_WAS_SET
if pending:
if util.has_intersection(orm_util._none_set, params.values()):
return None
elif util.has_intersection(orm_util._never_set, params.values()):
return None
if self._raise_on_sql and not passive & attributes.NO_RAISE:
self._invoke_raise_load(state, passive, "raise_on_sql")
stmt._where_criteria = (lazy_clause,)
result = session.execute(
stmt, params, execution_options=execution_options
)
result = result.unique().scalars().all()
if self.uselist:
return result
else:
            rows = len(result)
            if rows:
                if rows > 1:
                    util.warn(
                        "Multiple rows returned with "
                        "uselist=False for lazily-loaded attribute '%s' "
                        % self.parent_property
                    )
                return result[0]
            else:
                return None
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
key = self.key
if not self.is_class_level or (loadopt and loadopt._extra_criteria):
# we are not the primary manager for this attribute
# on this class - set up a
# per-instance lazyloader, which will override the
# class-level behavior.
# this currently only happens when using a
# "lazyload" option on a "no load"
# attribute - "eager" attributes always have a
# class-level lazyloader installed.
set_lazy_callable = (
InstanceState._instance_level_callable_processor
)(
mapper.class_manager,
LoadLazyAttribute(
key,
self,
loadopt,
loadopt._generate_extra_criteria(context)
if loadopt._extra_criteria
else None,
),
key,
)
populators["new"].append((self.key, set_lazy_callable))
elif context.populate_existing or mapper.always_refresh:
def reset_for_lazy_callable(state, dict_, row):
# we are the primary manager for this attribute on
# this class - reset its
# per-instance attribute state, so that the class-level
# lazy loader is
# executed when next referenced on this instance.
# this is needed in
# populate_existing() types of scenarios to reset
# any existing state.
state._reset(dict_, key)
populators["new"].append((self.key, reset_for_lazy_callable))
class LoadLazyAttribute(object):
"""semi-serializable loader object used by LazyLoader
Historically, this object would be carried along with instances that
needed to run lazyloaders, so it had to be serializable to support
cached instances.
    This is no longer a general requirement; the case where this object
    is used is exactly the case where we can't really serialize easily,
    which is when extra criteria in the loader option is present.
    We can't reliably serialize that, as it refers to mapped entities and
    AliasedClass objects that are local to the current process and would
    need to be matched up on deserialize, e.g. via the
    sqlalchemy.ext.serializer approach.
"""
def __init__(self, key, initiating_strategy, loadopt, extra_criteria):
self.key = key
self.strategy_key = initiating_strategy.strategy_key
self.loadopt = loadopt
self.extra_criteria = extra_criteria
def __getstate__(self):
if self.extra_criteria is not None:
util.warn(
"Can't reliably serialize a lazyload() option that "
"contains additional criteria; please use eager loading "
"for this case"
)
return {
"key": self.key,
"strategy_key": self.strategy_key,
"loadopt": self.loadopt,
"extra_criteria": (),
}
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
instance_mapper = state.manager.mapper
prop = instance_mapper._props[key]
strategy = prop._strategies[self.strategy_key]
return strategy._load_for_state(
state,
passive,
loadopt=self.loadopt,
extra_criteria=self.extra_criteria,
)
class PostLoader(AbstractRelationshipLoader):
"""A relationship loader that emits a second SELECT statement."""
def _check_recursive_postload(self, context, path, join_depth=None):
effective_path = (
context.compile_state.current_path or orm_util.PathRegistry.root
) + path
if loading.PostLoad.path_exists(
context, effective_path, self.parent_property
):
return True
path_w_prop = path[self.parent_property]
effective_path_w_prop = effective_path[self.parent_property]
if not path_w_prop.contains(context.attributes, "loader"):
if join_depth:
if effective_path_w_prop.length / 2 > join_depth:
return True
elif effective_path_w_prop.contains_mapper(self.mapper):
return True
return False
def _immediateload_create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
return self.parent_property._get_strategy(
(("lazy", "immediate"),)
).create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
@relationships.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(PostLoader):
__slots__ = ()
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_, flags)
if self._check_recursive_postload(context, path):
            # this will not emit SQL and will only emit for a many-to-one
            # "use get" load.  The "_RELATED" part means it may return the
            # instance even if it's expired, since this is a
            # mutually-recursive load operation.
flags = attributes.PASSIVE_NO_FETCH_RELATED | attributes.NO_RAISE
else:
flags = attributes.PASSIVE_OFF | attributes.NO_RAISE
populators["delayed"].append((self.key, load_immediate))
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(PostLoader):
__slots__ = ("join_depth",)
def __init__(self, parent, strategy_key):
super(SubqueryLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def _get_leftmost(
self,
orig_query_entity_index,
subq_path,
current_compile_state,
is_root,
):
given_subq_path = subq_path
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if (
self.parent.isa(subq_mapper)
and self.parent_property is subq_path[1]
):
leftmost_mapper, leftmost_prop = self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = subq_mapper, subq_path[1]
if is_root:
# the subq_path is also coming from cached state, so when we start
# building up this path, it has to also be converted to be in terms
# of the current state. this is for the specific case of the entity
# is an AliasedClass against a subquery that's not otherwise going
# to adapt
new_subq_path = current_compile_state._entities[
orig_query_entity_index
].entity_zero._path_registry[leftmost_prop]
additional = len(subq_path) - len(new_subq_path)
if additional:
new_subq_path += path_registry.PathRegistry.coerce(
subq_path[-additional:]
)
else:
new_subq_path = given_subq_path
leftmost_cols = leftmost_prop.local_columns
leftmost_attr = [
getattr(
new_subq_path.path[0].entity,
leftmost_mapper._columntoproperty[c].key,
)
for c in leftmost_cols
]
return leftmost_mapper, leftmost_attr, leftmost_prop, new_subq_path
def _generate_from_original_query(
self,
orig_compile_state,
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
orig_entity,
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone().correlate(None)
# LEGACY: make a Query back from the select() !!
# This suits at least two legacy cases:
# 1. applications which expect before_compile() to be called
# below when we run .subquery() on this query (Keystone)
# 2. applications which are doing subqueryload with complex
# from_self() queries, as query.subquery() / .statement
# has to do the full compile context for multiply-nested
# from_self() (Neutron) - see test_subqload_from_self
# for demo.
q2 = query.Query.__new__(query.Query)
q2.__dict__.update(q.__dict__)
q = q2
# set the query's "FROM" list explicitly to what the
# FROM list would be in any case, as we will be limiting
# the columns in the SELECT list which may no longer include
# all entities mentioned in things like WHERE, JOIN, etc.
if not q._from_obj:
q._enable_assertions = False
q.select_from.non_generative(
q,
*{
ent["entity"]
for ent in _column_descriptions(
orig_query, compile_state=orig_compile_state
)
if ent["entity"] is not None
}
)
# select from the identity columns of the outer (specifically, these
# are the 'local_cols' of the property). This will remove other
# columns from the query that might suggest the right entity which is
# why we do set select_from above. The attributes we have are
# coerced and adapted using the original query's adapter, which is
# needed only for the case of adapting a subclass column to
# that of a polymorphic selectable, e.g. we have
# Engineer.primary_language and the entity is Person. All other
# adaptations, e.g. from_self, select_entity_from(), will occur
# within the new query when it compiles, as the compile_state we are
# using here is only a partial one. If the subqueryload is from a
# with_polymorphic() or other aliased() object, left_attr will already
# be the correct attributes so no adaptation is needed.
target_cols = orig_compile_state._adapt_col_list(
[
sql.coercions.expect(sql.roles.ColumnsClauseRole, o)
for o in leftmost_attr
],
orig_compile_state._get_current_adapter(),
)
q._raw_columns = target_cols
distinct_target_key = leftmost_relationship.distinct_target_key
if distinct_target_key is True:
q._distinct = True
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
for t in set(c.table for c in target_cols):
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
# don't need ORDER BY if no limit/offset
if not q._has_row_limiting_clause:
q._order_by_clauses = ()
if q._distinct is True and q._order_by_clauses:
# the logic to automatically add the order by columns to the query
# when distinct is True is deprecated in the query
to_add = sql_util.expand_column_list_from_order_by(
target_cols, q._order_by_clauses
)
if to_add:
q._set_entities(target_cols + to_add)
# the original query now becomes a subquery
# which we'll join onto.
# LEGACY: as "q" is a Query, the before_compile() event is invoked
# here.
embed_q = q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).subquery()
left_alias = orm_util.AliasedClass(
leftmost_mapper, embed_q, use_mapper_path=True
)
return left_alias
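    # Illustrative note (hypothetical tables/criteria): given an original query
    # such as session.query(Parent).filter(Parent.flag == True), the query
    # rebuilt above is reduced to the relationship's local columns, e.g.
    # roughly "SELECT parent.id FROM parent WHERE parent.flag = true", and is
    # then wrapped in .subquery() so the eager-load query can join against it
    # as "left_alias".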
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = []
pairs = list(subq_path.pairs())
for i, (mapper, prop) in enumerate(pairs):
if i > 0:
# look at the previous mapper in the chain -
# if it is as or more specific than this prop's
# mapper, use that instead.
# note we have an assumption here that
# the non-first element is always going to be a mapper,
# not an AliasedClass
prev_mapper = pairs[i - 1][1].mapper
to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
else:
to_append = mapper
to_join.append((to_append, prop.key))
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
else:
info = inspect(to_join[-1][0])
if info.is_aliased_class:
parent_alias = info.entity
else:
# alias a plain mapper as we may be
# joining multiple times
parent_alias = orm_util.AliasedClass(
info.entity, use_mapper_path=True
)
local_cols = self.parent_property.local_columns
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(
self, q, to_join, left_alias, parent_alias, effective_entity
):
ltj = len(to_join)
if ltj == 1:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(effective_entity)
]
elif ltj == 2:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(parent_alias),
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
),
]
elif ltj > 2:
middle = [
(
orm_util.AliasedClass(item[0])
if not inspect(item[0]).is_aliased_class
else item[0].entity,
item[1],
)
for item in to_join[1:-1]
]
inner = []
while middle:
item = middle.pop(0)
attr = getattr(item[0], item[1])
if middle:
attr = attr.of_type(middle[0][0])
else:
attr = attr.of_type(parent_alias)
inner.append(attr)
to_join = (
[getattr(left_alias, to_join[0][1]).of_type(inner[0].parent)]
+ inner
+ [
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
)
]
)
for attr in to_join:
q = q.join(attr)
return q
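    # Illustrative note: for a two-hop option chain such as
    # subqueryload(A.bs).subqueryload(B.cs), the joins built above connect the
    # "left_alias" subquery of A to an aliased B and then to the effective C
    # entity, conceptually
    #     anon_subquery_of_A JOIN b_alias ON ... JOIN c ON ...
    # so that the innermost relationship's rows remain keyed back to A's
    # identity columns.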
def _setup_options(
self,
context,
q,
subq_path,
rewritten_path,
orig_query,
effective_entity,
loadopt,
):
        # note that because the subqueryload object does not re-use the
        # cached query and instead always makes use of the currently
        # invoked query, the two queries we have here (orig and
        # context.query) are both non-cached queries, so we can transfer
        # the options as-is without adjusting for new criteria.  Some work
        # on #6881 / #6889 brought this into question.
new_options = orig_query._with_options
if loadopt and loadopt._extra_criteria:
new_options += (
orm_util.LoaderCriteriaOption(
self.entity,
loadopt._generate_extra_criteria(context),
),
)
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(rewritten_path)
q = q.options(*new_options)
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
def _setup_outermost_orderby(compile_context):
compile_context.eager_order_by += tuple(
util.to_list(self.parent_property.order_by)
)
q = q._add_context_option(
_setup_outermost_orderby, self.parent_property
)
return q
class _SubqCollections(object):
"""Given a :class:`_query.Query` used to emit the "subquery load",
provide a load interface that executes the query at the
first moment a value is needed.
"""
__slots__ = (
"session",
"execution_options",
"load_options",
"params",
"subq",
"_data",
)
def __init__(self, context, subq):
            # don't store the context itself, as that would create a
            # reference cycle, even though having it here would be
            # preferable
self.session = context.session
self.execution_options = context.execution_options
self.load_options = context.load_options
self.params = context.params or {}
self.subq = subq
self._data = None
def get(self, key, default):
if self._data is None:
self._load()
return self._data.get(key, default)
def _load(self):
self._data = collections.defaultdict(list)
q = self.subq
assert q.session is None
q = q.with_session(self.session)
if self.load_options._populate_existing:
q = q.populate_existing()
# to work with baked query, the parameters may have been
# updated since this query was created, so take these into account
rows = list(q.params(self.params))
for k, v in itertools.groupby(rows, lambda x: x[1:]):
self._data[k].extend(vv[0] for vv in v)
def loader(self, state, dict_, row):
if self._data is None:
self._load()
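    # Illustrative note: rows come back as (entity, *key_cols) tuples; _load()
    # groups them with itertools.groupby on the trailing key columns and
    # accumulates into a defaultdict(list), e.g. rows
    # [(child1, 1), (child2, 1), (child3, 2)] become
    # {(1,): [child1, child2], (2,): [child3]}, which get() then hands out
    # per parent row.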
def _setup_query_from_rowproc(
self,
context,
query_entity,
path,
entity,
loadopt,
adapter,
):
compile_state = context.compile_state
if (
not compile_state.compile_options._enable_eagerloads
or compile_state.compile_options._for_refresh_state
):
return
orig_query_entity_index = compile_state._entities.index(query_entity)
context.loaders_require_buffering = True
path = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = with_poly_entity
else:
effective_entity = self.entity
subq_path, rewritten_path = context.query._execution_options.get(
("subquery_paths", None),
(orm_util.PathRegistry.root, orm_util.PathRegistry.root),
)
is_root = subq_path is orm_util.PathRegistry.root
subq_path = subq_path + path
rewritten_path = rewritten_path + path
# if not via query option, check for
# a cycle
# TODO: why is this here??? this is now handled
# by the _check_recursive_postload call
if not path.contains(compile_state.attributes, "loader"):
if self.join_depth:
if (
(
compile_state.current_path.length
if compile_state.current_path
else 0
)
+ path.length
) / 2 > self.join_depth:
return
elif subq_path.contains_mapper(self.mapper):
return
        # use the current query being invoked, not the compile state
        # one.  This is so that we get the current parameters.  However,
        # it means we can't use the existing compile state; we have to
        # make a new one.  Other approaches include possibly using the
        # compiled query but swapping the params, which seems only
        # marginally less time spent but more complicated.
orig_query = context.query._execution_options.get(
("orig_query", SubqueryLoader), context.query
)
# make a new compile_state for the query that's probably cached, but
# we're sort of undoing a bit of that caching :(
compile_state_cls = ORMCompileState._get_plugin_class_for_plugin(
orig_query, "orm"
)
if orig_query._is_lambda_element:
if context.load_options._lazy_loaded_from is None:
util.warn(
'subqueryloader for "%s" must invoke lambda callable '
"at %r in "
"order to produce a new query, decreasing the efficiency "
"of caching for this statement. Consider using "
"selectinload() for more effective full-lambda caching"
% (self, orig_query)
)
orig_query = orig_query._resolved
# this is the more "quick" version, however it's not clear how
# much of this we need. in particular I can't get a test to
# fail if the "set_base_alias" is missing and not sure why that is.
orig_compile_state = compile_state_cls._create_entities_collection(
orig_query, legacy=False
)
(
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
rewritten_path,
) = self._get_leftmost(
orig_query_entity_index,
rewritten_path,
orig_compile_state,
is_root,
)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_compile_state,
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
entity,
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = query.Query(effective_entity)
q._execution_options = q._execution_options.union(
{
("orig_query", SubqueryLoader): orig_query,
("subquery_paths", None): (subq_path, rewritten_path),
}
)
q = q._set_enable_single_crit(False)
to_join, local_attr, parent_alias = self._prep_for_joins(
left_alias, subq_path
)
q = q.add_columns(*local_attr)
q = self._apply_joins(
q, to_join, left_alias, parent_alias, effective_entity
)
q = self._setup_options(
context,
q,
subq_path,
rewritten_path,
orig_query,
effective_entity,
loadopt,
)
q = self._setup_outermost_orderby(q)
return q
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
if context.refresh_state:
return self._immediateload_create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
# the subqueryloader does a similar check in setup_query() unlike
# the other post loaders, however we have this here for consistency
elif self._check_recursive_postload(context, path, self.join_depth):
return
elif not isinstance(context.compile_state, ORMSelectCompileState):
# issue 7505 - subqueryload() in 1.3 and previous would silently
# degrade for from_statement() without warning. this behavior
# is restored here
return
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
        # a little dance here as the "path" is still something that only
        # semi-tracks the exact series of things we are loading, still not
        # telling us about with_polymorphic() and stuff like that when it's
        # at the root.  The initial MapperEntity is more accurate for this
        # case.
if len(path) == 1:
if not orm_util._entity_isa(query_entity.entity_zero, self.parent):
return
elif not orm_util._entity_isa(path[-1], self.parent):
return
subq = self._setup_query_from_rowproc(
context,
query_entity,
path,
path[-1],
loadopt,
adapter,
)
if subq is None:
return
assert subq.session is None
path = path[self.parent_property]
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
collections = self._SubqCollections(context, subq)
path.set(context.attributes, "collections", collections)
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
self._create_collection_loader(
context, result, collections, local_cols, populators
)
else:
self._create_scalar_loader(
context, result, collections, local_cols, populators
)
def _create_collection_loader(
self, context, result, collections, local_cols, populators
):
tuple_getter = result._tuple_getter(local_cols)
def load_collection_from_subq(state, dict_, row):
collection = collections.get(tuple_getter(row), ())
state.get_impl(self.key).set_committed_value(
state, dict_, collection
)
def load_collection_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_collection_from_subq(state, dict_, row)
populators["new"].append((self.key, load_collection_from_subq))
populators["existing"].append(
(self.key, load_collection_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
def _create_scalar_loader(
self, context, result, collections, local_cols, populators
):
tuple_getter = result._tuple_getter(local_cols)
def load_scalar_from_subq(state, dict_, row):
collection = collections.get(tuple_getter(row), (None,))
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' " % self
)
scalar = collection[0]
state.get_impl(self.key).set_committed_value(state, dict_, scalar)
def load_scalar_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_scalar_from_subq(state, dict_, row)
populators["new"].append((self.key, load_scalar_from_subq))
populators["existing"].append(
(self.key, load_scalar_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy="joined")
@relationships.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
using joined eager loading.
"""
__slots__ = "join_depth", "_aliased_class_pool"
def __init__(self, parent, strategy_key):
super(JoinedLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
self._aliased_class_pool = []
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
chained_from_outerjoin=False,
**kwargs
):
"""Add a left outer join to the statement that's being constructed."""
if not compile_state.compile_options._enable_eagerloads:
return
elif self.uselist:
compile_state.multi_row_eager_loaders = True
path = path[self.parent_property]
with_polymorphic = None
user_defined_adapter = (
self._init_user_defined_eager_proc(
loadopt, compile_state, compile_state.attributes
)
if loadopt
else False
)
if user_defined_adapter is not False:
(
clauses,
adapter,
add_to_collection,
) = self._setup_query_on_user_defined_adapter(
compile_state,
query_entity,
path,
adapter,
user_defined_adapter,
)
else:
# if not via query option, check for
# a cycle
if not path.contains(compile_state.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif path.contains_mapper(self.mapper):
return
(
clauses,
adapter,
add_to_collection,
chained_from_outerjoin,
) = self._generate_row_adapter(
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
)
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
with_polymorphic = inspect(
with_poly_entity
).with_polymorphic_mappers
else:
with_polymorphic = None
path = path[self.entity]
loading._setup_entity_query(
compile_state,
self.mapper,
query_entity,
path,
clauses,
add_to_collection,
with_polymorphic=with_polymorphic,
parentmapper=self.mapper,
chained_from_outerjoin=chained_from_outerjoin,
)
if with_poly_entity is not None and None in set(
compile_state.secondary_columns
):
raise sa_exc.InvalidRequestError(
"Detected unaliased columns when generating joined "
"load. Make sure to use aliased=True or flat=True "
"when using joined loading with with_polymorphic()."
)
def _init_user_defined_eager_proc(
self, loadopt, compile_state, target_attributes
):
# check if the opt applies at all
if "eager_from_alias" not in loadopt.local_opts:
# nope
return False
path = loadopt.path.parent
# the option applies. check if the "user_defined_eager_row_processor"
# has been built up.
adapter = path.get(
compile_state.attributes, "user_defined_eager_row_processor", False
)
if adapter is not False:
# just return it
return adapter
# otherwise figure it out.
alias = loadopt.local_opts["eager_from_alias"]
root_mapper, prop = path[-2:]
if alias is not None:
if isinstance(alias, str):
alias = prop.target.alias(alias)
adapter = sql_util.ColumnAdapter(
alias, equivalents=prop.mapper._equivalent_columns
)
else:
if path.contains(
compile_state.attributes, "path_with_polymorphic"
):
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic"
)
adapter = orm_util.ORMAdapter(
with_poly_entity,
equivalents=prop.mapper._equivalent_columns,
)
else:
adapter = compile_state._polymorphic_adapters.get(
prop.mapper, None
)
path.set(
target_attributes,
"user_defined_eager_row_processor",
adapter,
)
return adapter
def _setup_query_on_user_defined_adapter(
self, context, entity, path, adapter, user_defined_adapter
):
# apply some more wrapping to the "user defined adapter"
# if we are setting up the query for SQL render.
adapter = entity._get_entity_clauses(context)
if adapter and user_defined_adapter:
user_defined_adapter = user_defined_adapter.wrap(adapter)
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
elif adapter:
user_defined_adapter = adapter
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
add_to_collection = context.primary_columns
return user_defined_adapter, adapter, add_to_collection
def _gen_pooled_aliased_class(self, context):
# keep a local pool of AliasedClass objects that get re-used.
# we need one unique AliasedClass per query per appearance of our
# entity in the query.
if inspect(self.entity).is_aliased_class:
alt_selectable = inspect(self.entity).selectable
else:
alt_selectable = None
key = ("joinedloader_ac", self)
if key not in context.attributes:
context.attributes[key] = idx = 0
else:
context.attributes[key] = idx = context.attributes[key] + 1
if idx >= len(self._aliased_class_pool):
to_adapt = orm_util.AliasedClass(
self.mapper,
alias=alt_selectable._anonymous_fromclause(flat=True)
if alt_selectable is not None
else None,
flat=True,
use_mapper_path=True,
)
# load up the .columns collection on the Alias() before
# the object becomes shared among threads. this prevents
# races for column identities.
inspect(to_adapt).selectable.c
self._aliased_class_pool.append(to_adapt)
return self._aliased_class_pool[idx]
def _generate_row_adapter(
self,
compile_state,
entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
):
with_poly_entity = path.get(
compile_state.attributes, "path_with_polymorphic", None
)
if with_poly_entity:
to_adapt = with_poly_entity
else:
to_adapt = self._gen_pooled_aliased_class(compile_state)
clauses = inspect(to_adapt)._memo(
("joinedloader_ormadapter", self),
orm_util.ORMAdapter,
to_adapt,
equivalents=self.mapper._equivalent_columns,
            adapt_required=True,
allow_label_resolve=False,
anonymize_labels=True,
)
assert clauses.aliased_class is not None
innerjoin = (
loadopt.local_opts.get("innerjoin", self.parent_property.innerjoin)
if loadopt is not None
else self.parent_property.innerjoin
)
if not innerjoin:
# if this is an outer join, all non-nested eager joins from
# this path must also be outer joins
chained_from_outerjoin = True
compile_state.create_eager_joins.append(
(
self._create_eager_join,
entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
loadopt._extra_criteria if loadopt else (),
)
)
add_to_collection = compile_state.secondary_columns
path.set(compile_state.attributes, "eager_row_processor", clauses)
return clauses, adapter, add_to_collection, chained_from_outerjoin
def _create_eager_join(
self,
compile_state,
query_entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
extra_criteria,
):
if parentmapper is None:
localparent = query_entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = (
compile_state.multi_row_eager_loaders
and compile_state._should_nest_selectable
)
query_entity_key = None
if (
query_entity not in compile_state.eager_joins
and not should_nest_selectable
and compile_state.from_clauses
):
indexes = sql_util.find_left_clause_that_matches_given(
compile_state.from_clauses, query_entity.selectable
)
if len(indexes) > 1:
# for the eager load case, I can't reproduce this right
# now. For query.join() I can.
raise sa_exc.InvalidRequestError(
"Can't identify which query entity in which to joined "
"eager load from. Please use an exact match when "
"specifying the join path."
)
if indexes:
clause = compile_state.from_clauses[indexes[0]]
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
query_entity_key, default_towrap = indexes[0], clause
if query_entity_key is None:
query_entity_key, default_towrap = (
query_entity,
query_entity.selectable,
)
towrap = compile_state.eager_joins.setdefault(
query_entity_key, default_towrap
)
if adapter:
if getattr(adapter, "aliased_class", None):
# joining from an adapted entity. The adapted entity
# might be a "with_polymorphic", so resolve that to our
# specific mapper's entity before looking for our attribute
# name on it.
efm = inspect(adapter.aliased_class)._entity_for_mapper(
localparent
if localparent.isa(self.parent)
else self.parent
)
# look for our attribute on the adapted entity, else fall back
# to our straight property
onclause = getattr(efm.entity, self.key, self.parent_property)
else:
onclause = getattr(
orm_util.AliasedClass(
self.parent, adapter.selectable, use_mapper_path=True
),
self.key,
self.parent_property,
)
else:
onclause = self.parent_property
assert clauses.aliased_class is not None
attach_on_outside = (
not chained_from_outerjoin
or not innerjoin
or innerjoin == "unnested"
or query_entity.entity_zero.represents_outer_join
)
extra_join_criteria = extra_criteria
additional_entity_criteria = compile_state.global_attributes.get(
("additional_entity_criteria", self.mapper), ()
)
if additional_entity_criteria:
extra_join_criteria += tuple(
ae._resolve_where_criteria(self.mapper)
for ae in additional_entity_criteria
if ae.propagate_to_loaders
)
if attach_on_outside:
# this is the "classic" eager join case.
eagerjoin = orm_util._ORMJoin(
towrap,
clauses.aliased_class,
onclause,
isouter=not innerjoin
or query_entity.entity_zero.represents_outer_join
or (chained_from_outerjoin and isinstance(towrap, sql.Join)),
_left_memo=self.parent,
_right_memo=self.mapper,
_extra_criteria=extra_join_criteria,
)
else:
# all other cases are innerjoin=='nested' approach
eagerjoin = self._splice_nested_inner_join(
path, towrap, clauses, onclause, extra_join_criteria
)
compile_state.eager_joins[query_entity_key] = eagerjoin
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = query_entity.selectable
if not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
# columns clause (i.e. are not deferred), so that aliasing applied
# by the Query propagates those columns outward.
# This has the effect
# of "undefering" those columns.
for col in sql_util._find_columns(
self.parent_property.primaryjoin
):
if localparent.persist_selectable.c.contains_column(col):
if adapter:
col = adapter.columns[col]
compile_state._append_dedupe_col_collection(
col, compile_state.primary_columns
)
if self.parent_property.order_by:
compile_state.eager_order_by += tuple(
(eagerjoin._target_adapter.copy_and_process)(
util.to_list(self.parent_property.order_by)
)
)
def _splice_nested_inner_join(
self, path, join_obj, clauses, onclause, extra_criteria, splicing=False
):
if splicing is False:
# first call is always handed a join object
# from the outside
assert isinstance(join_obj, orm_util._ORMJoin)
elif isinstance(join_obj, sql.selectable.FromGrouping):
return self._splice_nested_inner_join(
path,
join_obj.element,
clauses,
onclause,
extra_criteria,
splicing,
)
elif not isinstance(join_obj, orm_util._ORMJoin):
if path[-2] is splicing:
return orm_util._ORMJoin(
join_obj,
clauses.aliased_class,
onclause,
isouter=False,
_left_memo=splicing,
_right_memo=path[-1].mapper,
_extra_criteria=extra_criteria,
)
else:
# only here if splicing == True
return None
target_join = self._splice_nested_inner_join(
path,
join_obj.right,
clauses,
onclause,
extra_criteria,
join_obj._right_memo,
)
if target_join is None:
right_splice = False
target_join = self._splice_nested_inner_join(
path,
join_obj.left,
clauses,
onclause,
extra_criteria,
join_obj._left_memo,
)
if target_join is None:
# should only return None when recursively called,
# e.g. splicing==True
assert (
splicing is not False
), "assertion failed attempting to produce joined eager loads"
return None
else:
right_splice = True
if right_splice:
# for a right splice, attempt to flatten out
# a JOIN b JOIN c JOIN .. to avoid needless
# parenthesis nesting
if not join_obj.isouter and not target_join.isouter:
eagerjoin = join_obj._splice_into_center(target_join)
else:
eagerjoin = orm_util._ORMJoin(
join_obj.left,
target_join,
join_obj.onclause,
isouter=join_obj.isouter,
_left_memo=join_obj._left_memo,
)
else:
eagerjoin = orm_util._ORMJoin(
target_join,
join_obj.right,
join_obj.onclause,
isouter=join_obj.isouter,
_right_memo=join_obj._right_memo,
)
eagerjoin._target_adapter = target_join._target_adapter
return eagerjoin
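    # Illustrative note: with innerjoin="nested" on a chain such as
    # A -> B (outer) -> C (inner), the splice above yields SQL shaped like
    #     a LEFT OUTER JOIN (b JOIN c ON ...) ON ...
    # preserving the inner join between b and c without turning missing B rows
    # into missing A rows; the "unnested" / attach_on_outside path instead
    # appends a plain join at the end of the chain.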
def _create_eager_adapter(self, context, result, adapter, path, loadopt):
compile_state = context.compile_state
user_defined_adapter = (
self._init_user_defined_eager_proc(
loadopt, compile_state, context.attributes
)
if loadopt
else False
)
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if compile_state.compound_eager_adapter and decorator:
decorator = decorator.wrap(
compile_state.compound_eager_adapter
)
elif compile_state.compound_eager_adapter:
decorator = compile_state.compound_eager_adapter
else:
decorator = path.get(
compile_state.attributes, "eager_row_processor"
)
if decorator is None:
return False
if self.mapper._result_has_identity_key(result, decorator):
return decorator
else:
# no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
if self.uselist:
context.loaders_require_uniquing = True
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
context, result, adapter, our_path, loadopt
)
if eager_adapter is not False:
key = self.key
_instance = loading._instance_processor(
query_entity,
self.mapper,
context,
result,
our_path[self.entity],
eager_adapter,
)
if not self.uselist:
self._create_scalar_loader(context, key, _instance, populators)
else:
self._create_collection_loader(
context, key, _instance, populators
)
else:
self.parent_property._get_strategy(
(("lazy", "select"),)
).create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
def _create_collection_loader(self, context, key, _instance, populators):
def load_collection_from_joined_new_row(state, dict_, row):
# note this must unconditionally clear out any existing collection.
# an existing collection would be present only in the case of
# populate_existing().
collection = attributes.init_state_collection(state, dict_, key)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(
state, dict_, key
)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append(
(self.key, load_collection_from_joined_new_row)
)
populators["existing"].append(
(self.key, load_collection_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_collection_from_joined_exec)
)
def _create_scalar_loader(self, context, key, _instance, populators):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row)
# conflicting value already loaded, this shouldn't happen
if key in dict_:
if existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self
)
else:
# this case is when one row has multiple loads of the
# same entity (e.g. via aliasing), one has an attribute
# that the other doesn't.
dict_[key] = existing
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_scalar_from_joined_new_row))
populators["existing"].append(
(self.key, load_scalar_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_scalar_from_joined_exec)
)
@log.class_logger
@relationships.RelationshipProperty.strategy_for(lazy="selectin")
class SelectInLoader(PostLoader, util.MemoizedSlots):
__slots__ = (
"join_depth",
"omit_join",
"_parent_alias",
"_query_info",
"_fallback_query_info",
)
query_info = collections.namedtuple(
"queryinfo",
[
"load_only_child",
"load_with_join",
"in_expr",
"pk_cols",
"zero_idx",
"child_lookup_cols",
],
)
_chunksize = 500
def __init__(self, parent, strategy_key):
super(SelectInLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
is_m2o = self.parent_property.direction is interfaces.MANYTOONE
if self.parent_property.omit_join is not None:
self.omit_join = self.parent_property.omit_join
else:
lazyloader = self.parent_property._get_strategy(
(("lazy", "select"),)
)
if is_m2o:
self.omit_join = lazyloader.use_get
else:
self.omit_join = self.parent._get_clause[0].compare(
lazyloader._rev_lazywhere,
use_proxies=True,
compare_keys=False,
equivalents=self.parent._equivalent_columns,
)
if self.omit_join:
if is_m2o:
self._query_info = self._init_for_omit_join_m2o()
self._fallback_query_info = self._init_for_join()
else:
self._query_info = self._init_for_omit_join()
else:
self._query_info = self._init_for_join()
def _init_for_omit_join(self):
pk_to_fk = dict(
self.parent_property._join_condition.local_remote_pairs
)
pk_to_fk.update(
(equiv, pk_to_fk[k])
for k in list(pk_to_fk)
for equiv in self.parent._equivalent_columns.get(k, ())
)
pk_cols = fk_cols = [
pk_to_fk[col] for col in self.parent.primary_key if col in pk_to_fk
]
if len(fk_cols) > 1:
in_expr = sql.tuple_(*fk_cols)
zero_idx = False
else:
in_expr = fk_cols[0]
zero_idx = True
return self.query_info(False, False, in_expr, pk_cols, zero_idx, None)
def _init_for_omit_join_m2o(self):
pk_cols = self.mapper.primary_key
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
lazyloader = self.parent_property._get_strategy((("lazy", "select"),))
lookup_cols = [lazyloader._equated_columns[pk] for pk in pk_cols]
return self.query_info(
True, False, in_expr, pk_cols, zero_idx, lookup_cols
)
def _init_for_join(self):
self._parent_alias = aliased(self.parent.class_)
pa_insp = inspect(self._parent_alias)
pk_cols = [
pa_insp._adapt_element(col) for col in self.parent.primary_key
]
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
return self.query_info(False, True, in_expr, pk_cols, zero_idx, None)
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
if context.refresh_state:
return self._immediateload_create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
elif self._check_recursive_postload(context, path, self.join_depth):
return
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
        # a little dance here, as the "path" is still something that only
        # semi-tracks the exact series of things we are loading; it does not
        # tell us about with_polymorphic() and similar constructs when it's
        # at the root. the initial MapperEntity is more accurate for this case.
if len(path) == 1:
if not orm_util._entity_isa(query_entity.entity_zero, self.parent):
return
elif not orm_util._entity_isa(path[-1], self.parent):
return
selectin_path = (
context.compile_state.current_path or orm_util.PathRegistry.root
) + path
path_w_prop = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path_w_prop.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = inspect(with_poly_entity)
else:
effective_entity = self.entity
loading.PostLoad.callable_for_path(
context,
selectin_path,
self.parent,
self.parent_property,
self._load_for_path,
effective_entity,
loadopt,
)
def _load_for_path(
self, context, path, states, load_only, effective_entity, loadopt
):
if load_only and self.key not in load_only:
return
query_info = self._query_info
if query_info.load_only_child:
our_states = collections.defaultdict(list)
none_states = []
mapper = self.parent
for state, overwrite in states:
state_dict = state.dict
related_ident = tuple(
mapper._get_state_attr_by_column(
state,
state_dict,
lk,
passive=attributes.PASSIVE_NO_FETCH,
)
for lk in query_info.child_lookup_cols
)
# if the loaded parent objects do not have the foreign key
# to the related item loaded, then degrade into the joined
# version of selectinload
if attributes.PASSIVE_NO_RESULT in related_ident:
query_info = self._fallback_query_info
break
# organize states into lists keyed to particular foreign
# key values.
if None not in related_ident:
our_states[related_ident].append(
(state, state_dict, overwrite)
)
else:
# For FK values that have None, add them to a
# separate collection that will be populated separately
none_states.append((state, state_dict, overwrite))
# note the above conditional may have changed query_info
if not query_info.load_only_child:
our_states = [
(state.key[1], state, state.dict, overwrite)
for state, overwrite in states
]
pk_cols = query_info.pk_cols
in_expr = query_info.in_expr
if not query_info.load_with_join:
# in "omit join" mode, the primary key column and the
# "in" expression are in terms of the related entity. So
# if the related entity is polymorphic or otherwise aliased,
# we need to adapt our "pk_cols" and "in_expr" to that
# entity. in non-"omit join" mode, these are against the
            # parent entity and do not need adaptation.
if effective_entity.is_aliased_class:
pk_cols = [
effective_entity._adapt_element(col) for col in pk_cols
]
in_expr = effective_entity._adapt_element(in_expr)
bundle_ent = orm_util.Bundle("pk", *pk_cols)
bundle_sql = bundle_ent.__clause_element__()
entity_sql = effective_entity.__clause_element__()
q = Select._create_raw_select(
_raw_columns=[bundle_sql, entity_sql],
_label_style=LABEL_STYLE_TABLENAME_PLUS_COL,
_compile_options=ORMCompileState.default_compile_options,
_propagate_attrs={
"compile_state_plugin": "orm",
"plugin_subject": effective_entity,
},
)
if not query_info.load_with_join:
# the Bundle we have in the "omit_join" case is against raw, non
# annotated columns, so to ensure the Query knows its primary
# entity, we add it explicitly. If we made the Bundle against
# annotated columns, we hit a performance issue in this specific
# case, which is detailed in issue #4347.
q = q.select_from(effective_entity)
else:
# in the non-omit_join case, the Bundle is against the annotated/
# mapped column of the parent entity, but the #4347 issue does not
# occur in this case.
q = q.select_from(self._parent_alias).join(
getattr(self._parent_alias, self.parent_property.key).of_type(
effective_entity
)
)
q = q.filter(in_expr.in_(sql.bindparam("primary_keys")))
# a test which exercises what these comments talk about is
# test_selectin_relations.py -> test_twolevel_selectin_w_polymorphic
#
# effective_entity above is given to us in terms of the cached
# statement, namely this one:
orig_query = context.compile_state.select_statement
# the actual statement that was requested is this one:
# context_query = context.query
#
# that's not the cached one, however. So while it is of the identical
# structure, if it has entities like AliasedInsp, which we get from
# aliased() or with_polymorphic(), the AliasedInsp will likely be a
# different object identity each time, and will not match up
# hashing-wise to the corresponding AliasedInsp that's in the
# cached query, meaning it won't match on paths and loader lookups
# and loaders like this one will be skipped if it is used in options.
#
# Now we want to transfer loader options from the parent query to the
# "selectinload" query we're about to run. Which query do we transfer
# the options from? We use the cached query, because the options in
# that query will be in terms of the effective entity we were just
# handed.
#
# But now the selectinload query we are running is *also*
# cached. What if it's cached and running from some previous iteration
# of that AliasedInsp? Well in that case it will also use the previous
# iteration of the loader options. If the query expires and
# gets generated again, it will be handed the current effective_entity
# and the current _with_options, again in terms of whatever
# compile_state.select_statement happens to be right now, so the
# query will still be internally consistent and loader callables
# will be correctly invoked.
effective_path = path[self.parent_property]
if orig_query is context.query:
options = new_options = orig_query._with_options
user_defined_options = []
else:
options = orig_query._with_options
# propagate compile state options from the original query,
# updating their "extra_criteria" as necessary.
# note this will create a different cache key than
# "orig" options if extra_criteria is present, because the copy
# of extra_criteria will have different boundparam than that of
# the QueryableAttribute in the path
new_options = [
orig_opt._adjust_for_extra_criteria(context)
if orig_opt._is_strategy_option
else orig_opt
for orig_opt in options
if orig_opt._is_compile_state or orig_opt._is_legacy_option
]
# propagate user defined options from the current query
user_defined_options = [
opt
for opt in context.query._with_options
if not opt._is_compile_state and not opt._is_legacy_option
]
if loadopt and loadopt._extra_criteria:
new_options += (
orm_util.LoaderCriteriaOption(
effective_entity,
loadopt._generate_extra_criteria(context),
),
)
q = q.options(*new_options)._update_compile_options(
{"_current_path": effective_path}
)
if user_defined_options:
q = q.options(*user_defined_options)
if context.populate_existing:
q = q.execution_options(populate_existing=True)
if self.parent_property.order_by:
if not query_info.load_with_join:
eager_order_by = self.parent_property.order_by
if effective_entity.is_aliased_class:
eager_order_by = [
effective_entity._adapt_element(elem)
for elem in eager_order_by
]
q = q.order_by(*eager_order_by)
else:
def _setup_outermost_orderby(compile_context):
compile_context.eager_order_by += tuple(
util.to_list(self.parent_property.order_by)
)
q = q._add_context_option(
_setup_outermost_orderby, self.parent_property
)
if query_info.load_only_child:
self._load_via_child(
our_states, none_states, query_info, q, context
)
else:
self._load_via_parent(our_states, query_info, q, context)
def _load_via_child(self, our_states, none_states, query_info, q, context):
uselist = self.uselist
# this sort is really for the benefit of the unit tests
our_keys = sorted(our_states)
while our_keys:
chunk = our_keys[0 : self._chunksize]
our_keys = our_keys[self._chunksize :]
data = {
k: v
for k, v in context.session.execute(
q,
params={
"primary_keys": [
key[0] if query_info.zero_idx else key
for key in chunk
]
},
).unique()
}
for key in chunk:
# for a real foreign key and no concurrent changes to the
# DB while running this method, "key" is always present in
# data. However, for primaryjoins without real foreign keys
# a non-None primaryjoin condition may still refer to no
# related object.
related_obj = data.get(key, None)
for state, dict_, overwrite in our_states[key]:
if not overwrite and self.key in dict_:
continue
state.get_impl(self.key).set_committed_value(
state,
dict_,
related_obj if not uselist else [related_obj],
)
# populate none states with empty value / collection
for state, dict_, overwrite in none_states:
if not overwrite and self.key in dict_:
continue
# note it's OK if this is a uselist=True attribute, the empty
# collection will be populated
state.get_impl(self.key).set_committed_value(state, dict_, None)
def _load_via_parent(self, our_states, query_info, q, context):
uselist = self.uselist
_empty_result = () if uselist else None
while our_states:
chunk = our_states[0 : self._chunksize]
our_states = our_states[self._chunksize :]
primary_keys = [
key[0] if query_info.zero_idx else key
for key, state, state_dict, overwrite in chunk
]
data = collections.defaultdict(list)
for k, v in itertools.groupby(
context.session.execute(
q, params={"primary_keys": primary_keys}
).unique(),
lambda x: x[0],
):
data[k].extend(vv[1] for vv in v)
for key, state, state_dict, overwrite in chunk:
if not overwrite and self.key in state_dict:
continue
collection = data.get(key, _empty_result)
if not uselist and collection:
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded "
"attribute '%s' " % self
)
state.get_impl(self.key).set_committed_value(
state, state_dict, collection[0]
)
else:
# note that empty tuple set on uselist=False sets the
# value to None
state.get_impl(self.key).set_committed_value(
state, state_dict, collection
)
def single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent."
% (orm_util.instance_str(value), state.class_, prop),
code="bbf1",
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(
desc, "append", append, raw=True, retval=True, active_history=True
)
event.listen(desc, "set", set_, raw=True, retval=True, active_history=True)
|
from PyQt5.QtCore import pyqtSignal, QObject, pyqtSlot
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QLineEdit, QLabel, QPushButton, QGroupBox, QSizePolicy, \
QFormLayout, QWidget, QCheckBox
class MainFunctionAbstract(QGroupBox):
send_data = pyqtSignal(str, dict)
flag_update_signal = pyqtSignal(str, str)
def __init__(
self, parent, button_names=(), flag_names=(),
flag_defaults=(), group_name="", comment=None, stack_vertically=True):
super().__init__(group_name, parent)
if stack_vertically:
layout = QVBoxLayout(self)
else:
layout = QHBoxLayout(self)
if comment is not None:
layout.addWidget(QLabel(comment, parent))
flags_hbox = QHBoxLayout()
self.flag_names = flag_names
self.flag_line_edits = {}
for flag, flag_default_value in zip(flag_names, flag_defaults):
flag_group_box = self.create_get_flags_groupbox(flag, flag_default_value)
flags_hbox.addWidget(flag_group_box)
layout.addLayout(flags_hbox)
buttons_hbox = QHBoxLayout()
self.buttons = {}
for button_name in button_names:
button = QPushButton(button_name)
self.buttons[button_name] = button
button.clicked.connect(self.collect_send_data)
buttons_hbox.addWidget(button)
layout.addLayout(buttons_hbox)
def create_get_flags_groupbox(self, flag, flag_default_value):
flag_group_box = QWidget(self)
flag_form_box = QFormLayout(flag_group_box)
flag_group_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
line_edit = QLineEdit(str(flag_default_value))
line_edit.setAccessibleName(flag)
line_edit.editingFinished.connect(self.flag_update_request)
flag_form_box.addRow(flag, line_edit)
self.flag_line_edits[flag] = line_edit
return flag_group_box
def collect_send_data(self):
sender = QObject.sender(self)
self.send_data.emit(
sender.text(),
{flag: flag_line_edit.text() for flag, flag_line_edit in self.flag_line_edits.items()})
def update_flag_defaults(self, flag_defaults):
for flag_name, flag_value in flag_defaults.items():
if flag_name in self.flag_names:
self.flag_line_edits[flag_name].setText(str(flag_value))
def flag_update_request(self):
sender_le = QObject.sender(self)
self.flag_update_signal.emit(sender_le.accessibleName(), sender_le.text())
def reset_flag(self, flag_name, flag_value):
self.flag_line_edits[flag_name].setText(str(flag_value))
class OverviewGenWidget(MainFunctionAbstract):
send_data = pyqtSignal(str, dict, bool, bool)
def __init__(self, parent, current_flags):
gen_overviews_box_flags = ["CTV_Method", "CTV_firstframe", "CTV_lastframe"]
super().__init__(
parent=parent, button_names=["Generate(new)"],
flag_names=gen_overviews_box_flags,
flag_defaults=[current_flags[f] for f in gen_overviews_box_flags],
group_name="Generate overview images",
stack_vertically=True
)
self.check_boxes = {}
self.deactivatable_flag_boxes = {}
extra_hbox = QHBoxLayout()
temp = {
"CTV_FeatureNumber": "Use all features?", "CTV_StimulusNumber": "Use all stimuli?"
}
for flag, check_box_name in temp.items():
flag_group_box = self.create_get_flags_groupbox(
flag=flag, flag_default_value=current_flags[flag])
self.deactivatable_flag_boxes[flag] = flag_group_box
extra_hbox.addWidget(flag_group_box)
check_box = QCheckBox(check_box_name, self)
check_box.setAccessibleName(flag)
check_box.stateChanged.connect(self.inactivate_flag)
self.check_boxes[flag] = check_box
extra_hbox.addWidget(check_box)
self.layout().insertLayout(1, extra_hbox)
@pyqtSlot(int, name="inactivate_flag")
def inactivate_flag(self, state):
sender = QObject.sender(self)
self.deactivatable_flag_boxes[sender.accessibleName()].setEnabled(state != 2)
def collect_send_data(self):
sender = QObject.sender(self)
self.send_data.emit(
sender.text(),
{flag: flag_line_edit.text() for flag, flag_line_edit in self.flag_line_edits.items()},
self.check_boxes["CTV_FeatureNumber"].isChecked(),
self.check_boxes["CTV_StimulusNumber"].isChecked()
)
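# Usage sketch (not part of the module; hypothetical flag/button names, requires a
# running QApplication):
# from PyQt5.QtWidgets import QApplication
# app = QApplication([])
# box = MainFunctionAbstract(
#     None, button_names=("Run",), flag_names=("threshold",),
#     flag_defaults=(0.5,), group_name="Demo")
# box.send_data.connect(print)
# box.show()
# app.exec_()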
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Video.upload_url'
db.add_column('videos_video', 'upload_url',
self.gf('django.db.models.fields.URLField')(default='', max_length=200),
keep_default=False)
# Adding field 'Video.shortlink'
db.add_column('videos_video', 'shortlink',
self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
keep_default=False)
# Adding field 'Video.state'
db.add_column('videos_video', 'state',
self.gf('django.db.models.fields.CharField')(default='unsent', max_length=10),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Video.upload_url'
db.delete_column('videos_video', 'upload_url')
# Deleting field 'Video.shortlink'
db.delete_column('videos_video', 'shortlink')
# Deleting field 'Video.state'
db.delete_column('videos_video', 'state')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'category': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'shortlink': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unsent'", 'max_length': '10'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'upload_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['videos']
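# Usage sketch (South 0.8-era workflow; app name per complete_apps above):
# a migration like this is generated with `python manage.py schemamigration videos --auto`
# and applied with `python manage.py migrate videos`, which runs forwards().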
|
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c), AiiDA team and individual contributors. #
# All rights reserved. #
# This file is part of the AiiDA-wannier90 code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-wannier90 #
# For further information on the license, see the LICENSE.txt file #
################################################################################
import pytest
from aiida import orm
ENTRY_POINT_CALC_JOB = 'wannier90.wannier90'
ENTRY_POINT_PARSER = 'wannier90.wannier90'
@pytest.mark.parametrize("seedname", ("aiida", "wannier"))
def test_wannier_default(#pylint: disable=too-many-arguments
fixture_localhost, generate_calc_job_node, generate_parser,
generate_win_params_gaas, data_regression, seedname
):
"""Basic check of parsing a Wannier90 calculation."""
node = generate_calc_job_node(
entry_point_name=ENTRY_POINT_CALC_JOB,
computer=fixture_localhost,
test_name='gaas/seedname_{}'.format(seedname),
inputs=generate_win_params_gaas(),
seedname=seedname
)
parser = generate_parser(ENTRY_POINT_PARSER)
results, calcfunction = parser.parse_from_node(
node, store_provenance=False
)
assert calcfunction.is_finished, calcfunction.exception
assert calcfunction.is_finished_ok, calcfunction.exit_message
assert not orm.Log.objects.get_logs_for(node)
assert 'output_parameters' in results
data_regression.check({
'output_parameters':
results['output_parameters'].get_dict(),
})
def test_no_kpoint_path(
fixture_localhost,
generate_calc_job_node,
generate_parser,
generate_win_params_gaas,
data_regression,
):
"""Check that parsing still works if the 'kpoint_path' is not set."""
inputs = generate_win_params_gaas()
del inputs['kpoint_path']
node = generate_calc_job_node(
entry_point_name=ENTRY_POINT_CALC_JOB,
computer=fixture_localhost,
test_name='gaas/seedname_aiida',
inputs=inputs,
)
parser = generate_parser(ENTRY_POINT_PARSER)
results, calcfunction = parser.parse_from_node(
node, store_provenance=False
)
assert calcfunction.is_finished, calcfunction.exception
assert calcfunction.is_finished_ok, calcfunction.exit_message
assert not orm.Log.objects.get_logs_for(node)
assert 'output_parameters' in results
data_regression.check({
'output_parameters':
results['output_parameters'].get_dict(),
})
@pytest.mark.parametrize("band_parser", ("new", "legacy"))
def test_band_parser(#pylint: disable=too-many-arguments
fixture_localhost, generate_calc_job_node, generate_parser,
generate_win_params_o2sr, data_regression, band_parser
):
"""Check that band parser returns correct dimension and labels."""
inputs = generate_win_params_o2sr()
node = generate_calc_job_node(
entry_point_name=ENTRY_POINT_CALC_JOB,
computer=fixture_localhost,
test_name='o2sr/band_{}'.format(band_parser),
inputs=inputs
)
parser = generate_parser(ENTRY_POINT_PARSER)
results, calcfunction = parser.parse_from_node(
node, store_provenance=False
)
assert calcfunction.is_finished, calcfunction.exception
assert calcfunction.is_finished_ok, calcfunction.exit_message
assert not orm.Log.objects.get_logs_for(node)
assert 'output_parameters' in results
data_regression.check({
'output_parameters':
results['output_parameters'].get_dict(),
})
bands = results['interpolated_bands']
if band_parser == "new":
assert bands.get_kpoints().shape == (607, 3)
assert bands.get_bands().shape == (607, 21)
assert bands.labels == [(0, 'GAMMA'), (100, 'X'), (137, 'P'),
(208, 'N'), (288, 'GAMMA'), (362, 'M'),
(413, 'S'), (414, 'S_0'), (504, 'GAMMA'),
(505, 'X'), (533, 'R'), (534, 'G'), (606, 'M')]
elif band_parser == "legacy":
assert bands.get_kpoints().shape == (604, 3)
assert bands.get_bands().shape == (604, 21)
assert bands.labels == [(0, 'GAMMA'), (100, 'X'), (137, 'P'),
(208, 'N'), (288, 'GAMMA'), (362, 'M'),
(412, 'S'), (413, 'S_0'), (502, 'GAMMA'),
(503, 'X'), (530, 'R'), (531, 'G'), (603, 'M')]
|
import torch.nn as nn
from collections import OrderedDict
class C1(nn.Module):
def __init__(self):
super(C1, self).__init__()
self.c1 = nn.Sequential(OrderedDict([
('c1', nn.Conv2d(1, 6, kernel_size=(5, 5))),
('relu1', nn.ReLU()),
('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2))
]))
def forward(self, img):
output = self.c1(img)
return output
class C3(nn.Module):
def __init__(self):
super(C3, self).__init__()
self.c3 = nn.Sequential(OrderedDict([
('c3', nn.Conv2d(6, 16, kernel_size=(5, 5))),
('relu2', nn.ReLU()),
('s4', nn.MaxPool2d(kernel_size=(2, 2), stride=2))
]))
def forward(self, img):
output = self.c3(img)
return output
class C5(nn.Module):
def __init__(self):
super(C5, self).__init__()
self.c5 = nn.Sequential(OrderedDict([
('c5', nn.Conv2d(16, 120, kernel_size=(5, 5))),
('relu3', nn.ReLU())
]))
def forward(self, img):
output = self.c5(img)
return output
class F6(nn.Module):
def __init__(self):
super(F6, self).__init__()
self.f6 = nn.Sequential(OrderedDict([
('f6', nn.Linear(120, 84)),
('relu4', nn.ReLU())
]))
def forward(self, img):
output = self.f6(img)
return output
class FCoutput(nn.Module):
def __init__(self):
super(FCoutput, self).__init__()
self.fcoutput = nn.Sequential(OrderedDict([
('fcoutput7', nn.Linear(84, 10)),
('sig1', nn.LogSoftmax(dim=-1))
]))
def forward(self, img):
output = self.fcoutput(img)
return output
class LeNet5(nn.Module):
"""
Input - 1x32x32
Output - 10
"""
def __init__(self):
super(LeNet5, self).__init__()
self.c1 = C1()
self.c3 = C3()
self.c5 = C5()
self.f6 = F6()
self.fcoutput = FCoutput()
def forward(self, img):
# Conv Layer(C1)
# - input: 32x32x1
# - output: 28x28x6
# - weights: (5x5x1 + 1)x6
# Sub-sampling(S2)
# - input: 28x28x6
# - output: 14x14x6
# - weights: 2x2x1
output = self.c1(img)
# Conv Layer(C3)
# - input: 14x14x6
# - output: 10x10x16
# - weights: (5x5x6 + 1)x16
# Sub-sampling(S4)
# - input: 10x10x16
# - output: 5x5x16
# - weights: 2x2x1
output = self.c3(output)
# Conv Layer(C5)
# - input: 5x5x16
# - output: 1x1x120
# - weights: (5x5x16 + 1)x120
output = self.c5(output)
# Flatten Layer
output = output.view(img.size(0), -1)
# Fully Connected Layer(F6)
# - input: 120
# - output: 84
output = self.f6(output)
# Fully Connected Layer(F7)
# - input: 84
# - output: 10
output = self.fcoutput(output)
return output
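# Usage sketch (random input matching the documented 1x32x32 shape):
# import torch
# net = LeNet5()
# x = torch.randn(1, 1, 32, 32)
# log_probs = net(x)  # shape (1, 10), log-probabilities from the final LogSoftmax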
|
#! /usr/bin/env python
"""API Wrapper for Bitcoin.de Trading API."""
import requests
import time
import json
import hmac
import hashlib
import logging
import codecs
import decimal
import inspect
import urllib
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlencode
logging.basicConfig()
log = logging.getLogger(__name__)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.propagate = True
__version__ = '2.3'
# disable insecure SSL warnings
requests.packages.urllib3.disable_warnings()
class ParameterBuilder(object):
    '''Verify the given parameters for the API and build the request URL.'''
def __init__(self, avail_params, given_params, uri):
self.verify_keys_and_values(avail_params, given_params)
self.params = given_params
self.create_url(uri)
def verify_keys_and_values(self, avail_params, given_params):
for k, v in given_params.items():
if k not in avail_params:
list_string = ', '.join(avail_params)
raise KeyError("{} is not any of {}".format(k, list_string))
if k == 'trading_pair':
self.error_on_invalid_value(v, self.TRADING_PAIRS)
elif k == 'type':
self.error_on_invalid_value(v, self.ORDER_TYPES)
elif k == 'currency':
self.error_on_invalid_value(v, self.CURRENCIES)
elif k == 'seat_of_bank':
self.error_on_invalid_value(v, self.BANK_SEATS)
elif k in ['min_trust_level', 'trust_level']:
self.error_on_invalid_value(v, self.TRUST_LEVELS)
elif k == 'payment_option':
self.error_on_invalid_value(v, self.PAYMENT_OPTIONS)
elif k == 'state':
caller = inspect.stack()[2][3]
if caller in ["showMyOrders", "showMyOrderDetails"]:
self.error_on_invalid_value(v, self.ORDER_STATES)
elif caller in ["showMyTrades", "showMyTradesDetails"]:
self.error_on_invalid_value(v, self.TRADE_STATES)
    def error_on_invalid_value(self, value, valid_values):
        if value not in valid_values:
            list_string = ', '.join(str(x) for x in valid_values)
            raise ValueError("{} is not any of {}".format(value, list_string))
def create_url(self, uri):
if self.params:
self.encoded_string = urlencode(self.params)
self.url = uri + '?' + self.encoded_string
else:
self.encoded_string = ''
self.url = uri
TRADING_PAIRS = ['btceur', 'bcheur', 'etheur', 'btgeur', 'bsveur']
ORDER_TYPES = ['buy', 'sell']
CURRENCIES = ['btc', 'bch', 'eth', 'btg', 'bsv']
BANK_SEATS = ['AT', 'BE', 'BG', 'CH', 'CY', 'CZ',
'DE', 'DK', 'EE', 'ES', 'FI', 'FR',
'GB', 'GR', 'HR', 'HU', 'IE', 'IS',
'IT', 'LI', 'LT', 'LU', 'LV', 'MT',
'MQ', 'NL', 'NO', 'PL', 'PT', 'RO',
'SE', 'SI', 'SK']
TRUST_LEVELS = ['bronze', 'silver', 'gold', 'platin']
TRADE_STATES = [-1, 0, 1]
ORDER_STATES = [-2, -1, 0]
PAYMENT_OPTIONS = [1, 2, 3]
TRADE_TYPES = ['all', 'buy', 'sell', 'inpayment',
'payout', 'affiliate', 'welcome_btc',
'buy_yubikey', 'buy_goldshop',
'buy_diamondshop', 'kickback',
'outgoing_fee_voluntary']
def HandleRequestsException(e):
"""Handle Exception from request."""
log.warning(e)
def HandleAPIErrors(r):
"""To handle Errors from BTCDE API."""
valid_status_codes = [200, 201, 204]
if r.status_code not in valid_status_codes:
content = r.json()
errors = content.get('errors')
log.warning('API Error Code: {}'.format(str(errors[0]['code'])))
log.warning('API Error Message: {}'.format(errors[0]['message']))
log.warning('API Error URL: {}'.format(r.url))
return False
else:
return True
class Connection(object):
"""To provide connection credentials to the trading API"""
def __init__(self, api_key, api_secret):
self.api_key = api_key
self.api_secret = api_secret
# set initial self.nonce
self.nonce = int(time.time() * 1000000)
# Bitcoin.de API URI
self.apihost = 'https://api.bitcoin.de'
self.apiversion = 'v2'
self.orderuri = self.apihost + '/' + self.apiversion + '/' + 'orders'
self.tradeuri = self.apihost + '/' + self.apiversion + '/' + 'trades'
self.accounturi = self.apihost + '/' + self.apiversion + '/' + 'account'
def build_hmac_sign(self, md5string, method, url):
hmac_data = '{method}#{url}#{key}#{nonce}#{md5}'\
.format(method=method, url=url,
key=self.api_key, nonce=str(self.nonce),
md5=md5string)
hmac_signed = hmac.new(bytearray(self.api_secret.encode()), msg=hmac_data.encode(), digestmod=hashlib.sha256).hexdigest()
return hmac_signed
def set_header(self, url, method, encoded_string):
        # refresh self.nonce before each request
self.nonce = int(time.time() * 1000000)
if method == 'POST':
md5_encoded_query_string = hashlib.md5(encoded_string.encode()).hexdigest()
else:
md5_encoded_query_string = hashlib.md5(b'').hexdigest()
hmac_signed = self.build_hmac_sign(md5_encoded_query_string,
method, url)
# set header
header = {'content-type':
'application/x-www-form-urlencoded; charset=utf-8',
'X-API-KEY': self.api_key,
'X-API-NONCE': str(self.nonce),
'X-API-SIGNATURE': hmac_signed }
return header
def send_request(self, url, method, header, encoded_string):
if method == 'GET':
r = requests.get(url, headers=(header),
stream=True, verify=False)
elif method == 'POST':
r = requests.post(url, headers=(header), data=encoded_string,
stream=True, verify=False)
elif method == 'DELETE':
r = requests.delete(url, headers=(header),
stream=True, verify=False)
return r
def APIConnect(self, method, params):
"""Transform Parameters to URL"""
header = self.set_header(params.url, method,
params.encoded_string)
log.debug('Set Header: {}'.format(header))
try:
r = self.send_request(params.url, method, header,
params.encoded_string)
# Handle API Errors
if HandleAPIErrors(r):
# get results
result = r.json(parse_float=decimal.Decimal)
else:
result = {}
except requests.exceptions.RequestException as e:
HandleRequestsException(e)
result = {}
return result
def showOrderbook(self, order_type, trading_pair, **args):
"""Search Orderbook for offers."""
params = {'type': order_type,
'trading_pair': trading_pair}
params.update(args)
avail_params = ['type', 'trading_pair', 'amount', 'price',
'order_requirements_fullfilled',
'only_kyc_full', 'only_express_orders',
'only_same_bankgroup', 'only_same_bic',
'seat_of_bank']
p = ParameterBuilder(avail_params, params, self.orderuri)
return self.APIConnect('GET', p)
def createOrder(self, order_type, trading_pair, max_amount, price, **args):
"""Create a new Order."""
# Build parameters
params = {'type': order_type,
'trading_pair': trading_pair,
'max_amount': max_amount,
'price': price}
params.update(args)
avail_params = ['type', 'trading_pair', 'max_amount', 'price',
'min_amount', 'new_order_for_remaining_amount',
'min_trust_level', 'only_kyc_full', 'payment_option',
'seat_of_bank']
p = ParameterBuilder(avail_params, params, self.orderuri)
return self.APIConnect('POST', p)
def deleteOrder(self, order_id, trading_pair):
"""Delete an Order."""
# Build parameters
params = {'order_id': order_id,
'trading_pair': trading_pair}
avail_params = ['order_id', 'trading_pair']
newuri = self.orderuri + "/" + order_id + "/" + trading_pair
p = ParameterBuilder(avail_params, params, newuri)
p.encoded_string = ''
p.url = newuri
return self.APIConnect('DELETE', p)
def showMyOrders(self, **args):
"""Query and Filter own Orders."""
# Build parameters
params = args
avail_params = ['type', 'trading_pair', 'state',
'date_start', 'date_end', 'page']
newuri = self.orderuri + '/my_own'
p = ParameterBuilder(avail_params, params, newuri)
return self.APIConnect('GET', p)
def showMyOrderDetails(self, order_id):
"""Details to an own Order."""
newuri = self.orderuri + '/' + order_id
p = ParameterBuilder({}, {}, newuri)
return self.APIConnect('GET', p)
def executeTrade(self, order_id, order_type, trading_pair, amount):
"""Buy/Sell on a specific Order."""
newuri = self.tradeuri + '/' + order_id
params = {'order_id': order_id,
'type': order_type,
'trading_pair': trading_pair,
'amount': amount}
avail_params = ['order_id', 'type', 'trading_pair',
'amount']
p = ParameterBuilder(avail_params, params, newuri)
return self.APIConnect('POST', p)
def showMyTrades(self, **args):
"""Query and Filter on past Trades."""
# Build parameters
params = args
avail_params = ['type', 'trading_pair', 'state',
'date_start', 'date_end', 'page']
p = ParameterBuilder(avail_params, params, self.tradeuri)
return self.APIConnect('GET', p)
def showMyTradeDetails(self, trade_id):
"""Details to a specific Trade."""
newuri = self.tradeuri + '/' + trade_id
params = {}
p = ParameterBuilder({}, {}, newuri)
return self.APIConnect('GET', p)
def showAccountInfo(self):
"""Query on Account Infos."""
p = ParameterBuilder({}, {}, self.accounturi)
return self.APIConnect('GET', p)
def showOrderbookCompact(self, trading_pair):
"""Bids and Asks in compact format."""
params = {'trading_pair': trading_pair}
# Build parameters
avail_params = ['trading_pair']
p = ParameterBuilder(avail_params, params,
self.orderuri + '/compact')
return self.APIConnect('GET', p)
def showPublicTradeHistory(self, trading_pair, **args):
"""All successful trades of the las 7 days."""
params = {'trading_pair': trading_pair}
params.update(args)
avail_params = ['trading_pair', 'since_tid']
p = ParameterBuilder(avail_params, params,
self.tradeuri + '/history')
return self.APIConnect('GET', p)
def showRates(self, trading_pair):
"""Query of the average rate last 3 and 12 hours."""
newuri = self.apihost + '/' + self.apiversion + '/rates'
params = {'trading_pair': trading_pair}
avail_params = ['trading_pair']
p = ParameterBuilder(avail_params, params, newuri)
return self.APIConnect('GET', p)
def showAccountLedger(self, currency, **args):
"""Query on Account statement."""
params = {'currency': currency}
params.update(args)
avail_params = ['currency', 'type',
'datetime_start', 'datetime_end', 'page']
p = ParameterBuilder(avail_params, params,
self.accounturi + '/ledger')
return self.APIConnect('GET', p)
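# Usage sketch (placeholder credentials; not part of the module):
# conn = Connection('YOUR_API_KEY', 'YOUR_API_SECRET')
# orderbook = conn.showOrderbook('buy', 'btceur')
# rates = conn.showRates('btceur')
# account = conn.showAccountInfo()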
|
ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
class PLY_Manip:
def __init__(self, results_dir):
self.dir = results_dir
def insert_header(self, point_cloud_size, index):
number = str(index)
name = self.dir + 'out' + number + '.ply'
with open(name, 'wb') as file:
file.write((ply_header % dict(vert_num=point_cloud_size+1)).encode('utf-8'))
file.write('0 0 0 255 0 0\n'.encode('utf-8'))
def insert_point(self, x, y, z, b, g, r, index):
number = str(index)
name = self.dir + 'out' + number + '.ply'
with open(name, 'ab') as file:
file.write((str(x[0]) + ' ').encode('utf-8'))
file.write((str(y[0]) + ' ').encode('utf-8'))
file.write((str(z[0]) + ' ').encode('utf-8'))
file.write((str(b) + ' ').encode('utf-8'))
file.write((str(g) + ' ').encode('utf-8'))
file.write((str(r) + '\n').encode('utf-8'))
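# Usage sketch (hypothetical output directory; x/y/z are passed as 1-element
# sequences because insert_point indexes them with [0]):
# ply = PLY_Manip('./results/')
# ply.insert_header(point_cloud_size=1, index=0)
# ply.insert_point([1.0], [2.0], [3.0], b=255, g=128, r=0, index=0)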
|
from ..de import Provider as AddressProvider
class Provider(AddressProvider):
city_formats = ('{{city_name}}', )
city_with_postcode_formats = ('{{postcode}} {{city}}', )
street_name_formats = (
'{{first_name}}-{{last_name}}-{{street_suffix_long}}',
'{{last_name}}{{street_suffix_short}}',
)
street_address_formats = ('{{street_name}} {{building_number}}', )
address_formats = ('{{street_address}}\n{{postcode}} {{city}}', )
building_number_formats = ('###', '##', '#', '#/#')
street_suffixes_long = (
'Gasse', 'Platz', 'Ring', 'Straße', 'Weg', 'Allee',
)
street_suffixes_short = (
'gasse', 'platz', 'ring', 'straße', 'str.', 'weg', 'allee',
)
postcode_formats = ('#####', )
cities = (
'Aachen', 'Ahaus', 'Altentreptow', 'Altötting', 'Amberg', 'Angermünde',
'Anklam', 'Ansbach', 'Apolda', 'Arnstadt', 'Artern', 'Aschaffenburg',
'Aue', 'Auerbach', 'Augsburg', 'Aurich', 'Backnang', 'Bad Brückenau',
'Bad Freienwalde', 'Bad Kissingen', 'Bad Kreuznach', 'Bad Langensalza',
'Bad Liebenwerda', 'Bad Mergentheim', 'Badalzungen', 'Badibling',
'Badoberan', 'Bamberg', 'Bautzen', 'Bayreuth', 'Beeskow', 'Beilngries',
'Belzig', 'Berchtesgaden', 'Bergzabern', 'Berlin', 'Bernburg',
'Bersenbrück', 'Biedenkopf', 'Bischofswerda', 'Bitterfeld', 'Bogen',
'Borken', 'Borna', 'Brand', 'Brandenburg', 'Bremen', 'Bremervörde',
'Brilon', 'Bruchsal', 'Burg', 'Burgdorf', 'Burglengenfeld',
'Böblingen', 'Büsingenm Hochrhein', 'Bützow', 'Calau', 'Calw', 'Celle',
'Chemnitz', 'Cloppenburg', 'Coburg', 'Cottbus', 'Crailsheim',
'Cuxhaven', 'Dachau', 'Darmstadt', 'Deggendorf', 'Delitzsch', 'Demmin',
'Dessau', 'Dieburg', 'Diepholz', 'Dinkelsbühl', 'Dinslaken',
'Donaueschingen', 'Dresden', 'Duderstadt', 'Döbeln', 'Düren',
'Ebermannstadt', 'Ebern', 'Ebersberg', 'Eberswalde', 'Eckernförde',
'Eggenfelden', 'Eichstätt', 'Eichstätt', 'Eilenburg', 'Einbeck',
'Eisenach', 'Eisenberg', 'Eisenhüttenstadt', 'Eisleben', 'Emmendingen',
'Erbisdorf', 'Erding', 'Erfurt', 'Erkelenz', 'Euskirchen', 'Eutin',
'Fallingbostel', 'Feuchtwangen', 'Finsterwalde', 'Flöha', 'Forchheim',
'Forst', 'Freising', 'Freital', 'Freudenstadt', 'Fulda',
'Fürstenfeldbruck', 'Fürstenwalde', 'Füssen', 'Gadebusch',
'Gardelegen', 'Garmisch-Partenkirchen', 'Geithain', 'Geldern',
'Gelnhausen', 'Genthin', 'Gera', 'Germersheim', 'Gerolzhofen',
'Gießen', 'Gifhorn', 'Goslar', 'Gotha', 'Grafenau', 'Gransee',
'Greifswald', 'Greiz', 'Grevenbroich', 'Grevesmühlen',
'Griesbach Rottal', 'Grimma', 'Grimmen', 'Groß-Gerau', 'Großenhain',
'Gräfenhainichen', 'Guben', 'Gunzenhausen', 'Göppingen', 'Görlitz',
'Göttingen', 'Günzburg', 'Güstrow', 'Gütersloh', 'Hagenow',
'Hainichen', 'Halberstadt', 'Haldensleben', 'Hamburg', 'Hammelburg',
'Hannover', 'Hannoversch Münden', 'Hansestadttralsund', 'Havelberg',
'Hechingen', 'Heiligenstadt', 'Heinsberg', 'Helmstedt', 'Herford',
'Hersbruck', 'Herzberg', 'Hettstedt', 'Hildburghausen', 'Hildesheim',
'Hofgeismar', 'Hohenmölsen', 'Hohenstein-Ernstthal', 'Holzminden',
'Hoyerswerda', 'Husum', 'Höxter', 'Hünfeld', 'Illertissen', 'Ilmenau',
'Ingolstadt', 'Iserlohn', 'Jena', 'Jessen', 'Jülich', 'Jüterbog',
'Kaiserslautern', 'Kamenz', 'Karlsruhe', 'Kassel', 'Kehl', 'Kelheim',
'Kemnath', 'Kitzingen', 'Kleve', 'Klötze', 'Koblenz', 'Konstanz',
'Kronach', 'Kulmbach', 'Kusel', 'Kyritz', 'Königs Wusterhausen',
'Kötzting', 'Leipziger Land', 'Lemgo', 'Lichtenfels', 'Lippstadt',
'Lobenstein', 'Luckau', 'Luckenwalde', 'Ludwigsburg', 'Ludwigslust',
'Lörrach', 'Lübben', 'Lübeck', 'Lübz', 'Lüdenscheid', 'Lüdinghausen',
'Lüneburg', 'Magdeburg', 'Main-Höchst', 'Mainburg', 'Malchin',
'Mallersdorf', 'Marienberg', 'Marktheidenfeld', 'Mayen', 'Meiningen',
'Meißen', 'Melle', 'Mellrichstadt', 'Melsungen', 'Meppen', 'Merseburg',
'Mettmann', 'Miesbach', 'Miltenberg', 'Mittweida', 'Moers', 'Monschau',
'Mühldorfm Inn', 'Mühlhausen', 'München', 'Nabburg', 'Naila', 'Nauen',
'Neu-Ulm', 'Neubrandenburg', 'Neunburg vorm Wald', 'Neuruppin',
'Neuss', 'Neustadtm Rübenberge', 'Neustadtner Waldnaab', 'Neustrelitz',
'Niesky', 'Norden', 'Nordhausen', 'Northeim', 'Nördlingen',
'Nürtingen', 'Oberviechtach', 'Ochsenfurt', 'Olpe', 'Oranienburg',
'Oschatz', 'Osterburg', 'Osterodem Harz', 'Paderborn', 'Parchim',
'Parsberg', 'Pasewalk', 'Passau', 'Pegnitz', 'Peine', 'Perleberg',
'Pfaffenhofenner Ilm', 'Pinneberg', 'Pirmasens', 'Plauen', 'Potsdam',
'Prenzlau', 'Pritzwalk', 'Pößneck', 'Quedlinburg', 'Querfurt',
'Rastatt', 'Rathenow', 'Ravensburg', 'Recklinghausen', 'Regen',
'Regensburg', 'Rehau', 'Reutlingen', 'Ribnitz-Damgarten', 'Riesa',
'Rochlitz', 'Rockenhausen', 'Roding', 'Rosenheim', 'Rostock', 'Roth',
'Rothenburg oberauber', 'Rottweil', 'Rudolstadt', 'Saarbrücken',
'Saarlouis', 'Sangerhausen', 'Sankt Goar', 'Sankt Goarshausen',
'Saulgau', 'Scheinfeld', 'Schleiz', 'Schlüchtern', 'Schmölln',
'Schongau', 'Schrobenhausen', 'Schwabmünchen', 'Schwandorf',
'Schwarzenberg', 'Schweinfurt', 'Schwerin', 'Schwäbisch Gmünd',
'Schwäbisch Hall', 'Sebnitz', 'Seelow', 'Senftenberg', 'Siegen',
'Sigmaringen', 'Soest', 'Soltau', 'Soltau', 'Sondershausen',
'Sonneberg', 'Spremberg', 'Stade', 'Stade', 'Stadtroda',
'Stadtsteinach', 'Staffelstein', 'Starnberg', 'Staßfurt', 'Steinfurt',
'Stendal', 'Sternberg', 'Stollberg', 'Strasburg', 'Strausberg',
'Stuttgart', 'Suhl', 'Sulzbach-Rosenberg', 'Säckingen', 'Sömmerda',
'Tecklenburg', 'Teterow', 'Tirschenreuth', 'Torgau', 'Tuttlingen',
'Tübingen', 'Ueckermünde', 'Uelzen', 'Uffenheim', 'Vechta',
'Viechtach', 'Viersen', 'Vilsbiburg', 'Vohenstrauß', 'Waldmünchen',
'Wanzleben', 'Waren', 'Warendorf', 'Weimar', 'Weißenfels',
'Weißwasser', 'Werdau', 'Wernigerode', 'Wertingen', 'Wesel', 'Wetzlar',
'Wiedenbrück', 'Wismar', 'Wittenberg', 'Wittmund', 'Wittstock',
'Witzenhausen', 'Wolfach', 'Wolfenbüttel', 'Wolfratshausen', 'Wolgast',
'Wolmirstedt', 'Worbis', 'Wunsiedel', 'Wurzen', 'Zerbst', 'Zeulenroda',
'Zossen', 'Zschopau',
)
states = (
'Baden-Württemberg', 'Bayern', 'Berlin', 'Brandenburg', 'Bremen',
'Hamburg', 'Hessen', 'Mecklenburg-Vorpommern', 'Niedersachsen',
'Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland', 'Sachsen',
'Sachsen-Anhalt', 'Schleswig-Holstein', 'Thüringen',
)
def street_suffix_short(self):
return self.random_element(self.street_suffixes_short)
def street_suffix_long(self):
return self.random_element(self.street_suffixes_long)
def city_name(self):
return self.random_element(self.cities)
def state(self):
return self.random_element(self.states)
def city_with_postcode(self):
pattern = self.random_element(self.city_with_postcode_formats)
return self.generator.parse(pattern)
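# Usage sketch (assuming this provider is wired up as the address provider of a
# German-language Faker locale, e.g. 'de_DE'):
# from faker import Faker
# fake = Faker('de_DE')
# print(fake.city_with_postcode())
# print(fake.street_address())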
|
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions, MissingPermissions
import datetime
import json
class Kickban(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason = None):
await member.kick(reason = reason)
if reason is None:
reason = '_ _'
        embed = discord.Embed(color=0xED4245) #Red
embed.add_field(name=member.display_name + '#' + member.discriminator + ' has been kicked!', value='Reason: ' + reason, inline=True)
embed.set_footer(text='Requested on ' + str(datetime.datetime.now()))
await ctx.send(embed=embed)
@commands.command()
async def ban(self, ctx, member: discord.Member):
with open('./data/json/elevated.json') as f:
data = json.load(f)
if ctx.author.id in data["elevated-members"]:
role = discord.utils.get(member.guild.roles, name = "Banned")
await member.add_roles(role)
with open('./data/json/bans.json') as f:
data = json.load(f)
if member.id not in data["banned-members"]:
data["banned-members"].append(member.id)
with open('./data/json/bans.json', 'w') as f:
json.dump(data, f)
embed = discord.Embed(color=0xED4245) #Red
embed.add_field(name=member.display_name + '#' + member.discriminator + ' has been banned!', value='_ _', inline=True)
embed.set_footer(text='Requested on ' + str(datetime.datetime.now()))
await ctx.send(embed=embed)
else:
await ctx.send(":x: **You don't have permission to use this command.**")
@commands.command()
async def unban(self, ctx, member: discord.Member):
with open('./data/json/elevated.json') as f:
data = json.load(f)
if ctx.author.id in data["elevated-members"]:
role = discord.utils.get(member.guild.roles, name = "Banned")
await member.remove_roles(role)
with open('./data/json/bans.json') as f:
data = json.load(f)
banned_members = data["banned-members"]
if member.id in data["banned-members"]:
index = banned_members.index(member.id)
del banned_members[index]
data["banned-members"] = banned_members
with open('./data/json/bans.json', 'w') as f:
json.dump(data, f)
embed = discord.Embed(color=0x57F287) #Green
embed.add_field(name=member.display_name + '#' + member.discriminator + ' has been unbanned!', value='_ _', inline=True)
embed.set_footer(text='Requested on ' + str(datetime.datetime.now()))
await ctx.send(embed=embed)
else:
await ctx.send(":x: **You don't have permission to use this command.**")
def setup(bot):
bot.add_cog(Kickban(bot))
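# Usage sketch (discord.py 1.x-style extension loading; hypothetical module path and token):
# bot = commands.Bot(command_prefix='!')
# bot.load_extension('cogs.kickban')
# bot.run('YOUR_TOKEN')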
|
import os
import requests
import shutil
from download_util import download_file
THIS_FILE_PATH = os.path.abspath(__file__)
BASE_DIR = os.path.dirname(THIS_FILE_PATH)
DOWNLOADS_DIR = os.path.join(BASE_DIR, "downloads")
os.makedirs(DOWNLOADS_DIR, exist_ok=True)
downloaded_img_path = os.path.join(DOWNLOADS_DIR, '1.jpg')
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/db/Classic_view_of_a_cloudfree_Peyto_Lake%2C_Banff_National_Park%2C_Alberta%2C_Canada_%284110933448%29.jpg/330px-Classic_view_of_a_cloudfree_Peyto_Lake%2C_Banff_National_Park%2C_Alberta%2C_Canada_%284110933448%29.jpg"
# a smallish item
r = requests.get(url, stream=True)
r.raise_for_status() # 200
with open(downloaded_img_path, 'wb') as f:
f.write(r.content)
# dl_filename = os.path.basename(url)
# new_dl_path = os.path.join(DOWNLOADS_DIR, dl_filename)
# with requests.get(url, stream=True) as r:
# with open(new_dl_path, 'wb') as file_obj:
# shutil.copyfileobj(r.raw, file_obj)
download_file(url, DOWNLOADS_DIR)
|
#!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car
Usage:
manage.py (drive) [--model=<model>] [--js] [--type=(linear|categorical)] [--camera=(single|stereo)] [--meta=<key:value> ...] [--myconfig=<filename>]
manage.py (train) [--tubs=tubs] (--model=<model>) [--type=(linear|inferred|tensorrt_linear|tflite_linear)]
Options:
-h --help Show this screen.
--js Use physical joystick.
-f --file=<file> A text file containing paths to tub files, one per line. Option may be used more than once.
    --meta=<key:value>  Key/Value strings describing a piece of meta data about this drive. Option may be used more than once.
--myconfig=filename Specify myconfig file to use.
[default: myconfig.py]
"""
import os
import time
import logging
from docopt import docopt
import donkeycar as dk
from donkeycar.parts.transform import TriggeredCallback, DelayedTrigger
from donkeycar.parts.tub_v2 import TubWriter
from donkeycar.parts.datastore import TubHandler
from donkeycar.parts.controller import LocalWebController, WebFpv, JoystickController
from donkeycar.parts.throttle_filter import ThrottleFilter
from donkeycar.parts.behavior import BehaviorPart
from donkeycar.parts.file_watcher import FileWatcher
from donkeycar.parts.launch import AiLaunch
from donkeycar.pipeline.augmentations import ImageAugmentation
from donkeycar.utils import *
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def drive(cfg, model_path=None, use_joystick=False, model_type=None,
camera_type='single', meta=[]):
"""
Construct a working robotic vehicle from many parts. Each part runs as a
job in the Vehicle loop, calling either it's run or run_threaded method
depending on the constructor flag `threaded`. All parts are updated one
after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
part finishes processing in a timely manner. Parts may have named outputs
and inputs. The framework handles passing named outputs to parts
requesting the same named input.
"""
logger.info(f'PID: {os.getpid()}')
if cfg.DONKEY_GYM:
#the simulator will use cuda and then we usually run out of resources
#if we also try to use cuda. so disable for donkey_gym.
#os.environ["CUDA_VISIBLE_DEVICES"]="-1"
pass
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
#Initialize car
V = dk.vehicle.Vehicle()
#Initialize logging before anything else to allow console logging
if cfg.HAVE_CONSOLE_LOGGING:
logger.setLevel(logging.getLevelName(cfg.LOGGING_LEVEL))
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(cfg.LOGGING_FORMAT))
logger.addHandler(ch)
if cfg.HAVE_MQTT_TELEMETRY:
from donkeycar.parts.telemetry import MqttTelemetry
tel = MqttTelemetry(cfg)
if cfg.HAVE_ODOM:
if cfg.ENCODER_TYPE == "GPIO":
from donkeycar.parts.encoder import RotaryEncoder
enc = RotaryEncoder(mm_per_tick=0.306096, pin = cfg.ODOM_PIN, debug = cfg.ODOM_DEBUG)
V.add(enc, inputs=['throttle'], outputs=['enc/speed'], threaded=True)
elif cfg.ENCODER_TYPE == "arduino":
from donkeycar.parts.encoder import ArduinoEncoder
enc = ArduinoEncoder()
V.add(enc, outputs=['enc/speed'], threaded=True)
else:
print("No supported encoder found")
logger.info("cfg.CAMERA_TYPE %s"%cfg.CAMERA_TYPE)
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
else:
raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from donkeycar.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
elif cfg.CAMERA_TYPE == "D435":
from donkeycar.parts.realsense435i import RealSense435i
cam = RealSense435i(
enable_rgb=cfg.REALSENSE_D435_RGB,
enable_depth=cfg.REALSENSE_D435_DEPTH,
enable_imu=cfg.REALSENSE_D435_IMU,
device_id=cfg.REALSENSE_D435_ID)
V.add(cam, inputs=[],
outputs=['cam/image_array', 'cam/depth_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
threaded=True)
else:
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
inputs = []
outputs = ['cam/image_array']
threaded = True
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
#rbx
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, record_location=cfg.SIM_RECORD_LOCATION, record_gyroaccel=cfg.SIM_RECORD_GYROACCEL, record_velocity=cfg.SIM_RECORD_VELOCITY, record_lidar=cfg.SIM_RECORD_LIDAR, delay=cfg.SIM_ARTIFICIAL_LATENCY)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from donkeycar.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
elif cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from donkeycar.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from donkeycar.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from donkeycar.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "IMAGE_LIST":
from donkeycar.parts.camera import ImageListCamera
cam = ImageListCamera(path_mask=cfg.PATH_MASK)
elif cfg.CAMERA_TYPE == "LEOPARD":
from donkeycar.parts.leopard_imaging import LICamera
cam = LICamera(width=cfg.IMAGE_W, height=cfg.IMAGE_H, fps=cfg.CAMERA_FRAMERATE)
else:
raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))
# Donkey gym part will output position information if it is configured
if cfg.DONKEY_GYM:
if cfg.SIM_RECORD_LOCATION:
outputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
if cfg.SIM_RECORD_GYROACCEL:
outputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
if cfg.SIM_RECORD_VELOCITY:
outputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
if cfg.SIM_RECORD_LIDAR:
outputs += ['lidar/dist_array']
V.add(cam, inputs=inputs, outputs=outputs, threaded=threaded)
# add lidar
if cfg.USE_LIDAR:
from donkeycar.parts.lidar import RPLidar
if cfg.LIDAR_TYPE == 'RP':
print("adding RP lidar part")
lidar = RPLidar(lower_limit = cfg.LIDAR_LOWER_LIMIT, upper_limit = cfg.LIDAR_UPPER_LIMIT)
V.add(lidar, inputs=[],outputs=['lidar/dist_array'], threaded=True)
if cfg.LIDAR_TYPE == 'YD':
print("YD Lidar not yet supported")
#This web controller will create a web server that is capable
#of managing steering, throttle, modes, and more.
ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
V.add(ctr,
inputs=['cam/image_array', 'tub/num_records'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
#modify max_throttle closer to 1.0 to have more power
#modify steering_scale lower than 1.0 to have less responsive steering
if cfg.CONTROLLER_TYPE == "pigpio_rc": # an RC controllers read by GPIO pins. They typically don't have buttons
from donkeycar.parts.controller import RCReceiver
ctr = RCReceiver(cfg)
V.add(ctr, outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],threaded=False)
else:
if cfg.CONTROLLER_TYPE == "custom": #custom controller created with `donkey createjs` command
from my_joystick import MyJoystickController
ctr = MyJoystickController(
throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
elif cfg.CONTROLLER_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATController
ctr = RoboHATController(cfg)
else:
from donkeycar.parts.controller import get_js_controller
ctr = get_js_controller(cfg)
if cfg.USE_NETWORKED_JS:
from donkeycar.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
V.add(ctr, inputs=['cam/image_array'], outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],threaded=True)
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
#See if we should even run the pilot module.
#This is only needed because the part run_condition only accepts boolean
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
#returns a blink rate. 0 for off. -1 for on. positive for rate.
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1 #solid on
if recording:
return -1 #solid on
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
from donkeycar.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE:
def show_record_count_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
if (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"): # these controllers don't use the joystick class
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger('circle', show_record_count_status) #the circle button is otherwise unused, so hijack it to force a record count indication
else:
show_record_count_status()
#Sombrero
if cfg.HAVE_SOMBRERO:
from donkeycar.parts.sombrero import Sombrero
s = Sombrero()
#IMU
if cfg.HAVE_IMU:
from donkeycar.parts.imu import IMU
imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
# Use the FPV preview, which will show the cropped image output, or the full frame.
if cfg.USE_FPV:
V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)
#Behavioral state
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
try:
ctr.set_button_down_trigger('L1', bh.increment_state)
except:
pass
inputs = ['cam/image_array', "behavior/one_hot_state_array"]
#IMU
elif cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array']
elif cfg.HAVE_ODOM:
inputs = ['cam/image_array', 'enc/speed']
elif model_type == "imu":
assert cfg.HAVE_IMU, 'Missing imu parameter in config'
# Run the pilot if the mode is not user.
inputs = ['cam/image_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
else:
inputs = ['cam/image_array']
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.model = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
# When we have a model, first create an appropriate Keras part
kl = dk.utils.get_model_by_type(model_type, cfg)
model_reload_cb = None
if '.h5' in model_path or '.trt' in model_path or '.tflite' in \
model_path or '.savedmodel' in model_path:
# load the whole model with weigths, etc
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
# when we have a .json extension
# load the model from there and look for a matching
# .wts file with just weights
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
# this part will signal visual LED, if connected
V.add(FileWatcher(model_path, verbose=True),
outputs=['modelfile/modified'])
# these parts will reload the model file, but only when ai is running
# so we don't interrupt user driving
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'],
run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'],
outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb),
inputs=["modelfile/reload"], run_condition="ai_running")
outputs = ['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
# Add image transformations like crop or trapezoidal mask
if hasattr(cfg, 'TRANSFORMATIONS') and cfg.TRANSFORMATIONS:
V.add(ImageAugmentation(cfg, 'TRANSFORMATIONS'),
inputs=['cam/image_array'], outputs=['cam/image_array_trans'])
inputs = ['cam/image_array_trans'] + inputs[1:]
V.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')
if cfg.STOP_SIGN_DETECTOR:
from donkeycar.parts.object_detector.stop_sign_detector \
import StopSignDetector
V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE,
cfg.STOP_SIGN_SHOW_BOUNDING_BOX),
inputs=['cam/image_array', 'pilot/throttle'],
outputs=['pilot/throttle', 'cam/image_array'])
# Choose what inputs should change the car.
class DriveMode:
drive_start = time.time()
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
current_time = time.time()
if current_time - self.drive_start >= 1.0:
print(f"user_angle: {user_angle}, user_throttle: {user_throttle}")
self.drive_start = current_time
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle if pilot_angle else 0.0, user_throttle
else:
return pilot_angle if pilot_angle else 0.0, \
pilot_throttle * cfg.AI_THROTTLE_MULT \
if pilot_throttle else 0.0
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
#to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"):
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
'''
A bool part to let us know when ai is running.
'''
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
# Ai Recording
class AiRecordingCondition:
'''
Return True when in AI mode, otherwise respect the user-mode recording flag.
'''
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
# Drive train setup
if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "I2C_SERVO":
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
steering_zero_pulse=cfg.STEERING_STOPPED_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
throttle_zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=False)
V.add(throttle, inputs=['throttle'], threaded=False)
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL_L298N":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, L298N_HBridge_DC_Motor
left_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_LEFT_FWD, cfg.HBRIDGE_L298N_PIN_LEFT_BWD, cfg.HBRIDGE_L298N_PIN_LEFT_EN)
right_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_RIGHT_FWD, cfg.HBRIDGE_L298N_PIN_RIGHT_BWD, cfg.HBRIDGE_L298N_PIN_RIGHT_EN)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from donkeycar.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #the channel value is really a pin number here
# PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'], threaded=True)
V.add(motor, inputs=["throttle"])
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATDriver
V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ, inverted=cfg.STEERING_PWM_INVERTED)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
steering_zero_pulse=cfg.STEERING_STOPPED_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ, inverted=cfg.THROTTLE_PWM_INVERTED)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
throttle_zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
# OLED setup
if cfg.USE_SSD1306_128_32:
from donkeycar.parts.oled import OLEDPart
auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_ROTATION, cfg.SSD1306_RESOLUTION, auto_record_on_throttle)
V.add(oled_part, inputs=['recording', 'tub/num_records', 'user/mode'], outputs=[], threaded=True)
# add tub to save data
inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode',]
types = ['image_array', 'float', 'float', 'str']
if cfg.USE_LIDAR:
inputs += ['lidar/dist_array']
types += ['nparray']
if cfg.HAVE_ODOM:
inputs += ['enc/speed']
types += ['float']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
inputs += ['cam/depth_array']
types += ['gray16_array']
if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types +=['float', 'float', 'float',
'float', 'float', 'float']
# rbx
if cfg.DONKEY_GYM:
if cfg.SIM_RECORD_LOCATION:
inputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
types += ['float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_GYROACCEL:
inputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
types += ['float', 'float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_VELOCITY:
inputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
types += ['float', 'float', 'float']
if cfg.SIM_RECORD_LIDAR:
inputs += ['lidar/dist_array']
types += ['nparray']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
if cfg.HAVE_PERFMON:
from donkeycar.parts.perfmon import PerfMonitor
mon = PerfMonitor(cfg)
perfmon_outputs = ['perf/cpu', 'perf/mem', 'perf/freq']
inputs += perfmon_outputs
types += ['float', 'float', 'float']
V.add(mon, inputs=[], outputs=perfmon_outputs, threaded=True)
# decide whether to store new records in their own directory or append to the existing one
tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \
cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH
tub_writer = TubWriter(tub_path, inputs=inputs, types=types, metadata=meta)
V.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
# Telemetry (we add the same metrics added to the TubHandler)
if cfg.HAVE_MQTT_TELEMETRY:
telem_inputs, _ = tel.add_step_inputs(inputs, types)
V.add(tel, inputs=telem_inputs, outputs=["tub/queue_size"], threaded=True)
if cfg.PUB_CAMERA_IMAGES:
from donkeycar.parts.network import TCPServeValue
from donkeycar.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
if cfg.DONKEY_GYM:
print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
else:
print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
elif (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"):
if isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
ctr.set_tub(tub_writer.tub)
ctr.print_controls()
# run the vehicle
V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
args = docopt(__doc__)
cfg = dk.load_config(myconfig=args['--myconfig'])
if args['drive']:
model_type = args['--type']
camera_type = args['--camera']
drive(cfg, model_path=args['--model'], use_joystick=args['--js'],
model_type=model_type, camera_type=camera_type,
meta=args['--meta'])
elif args['train']:
print('Use python train.py instead.\n')
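# Hedged usage sketch, inferred only from the docopt keys referenced above
# (the file names and option values are hypothetical examples):
#   python manage.py drive --model models/mypilot.h5 --type linear --js
#   python manage.py drive --camera single --myconfig myconfig.py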
|
N, Q = map(int, input().split())
S = input()
items = []
for i in range(Q):
items.append(tuple(map(int, input().split())))
from itertools import accumulate
prev = ""
acc = [0] * N
for i, s in enumerate(S):
if s == "C" and prev == "A":
acc[i] = 1
prev = s
acc = list(accumulate(acc))
ans = []
for i in range(Q):
l, r = items[i]
ans.append(acc[r - 1] - acc[l - 1])
for a in ans:
print(a)
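# Worked example (illustrative input, not taken from the original problem):
# for N=8, Q=1, S="ACACTACG" and the query "3 7", acc after accumulate is
# [0, 1, 1, 2, 2, 2, 3, 3], so the answer is acc[6] - acc[2] = 3 - 1 = 2,
# i.e. the substring S[3..7] = "ACTAC" contains "AC" twice.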
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="cone.colorbar.title.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
from __future__ import unicode_literals
from sqlalchemy import Column
from sqlalchemy import Unicode
from sqlalchemy import Boolean
from sqlalchemy.orm import relationship
from .base import DeclarativeBase
from .base import UTCDateTime
from .base import now_func
class Company(DeclarativeBase):
"""A Company is basically a user to billy system
"""
__tablename__ = 'company'
guid = Column(Unicode(64), primary_key=True)
#: the API key for accessing billy system
api_key = Column(Unicode(64), unique=True, index=True, nullable=False)
#: the processor key (it would be the Balanced API key if we are using Balanced)
processor_key = Column(Unicode(64), index=True, nullable=False)
#: the name of callback in URI like /v1/callback/<KEY GOES HERE>
callback_key = Column(Unicode(64), index=True, unique=True, nullable=False)
#: a short optional name of this company
name = Column(Unicode(128))
#: is this company deleted?
deleted = Column(Boolean, default=False, nullable=False)
#: the created datetime of this company
created_at = Column(UTCDateTime, default=now_func)
#: the updated datetime of this company
updated_at = Column(UTCDateTime, default=now_func)
#: plans of this company
plans = relationship('Plan', cascade='all, delete-orphan',
backref='company')
#: customers of this company
customers = relationship('Customer', cascade='all, delete-orphan',
backref='company')
__all__ = [
Company.__name__,
]
|
import subprocess
import sys
import os
DEFAULT_ARGS=[]
if (os.path.exists("build")):
dl=[]
for r,ndl,fl in os.walk("build"):
r=r.replace("\\","/").strip("/")+"/"
for d in ndl:
dl.insert(0,r+d)
for f in fl:
os.remove(r+f)
for k in dl:
os.rmdir(k)
else:
os.mkdir("build")
if (os.name=="nt"):
cd=os.getcwd()
os.chdir("build")
if ("--release" in sys.argv):
if (subprocess.run(["cl","/Wv:18","/c","/permissive-","/Zc:preprocessor","/GS","/utf-8","/W3","/Zc:wchar_t","/Gm-","/sdl","/Zc:inline","/fp:precise","/D","NDEBUG","/D","_WINDOWS","/D","_UNICODE","/D","UNICODE","/errorReport:none","/WX","/Zc:forScope","/Gd","/Oi","/FC","/EHsc","/nologo","/diagnostics:column","/GL","/Gy","/Zi","/O2","/Oi","/MD","/I","../src/include","../src/main.c","../src/hilbert_curve_fft_compression/*.c"]).returncode!=0 or subprocess.run(["link","*.obj","/OUT:hilbert_curve_fft_compression.exe","/DYNAMICBASE","kernel32.lib","user32.lib","gdi32.lib","winspool.lib","comdlg32.lib","advapi32.lib","shell32.lib","ole32.lib","oleaut32.lib","uuid.lib","odbc32.lib","odbccp32.lib","/MACHINE:X64","/SUBSYSTEM:CONSOLE","/ERRORREPORT:none","/NOLOGO","/TLBID:1","/WX","/LTCG","/OPT:REF","/INCREMENTAL:NO","/OPT:ICF"]).returncode!=0):
os.chdir(cd)
sys.exit(1)
else:
if (subprocess.run(["cl","/Wv:18","/c","/permissive-","/Zc:preprocessor","/GS","/utf-8","/W3","/Zc:wchar_t","/Gm-","/sdl","/Zc:inline","/fp:precise","/D","_DEBUG","/D","_WINDOWS","/D","_UNICODE","/D","UNICODE","/errorReport:none","/WX","/Zc:forScope","/Gd","/Oi","/FC","/EHsc","/nologo","/diagnostics:column","/ZI","/Od","/RTC1","/MDd","/I","../src/include","../src/main.c","../src/hilbert_curve_fft_compression/*.c"]).returncode!=0 or subprocess.run(["link","*.obj","/OUT:hilbert_curve_fft_compression.exe","/DYNAMICBASE","kernel32.lib","user32.lib","gdi32.lib","winspool.lib","comdlg32.lib","advapi32.lib","shell32.lib","ole32.lib","oleaut32.lib","uuid.lib","odbc32.lib","odbccp32.lib","/MACHINE:X64","/SUBSYSTEM:CONSOLE","/ERRORREPORT:none","/NOLOGO","/TLBID:1","/WX","/DEBUG","/INCREMENTAL"]).returncode!=0):
os.chdir(cd)
sys.exit(1)
os.chdir(cd)
if ("--run" in sys.argv):
subprocess.run(["build/hilbert_curve_fft_compression.exe"]+DEFAULT_ARGS)
else:
if ("--release" in sys.argv):
fl=[]
for r,_,cfl in os.walk("src"):
r=r.replace("\\","/").strip("/")+"/"
for f in cfl:
if (f[-2:]==".c"):
fl.append(f"build/{(r+f).replace('/','$')}.o")
if (subprocess.run(["gcc","-Wall","-lm","-Werror","-O3","-c",r+f,"-o",f"build/{(r+f).replace('/','$')}.o","-Isrc/include"]).returncode!=0):
sys.exit(1)
if (subprocess.run(["gcc","-o","build/hilbert_curve_fft_compression"]+fl+["-lm"]).returncode!=0):
sys.exit(1)
else:
fl=[]
for r,_,cfl in os.walk("src"):
r=r.replace("\\","/").strip("/")+"/"
for f in cfl:
if (f[-2:]==".c"):
fl.append(f"build/{(r+f).replace('/','$')}.o")
if (subprocess.run(["gcc","-Wall","-lm","-Werror","-O0","-c",r+f,"-o",f"build/{(r+f).replace('/','$')}.o","-Isrc/include"]).returncode!=0):
sys.exit(1)
if (subprocess.run(["gcc","-o","build/hilbert_curve_fft_compression"]+fl+["-lm"]).returncode!=0):
sys.exit(1)
if ("--run" in sys.argv):
subprocess.run(["build/hilbert_curve_fft_compression"]+DEFAULT_ARGS)
|
from django.apps import AppConfig
class LostfoundConfig(AppConfig):
name = 'lostfound'
|
"""rnn.py
~~~~~~~~~~~~~~
Written by Yong Yu Wen, 2018
(Built using tensorflow-gpu 1.6.0)
A TensorFlow-based many-to-one recurrent neural network specifically
for the classification of MBTI types based on social media posts.
Raw un-processed dataset used for this task can be found at
https://www.kaggle.com/datasnaek/mbti-type
Supports several cell types (Basic RNN, GRUs, LSTMs), multiple layer,
training with word embeddings, as well as dropout regularization.
This program incorporates ideas from Denny Britz and Spitis (Github display name)
and their websites http://www.wildml.com and https://r2rt.com
"""
import tensorflow as tf
import time
import pickle
class RNN(object):
def __init__(self, cell_type, state_size, num_steps, num_layers,
num_classes, embedding=None, build_with_dropout=False):
"""
Creates the RNN object
:param cell_type: Type of RNN cell. Supports Basic RNN, GRUs and LSTMs
:param state_size: Number of hidden states
:param num_steps: Number of time steps
:param num_layers: Number of layers
:param num_classes: Number of classes in the output
:param embedding: Word embedding
:param build_with_dropout: Whether to use dropout in the RNN
"""
self.x = tf.placeholder(tf.int32, [None, num_steps], name='input_placeholder')
self.y = tf.placeholder(tf.int32, [None, num_classes], name='labels_placeholder')
with tf.name_scope("embedding"):
self.embeddings = tf.get_variable(name="embeddings", shape=embedding.shape,
initializer=tf.constant_initializer(embedding), trainable=True)
self.state_size = state_size
self.num_steps = num_steps
self.num_layers = num_layers
self.num_classes = num_classes
self.build_with_dropout = build_with_dropout
self.dropout = tf.placeholder_with_default(tf.constant(1.0, dtype=tf.float32), ())
self.cell_type = cell_type
self.cell = self._make_MultiRNNCell()
self.saver = tf.train.Saver()
def _make_cell(self):
"""
Private function to create RNN cell. Required for TensorFlow's MultiRNNCell function
"""
if self.cell_type == 'GRU':
cell = tf.nn.rnn_cell.GRUCell(self.state_size)
elif self.cell_type == 'LSTM':
cell = tf.nn.rnn_cell.LSTMCell(self.state_size, state_is_tuple=True)
else:
cell = tf.nn.rnn_cell.BasicRNNCell(self.state_size)
if self.build_with_dropout:
return tf.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=self.dropout)
else:
return cell
def _make_MultiRNNCell(self):
"""
Private function to create multi-layer RNNs
"""
cell = tf.nn.rnn_cell.MultiRNNCell([self._make_cell() for _ in range(self.num_layers)])
if self.build_with_dropout:
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.dropout)
return cell
def train(self, sess, epochs, learning_rate, pipeline, training_data, validation_data,
SGDR=False, store_accuracies = False, dropout=1.0, checkpoint=None, save=None):
"""
Trains the neural network using the Adam Optimizer (by default)
:param sess: TensorFlow Session
:param epochs: Number of epochs
:param learning_rate: Learning rate for the optimizer
:param pipeline: Pipeline object to feed data into the network for training
:param training_data: Training dataset (in Numpy array format, labels one-hot encoded)
:param validation_data: Validation dataset (in Numpy array format, labels one-hot encoded)
:param SGDR: Stochastic Gradient Descent with Warm Restarts. See https://arxiv.org/abs/1608.03983
:param store_accuracies: Save & store train and validation accuracies to be exported
:param dropout: Dropout keep probability (1.0 for no dropout)
:param checkpoint: Location to save model checkpoint
:param save: Location to save trained model
"""
#~~Read data
training_x, training_y = training_data
validation_x, validation_y = validation_data
rnn_inputs = tf.nn.embedding_lookup(self.embeddings, self.x)
rnn_outputs, final_state = tf.nn.dynamic_rnn(self.cell, rnn_inputs, dtype=tf.float32) #initial_state=init_state
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [self.state_size, self.num_classes])
b = tf.get_variable('b', [self.num_classes], initializer=tf.constant_initializer(0.0))
rnn_outputs = tf.transpose(rnn_outputs, [1, 0, 2])
last = tf.reshape(rnn_outputs[-1], [-1, self.state_size])
predictions = (tf.matmul(last, W) + b)
y_reshaped = tf.reshape(self.y, [-1, self.num_classes])
total_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=predictions, labels=y_reshaped))
#Create Global step
global_step = tf.Variable(0, trainable=False, name='global_step')
#SGDR
if SGDR:
first_decay_steps = int(training_x.shape[0]/pipeline.batch_size)
learning_rate = tf.train.cosine_decay_restarts(learning_rate, global_step,
first_decay_steps)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)
#model evaluation
correct_prediction=tf.equal(tf.argmax(predictions,1),tf.argmax(y_reshaped,1))
model_accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
#~~~~~~~~~~~~~~Training of the actual dataset~~~~~~~~~~~~~~~~~~
sess.run(tf.global_variables_initializer())
if save:
try:
self.saver.restore(sess, save)
print("Save restored \n")
except:
print("No Save found. Running new training cycle")
start_time = time.time() #Track time taken for model
training_accuracies = []
validation_accuracies = []
for epoch in range(epochs):
sess.run(pipeline.iterator_train.initializer, feed_dict={pipeline.features_placeholder: training_x,
pipeline.labels_placeholder: training_y})
training_loss = 0
steps = 0
training_state = None
avg_loss = []
accuracies = []
if epoch >0 and checkpoint:
self.saver.save(sess, checkpoint)
print("Saving checkpoint for epoch", epoch)
while True:
try:
steps += 1
batch_x, batch_y = sess.run(pipeline.next_element_train)
feed_dict={self.x: batch_x, self.y: batch_y,
self.dropout: dropout}
training_loss_, _, accuracy = sess.run([total_loss,
train_step,
model_accuracy],
feed_dict)
avg_loss.append(training_loss_)
accuracies.append(accuracy)
if steps%100 == 0:
print("Avg training_loss_ for Epoh {} step {} =".format(epoch, steps), tf.reduce_mean(avg_loss).eval())
avg_loss = []
accuracies = []
except tf.errors.OutOfRangeError:
print("End of training dataset.")
print("Avg accuracy for Epoch {} step {} =".format(epoch, steps), tf.reduce_mean(accuracies).eval())
if store_accuracies:
training_accuracies.append(tf.reduce_mean(accuracies).eval())
accuracies = []
break
#Print Validation Accuracy per Epoch
sess.run(pipeline.iterator_val.initializer, feed_dict={pipeline.features_placeholder: validation_x,
pipeline.labels_placeholder: validation_y})
val_accuracies = []
while True:
try:
val_x, val_y = sess.run(pipeline.next_element_val)
feed_dict={self.x: val_x, self.y: val_y}
accuracy = sess.run(model_accuracy, feed_dict)
val_accuracies.append(accuracy)
except tf.errors.OutOfRangeError:
print("Validation Accuracy for epoch {} is ".format(epoch), tf.reduce_mean(val_accuracies).eval())
if store_accuracies:
validation_accuracies.append(tf.reduce_mean(val_accuracies).eval())
break
end_time = time.time()
total_time = end_time - start_time
print("Finished training network.")
print("Time to train network: {}s".format(total_time))
if store_accuracies:
pickle.dump((training_accuracies, validation_accuracies), open( "accuracies.p", "wb" ) )
print("Pickled Accuracies")
if save:
self.saver.save(sess, save)
print("Model is saved in", save)
class data_pipeline(object):
def __init__(self, batch_size, shuffle_buffer_size):
"""
Pipeline Object to shuffle and split data into batches before feeding into neural network
:param batch_size: Integer Value of the desired batch size
:param shuffle_buffer_size: Buffer size for shuffling the dataset. See TensorFlow docs for more information
"""
self.features_placeholder = tf.placeholder(tf.int32)
self.labels_placeholder = tf.placeholder(tf.int32)
self.batch_size = batch_size
self.dataset = tf.data.Dataset.from_tensor_slices((self.features_placeholder, self.labels_placeholder))
#Train input pipeline
self.dataset_train = self.dataset.shuffle(buffer_size=shuffle_buffer_size).batch(batch_size)
self.iterator_train = self.dataset_train.make_initializable_iterator()
self.next_element_train = self.iterator_train.get_next()
#Val input pipeline
self.dataset_val = self.dataset.batch(batch_size)
self.iterator_val = self.dataset_val.make_initializable_iterator()
self.next_element_val = self.iterator_val.get_next()
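# A minimal, commented-out wiring sketch (all names and hyperparameters below
# are illustrative assumptions; the data arrays and embedding matrix are
# assumed to be prepared elsewhere):
# pipeline = data_pipeline(batch_size=128, shuffle_buffer_size=10000)
# net = RNN(cell_type='LSTM', state_size=256, num_steps=40, num_layers=2,
#           num_classes=16, embedding=embedding_matrix, build_with_dropout=True)
# with tf.Session() as sess:
#     net.train(sess, epochs=10, learning_rate=1e-3, pipeline=pipeline,
#               training_data=(train_x, train_y),
#               validation_data=(val_x, val_y),
#               dropout=0.8, store_accuracies=True, save='rnn_model.ckpt')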
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinSNTestFramework
from test_framework.util import sync_blocks, sync_mempools
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(BitcoinSNTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte
self.nodes[0].p2p.send_and_ping(msg_feefilter(15000))
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
|
import logging
from django.apps import apps
from django.conf import settings
from zconnect import zsettings
from zconnect.util.general import load_from_module
logger = logging.getLogger(__name__)
class Sender:
"""Abstract interface for sending messages to devices
This will pass a generic Message to the sender implementation to send to the
specified device
"""
def __init__(self):
sender_settings = dict(zsettings.SENDER_SETTINGS)
cls_name = sender_settings.get("cls", "zconnect.messages.IBMInterface")
interface_class = load_from_module(cls_name)
self.interface = interface_class(sender_settings)
def to_device(self, category, body, device=None, device_id=None,
incoming_message=None, **kwargs):
"""Send a message to a specific device
Any extra keyword args will be passed through to the underlying sender
implementation.
Note:
if neither device or device_id is specified, this will not raise an
error!
Args:
category (str): Message category. This is implementation specific,
but will be something like 'event', 'state_update', etc.
body (dict): Body of message to send
device (Device, optional): Device to send for
device_id (str, optional): Device id to load and send for
incoming_message (Message, optional): If given, the Device
associated with that Message will be used to send to.
"""
device = resolve_device(device, device_id, incoming_message)
if not device:
return # Warning message sent in _resolve_device_args
device_type = device.product.iot_name
self.interface.send_message(category, body, device_id=device.get_iot_id(),
device_type=device_type)
def as_device(self, category, body, device=None, device_id=None,
incoming_message=None, **kwargs):
"""Send a message imitating a specific device.
See to_device documentation for meanings of arguments.
"""
device = resolve_device(device, device_id, incoming_message)
if not device:
return # Warning message sent in _resolve_device_args
device_type = device.product.iot_name
self.interface.send_as_device(category, body, device_id=device.get_iot_id(),
device_type=device_type)
def resolve_device(device=None, device_id=None, incoming_message=None):
"""Given a variety of possible things to get the device_id from, return the
'most specific' one. The order of 'specificity' is defined as:
1. incoming_message.device
2. device
3. device_id
Args:
device (Device, optional): Device object
device_id (str, optional): Device id
incoming_message (Message, optional): zconnect Message
Returns:
Device: device object
"""
if incoming_message:
incoming_message_device = incoming_message.device
else:
incoming_message_device = None
if device:
given_device = device
else:
given_device = None
if incoming_message_device:
if device_id:
logger.warning("device_id was given as well as incoming_message - device_id will be ignored")
if given_device:
logger.warning("device was given as well as incoming_message - device will be ignored")
return incoming_message_device
elif given_device:
if device_id:
logger.warning("device_id was given as well as device - device_id will be ignored")
return given_device
elif device_id:
Device = apps.get_model(settings.ZCONNECT_DEVICE_MODEL)
device = Device.objects.filter(pk=device_id).get()
return device
else:
logger.warning("Unable to resolve device with given arguments")
class SenderSingleton:
""" Singleton for message sender object
"""
instance = None
def __new__(cls):
if not cls.instance:
cls.instance = Sender()
return cls.instance
def get_sender():
""" Get singleton for watson sender
Returns:
SenderSingleton: global sender object
"""
sender_settings = dict(zsettings.SENDER_SETTINGS)
# only connect if there are settings
if not sender_settings:
logger.warning("Skipping watson IoT connection because there's no " \
"connection details")
client = None
else:
client = SenderSingleton()
return client
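# Minimal, commented-out usage sketch (the category, body and device_id values
# are hypothetical):
# sender = get_sender()
# if sender:
#     sender.to_device("event", {"state": "on"}, device_id="some-device-id")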
|
# -*- coding: utf-8 -*-
import pytest
from h.models import Organization
from h.services.list_organizations import (
ListOrganizationsService,
list_organizations_factory,
)
from h.services.organization import organization_factory
class TestListOrganizations:
def test_returns_organizations_from_all_authorities_if_no_authority_specified(
self, svc, organizations, default_orgs, alternate_organizations
):
expected_orgs = default_orgs + organizations + alternate_organizations
results = svc.organizations()
assert results == expected_orgs
def test_returns_organizations_for_the_authority_specified(
self,
svc,
authority,
organizations,
alternate_organizations,
alternate_authority,
):
results = svc.organizations(authority=alternate_authority)
assert results == alternate_organizations
class TestListOrganizationsFactory:
def test_list_organizations_factory(self, pyramid_request):
svc = list_organizations_factory(None, pyramid_request)
assert isinstance(svc, ListOrganizationsService)
def test_provides_request_db_as_session(self, pyramid_request):
svc = list_organizations_factory(None, pyramid_request)
assert svc._session == pyramid_request.db
@pytest.fixture
def authority(pyramid_request):
return pyramid_request.default_authority
@pytest.fixture
def alternate_authority():
return "bar.com"
@pytest.fixture
def org_svc(pyramid_request):
return organization_factory(None, pyramid_request)
@pytest.fixture
def organizations(factories, authority, org_svc):
# Add these out of order so they will come back out of order if unsorted.
org2 = org_svc.create(name="Org2", authority=authority)
org1 = org_svc.create(name="Org1", authority=authority)
return [org1, org2]
@pytest.fixture
def alternate_organizations(factories, alternate_authority, org_svc):
# Add these out of order so they will come back out of order if unsorted.
org4 = org_svc.create(name="Org4", authority=alternate_authority)
org3 = org_svc.create(name="Org3", authority=alternate_authority)
return [org3, org4]
@pytest.fixture
def default_orgs(db_session):
return [Organization.default(db_session)]
@pytest.fixture
def svc(db_session):
return ListOrganizationsService(session=db_session)
|
import h5py
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import keras
import numpy as np
from keras.layers import Input, Dense, Conv1D, MaxPooling2D, MaxPooling1D, BatchNormalization
from keras.layers.core import Dropout, Activation, Flatten
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
from keras.utils import multi_gpu_model
from keras.regularizers import l1,l2, l1_l2
from keras.constraints import MaxNorm
from keras.optimizers import SGD
from keras.activations import relu
import os
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
input_bp = 82
batch_size=128
seqInput = Input(shape=(input_bp, 4), name='seqInput')
seq = Conv1D(5, 7)(seqInput)
seq = BatchNormalization()(seq)
seq = Activation('relu')(seq)
seq = MaxPooling1D(2)(seq)
seq = Conv1D(5, 3)(seq)
seq = BatchNormalization()(seq)
seq = Activation('relu')(seq)
seq = MaxPooling1D(2)(seq)
seq = Conv1D(6, 3)(seq)
seq = BatchNormalization()(seq)
seq = Activation('relu')(seq)
seq = MaxPooling1D(2)(seq)
seq = Conv1D(6, 3)(seq)
seq = BatchNormalization()(seq)
seq = Activation('relu')(seq)
seq = MaxPooling1D(2)(seq)
seq = Conv1D(1, 3)(seq)
seq = BatchNormalization()(seq)
seq = Activation('sigmoid')(seq)
seq = Flatten()(seq)
model = Model(inputs = [seqInput], outputs = [seq])
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
#from keras.optimizers import RMSprop
model.compile('adam', loss='binary_crossentropy', metrics=['accuracy'])
PWM0 = np.loadtxt('PWM')
PWM = np.ones((4,input_bp))*0.25
PWM1 = np.zeros((4,5))*0.25
PWM1[1:2,:] = 0.5
print(PWM0.shape)
print(PWM.shape)
def pwm_to_sample(PWM, n = 1000):
PWM /= PWM.sum(axis=0)
PWM = PWM.T
PWM = PWM[::-1,:]
PWM = PWM[:,::-1]
sample = np.zeros((n,PWM.shape[0],PWM.shape[1]))
for i in range(n):
for j in range(sample.shape[1]):
sample[i,j,np.random.choice(4,1,p=PWM[j,:])] = 1
return sample
size = 10000
sp0 = pwm_to_sample(PWM0,n=size)
sp1 = pwm_to_sample(PWM0,n=size)
sp2 = pwm_to_sample(PWM0,n=size)
sp3 = pwm_to_sample(PWM1,n=size)
sp4 = pwm_to_sample(PWM0,n=size)
spp = pwm_to_sample(PWM,n=size)
spn = pwm_to_sample(PWM,n=size)
pos0 = np.random.randint(0,16,size)
pos1 = np.random.randint(44,60,size)
pos2 = np.r_[np.random.randint(0,16,int(size/2)),np.random.randint(46,62,int(size/2))]
pos4 = np.random.randint(17,45,size)
pos3 = np.random.randint(0,76,size)
print(sp0.shape)
print(sp1.shape)
print(spp.shape)
for i in range(size):
spp[i,pos0[i]:(pos0[i]+PWM0.shape[1]),:] = sp0[i,:,:]
spp[i,pos1[i]:(pos1[i]+PWM0.shape[1]),:] = sp1[i,:,:]
for i in range(size):
spn[i,pos2[i]:(pos2[i]+PWM0.shape[1]),:] = sp2[i,:,:]
spn[i,pos4[i]:(pos4[i]+PWM0.shape[1]),:] = sp4[i,:,:]
# spn[i,pos3[i]:(pos3[i]+PWM1.shape[1]),:] = sp3[i,:,:]
sp = np.concatenate([spp,spn],axis=0)
label = np.r_[np.ones(size),np.zeros(size)]
callbacks=[]
callbacks.append(ModelCheckpoint(filepath='weight.hdf5',save_best_only=True))
callbacks.append(EarlyStopping(patience=15))
history = model.fit(x= sp, y=label, epochs=100,validation_split=0.1,callbacks=callbacks)
history_dict=history.history
loss_values = history_dict['loss']
val_loss_values=history_dict['val_loss']
plt.figure()
plt.plot(loss_values,'bo',label='training loss')
plt.plot(val_loss_values,'r',label='val training loss')
plt.savefig('history.pdf')
#rs = model.predict(oh)[0,:]
with h5py.File('history.h5','w') as f:
f['loss_values'] =loss_values
f['val_loss'] = val_loss_values
f['sample'] = sp
f['label'] = label
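# A minimal sketch for inspecting the saved history afterwards:
# with h5py.File('history.h5', 'r') as f:
#     print(f['loss_values'][:])
#     print(f['val_loss'][:])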
|
'''
Created on Nov 29, 2020
@author: manik
'''
'''
File with classes and code which control how a particular person
will move and to where
'''
from src.population import Population
import numpy as np
import src.person_properties_util as idx
class Movement():
"""
Class providing abstraction into each movement of the population
"""
def update_persons(self, persons: np.ndarray, size: int, speed: float = 0.1, heading_update_chance: float = 0.02) -> np.ndarray:
"""
Randomly updates/initializes the destination each person is headed to and the corresponding speed
Parameters
----------
persons : np.ndarray
The NumPy array containing the details of the persons to be updated
size : int
The size of the array of the persons to be updated to
speed : float, optional
Mean of the speed to be generated randomly, by default 0.1
heading_update_chance : float, optional
The odds of updating the destination of each person, by default 0.02
Returns
-------
np.ndarray
The updated NumPy array with the new values
"""
#For updating the x position
#Generate a random array with update chance for each person in the population
update = np.random.random(size=(size,))
#Select the persons whose random draw is less than or equal to the update chance for this epoch
shp = update[update <= heading_update_chance].shape
#Update the position for the direction in which they are heading
persons[:,idx.x_dir][update <= heading_update_chance] = np.random.normal(loc = 0, scale = 1/3, size = shp)
#For updating the y position, do the same
update = np.random.random(size=(size,))
shp = update[update <= heading_update_chance].shape
persons[:,idx.y_dir][update <= heading_update_chance] = np.random.normal(loc = 0, scale = 1/3, size = shp)
#Update the speed by generating a random normal distribution using the argument speed as the parameter
update = np.random.random(size=(size,))
shp = update[update <= heading_update_chance].shape
persons[:,idx.speed][update <= heading_update_chance] = np.random.normal(loc = speed, scale = speed / 3, size = shp)
persons[:,idx.speed] = np.clip(persons[:,idx.speed], a_min=0.0005, a_max=0.01)
#Return the updated array
return persons
def out_of_bounds(self, persons: np.ndarray, xbounds, ybounds):
"""
Check if the individual is heading out of bounds of the specified bounds.
Parameters
----------
persons : np.ndarray
The NumPy array containing the details of the individuals
xbounds : list
List containing bounds for X axis.
ybounds : list
List containing bounds for Y axis.
Returns
-------
np.ndarray
The updated NumPy array with the new values
"""
# Store shape of list of people who are heading out of bounds based on X bound [0]
shp = persons[:,4][(persons[:,2] <= xbounds[:,0]) &
(persons[:,4] < 0)].shape
# Update them randomly using a normal distribution
persons[:,4][(persons[:,2] <= xbounds[:,0]) &
(persons[:,4] < 0)] = np.clip(np.random.normal(loc = 0.5,
scale = 0.5/3,
size = shp),
a_min = 0.05, a_max = 1)
# Store shape of list of people who are heading out of bounds based on X bound [1]
shp = persons[:,4][(persons[:,2] >= xbounds[:,1]) &
(persons[:,4] > 0)].shape
# Update them randomly using a normal distribution
persons[:,4][(persons[:,2] >= xbounds[:,1]) &
(persons[:,4] > 0)] = np.clip(-np.random.normal(loc = 0.5,
scale = 0.5/3,
size = shp),
a_min = -1, a_max = -0.05)
# Store shape of list of people who are heading out of bounds based on Y bound [0]
shp = persons[:,5][(persons[:,3] <= ybounds[:,0]) &
(persons[:,5] < 0)].shape
# Update them randomly using a normal distribution
persons[:,5][(persons[:,3] <= ybounds[:,0]) &
(persons[:,5] < 0)] = np.clip(np.random.normal(loc = 0.5,
scale = 0.5/3,
size = shp),
a_min = 0.05, a_max = 1)
# Store shape of list of people who are heading out of bounds based on Y bound [1]
shp = persons[:,5][(persons[:,3] >= ybounds[:,1]) &
(persons[:,5] > 0)].shape
# Update them randomly using a normal distribution
persons[:,5][(persons[:,3] >= ybounds[:,1]) &
(persons[:,5] > 0)] = np.clip(-np.random.normal(loc = 0.5,
scale = 0.5/3,
size = shp),
a_min = -1, a_max = -0.05)
return persons
def update_pop(self, persons):
"""
Update function to move people physically in the graph.
This function adds the X and Y direction value to the current postion of
the individual to move them.
Parameters
----------
persons : np.ndarray
The NumPy array containing the details of the persons to be updated
Returns
-------
np.ndarray
The updated NumPy array with the new values
"""
filter = (persons[:, idx.current_state] != 3) & (persons[:, idx.social_distance] == 0)
#x
persons[:,2][filter] = persons[:,2][filter] + (persons[:,4][filter] * persons[:,6][filter])
#y
persons[:,3][filter] = persons[:,3][filter] + (persons [:,5][filter] * persons[:,6][filter])
return persons
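# Minimal, commented-out sketch of one movement step (assumes `persons`,
# `xbounds` and `ybounds` follow the column layout defined in
# src.person_properties_util; the speed value is illustrative):
# movement = Movement()
# persons = movement.update_persons(persons, size=persons.shape[0], speed=0.01)
# persons = movement.out_of_bounds(persons, xbounds, ybounds)
# persons = movement.update_pop(persons)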
|
import os
import urllib.request
from osgeo import ogr
from mapswipe_workers.definitions import DATA_PATH, CustomError, logger
from mapswipe_workers.project_types.arbitrary_geometry import grouping_functions as g
from mapswipe_workers.project_types.arbitrary_geometry.group import Group
from mapswipe_workers.project_types.base.project import BaseProject
from mapswipe_workers.project_types.base.tile_server import BaseTileServer
class Project(BaseProject):
def __init__(self, project_draft: dict) -> None:
super().__init__(project_draft)
# set group size
self.groupSize = project_draft["groupSize"]
self.inputGeometries = project_draft["inputGeometries"]
self.tileServer = vars(BaseTileServer(project_draft["tileServer"]))
def validate_geometries(self):
raw_input_file = (
f"{DATA_PATH}/" f"input_geometries/raw_input_{self.projectId}.geojson"
)
valid_input_file = (
f"{DATA_PATH}/" f"input_geometries/valid_input_{self.projectId}.geojson"
)
if not os.path.isdir("{}/input_geometries".format(DATA_PATH)):
os.mkdir("{}/input_geometries".format(DATA_PATH))
# download file from given url
url = self.inputGeometries
urllib.request.urlretrieve(url, raw_input_file)
logger.info(
f"{self.projectId}"
f" - __init__ - "
f"downloaded input geometries from url and saved as file: "
f"{raw_input_file}"
)
self.inputGeometries = raw_input_file
# open the raw input file and get layer
driver = ogr.GetDriverByName("GeoJSON")
datasource = driver.Open(raw_input_file, 0)
try:
layer = datasource.GetLayer()
LayerDefn = layer.GetLayerDefn()
except AttributeError:
raise CustomError("Value error in input geometries file")
# create layer for valid_input_file to store all valid geometries
outDriver = ogr.GetDriverByName("GeoJSON")
# Remove output geojson if it already exists
if os.path.exists(valid_input_file):
outDriver.DeleteDataSource(valid_input_file)
outDataSource = outDriver.CreateDataSource(valid_input_file)
outLayer = outDataSource.CreateLayer(
"geometries", geom_type=ogr.wkbMultiPolygon
)
for i in range(0, LayerDefn.GetFieldCount()):
fieldDefn = LayerDefn.GetFieldDefn(i)
outLayer.CreateField(fieldDefn)
outLayerDefn = outLayer.GetLayerDefn()
# check if raw_input_file layer is empty
if layer.GetFeatureCount() < 1:
err = "empty file. No geometries provided"
# TODO: How to use logger and exceptions?
logger.warning(f"{self.projectId} - check_input_geometry - {err}")
raise Exception(err)
# get geometry as wkt
# get the bounding box/ extent of the layer
extent = layer.GetExtent()
# Create a Polygon from the extent tuple
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(extent[0], extent[2])
ring.AddPoint(extent[1], extent[2])
ring.AddPoint(extent[1], extent[3])
ring.AddPoint(extent[0], extent[3])
ring.AddPoint(extent[0], extent[2])
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
wkt_geometry = poly.ExportToWkt()
# check if the input geometry is a valid polygon
for feature in layer:
feat_geom = feature.GetGeometryRef()
geom_name = feat_geom.GetGeometryName()
fid = feature.GetFID()
if not feat_geom.IsValid():
layer.DeleteFeature(fid)
logger.warning(
f"{self.projectId}"
f" - check_input_geometries - "
f"deleted invalid feature {fid}"
)
# we accept only POLYGON or MULTIPOLYGON geometries
elif geom_name != "POLYGON" and geom_name != "MULTIPOLYGON":
layer.DeleteFeature(fid)
logger.warning(
f"{self.projectId}"
f" - check_input_geometries - "
f"deleted non polygon feature {fid}"
)
else:
# Create output Feature
outFeature = ogr.Feature(outLayerDefn)
# Add field values from input Layer
for i in range(0, outLayerDefn.GetFieldCount()):
outFeature.SetField(
outLayerDefn.GetFieldDefn(i).GetNameRef(), feature.GetField(i)
)
outFeature.SetGeometry(feat_geom)
outLayer.CreateFeature(outFeature)
outFeature = None
# check if layer is empty
if layer.GetFeatureCount() < 1:
err = "no geometries left after checking validity and geometry type."
logger.warning(f"{self.projectId} - check_input_geometry - {err}")
raise Exception(err)
del datasource
del outDataSource
del layer
self.validInputGeometries = valid_input_file
logger.info(
f"{self.projectId}"
f" - check_input_geometry - "
f"filtered correct input geometries and created file: "
f"{valid_input_file}"
)
return wkt_geometry
def create_groups(self):
raw_groups = g.group_input_geometries(self.validInputGeometries, self.groupSize)
for group_id, item in raw_groups.items():
group = Group(self, group_id)
group.create_tasks(
item["feature_ids"],
item["feature_geometries"],
item["center_points"],
item["reference"],
item["screen"],
)
# only append valid groups
if group.is_valid():
self.groups.append(group)
logger.info(
f"{self.projectId} " f"- create_groups - " f"created groups dictionary"
)
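# Minimal, commented-out sketch of how this project type might be driven
# (the draft below shows only the keys this class reads; real drafts carry
# additional fields required by BaseProject):
# draft = {
#     "groupSize": 25,
#     "inputGeometries": "https://example.org/aoi.geojson",
#     "tileServer": {...},  # structure defined by BaseTileServer
# }
# project = Project(draft)
# project.validate_geometries()
# project.create_groups()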
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListStorageAccountSasTokensResult',
'AwaitableListStorageAccountSasTokensResult',
'list_storage_account_sas_tokens',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:datalakeanalytics:listStorageAccountSasTokens'.""", DeprecationWarning)
@pulumi.output_type
class ListStorageAccountSasTokensResult:
"""
The SAS response that contains the storage account, container and associated SAS token for connection use.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> str:
"""
The link (url) to the next page of results.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Sequence['outputs.SasTokenInformationResponseResult']:
"""
The results of the list operation.
"""
return pulumi.get(self, "value")
class AwaitableListStorageAccountSasTokensResult(ListStorageAccountSasTokensResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListStorageAccountSasTokensResult(
next_link=self.next_link,
value=self.value)
def list_storage_account_sas_tokens(account_name: Optional[str] = None,
container_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
storage_account_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListStorageAccountSasTokensResult:
"""
The SAS response that contains the storage account, container and associated SAS token for connection use.
Latest API Version: 2016-11-01.
:param str account_name: The name of the Data Lake Analytics account.
:param str container_name: The name of the Azure storage container for which the SAS token is being requested.
:param str resource_group_name: The name of the Azure resource group.
:param str storage_account_name: The name of the Azure storage account for which the SAS token is being requested.
"""
pulumi.log.warn("list_storage_account_sas_tokens is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:datalakeanalytics:listStorageAccountSasTokens'.")
__args__ = dict()
__args__['accountName'] = account_name
__args__['containerName'] = container_name
__args__['resourceGroupName'] = resource_group_name
__args__['storageAccountName'] = storage_account_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:datalakeanalytics/latest:listStorageAccountSasTokens', __args__, opts=opts, typ=ListStorageAccountSasTokensResult).value
return AwaitableListStorageAccountSasTokensResult(
next_link=__ret__.next_link,
value=__ret__.value)
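# Hedged usage sketch (added for illustration, not generated code): invoking the
# deprecated helper with placeholder resource names; every argument value below
# is an assumption, not a real Azure resource.
# tokens = list_storage_account_sas_tokens(
#     account_name="exampleAdlaAccount",
#     container_name="examplecontainer",
#     resource_group_name="exampleResourceGroup",
#     storage_account_name="examplestorageaccount")
# first_token = tokens.value[0] if tokens.value else None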
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class RoomUserInfosObj(object):
def __init__(self, pageNumber=None, pageSize=None, totalElements=None, totalPages=None, content=None):
"""
        :param pageNumber: (Optional) current page number
        :param pageSize: (Optional) number of items per page
        :param totalElements: (Optional) total number of results
        :param totalPages: (Optional) total number of pages
        :param content: (Optional) page content
"""
self.pageNumber = pageNumber
self.pageSize = pageSize
self.totalElements = totalElements
self.totalPages = totalPages
self.content = content
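# Hedged usage sketch (added for illustration, not part of the generated SDK):
# building the pagination wrapper by hand; the values below are illustrative only.
# page = RoomUserInfosObj(pageNumber=1, pageSize=20, totalElements=57,
#                         totalPages=3, content=[])
# print(page.pageNumber, page.totalPages)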
|
# Copyright 2020 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter module."""
from . import views
__all__ = ['views']
|
from appdirs import user_log_dir
import os
import logging.handlers
# Normal base logging directory name
log_directory_name = "irida-uploader"
# When running tests, the Makefile sets the environment variable IRIDA_UPLOADER_TEST to 'True'.
# If it is set, we are running a test and should log to the test logs directory instead.
if os.environ.get('IRIDA_UPLOADER_TEST'):
log_directory_name = "irida_uploader_test"
# Use the system's default logging path, and append our named directory
log_file_path = os.path.join(user_log_dir(log_directory_name), 'irida-uploader.log')
if not os.path.exists(user_log_dir(log_directory_name)):
os.makedirs(user_log_dir(log_directory_name))
# Looks something like this:
# 2019-02-07 14:50:02 INFO Log message goes here...
log_format = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# setup root logger
root_logger = logging.getLogger()
root_logger.handlers = []
logging.basicConfig(
    level=logging.NOTSET,  # Default to the lowest (NOTSET) level, so every record can be handled by the handlers
handlers=[logging.NullHandler()] # Default log to Null, so that we can handle it manually
)
# Log to file
rotating_file_handler = logging.handlers.RotatingFileHandler(
filename=log_file_path,
maxBytes=(1024 * 1024 * 1024 * 10), # 10GB max file size
backupCount=100,
)
rotating_file_handler.setLevel(logging.DEBUG)
rotating_file_handler.setFormatter(log_format)
root_logger.addHandler(rotating_file_handler)
# Log to the user
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(log_format)
root_logger.addHandler(console)
# Handler that logs to the active run directory.
# Only one directory can have a logger at a time.
directory_logger = None
def add_log_to_directory(directory):
"""
Starts up a logging handler that creates a log file in the directory being uploaded
:param directory: directory to create a logger in
:return: None
"""
global directory_logger
# If there is already a directory logger in place, throw an exception
if directory_logger:
logging.error("A directory logger already exists!")
raise Exception("ERROR:add_log_to_directory: A directory logger already exists!")
logging.info("Adding log file to {}".format(directory))
log_file = os.path.join(directory, 'irida-uploader.log')
directory_logger = logging.handlers.RotatingFileHandler(
filename=log_file,
maxBytes=(1024 * 1024 * 1024 * 10), # 10GB max file size
backupCount=100,
)
directory_logger.setLevel(logging.INFO)
directory_logger.setFormatter(log_format)
root_logger.addHandler(directory_logger)
def remove_directory_logger():
"""
Deletes the existing directory logger so logging stops
:return: None
"""
global directory_logger
root_logger.removeHandler(directory_logger)
directory_logger = None
logging.info("Stopped active logging to run directory")
def get_user_log_dir():
return user_log_dir(log_directory_name)
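# Hedged usage sketch (added for illustration, not part of the module): the
# intended pairing of the per-run directory logger helpers; `run_directory`
# is a placeholder path.
# add_log_to_directory(run_directory)   # start writing irida-uploader.log in the run folder
# try:
#     pass  # perform the upload here
# finally:
#     remove_directory_logger()         # always detach the per-run handler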
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101,W0141
import datetime
import itertools
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull, Timestamp
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
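    # Note (added for readability): setUp builds three fixtures reused across the
    # class -- self.frame (10x3 frame with a two-level 'first'/'second' index),
    # self.series (8-value series on a two-level index with one NaN), and
    # self.ymd (a time frame summed by year/month/day on an integer MultiIndex).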
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_append_index(self):
tm._skip_if_no_pytz()
idx1 = Index([1.1, 1.2, 1.3])
idx2 = pd.date_range('2011-01-01', freq='D', periods=3, tz='Asia/Tokyo')
idx3 = Index(['A', 'B', 'C'])
midx_lv2 = MultiIndex.from_arrays([idx1, idx2])
midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3])
result = idx1.append(midx_lv2)
# GH 7112
import pytz
tz = pytz.timezone('Asia/Tokyo')
expected_tuples = [(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz)),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz)),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz))]
expected = Index([1.1, 1.2, 1.3] + expected_tuples)
self.assert_(result.equals(expected))
result = midx_lv2.append(idx1)
expected = Index(expected_tuples + [1.1, 1.2, 1.3])
self.assert_(result.equals(expected))
result = midx_lv2.append(midx_lv2)
expected = MultiIndex.from_arrays([idx1.append(idx1), idx2.append(idx2)])
self.assert_(result.equals(expected))
result = midx_lv2.append(midx_lv3)
self.assert_(result.equals(expected))
result = midx_lv3.append(midx_lv2)
expected = Index._simple_new(
np.array([(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz), 'A'),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz), 'B'),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz), 'C')]
+ expected_tuples), None)
self.assert_(result.equals(expected))
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
unpickled = self.round_trip_pickle(frame)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEqual(result.index.names, self.frame.index.names)
def test_sorting_repr_8017(self):
np.random.seed(0)
data = np.random.randn(3,4)
for gen, extra in [([1.,3.,2.,5.],4.),
([1,3,2,5],4),
([Timestamp('20130101'),Timestamp('20130103'),Timestamp('20130102'),Timestamp('20130105')],Timestamp('20130104')),
(['1one','3one','2one','5one'],'4one')]:
columns = MultiIndex.from_tuples([('red', i) for i in gen])
df = DataFrame(data, index=list('def'), columns=columns)
df2 = pd.concat([df,DataFrame('world',
index=list('def'),
columns=MultiIndex.from_tuples([('red', extra)]))],axis=1)
# check that the repr is good
# make sure that we have a correct sparsified repr
            # e.g. only 1 header of 'red'
self.assertEqual(str(df2).splitlines()[0].split(),['red'])
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:,[0,2,1,3]]
assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:,[0,2,1,4,3]]
assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[('red',extra)] = 'world'
result = result.sort_index(axis=1)
assert_frame_equal(result, expected)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assertTrue(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEqual(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assertTrue(isnull(s.values[42:65]).all())
self.assertTrue(notnull(s.values[:42]).all())
self.assertTrue(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assertTrue(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assertTrue((cp.values[:4] == 0).all())
self.assertTrue((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assertTrue((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assertTrue((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEqual(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEqual(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEqual(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
        self.assertTrue((result.dtypes.values == df.dtypes.values).all())
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assertTrue(com.is_integer_dtype(deleveled['prm1']))
self.assertTrue(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEqual(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_get_level_number_out_of_bounds(self):
with assertRaisesRegexp(IndexError, "Too many levels"):
self.frame.index._get_level_number(2)
with assertRaisesRegexp(IndexError, "not a valid level number"):
self.frame.index._get_level_number(-3)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
        expected = self.ymd.unstack(0).stack(0)
        assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEqual(unstacked.index.name, 'first')
self.assertEqual(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEqual(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEqual(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEqual(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_stack_names_and_numbers(self):
unstacked = self.ymd.unstack(['year', 'month'])
# Can't use mixture of names and numbers to stack
with assertRaisesRegexp(ValueError, "level should contain"):
unstacked.stack([0, 'month'])
def test_stack_multiple_out_of_bounds(self):
# nlevels == 3
unstacked = self.ymd.unstack(['year', 'month'])
with assertRaisesRegexp(IndexError, "Too many levels"):
unstacked.stack([2, 3])
with assertRaisesRegexp(IndexError, "not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEqual(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assertTrue((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assertFalse(np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assertFalse(swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assertTrue(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assertTrue((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assertTrue(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assertFalse(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assertFalse(index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assertTrue((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assertTrue((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect)
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect)
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assertTrue(leftside._get_axis(axis).equals(level_index))
self.assertTrue(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEqual(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEqual(result.index.names, self.ymd.index.names[1:])
self.assertEqual(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEqual(result.index.name, self.ymd.index.names[2])
self.assertEqual(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEqual(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar','three'),'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar','three'),'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assertTrue((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected)
self.assertEqual(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected)
self.assertEqual(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result)
assert_frame_equal(df1, df2)
self.assertEqual(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
result = self.frame.T.ix[:, ['foo', 'qux']]
assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]]
assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]].T
assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]].T
assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
self.assertEqual(result.index.names, ('one', 'two'))
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
Index([0, 1])]
labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, labels=labels)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=[u('\u0394'), 'i1'])
s = Series(lrange(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
mix = MultiIndex.from_tuples(
[('1a', '2a'), ('1a', '2b'), ('1a', '2c')])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
self.assertTrue(df['new'].isnull().all())
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.ix[subset] = 99
self.assertTrue((self.frame.ix[subset].values == 99).all())
col = self.frame['B']
col[subset] = 97
self.assertTrue((self.frame.ix[subset, 'B'] == 97).all())
def test_frame_dict_constructor_empty_series(self):
s1 = Series([1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3),
(2, 2), (2, 4)]))
s2 = Series([1, 2, 3, 4],
index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
s3 = Series()
# it works!
df = DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
df = DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_indexing_ambiguity_bug_1678(self):
columns = MultiIndex.from_tuples([('Ohio', 'Green'), ('Ohio', 'Red'),
('Colorado', 'Green')])
index = MultiIndex.from_tuples(
[('a', 1), ('a', 2), ('b', 1), ('b', 2)])
frame = DataFrame(np.arange(12).reshape((4, 3)), index=index,
columns=columns)
result = frame.ix[:, 1]
exp = frame.icol(1)
tm.assert_isinstance(result, Series)
assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
df = DataFrame([[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]],
columns=list("ABCD"))
df = df.set_index(['A', 'B'])
ix = MultiIndex.from_tuples([(1, 1)])
df.ix[ix, "C"] = '_'
self.assertTrue((df.xs((1, 1))['C'] == '_').all())
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n),
MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
self.assertEqual(s[("a", 5)], 5)
self.assertEqual(s[("a", 6)], 6)
self.assertEqual(s[("a", 7)], 7)
_index._SIZE_CUTOFF = old_cutoff
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ("faz", "boo")
df.index = l
repr(df)
# this travels an improper code path
l[0] = ["faz", "boo"]
df.index = l
repr(df)
def test_tuples_have_na(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0],
[0, 1, 2, 3, 0, 1, 2, 3]])
self.assertTrue(isnull(index[4][0]))
self.assertTrue(isnull(index.values[4][0]))
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo', 'demo', 'demo', 'demo']
idx = MultiIndex.from_tuples(idx_tp, names=['STK_ID', 'RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
self.assertEqual(len(result), 3)
def test_duplicate_mi(self):
# GH 4516
df = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['bah', 'bam', 3.0, 3], ['bah', 'bam', 4.0, 4],
['foo', 'bar', 5.0, 5], ['bah', 'bam', 6.0, 6]],
columns=list('ABCD'))
df = df.set_index(['A', 'B'])
df = df.sortlevel(0)
expected = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['foo', 'bar', 5.0, 5]],
columns=list('ABCD')).set_index(['A', 'B'])
result = df.loc[('foo', 'bar')]
assert_frame_equal(result, expected)
def test_duplicated_drop_duplicates(self):
# GH 4060
idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2]))
expected = np.array([False, False, False, True, False, False], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(), expected)
expected = np.array([True, False, False, False, False, False])
duplicated = idx.duplicated(take_last=True)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(take_last=True), expected)
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df['tuples'] = tuples
index = MultiIndex.from_tuples(df['tuples'])
# it works!
df.set_index(index)
def test_datetimeindex(self):
idx1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'] * 2, tz='Asia/Tokyo')
idx2 = pd.date_range('2010/01/01', periods=6, freq='M', tz='US/Eastern')
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'], tz='Asia/Tokyo')
self.assertTrue(idx.levels[0].equals(expected1))
self.assertTrue(idx.levels[1].equals(idx2))
# from datetime combos
# GH 7888
date1 = datetime.date.today()
date2 = datetime.datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product([date1, date2, date3], [date1, date2, date3]):
index = pd.MultiIndex.from_product([[d1], [d2]])
self.assertIsInstance(index.levels[0], pd.DatetimeIndex)
self.assertIsInstance(index.levels[1], pd.DatetimeIndex)
def test_set_index_datetime(self):
# GH 3950
df = pd.DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
'value': range(6)})
df.index = pd.to_datetime(df.pop('datetime'), utc=True)
df.index = df.index.tz_localize('UTC').tz_convert('US/Pacific')
expected = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00'])
expected = expected.tz_localize('UTC').tz_convert('US/Pacific')
df = df.set_index('label', append=True)
self.assertTrue(df.index.levels[0].equals(expected))
self.assertTrue(df.index.levels[1].equals(pd.Index(['a', 'b'])))
df = df.swaplevel(0, 1)
self.assertTrue(df.index.levels[0].equals(pd.Index(['a', 'b'])))
self.assertTrue(df.index.levels[1].equals(expected))
df = DataFrame(np.random.random(6))
idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'], tz='US/Eastern')
idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00', '2012-04-01 09:00',
'2012-04-02 09:00', '2012-04-02 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Eastern')
expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'], tz='US/Eastern')
self.assertTrue(df.index.levels[0].equals(expected1))
self.assertTrue(df.index.levels[1].equals(expected2))
self.assertTrue(df.index.levels[2].equals(idx3))
# GH 7092
self.assertTrue(df.index.get_level_values(0).equals(idx1))
self.assertTrue(df.index.get_level_values(1).equals(idx2))
self.assertTrue(df.index.get_level_values(2).equals(idx3))
def test_reset_index_datetime(self):
# GH 3950
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx1')
idx2 = pd.Index(range(5), name='idx2',dtype='int64')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame({'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5,dtype='int64'),
'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
idx3 = pd.date_range('1/1/2012', periods=5, freq='MS', tz='Europe/Paris', name='idx3')
idx = pd.MultiIndex.from_arrays([idx1, idx2, idx3])
df = pd.DataFrame({'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5,dtype='int64'),
'idx3': [datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1),
datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1),
datetime.datetime(2012, 5, 1)],
'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'idx3', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(lambda d: pd.Timestamp(d, tz=tz))
expected['idx3'] = expected['idx3'].apply(lambda d: pd.Timestamp(d, tz='Europe/Paris'))
assert_frame_equal(df.reset_index(), expected)
# GH 7793
idx = pd.MultiIndex.from_product([['a','b'], pd.date_range('20130101', periods=3, tz=tz)])
df = pd.DataFrame(np.arange(6,dtype='int64').reshape(6,1), columns=['a'], index=idx)
expected = pd.DataFrame({'level_0': 'a a a b b b'.split(),
'level_1': [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 2),
datetime.datetime(2013, 1, 3)] * 2,
'a': np.arange(6, dtype='int64')},
columns=['level_0', 'level_1', 'a'])
expected['level_1'] = expected['level_1'].apply(lambda d: pd.Timestamp(d, offset='D', tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_reset_index_period(self):
# GH 7746
idx = pd.MultiIndex.from_product([pd.period_range('20130101', periods=3, freq='M'),
['a','b','c']], names=['month', 'feature'])
df = pd.DataFrame(np.arange(9,dtype='int64').reshape(-1,1), index=idx, columns=['a'])
expected = pd.DataFrame({'month': [pd.Period('2013-01', freq='M')] * 3 +
[pd.Period('2013-02', freq='M')] * 3 +
[pd.Period('2013-03', freq='M')] * 3,
'feature': ['a', 'b', 'c'] * 3,
'a': np.arange(9, dtype='int64')},
columns=['month', 'feature', 'a'])
assert_frame_equal(df.reset_index(), expected)
def test_set_index_period(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = pd.period_range('2011-01-01', periods=3, freq='M')
idx1 = idx1.append(idx1)
idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
idx2 = idx2.append(idx2).append(idx2)
idx3 = pd.period_range('2005', periods=6, freq='Y')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.period_range('2011-01-01', periods=3, freq='M')
expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
self.assertTrue(df.index.levels[0].equals(expected1))
self.assertTrue(df.index.levels[1].equals(expected2))
self.assertTrue(df.index.levels[2].equals(idx3))
self.assertTrue(df.index.get_level_values(0).equals(idx1))
self.assertTrue(df.index.get_level_values(1).equals(idx2))
self.assertTrue(df.index.get_level_values(2).equals(idx3))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
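# The suite above exercises MultiIndex selection; a minimal standalone sketch of the
# same operations (independent of the test fixtures in this file):
#
# import numpy as np
# import pandas as pd
# idx = pd.MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'), ('bar', 'one')])
# s = pd.Series(np.arange(3), index=idx)
# s.xs('foo')        # cross-section on the first level
# s[('bar', 'one')]  # scalar lookup by full key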
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
from collections import OrderedDict
from datetime import datetime
from sqlalchemy.orm.session import Session, make_transient
from airflow import executors, models
from airflow.exceptions import (
AirflowException, DagConcurrencyLimitReached, NoAvailablePoolSlot, PoolNotFound,
TaskConcurrencyLimitReached,
)
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagPickle, DagRun
from airflow.ti_deps.dep_context import BACKFILL_QUEUED_DEPS, DepContext
from airflow.utils import timezone
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.db import provide_session
from airflow.utils.state import State
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs in the right order and lasts for as
long as it takes for the set of task instances to be completed.
"""
ID_PREFIX = 'backfill_'
ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
STATES_COUNT_AS_RUNNING = (State.RUNNING, State.QUEUED)
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
class _DagRunTaskStatus:
"""
Internal status of the backfill job. This class is intended to be instantiated
only within a BackfillJob instance and tracks the execution of tasks,
e.g. running, skipped, succeeded, failed, etc. Information about the dag runs
related to the backfill job (e.g. finished runs) is also tracked in this
structure. Any other status information related to the execution of dag runs
or tasks can be included in this structure, since that makes it easier to
pass around.
"""
# TODO(edgarRd): AIRFLOW-1444: Add consistency check on counts
def __init__(self,
to_run=None,
running=None,
skipped=None,
succeeded=None,
failed=None,
not_ready=None,
deadlocked=None,
active_runs=None,
executed_dag_run_dates=None,
finished_runs=0,
total_runs=0,
):
"""
:param to_run: Tasks to run in the backfill
:type to_run: dict[tuple[string, string, datetime.datetime], airflow.models.TaskInstance]
:param running: Maps running task instance key to task instance object
:type running: dict[tuple[string, string, datetime.datetime], airflow.models.TaskInstance]
:param skipped: Tasks that have been skipped
:type skipped: set[tuple[string, string, datetime.datetime]]
:param succeeded: Tasks that have succeeded so far
:type succeeded: set[tuple[string, string, datetime.datetime]]
:param failed: Tasks that have failed
:type failed: set[tuple[string, string, datetime.datetime]]
:param not_ready: Tasks not ready for execution
:type not_ready: set[tuple[string, string, datetime.datetime]]
:param deadlocked: Deadlocked tasks
:type deadlocked: set[tuple[string, string, datetime.datetime]]
:param active_runs: Active dag runs at a certain point in time
:type active_runs: list[DagRun]
:param executed_dag_run_dates: Datetime objects for the executed dag runs
:type executed_dag_run_dates: set[datetime.datetime]
:param finished_runs: Number of finished runs so far
:type finished_runs: int
:param total_runs: Number of total dag runs able to run
:type total_runs: int
"""
self.to_run = to_run or OrderedDict()
self.running = running or dict()
self.skipped = skipped or set()
self.succeeded = succeeded or set()
self.failed = failed or set()
self.not_ready = not_ready or set()
self.deadlocked = deadlocked or set()
self.active_runs = active_runs or list()
self.executed_dag_run_dates = executed_dag_run_dates or set()
self.finished_runs = finished_runs
self.total_runs = total_runs
def __init__(
self,
dag,
start_date=None,
end_date=None,
mark_success=False,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
pool=None,
delay_on_limit_secs=1.0,
verbose=False,
conf=None,
rerun_failed_tasks=False,
run_backwards=False,
*args, **kwargs):
"""
:param dag: DAG object.
:type dag: airflow.models.DAG
:param start_date: start date for the backfill date range.
:type start_date: datetime.datetime
:param end_date: end date for the backfill date range.
:type end_date: datetime.datetime
:param mark_success: flag whether to mark tasks as succeeded without running them.
:type mark_success: bool
:param donot_pickle: whether to avoid pickling the DAG to the database
:type donot_pickle: bool
:param ignore_first_depends_on_past: whether to ignore depends_on_past for the first dag run
:type ignore_first_depends_on_past: bool
:param ignore_task_deps: whether to ignore task dependencies
:type ignore_task_deps: bool
:param pool: pool to backfill
:type pool: str
:param delay_on_limit_secs: seconds to wait before the next attempt when the
max_active_runs limit has been reached
:type delay_on_limit_secs: float
:param verbose: flag whether to display verbose messages on the backfill console
:type verbose: bool
:param conf: a dictionary of key/value pairs the user can pass to the backfill
:type conf: dict
:param rerun_failed_tasks: flag whether to auto rerun the failed tasks in the backfill
:type rerun_failed_tasks: bool
:param run_backwards: whether to process the dates from most to least recent
:type run_backwards: bool
:param args:
:param kwargs:
"""
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.donot_pickle = donot_pickle
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.pool = pool
self.delay_on_limit_secs = delay_on_limit_secs
self.verbose = verbose
self.conf = conf
self.rerun_failed_tasks = rerun_failed_tasks
self.run_backwards = run_backwards
super().__init__(*args, **kwargs)
def _update_counters(self, ti_status):
"""
Updates the counters per state of the task instances that were running.
Re-adds tasks to the to_run set when required.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus
"""
for key, ti in list(ti_status.running.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.FAILED:
self.log.error("Task instance %s failed", ti)
ti_status.failed.add(key)
ti_status.running.pop(key)
continue
# special case: if the task needs to run again put it back
elif ti.state == State.UP_FOR_RETRY:
self.log.warning("Task instance %s is up for retry", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: if the task needs to be rescheduled put it back
elif ti.state == State.UP_FOR_RESCHEDULE:
self.log.warning("Task instance %s is up for reschedule", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: The state of the task can be set to NONE by the task itself
# when it reaches concurrency limits. It could also happen when the state
# is changed externally, e.g. by clearing tasks from the ui. We need to cover
# for that as otherwise those tasks would fall outside of the scope of
# the backfill suddenly.
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance %s state was set to none externally or "
"reaching concurrency limits. Re-adding task to queue.",
ti
)
ti.set_state(State.SCHEDULED)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
def _manage_executor_state(self, running):
"""
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
)
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if state == State.FAILED or state == State.SUCCESS:
if ti.state == State.RUNNING or ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
ti.handle_failure(msg)
@provide_session
def _get_dag_run(self, run_date: datetime, dag: DAG, session: Session = None):
"""
Returns a dag run for the given run date: an existing dag run is reused if
available, otherwise a new dag run is created. If the max_active_runs
limit is reached, this function will return None.
:param run_date: the execution date for the dag run
:param dag: DAG
:param session: the database session object
:return: a DagRun in state RUNNING or None
"""
run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())
# consider max_active_runs but ignore when running subdags
respect_dag_max_active_limit = bool(dag.schedule_interval and not dag.is_subdag)
current_active_dag_count = dag.get_num_active_runs(external_trigger=False)
# check if we are scheduling on top of an already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=dag.dag_id,
execution_date=run_date,
session=session)
if run is not None and len(run) > 0:
run = run[0]
if run.state == State.RUNNING:
respect_dag_max_active_limit = False
else:
run = None
# enforce max_active_runs limit for dag, special cases already
# handled by respect_dag_max_active_limit
if (respect_dag_max_active_limit and
current_active_dag_count >= dag.max_active_runs):
return None
run = run or dag.create_dagrun(
run_id=run_id,
execution_date=run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
)
# set required transient field
run.dag = dag
# explicitly mark as backfill and running
run.state = State.RUNNING
run.run_id = run_id
run.verify_integrity(session=session)
return run
@provide_session
def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason, if we don't refresh, the reference to the dag run is lost
dag_run.refresh_from_db()
make_transient(dag_run)
try:
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session, commit=False)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
session.commit()
except Exception:
session.rollback()
raise
return tasks_to_run
def _log_progress(self, ti_status):
self.log.info(
'[backfill progress] | finished run %s of %s | tasks waiting: %s | succeeded: %s | '
'running: %s | failed: %s | skipped: %s | deadlocked: %s | not ready: %s',
ti_status.finished_runs, ti_status.total_runs, len(ti_status.to_run), len(ti_status.succeeded),
len(ti_status.running), len(ti_status.failed), len(ti_status.skipped), len(ti_status.deadlocked),
len(ti_status.not_ready)
)
self.log.debug(
"Finished dag run loop iteration. Remaining tasks %s",
ti_status.to_run.values()
)
@provide_session
def _process_backfill_task_instances(self,
ti_status,
executor,
pickle_id,
start_date=None, session=None):
"""
Process a set of task instances from a set of dag runs. Special handling is done
to account for different task instance states that could be present when running
them in a backfill process.
:param ti_status: the internal status of the job
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to run the task instances
:type executor: BaseExecutor
:param pickle_id: the pickle_id if dag is pickled, None otherwise
:type pickle_id: int
:param start_date: the start date of the backfill job
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
:return: the list of execution_dates for the finished dag runs
:rtype: list
"""
executed_run_dates = []
while ((len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and
len(ti_status.deadlocked) == 0):
self.log.debug("*** Clearing out not_ready list ***")
ti_status.not_ready.clear()
# we need to execute the tasks bottom to top
# or leaf to root, as otherwise tasks might be
# determined deadlocked while they are actually
# waiting for their upstream to finish
@provide_session
def _per_task_process(task, key, ti, session=None):
ti.refresh_from_db()
task = self.dag.get_task(ti.task_id, include_subdags=True)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.log.debug(
"Task instance to run %s state %s", ti, ti.state)
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
# guard against externally modified tasks instances or
# in case max concurrency has been reached at task runtime
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance %s state was set to None "
"externally. This should not happen", ti
)
ti.set_state(State.SCHEDULED, session=session)
if self.rerun_failed_tasks:
# Rerun failed tasks or upstreamed failed tasks
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with state {state}".format(ti=ti,
state=ti.state))
if key in ti_status.running:
ti_status.running.pop(key)
# Reset the failed task in backfill to scheduled state
ti.set_state(State.SCHEDULED, session=session)
else:
# Default behaviour which works for subdag.
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with {state} state".format(ti=ti,
state=ti.state))
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
backfill_context = DepContext(
deps=BACKFILL_QUEUED_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
ti.refresh_from_db(lock_for_update=True, session=session)
# Is the task runnable? -- then run it
# the dependency checker can change states of tis
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=self.verbose):
if executor.has_task(ti):
self.log.debug(
"Task Instance %s already in executor "
"waiting for queue to clear",
ti
)
else:
self.log.debug('Sending %s to executor', ti)
# Skip scheduled state, we are executing immediately
ti.state = State.QUEUED
ti.queued_dttm = timezone.utcnow() if not ti.queued_dttm else ti.queued_dttm
session.merge(ti)
cfg_path = None
if executor.__class__ in (executors.LocalExecutor,
executors.SequentialExecutor):
cfg_path = tmp_configuration_copy()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool,
cfg_path=cfg_path)
ti_status.running[key] = ti
ti_status.to_run.pop(key)
session.commit()
return
if ti.state == State.UPSTREAM_FAILED:
self.log.error("Task instance %s upstream failed", ti)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
# special case
if ti.state == State.UP_FOR_RETRY:
self.log.debug(
"Task instance %s retry period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# special case
if ti.state == State.UP_FOR_RESCHEDULE:
self.log.debug(
"Task instance %s reschedule period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# all remaining tasks
self.log.debug('Adding %s to not_ready', ti)
ti_status.not_ready.add(key)
try:
for task in self.dag.topological_sort(include_subdag_tasks=True):
for key, ti in list(ti_status.to_run.items()):
if task.task_id != ti.task_id:
continue
pool = session.query(models.Pool) \
.filter(models.Pool.pool == task.pool) \
.first()
if not pool:
raise PoolNotFound('Unknown pool: {}'.format(task.pool))
open_slots = pool.open_slots(session=session)
if open_slots <= 0:
raise NoAvailablePoolSlot(
"Not scheduling since there are "
"{0} open slots in pool {1}".format(
open_slots, task.pool))
num_running_task_instances_in_dag = DAG.get_num_task_instances(
self.dag_id,
states=self.STATES_COUNT_AS_RUNNING,
)
if num_running_task_instances_in_dag >= self.dag.concurrency:
raise DagConcurrencyLimitReached(
"Not scheduling since DAG concurrency limit "
"is reached."
)
if task.task_concurrency:
num_running_task_instances_in_task = DAG.get_num_task_instances(
dag_id=self.dag_id,
task_ids=[task.task_id],
states=self.STATES_COUNT_AS_RUNNING,
)
if num_running_task_instances_in_task >= task.task_concurrency:
raise TaskConcurrencyLimitReached(
"Not scheduling since Task concurrency limit "
"is reached."
)
_per_task_process(task, key, ti)
except (NoAvailablePoolSlot, DagConcurrencyLimitReached, TaskConcurrencyLimitReached) as e:
self.log.debug(e)
# execute the tasks in the queue
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run and there are no running tasks then the backfill
# is deadlocked
if (ti_status.not_ready and
ti_status.not_ready == set(ti_status.to_run) and
len(ti_status.running) == 0):
self.log.warning(
"Deadlock discovered for ti_status.to_run=%s",
ti_status.to_run.values()
)
ti_status.deadlocked.update(ti_status.to_run.values())
ti_status.to_run.clear()
# check executor state
self._manage_executor_state(ti_status.running)
# update the task counters
self._update_counters(ti_status=ti_status)
# update dag run state
_dag_runs = ti_status.active_runs[:]
for run in _dag_runs:
run.update_state(session=session)
if run.state in State.finished():
ti_status.finished_runs += 1
ti_status.active_runs.remove(run)
executed_run_dates.append(run.execution_date)
self._log_progress(ti_status)
# return updated status
return executed_run_dates
@provide_session
def _collect_errors(self, ti_status, session=None):
err = ''
if ti_status.failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(ti_status.failed))
if ti_status.deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=self.verbose) !=
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True),
session=session,
verbose=self.verbose)
for t in ti_status.deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks have succeeded:\n{}\n'.format(ti_status.succeeded)
err += ' These tasks are running:\n{}\n'.format(ti_status.running)
err += ' These tasks have failed:\n{}\n'.format(ti_status.failed)
err += ' These tasks are skipped:\n{}\n'.format(ti_status.skipped)
err += ' These tasks are deadlocked:\n{}\n'.format(ti_status.deadlocked)
return err
@provide_session
def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
"""
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates of the dag runs that were executed.
:param run_dates: Execution dates for dag runs
:type run_dates: list
:param ti_status: internal BackfillJob status structure used to track the progress of task instances
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to use, it must be previously started
:type executor: BaseExecutor
:param pickle_id: numeric id of the pickled dag, None if not pickled
:type pickle_id: int
:param start_date: backfill start date
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
"""
for next_run_date in run_dates:
for dag in [self.dag] + self.dag.subdags:
dag_run = self._get_dag_run(next_run_date, dag, session=session)
tis_map = self._task_instances_for_dag_run(dag_run,
session=session)
if dag_run is None:
continue
ti_status.active_runs.append(dag_run)
ti_status.to_run.update(tis_map or {})
processed_dag_run_dates = self._process_backfill_task_instances(
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
@provide_session
def _set_unfinished_dag_runs_to_failed(self, dag_runs, session=None):
"""
Go through the dag_runs and update the state based on the task_instance state.
Then set DAG runs that are not finished to failed.
:param dag_runs: DAG runs
:param session: session
:return: None
"""
for dag_run in dag_runs:
dag_run.update_state()
if dag_run.state not in State.finished():
dag_run.set_state(State.FAILED)
session.merge(dag_run)
@provide_session
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date,
end_date=self.bf_end_date)
if self.run_backwards:
tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past]
if tasks_that_depend_on_past:
raise AirflowException(
'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
",".join(tasks_that_depend_on_past)))
run_dates = run_dates[::-1]
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
remaining_dates = (
ti_status.total_runs - len(ti_status.executed_dag_run_dates)
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id
)
time.sleep(self.delay_on_limit_secs)
except (KeyboardInterrupt, SystemExit):
self.log.warning("Backfill terminated by user.")
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)
finally:
session.commit()
executor.end()
self.log.info("Backfill done. Exiting.")
|
from flask import send_file
from python_helper import Constant as c
from python_helper import EnvironmentHelper, log
from python_framework import ResourceManager, FlaskUtil, HttpStatus, LogConstant
from queue_manager_api import QueueManager
import ModelAssociation
app = ResourceManager.initialize(__name__, ModelAssociation.MODEL, managerList=[
QueueManager()
])
@app.route(f'{app.api.baseUrl}/audios/<string:key>')
def getAudio(key=None):
log.info(getAudio, f'{LogConstant.CONTROLLER_SPACE}{FlaskUtil.safellyGetVerb()}{c.SPACE_DASH_SPACE}{FlaskUtil.safellyGetUrl()}')
try:
dto = app.api.resource.service.speak.findAudioByKey(key)
path = f'''{dto.path.split(f'src{EnvironmentHelper.OS_SEPARATOR}')[-1]}{EnvironmentHelper.OS_SEPARATOR}{dto.name}{c.DOT}{dto.extension}'''
return send_file(
path,
mimetype="audio/mp3",
as_attachment=False
), HttpStatus.OK
except Exception as exception:
MESSAGE_KEY = 'message'
responseDto = {MESSAGE_KEY: 'Audio not found'}
log.error(getAudio, responseDto.get(MESSAGE_KEY), exception=exception)
return responseDto, 404
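# Example request (a sketch; the host, port and audio key are assumptions, and the
# real prefix comes from app.api.baseUrl):
#
# curl -i http://localhost:5000/<baseUrl>/audios/<key> --output speech.mp3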
|
from fastapi import FastAPI
from starlette.testclient import TestClient
app = FastAPI()
@app.put("/items/{item_id}")
def save_item_no_body(item_id: str):
return {"item_id": item_id}
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "Fast API", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"put": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Save Item No Body",
"operationId": "save_item_no_body_items__item_id__put",
"parameters": [
{
"required": True,
"schema": {"title": "Item_Id", "type": "string"},
"name": "item_id",
"in": "path",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == openapi_schema
def test_put_no_body():
response = client.put("/items/foo")
assert response.status_code == 200
assert response.json() == {"item_id": "foo"}
def test_put_no_body_with_body():
response = client.put("/items/foo", json={"name": "Foo"})
assert response.status_code == 200
assert response.json() == {"item_id": "foo"}
|
import threading
from Utils.Utils_function import logMsg
from Sharing.Sharing import sharing1ES, sharing2ES
from Reconstruction.Reconstruction import reconstructionES1, reconstructionES2
from groups import parametres
par = parametres()
PATH_DATA_USERS = par.PATH_DATA_USERS
CHAR_DATA_SPLIT = par.CHAR_DATA_SPLIT
CHAR_MSG_SPLIT = par.CHAR_MSG_SPLIT
WHICH_PHASE = par.WHICH_PHASE
COD3000 = par.COD3000
COD3000_desc = par.COD3000_desc
BUFFER_SIZE_REQUEST_MESSAGES = par.BUFFER_SIZE_REQUEST_MESSAGES
DELIM = par.DELIM
def split(data, char):
return data.split(char)
class ManageClientConnection(threading.Thread):
def __init__(self, clientAddress, clientsocket):
threading.Thread.__init__(self)
self.csocket = clientsocket
self.clientAddress = clientAddress
def run(self):
"""
la connessione e' stabilita con chi vuole paralre con il dealer. A questo punto deve estrapolare
le informazioni che sta ricevendo.
Ci sono due casi:
- SHARING PHASE
- RECONSTRUCTION PHASE
Per capire in che fase si sta operando all'inizio i messaggi sono formatti nel seguente modo:
WHICH_PHASE|||DATA
In cui:
- WHICH_PHASE==SHA1 --> DATA=[mc||External Service Name||id_user]
- WHICH_PHASE==SHA2 --> DATA=[sPrime||id_user]
- WHICH_PHASE==REC1 --> DATA=[sPrime||x1,x2,..,xn||sSecond||mcPrime||eMS||id_user]
- WHICH_PHASE==REC2 --> DATA=[k||kPrime||MS||(g^sPrime h^rPrime)||MC||id_user]
Le informazioni che si salva il dealer PER UTENTE sono:
- c0||c1||..||c(t-1)
- MC
Le informazioni come per il caso dello shareholder vengono salvate dentro alla cartella con tutti gli utenti,
che nel cloud avra' un altro path quale "/home-user/data_users/", la cartella data_users e' gia' stata creata
e il volume viene montato li, QUINDI i dati non vengono persi anche se il container va in down
"""
print "------------------------------------------------------------------------------------------"
print ("New connection added from: ", self.clientAddress)
# you have to read all the data; TCP is not a message-based protocol but a stream protocol, so the
# data could be split across more than one packet
data_from_dealer = ''
data = True
while data:
data = self.csocket.recv(BUFFER_SIZE_REQUEST_MESSAGES)
data_from_dealer += data
if data_from_dealer.find(DELIM) != -1:
break
print data_from_dealer
# 2 - split the information, which will be in the format: [WHICH_PHASE|||DATA]
# info[0] = WHICH_PHASE
# info[1] = DATA
info = split(data_from_dealer, CHAR_MSG_SPLIT)
# [FROM DEALER] SHA1
if info[0] == WHICH_PHASE[0]:
print " Request SHARING PHASE - STEP ONE"
print " info[1]= " + str(info[1])
# model: SHA1|||MC||id_user
data = split(info[1], CHAR_DATA_SPLIT)
mc = data[0]
id_user = data[1]
print " data passed to __sharing1 function: mc=" + str(mc) + " id_user=" + str(id_user)
self.__sharing1(mc, id_user)
# [FROM CLIENT] SHA2
elif info[0] == WHICH_PHASE[1]:
print " Request SHARING PHASE - STEP TWO"
print " info[1]= " + str(info[1])
# model: SHA2|||MC||id_user
data = split(info[1], CHAR_DATA_SPLIT)
mc = data[0]
id_user = data[1]
# Also log the client's message, since the client cannot do it from JS
logMsg("Client", "ExternalServer", data_from_dealer, "SHARING", id_user)
print " data passed to __sharing2 function: mc=" + str(mc) + " id_user=" + str(id_user)
self.__sharing2(mc, id_user)
# [FROM DEALER] REC1
elif info[0] == WHICH_PHASE[2]:
print " Request RECONSTRUCTION PHASE - STEP ONE"
print " info[1]= " + str(info[1])
# model: REC1|||eMS||id_user
data = split(info[1], CHAR_DATA_SPLIT)
eMS = data[0]
id_user = data[1]
print " data passed to __reconstruction1 function: eMS=" + str(eMS) + " id_user=" + str(id_user)
self.__reconstruction1(eMS, id_user)
# [FROM CLIENT] REC2
elif info[0] == WHICH_PHASE[3]:
print " Request RECONSTRUCTION PHASE - STEP TWO"
print " info[1]= " + str(info[1])
# model: REC2|||eMS||id_user
data = split(info[1], CHAR_DATA_SPLIT)
eMS = data[0]
id_user = data[1]
# Also log the client's message, since the client cannot do it from JS
logMsg("Client", "ExternalServer", data_from_dealer, "RECONSTRUCTION", id_user)
print " data passed to __reconstruction2 function: eMS=" + str(eMS) + " id_user=" + str(id_user)
self.__reconstruction2(eMS, id_user)
else:
print " ERROR, UNRECOGNIZED PHASE: " + str(info[0])
msg = COD3000 + CHAR_DATA_SPLIT + COD3000_desc
# reply to the user with the External Server code
self.csocket.send((bytes(msg).encode("utf-8")))
print " Client at " + str(self.clientAddress) + " disconnected..."
def __sharing1(self, mc, id_user):
sharing1ES(self, mc, id_user)
def __sharing2(self, mc, id_user):
sharing2ES(self, mc, id_user)
def __reconstruction1(self, eMS, id_user):
reconstructionES1(self, eMS, id_user)
def __reconstruction2(self, x_i, id_user):
reconstructionES2(self, x_i, id_user)
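# Example of how an incoming dealer message is parsed (a sketch; the literal
# delimiters "|||" and "||" are assumptions here, the real values come from
# groups.parametres):
#
# raw = "SHA1|||<mc>||<id_user>"
# phase, payload = raw.split("|||")   # CHAR_MSG_SPLIT
# mc, id_user = payload.split("||")   # CHAR_DATA_SPLIT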
|
from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Tickfont(BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
*Arial*, *Balto*, *Courier New*, *Droid Sans*, *Droid Serif*,
*Droid Sans Mono*, *Gravitas One*, *Old Standard TT*, *Open
Sans*, *Overpass*, *PT Sans Narrow*, *Raleway*, *Times New
Roman*.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'heatmapgl.colorbar'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include *Arial*, *Balto*, *Courier New*, *Droid Sans*,
*Droid Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
Standard TT*, *Open Sans*, *Overpass*, *PT Sans
Narrow*, *Raleway*, *Times New Roman*.
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.heatmapgl.colorbar.Tickfont
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include *Arial*, *Balto*, *Courier New*, *Droid Sans*,
*Droid Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
Standard TT*, *Open Sans*, *Overpass*, *PT Sans
Narrow*, *Raleway*, *Times New Roman*.
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__('tickfont')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmapgl.colorbar.Tickfont
constructor must be a dict or
an instance of plotly.graph_objs.heatmapgl.colorbar.Tickfont"""
)
# Import validators
# -----------------
from plotly.validators.heatmapgl.colorbar import (
tickfont as v_tickfont
)
# Initialize validators
# ---------------------
self._validators['color'] = v_tickfont.ColorValidator()
self._validators['family'] = v_tickfont.FamilyValidator()
self._validators['size'] = v_tickfont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
v = arg.pop('color', None)
self.color = color if color is not None else v
v = arg.pop('family', None)
self.family = family if family is not None else v
v = arg.pop('size', None)
self.size = size if size is not None else v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
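# Example usage (a minimal sketch; it assumes the generated plotly.graph_objs API
# this module belongs to):
#
# import plotly.graph_objs as go
# font = go.heatmapgl.colorbar.Tickfont(color='black', family='Arial', size=12)
# fig = go.Figure(data=[go.Heatmapgl(z=[[1, 2], [3, 4]],
#                                    colorbar=dict(tickfont=font))])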
|
from django.urls import path
from .views import CriarInscricaoIndividual, CriarInscricaoColetiva
urlpatterns = [
path('criarinscricaoindividual', CriarInscricaoIndividual,
name='criar-inscricao-individual'),
path('criarinscricaocoletiva', CriarInscricaoColetiva, name='criar-inscricao-coletiva'),
]
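# Example of resolving these routes by name (a sketch; the resulting paths assume
# this urlconf is included at the project root, which may not be the case):
#
# from django.urls import reverse
# reverse('criar-inscricao-individual')  # e.g. '/criarinscricaoindividual'
# reverse('criar-inscricao-coletiva')    # e.g. '/criarinscricaocoletiva'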
|
# Generated by Django 3.1.1 on 2020-10-09 12:30
from django.db import migrations
class Migration(migrations.Migration):
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="GlobalPermission",
fields=[],
options={
"verbose_name": "global_permission",
"proxy": True,
"indexes": [],
"constraints": [],
},
bases=("auth.permission",),
),
]
|
from collections import OrderedDict
__author__ = 'kevin'
import socket
from threading import Lock
class LithiumHelper(object):
@staticmethod
def recv_all(sock):
read = ''
try:
data = sock.recv(1024)
read += data
except socket.error, e:
if isinstance(e.args, tuple):
if e[0] == socket.errno.EPIPE:
print "Detected remote disconnect"
raise e
else:
print "socket error ", e
return read
@staticmethod
def message_dict(msg):
map = dict()
head = msg.split(":")[0]
for line in msg.split("\n"):
split = line.split(":")
if len(split) >= 2:
map[split[0]] = split[1]
return (head, map)
@staticmethod
def revc_msg_dict(sock, count):
return LithiumHelper.message_dict(LithiumHelper.recv_line_num(sock, count))
@staticmethod
def recv_line_num(sock, count):
out = ''
while count > 0:
line = LithiumHelper.recv_line(sock)
print "recv: %s" % (line)
out += line
count -= 1
return out
@staticmethod
def recv_text(sock):
read = ''
try:
chars = []
lst_char = ''
while True:
a = sock.recv(1)
if a != "\r":
if (a == "\n" and lst_char == "\n") or a == "":
return "".join(chars)
else:
chars.append(a)
lst_char = a
except socket.error, e:
if isinstance(e.args, tuple):
if e[0] == socket.errno.EPIPE:
print "Detected remote disconnect"
raise e
else:
print "socket error ", e
return read
@staticmethod
def recv_line(sock):
read = ''
try:
chars = []
while True:
a = sock.recv(1)
if a != "\r":
chars.append(a)
if a == "\n" or a == "":
return "".join(chars)
except socket.error, e:
if isinstance(e.args, tuple):
if e[0] == socket.errno.EPIPE:
print "Detected remote disconnect"
raise e
else:
print "socket error ", e
return read
@staticmethod
def to_message_dict(msg_dict):
if msg_dict is None or len(msg_dict) == 0:
return None
out = ""
for key, value in OrderedDict(msg_dict).iteritems():
out += "%s:%s\n" % (str(key), str(value))
out += ""
print out
return out
class AtomicCount(object):
def __init__(self):
self.count = 0
self.lock = Lock()
def incr(self):
self._add_count(1)
def decr(self):
self._add_count(-1)
def _add_count(self, value):
self.lock.acquire()
self.count += value
self.lock.release()
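# Example of the wire format handled by LithiumHelper (a sketch; the header and the
# keys are made up for illustration):
#
# payload = LithiumHelper.to_message_dict({'user': 'kevin', 'seq': 1})
# # -> "user:kevin\nseq:1\n" (key order may vary for a plain dict)
# head, fields = LithiumHelper.message_dict("HELLO:\nuser:kevin\nseq:1")
# # head == "HELLO", fields == {'HELLO': '', 'user': 'kevin', 'seq': '1'}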
|
#Author:Azrael
import sys
from PyQt5.QtWidgets import QApplication, QDialog, QStackedWidget,QListWidget,\
QTextEdit, QVBoxLayout, QListWidgetItem
class MainPage(QDialog):
def __init__(self, parent=None):
super(MainPage, self).__init__(parent)
self.initUI()
def initUI(self):
self.setWindowTitle("sa1tFish")
self.setGeometry(200, 200, 800, 400)
self.selectList = QListWidget()
self.Item = QListWidgetItem()
self.selectList.setFlow(QListWidget.LeftToRight)
self.selectList.addItems(["function1","function2","function3"])
self.selectList.setMaximumHeight(40)
self.selectList.setMinimumHeight(20)
self.resultEdit1 = QTextEdit("function1--result1--111",self)
self.resultEdit2 = QTextEdit("function2--result2--222",self)
self.resultEdit3 = QTextEdit("function3--result3--333",self)
self.stack = QStackedWidget()
self.stack.addWidget(self.resultEdit1)
self.stack.addWidget(self.resultEdit2)
self.stack.addWidget(self.resultEdit3)
layout = QVBoxLayout(self)
layout.addWidget(self.selectList)
layout.addWidget(self.stack)
layout.setStretch(0,1)
layout.setStretch(1,20)
self.selectList.currentRowChanged.connect(self.stack.setCurrentIndex)
self.setMinimumHeight(200)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MainPage()
sys.exit(app.exec_())
|
# Copyright Ryan-Rhys Griffiths and Aditya Raymond Thawani 2020
# Author: Ryan-Rhys Griffiths
"""
Property prediction on the photoswitch dataset using Random Forest.
"""
import argparse
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from data_utils import TaskDataLoader, transform_data, featurise_mols
def main(path, task, representation, use_pca, n_trials, test_set_size):
"""
:param path: str specifying path to dataset.
:param task: str specifying the task. One of ['e_iso_pi', 'z_iso_pi', 'e_iso_n', 'z_iso_n']
    :param representation: str specifying the molecular representation. One of ['fingerprints', 'fragments', 'fragprints']
:param use_pca: bool. If True apply PCA to perform Principal Components Regression.
:param n_trials: int specifying number of random train/test splits to use
:param test_set_size: float in range [0, 1] specifying fraction of dataset to use as test set.
"""
data_loader = TaskDataLoader(task, path)
smiles_list, y = data_loader.load_property_data()
X = featurise_mols(smiles_list, representation)
if use_pca:
n_components = 50
else:
n_components = None
r2_list = []
rmse_list = []
mae_list = []
print('\nBeginning training loop...')
for i in range(0, n_trials):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_set_size, random_state=i)
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
X_train, y_train, X_test, y_test, y_scaler = transform_data(X_train, y_train, X_test, y_test, n_components, use_pca)
regr_rf = RandomForestRegressor(n_estimators=1000, max_depth=300, random_state=2)
regr_rf.fit(X_train, y_train)
# Output Standardised RMSE and RMSE on Train Set
y_pred_train = regr_rf.predict(X_train)
train_rmse_stan = np.sqrt(mean_squared_error(y_train, y_pred_train))
train_rmse = np.sqrt(mean_squared_error(y_scaler.inverse_transform(y_train), y_scaler.inverse_transform(y_pred_train)))
print("\nStandardised Train RMSE: {:.3f}".format(train_rmse_stan))
print("Train RMSE: {:.3f}".format(train_rmse))
# Predict on new data
y_rf = regr_rf.predict(X_test)
y_rf = y_scaler.inverse_transform(y_rf)
y_test = y_scaler.inverse_transform(y_test)
score = r2_score(y_test, y_rf)
rmse = np.sqrt(mean_squared_error(y_test, y_rf))
mae = mean_absolute_error(y_test, y_rf)
print("\nR^2: {:.3f}".format(score))
print("RMSE: {:.3f}".format(rmse))
print("MAE: {:.3f}".format(mae))
r2_list.append(score)
rmse_list.append(rmse)
mae_list.append(mae)
r2_list = np.array(r2_list)
rmse_list = np.array(rmse_list)
mae_list = np.array(mae_list)
print("\nmean R^2: {:.4f} +- {:.4f}".format(np.mean(r2_list), np.std(r2_list)/np.sqrt(len(r2_list))))
print("mean RMSE: {:.4f} +- {:.4f}".format(np.mean(rmse_list), np.std(rmse_list)/np.sqrt(len(rmse_list))))
print("mean MAE: {:.4f} +- {:.4f}\n".format(np.mean(mae_list), np.std(mae_list)/np.sqrt(len(mae_list))))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', type=str, default='../dataset/photoswitches.csv',
help='Path to the photoswitches.csv file.')
parser.add_argument('-t', '--task', type=str, default='e_iso_pi',
help='str specifying the task. One of [e_iso_pi, z_iso_pi, e_iso_n, z_iso_n].')
parser.add_argument('-r', '--representation', type=str, default='fragprints',
help='str specifying the molecular representation. '
'One of [fingerprints, fragments, fragprints].')
    parser.add_argument('-pca', '--use_pca', action='store_true',
                        help='If set, apply PCA to perform Principal Components Regression.')
parser.add_argument('-n', '--n_trials', type=int, default=20,
help='int specifying number of random train/test splits to use')
parser.add_argument('-ts', '--test_set_size', type=float, default=0.2,
help='float in range [0, 1] specifying fraction of dataset to use as test set')
args = parser.parse_args()
main(args.path, args.task, args.representation, args.use_pca, args.n_trials, args.test_set_size)
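# Example invocation (sketch; the script filename below is hypothetical):
# python predict_with_RF.py -t e_iso_pi -r fragprints -n 20 -ts 0.2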
|
"""
Tests the execution of forum notification tasks.
"""
import json
import math
from datetime import datetime, timedelta
from unittest import mock
import ddt
from django.contrib.sites.models import Site
from edx_ace.channel import ChannelType, get_channel_for_message
from edx_ace.recipient import Recipient
from edx_ace.renderers import EmailRenderer
from edx_ace.utils import date
import openedx.core.djangoapps.django_comment_common.comment_client as cc
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from lms.djangoapps.discussion.signals.handlers import ENABLE_FORUM_NOTIFICATIONS_FOR_SITE_KEY
from lms.djangoapps.discussion.tasks import _should_send_message, _track_notification_sent
from openedx.core.djangoapps.ace_common.template_context import get_base_template_context
from openedx.core.djangoapps.content.course_overviews.tests.factories import CourseOverviewFactory
from openedx.core.djangoapps.django_comment_common.models import ForumsConfig
from openedx.core.djangoapps.django_comment_common.signals import comment_created
from openedx.core.djangoapps.site_configuration.tests.factories import SiteConfigurationFactory
from openedx.core.lib.celery.task_utils import emulate_http_request
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
NOW = datetime.utcnow()
ONE_HOUR_AGO = NOW - timedelta(hours=1)
TWO_HOURS_AGO = NOW - timedelta(hours=2)
def make_mock_responder(subscribed_thread_ids=None, thread_data=None, comment_data=None, per_page=1): # lint-amnesty, pylint: disable=missing-function-docstring
def mock_subscribed_threads(method, url, **kwargs): # lint-amnesty, pylint: disable=unused-argument
subscribed_thread_collection = [
{'id': thread_id} for thread_id in subscribed_thread_ids
]
page = kwargs.get('params', {}).get('page', 1)
start_index = per_page * (page - 1)
end_index = per_page * page
data = {
'collection': subscribed_thread_collection[start_index: end_index],
'page': page,
'num_pages': int(math.ceil(len(subscribed_thread_collection) / float(per_page))),
'thread_count': len(subscribed_thread_collection)
}
return mock.Mock(status_code=200, text=json.dumps(data), json=mock.Mock(return_value=data))
def mock_comment_find(method, url, **kwargs): # lint-amnesty, pylint: disable=unused-argument
return mock.Mock(status_code=200, text=json.dumps(comment_data), json=mock.Mock(return_value=comment_data))
def mock_thread_find(method, url, **kwargs): # lint-amnesty, pylint: disable=unused-argument
return mock.Mock(status_code=200, text=json.dumps(thread_data), json=mock.Mock(return_value=thread_data))
def mock_request(method, url, **kwargs):
if '/subscribed_threads' in url:
return mock_subscribed_threads(method, url, **kwargs)
if '/comments' in url:
return mock_comment_find(method, url, **kwargs)
if '/threads' in url:
return mock_thread_find(method, url, **kwargs)
return mock_request
@ddt.ddt
class TaskTestCase(ModuleStoreTestCase): # lint-amnesty, pylint: disable=missing-class-docstring
@classmethod
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUpClass(cls):
super().setUpClass()
cls.discussion_id = 'dummy_discussion_id'
cls.course = CourseOverviewFactory.create(language='fr')
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with mock.patch('common.djangoapps.student.models.cc.User.save'):
cls.thread_author = UserFactory(
username='thread_author',
password='password',
email='email'
)
cls.comment_author = UserFactory(
username='comment_author',
password='password',
email='email'
)
CourseEnrollmentFactory(
user=cls.thread_author,
course_id=cls.course.id
)
CourseEnrollmentFactory(
user=cls.comment_author,
course_id=cls.course.id
)
config = ForumsConfig.current()
config.enabled = True
config.save()
cls.create_thread_and_comments()
@classmethod
def create_thread_and_comments(cls): # lint-amnesty, pylint: disable=missing-function-docstring
cls.thread = {
'id': cls.discussion_id,
'course_id': str(cls.course.id),
'created_at': date.serialize(TWO_HOURS_AGO),
'title': 'thread-title',
'user_id': cls.thread_author.id,
'username': cls.thread_author.username,
'commentable_id': 'thread-commentable-id',
}
cls.comment = {
'id': 'comment',
'body': 'comment-body',
'created_at': date.serialize(ONE_HOUR_AGO),
'thread_id': cls.thread['id'],
'parent_id': None,
'user_id': cls.comment_author.id,
'username': cls.comment_author.username,
}
cls.comment2 = {
'id': 'comment2',
'body': 'comment2-body',
'created_at': date.serialize(NOW),
'thread_id': cls.thread['id'],
'parent_id': None,
'user_id': cls.comment_author.id,
'username': cls.comment_author.username
}
cls.subcomment = {
'id': 'subcomment',
'body': 'subcomment-body',
'created_at': date.serialize(NOW),
'thread_id': cls.thread['id'],
'parent_id': cls.comment['id'],
'user_id': cls.comment_author.id,
'username': cls.comment_author.username,
}
cls.thread['children'] = [cls.comment, cls.comment2]
cls.comment['child_count'] = 1
cls.thread2 = {
'id': cls.discussion_id,
'course_id': str(cls.course.id),
'created_at': date.serialize(TWO_HOURS_AGO),
'title': 'thread-title',
'user_id': cls.thread_author.id,
'username': cls.thread_author.username,
'commentable_id': 'thread-commentable-id-2',
}
def setUp(self):
super().setUp()
self.request_patcher = mock.patch('requests.request')
self.mock_request = self.request_patcher.start()
self.ace_send_patcher = mock.patch('edx_ace.ace.send')
self.mock_ace_send = self.ace_send_patcher.start()
thread_permalink = '/courses/discussion/dummy_discussion_id'
self.permalink_patcher = mock.patch('lms.djangoapps.discussion.tasks.permalink', return_value=thread_permalink)
self.mock_permalink = self.permalink_patcher.start()
def tearDown(self):
super().tearDown()
self.request_patcher.stop()
self.ace_send_patcher.stop()
self.permalink_patcher.stop()
@ddt.data(True, False)
def test_send_discussion_email_notification(self, user_subscribed):
if user_subscribed:
non_matching_id = 'not-a-match'
# with per_page left with a default value of 1, this ensures
# that we test a multiple page result when calling
# comment_client.User.subscribed_threads()
subscribed_thread_ids = [non_matching_id, self.discussion_id]
else:
subscribed_thread_ids = []
self.mock_request.side_effect = make_mock_responder(
subscribed_thread_ids=subscribed_thread_ids,
comment_data=self.comment,
thread_data=self.thread,
)
user = mock.Mock()
comment = cc.Comment.find(id=self.comment['id']).retrieve()
site = Site.objects.get_current()
site_config = SiteConfigurationFactory.create(site=site)
site_config.site_values[ENABLE_FORUM_NOTIFICATIONS_FOR_SITE_KEY] = True
site_config.save()
with mock.patch('lms.djangoapps.discussion.signals.handlers.get_current_site', return_value=site):
comment_created.send(sender=None, user=user, post=comment)
if user_subscribed:
expected_message_context = get_base_template_context(site)
expected_message_context.update({
'comment_author_id': self.comment_author.id,
'comment_body': self.comment['body'],
'comment_created_at': ONE_HOUR_AGO,
'comment_id': self.comment['id'],
'comment_username': self.comment_author.username,
'course_id': self.course.id,
'thread_author_id': self.thread_author.id,
'thread_created_at': TWO_HOURS_AGO,
'thread_id': self.discussion_id,
'thread_title': 'thread-title',
'thread_username': self.thread_author.username,
'thread_commentable_id': self.thread['commentable_id'],
'post_link': f'https://{site.domain}{self.mock_permalink.return_value}',
'site': site,
'site_id': site.id
})
expected_recipient = Recipient(self.thread_author.id, self.thread_author.email)
actual_message = self.mock_ace_send.call_args_list[0][0][0]
assert expected_message_context == actual_message.context
assert expected_recipient == actual_message.recipient
assert self.course.language == actual_message.language
self._assert_rendered_email(actual_message)
else:
assert not self.mock_ace_send.called
def _assert_rendered_email(self, message): # lint-amnesty, pylint: disable=missing-function-docstring
# check that we can actually render the message
with emulate_http_request(
site=message.context['site'], user=self.thread_author
):
rendered_email = EmailRenderer().render(get_channel_for_message(ChannelType.EMAIL, message), message)
assert self.comment['body'] in rendered_email.body_html
assert self.comment_author.username in rendered_email.body_html
assert self.mock_permalink.return_value in rendered_email.body_html
assert message.context['site'].domain in rendered_email.body_html
def run_should_not_send_email_test(self, thread, comment_dict):
"""
assert email is not sent
"""
self.mock_request.side_effect = make_mock_responder(
subscribed_thread_ids=[self.discussion_id],
comment_data=comment_dict,
thread_data=thread,
)
user = mock.Mock()
comment = cc.Comment.find(id=comment_dict['id']).retrieve()
comment_created.send(sender=None, user=user, post=comment)
actual_result = _should_send_message({
'thread_author_id': self.thread_author.id,
'course_id': self.course.id,
'comment_id': comment_dict['id'],
'thread_id': thread['id'],
})
assert actual_result is False
assert not self.mock_ace_send.called
def test_subcomment_should_not_send_email(self):
self.run_should_not_send_email_test(self.thread, self.subcomment)
def test_second_comment_should_not_send_email(self):
self.run_should_not_send_email_test(self.thread, self.comment2)
def test_thread_without_children_should_not_send_email(self):
"""
test that email notification will not be sent for the thread
that doesn't have attribute 'children'
"""
self.run_should_not_send_email_test(self.thread2, self.comment)
@ddt.data((
{
'thread_id': 'dummy_discussion_id',
'thread_title': 'thread-title',
'thread_created_at': date.serialize(datetime(2000, 1, 1, 0, 0, 0)),
'course_id': 'fake_course_edx',
'thread_author_id': 'a_fake_dude'
},
{
'app_label': 'discussion',
'name': 'responsenotification',
'language': 'en',
'uuid': 'uuid1',
'send_uuid': 'uuid2',
'thread_id': 'dummy_discussion_id',
'course_id': 'fake_course_edx',
'thread_created_at': datetime(2000, 1, 1, 0, 0, 0)
}
), (
{
'thread_id': 'dummy_discussion_id2',
'thread_title': 'thread-title2',
'thread_created_at': date.serialize(datetime(2000, 1, 1, 0, 0, 0)),
'course_id': 'fake_course_edx2',
'thread_author_id': 'a_fake_dude2'
},
{
'app_label': 'discussion',
'name': 'responsenotification',
'language': 'en',
'uuid': 'uuid3',
'send_uuid': 'uuid4',
'thread_id': 'dummy_discussion_id2',
'course_id': 'fake_course_edx2',
'thread_created_at': datetime(2000, 1, 1, 0, 0, 0)
}
))
@ddt.unpack
def test_track_notification_sent(self, context, test_props):
with mock.patch('edx_ace.ace.send').start() as message:
# Populate mock message (
# There are some cruft attrs, but they're harmless.
for key, entry in test_props.items():
setattr(message, key, entry)
test_props['nonInteraction'] = True
# Also augment context with site object, for setting segment context.
site = Site.objects.get_current()
context['site'] = site
with mock.patch('lms.djangoapps.discussion.tasks.segment.track') as mock_segment_track:
_track_notification_sent(message, context)
mock_segment_track.assert_called_once_with(
user_id=context['thread_author_id'],
event_name='edx.bi.email.sent',
properties=test_props,
)
|
from schematics.types import DictType, ListType, ModelType, PolyModelType, StringType
from spaceone.inventory.connector.aws_sqs_connector.schema.data import QueData
from spaceone.inventory.libs.schema.resource import CloudServiceMeta, CloudServiceResource, CloudServiceResponse
from spaceone.inventory.libs.schema.dynamic_field import TextDyField, DateTimeDyField, EnumDyField
from spaceone.inventory.libs.schema.dynamic_layout import ItemDynamicLayout, TableDynamicLayout
sqs = ItemDynamicLayout.set_fields('Queue', fields=[
TextDyField.data_source('ARN', 'data.arn'),
TextDyField.data_source('Name', 'data.name'),
TextDyField.data_source('URL', 'data.url'),
EnumDyField.data_source('FIFO Queue', 'data.fifo_queue', default_badge={
'indigo.500': ['true'], 'coral.600': ['false']
}),
EnumDyField.data_source('Content Based Deduplication', 'data.content_based_duplication', default_badge={
'indigo.500': ['true'], 'coral.600': ['false']
}),
TextDyField.data_source('Approximate Number Of Messages', 'data.approximate_number_of_messages'),
TextDyField.data_source('Approximate Number Of Messages Delayed', 'data.approximate_number_of_messages_delayed'),
TextDyField.data_source('Approximate Number Of Messages Not Visible', 'data.approximate_number_of_messages_not_visible'),
TextDyField.data_source('Delay Seconds', 'data.delay_seconds'),
TextDyField.data_source('Maximum Message Size', 'data.maximum_message_size'),
TextDyField.data_source('Message Retention Period', 'data.message_retention_period'),
TextDyField.data_source('Receive Message Wait Time Seconds', 'data.receive_message_wait_time_seconds'),
TextDyField.data_source('Visibility Timeout', 'data.visibility_timeout'),
DateTimeDyField.data_source('Created Time', 'data.created_timestamp', options={
'source_type': 'timestamp',
'source_format': 'seconds'
}),
DateTimeDyField.data_source('Last Modified Time', 'data.last_modified_timestamp', options={
'source_type': 'timestamp',
'source_format': 'seconds'
}),
])
metadata = CloudServiceMeta.set_layouts(layouts=[sqs])
class SQSResource(CloudServiceResource):
cloud_service_group = StringType(default='SQS')
class QueResource(SQSResource):
cloud_service_type = StringType(default='Queue')
data = ModelType(QueData)
_metadata = ModelType(CloudServiceMeta, default=metadata, serialized_name='metadata')
class SQSResponse(CloudServiceResponse):
resource = PolyModelType(QueResource)
|
from dateutil.relativedelta import relativedelta
from django.http import StreamingHttpResponse
from django.utils import timezone
from rest_framework import viewsets
from rest_framework.settings import api_settings
from .files import FileRenderCN, FileRenderEN
from .models import CyclecountModeDayModel
from . import serializers
from utils.page import MyPageNumberPagination
from .page import CycleCountPageNumberPagination
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from .filter import Filter
from .filter import QTYRecorderListFilter
from rest_framework.exceptions import APIException
from .serializers import FileRenderSerializer
from .models import QTYRecorder
class QTYRecorderViewSet(viewsets.ModelViewSet):
"""
list:
Response a data list(all)
"""
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = QTYRecorderListFilter
def get_queryset(self):
if self.request.user:
return QTYRecorder.objects.filter(openid=self.request.auth.openid)
else:
return QTYRecorder.objects.none()
def get_serializer_class(self):
if self.action in ['list']:
return serializers.QTYRecorderSerializer
else:
return self.http_method_not_allowed(request=self.request)
class CyclecountModeDayViewSet(viewsets.ModelViewSet):
"""
retrieve:
Response a data list(get)
list:
Response a data list(all)
create:
Create a data line(post)
delete:
Delete a data line(delete)
partial_update:
Partial_update a data(patch:partial_update)
update:
Update a data(put:update)
"""
pagination_class = None
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
cur_date = timezone.now()
delt_date = relativedelta(days=1)
if id is None:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid, cyclecount_status=0,
create_time__gte=str((cur_date -delt_date).date()) + ' 00:00:00',
create_time__lte=str((cur_date + delt_date).date()) + ' 00:00:00')
else:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid, cyclecount_status=0,
create_time__gte=str((cur_date - delt_date).date()) + ' 00:00:00',
create_time__lte=str((cur_date + delt_date).date()) + ' 00:00:00', id=id)
else:
return CyclecountModeDayModel.objects.none()
def get_serializer_class(self):
if self.action in ['list']:
return serializers.CyclecountGetSerializer
elif self.action in ['create']:
return serializers.CyclecountPostSerializer
elif self.action in ['update']:
return serializers.CyclecountUpdateSerializer
else:
return self.http_method_not_allowed(request=self.request)
def create(self, request, *args, **kwargs):
data = self.request.data
for i in range(len(data)):
CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid,
t_code=data[i]['t_code']).update(
physical_inventory=data[i]['physical_inventory'], cyclecount_status=1,
difference=data[i]['physical_inventory'] - data[i]['goods_qty'])
return Response({"detail": "success"}, status=200)
def update(self, request, *args, **kwargs):
data = self.request.data
for i in range(len(data)):
scan_count_data = CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid,
t_code=data[i]['t_code']).first()
scan_count_data.physical_inventory = scan_count_data.physical_inventory + data[i]['physical_inventory']
scan_count_data.difference = data[i]['physical_inventory'] - data[i]['goods_qty']
scan_count_data.save()
return Response({"detail": "success"}, status=200)
class CyclecountModeAllViewSet(viewsets.ModelViewSet):
"""
list:
Response a data list(get)
"""
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
date_choice = self.request.GET.get('create_time', '')
if date_choice:
if id is None:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid, cyclecount_status=1,
create_time__gte=str(date_choice) + ' 00:00:00',
create_time__lte=str(date_choice) + ' 23:59:59')
else:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid, cyclecount_status=1,
create_time__gte=str(date_choice) + ' 00:00:00',
create_time__lte=str(date_choice) + ' 23:59:59',
id=id)
else:
if id is None:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid, cyclecount_status=1)
else:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid, cyclecount_status=1,
id=id)
else:
return CyclecountModeDayModel.objects.none()
def get_serializer_class(self):
if self.action in ['list']:
return serializers.CyclecountGetSerializer
else:
return self.http_method_not_allowed(request=self.request)
class FileDownloadView(viewsets.ModelViewSet):
renderer_classes = (FileRenderCN, ) + tuple(api_settings.DEFAULT_RENDERER_CLASSES)
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time"]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
if id is None:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid,
create_time__gte=timezone.now().date() - timezone.timedelta(days=1))
else:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid,
create_time__gte=timezone.now().date() - timezone.timedelta(
days=1), id=id)
else:
return CyclecountModeDayModel.objects.none()
def get_serializer_class(self):
if self.action in ['list']:
return serializers.FileRenderSerializer
else:
return self.http_method_not_allowed(request=self.request)
def get_lang(self, data):
lang = self.request.META.get('HTTP_LANGUAGE')
if lang:
if lang == 'zh-hans':
return FileRenderCN().render(data)
else:
return FileRenderEN().render(data)
else:
return FileRenderEN().render(data)
def list(self, request, *args, **kwargs):
from datetime import datetime
dt = datetime.now()
data = (
FileRenderSerializer(instance).data
for instance in self.filter_queryset(self.get_queryset())
)
renderer = self.get_lang(data)
response = StreamingHttpResponse(
renderer,
content_type="text/csv"
)
response['Content-Disposition'] = "attachment; filename='cyclecount_{}.csv'".format(str(dt.strftime('%Y%m%d%H%M%S%f')))
return response
class FileDownloadAllView(viewsets.ModelViewSet):
renderer_classes = (FileRenderCN, ) + tuple(api_settings.DEFAULT_RENDERER_CLASSES)
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time"]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
cur_date = timezone.now()
delt_date = relativedelta(days=1)
if id is None:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid, cyclecount_status=0,
create_time__gte=str((cur_date -delt_date).date()) + ' 00:00:00',
create_time__lte=str((cur_date + delt_date).date()) + ' 00:00:00')
else:
return CyclecountModeDayModel.objects.filter(openid=self.request.auth.openid, cyclecount_status=0,
create_time__gte=str((cur_date - delt_date).date()) + ' 00:00:00',
create_time__lte=str((cur_date + delt_date).date()) + ' 00:00:00', id=id)
else:
return CyclecountModeDayModel.objects.none()
def get_serializer_class(self):
if self.action in ['list']:
return serializers.FileRenderSerializer
else:
return self.http_method_not_allowed(request=self.request)
def get_lang(self, data):
lang = self.request.META.get('HTTP_LANGUAGE')
if lang:
if lang == 'zh-hans':
return FileRenderCN().render(data)
else:
return FileRenderEN().render(data)
else:
return FileRenderEN().render(data)
def list(self, request, *args, **kwargs):
from datetime import datetime
dt = datetime.now()
data = (
FileRenderSerializer(instance).data
for instance in self.filter_queryset(self.get_queryset())
)
renderer = self.get_lang(data)
response = StreamingHttpResponse(
renderer,
content_type="text/csv"
)
response['Content-Disposition'] = "attachment; filename='cyclecountall_{}.csv'".format(str(dt.strftime('%Y%m%d%H%M%S%f')))
return response
|
from random import randint
class Die():
def __init__(self, sides):
self.sides = sides
def roll_die(self):
        print(randint(1, self.sides))
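# Quick usage sketch (illustrative):
# d6 = Die(6)
# d6.roll_die()   # prints a random integer between 1 and 6 (inclusive)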
|
#!/usr/bin/env python
import json
import os
import shutil
import subprocess
import sys
import tempfile
# Utilities
def listify(x):
if type(x) == list or type(x) == tuple:
return x
return [x]
def check_call(cmd, **args):
if type(cmd) != list:
cmd = cmd.split()
print('running: %s' % cmd)
subprocess.check_call(cmd, **args)
def checked_call_with_output(cmd, expected=None, unexpected=None, stderr=None):
cmd = cmd.split(' ')
print('running: %s' % cmd)
stdout = subprocess.check_output(cmd, stderr=stderr)
if expected:
for x in listify(expected):
assert x in stdout, 'call had the right output: ' + stdout + '\n[[[' + x + ']]]'
if unexpected:
for x in listify(unexpected):
assert x not in stdout, 'call had the wrong output: ' + stdout + '\n[[[' + x + ']]]'
def failing_call_with_output(cmd, expected):
proc = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
assert proc.returncode, 'call must have failed'
assert expected in stdout, 'call did not have the right output'
def hack_emsdk(marker, replacement):
src = open('emsdk.py').read()
assert marker in src
src = src.replace(marker, replacement)
name = '__test_emsdk'
open(name, 'w').write(src)
return name
# Set up
open('hello_world.cpp', 'w').write('int main() {}')
TAGS = json.loads(open('emscripten-releases-tags.txt').read())
LIBC = os.path.expanduser('~/.emscripten_cache/wasm-obj/libc.a')
# Tests
print('test .emscripten contents (latest was installed/activated in test.sh)')
assert 'fastcomp' in open(os.path.expanduser('~/.emscripten')).read()
assert 'upstream' not in open(os.path.expanduser('~/.emscripten')).read()
print('building proper system libraries')
def test_lib_building(emcc, use_asmjs_optimizer):
def test_build(args, expected=None, unexpected=None):
checked_call_with_output(emcc + ' hello_world.cpp' + args,
expected=expected,
unexpected=unexpected,
stderr=subprocess.STDOUT)
# by default we ship libc, struct_info, and the asm.js optimizer, as they
# are important for various reasons (libc takes a long time to build;
# struct_info is a bootstrap product so if the user's setup is broken it's
# confusing; the asm.js optimizer is a native application so it needs a
# working native local build environment). otherwise we don't ship every
# single lib, so some building is expected on first run.
unexpected_system_libs = ['generating system library: libc.',
'generating system asset: optimizer']
if use_asmjs_optimizer:
unexpected_system_libs += ['generating system asset: generated_struct_info.json']
first_time_system_libs = ['generating system library: libdlmalloc.']
test_build('', expected=first_time_system_libs,
unexpected=unexpected_system_libs)
test_build(' -O2', unexpected=unexpected_system_libs + first_time_system_libs)
test_build(' -s WASM=0', unexpected=unexpected_system_libs + first_time_system_libs)
test_build(' -O2 -s WASM=0', unexpected=unexpected_system_libs + first_time_system_libs)
def run_emsdk(cmd):
if type(cmd) != list:
cmd = cmd.split()
check_call([emsdk] + cmd)
WINDOWS = sys.platform.startswith('win')
MACOS = sys.platform == 'darwin'
upstream_emcc = os.path.join('upstream', 'emscripten', 'emcc')
fastcomp_emcc = os.path.join('fastcomp', 'emscripten', 'emcc')
emsdk = './emsdk'
if WINDOWS:
upstream_emcc += '.bat'
fastcomp_emcc += '.bat'
emsdk = 'emsdk.bat'
else:
emsdk = './emsdk'
test_lib_building(fastcomp_emcc, use_asmjs_optimizer=True)
print('update')
run_emsdk('update-tags')
print('test latest-releases-upstream')
run_emsdk('install latest-upstream')
run_emsdk('activate latest-upstream')
test_lib_building(upstream_emcc, use_asmjs_optimizer=False)
assert open(os.path.expanduser('~/.emscripten')).read().count('LLVM_ROOT') == 1
assert 'upstream' in open(os.path.expanduser('~/.emscripten')).read()
assert 'fastcomp' not in open(os.path.expanduser('~/.emscripten')).read()
print('verify version')
checked_call_with_output(upstream_emcc + ' -v', TAGS['latest'], stderr=subprocess.STDOUT)
print('clear cache')
check_call(upstream_emcc + ' --clear-cache')
assert not os.path.exists(LIBC)
print('test tot-upstream')
run_emsdk('install tot-upstream')
assert not os.path.exists(LIBC)
old_config = open(os.path.expanduser('~/.emscripten')).read()
run_emsdk('activate tot-upstream')
assert old_config == open(os.path.expanduser('~/.emscripten.old')).read()
assert os.path.exists(LIBC), 'activation supplies prebuilt libc' # TODO; test on latest as well
check_call(upstream_emcc + ' hello_world.cpp')
print('test tot-fastcomp')
run_emsdk('install tot-fastcomp')
run_emsdk('activate tot-fastcomp')
check_call(fastcomp_emcc + ' hello_world.cpp')
print('test specific release (old)')
run_emsdk('install sdk-1.38.31-64bit')
run_emsdk('activate sdk-1.38.31-64bit')
print('test specific release (new, short name)')
run_emsdk('install 1.38.33')
print('another install must re-download')
checked_call_with_output(emsdk + ' install 1.38.33', expected='Downloading:', unexpected='already exist in destination')
run_emsdk('activate 1.38.33')
assert 'fastcomp' in open(os.path.expanduser('~/.emscripten')).read()
assert 'upstream' not in open(os.path.expanduser('~/.emscripten')).read()
print('test specific release (new, full name)')
run_emsdk('install sdk-1.38.33-upstream-64bit')
run_emsdk('activate sdk-1.38.33-upstream-64bit')
print('test specific release (new, full name)')
run_emsdk('install sdk-tag-1.38.33-64bit')
run_emsdk('activate sdk-tag-1.38.33-64bit')
print('test binaryen source build')
run_emsdk(['install', '--build=Release', '--generator=Unix Makefiles', 'binaryen-master-64bit'])
print('test 32-bit error')
failing_call_with_output('python %s install latest' % hack_emsdk('not is_os_64bit()', 'True'), 'this tool is only provided for 64-bit OSes')
print('test non-git update')
temp_dir = tempfile.mkdtemp()
for filename in os.listdir('.'):
if not filename.startswith('.') and not os.path.isdir(filename):
shutil.copy2(filename, os.path.join(temp_dir, filename))
os.chdir(temp_dir)
run_emsdk('update')
print('second time')
run_emsdk('update')
print('verify downloads exist for all OSes')
latest_hash = TAGS['releases'][TAGS['latest']]
for osname, suffix in [
('linux', 'tbz2'),
('mac', 'tbz2'),
('win', 'zip')
]:
url = 'https://storage.googleapis.com/webassembly/emscripten-releases-builds/%s/%s/wasm-binaries.%s' % (osname, latest_hash, suffix)
  print(' checking url: ' + url)
check_call('curl --fail --head --silent ' + url, stdout=subprocess.PIPE)
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from turtlesim/Pose.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Pose(genpy.Message):
_md5sum = "863b248d5016ca62ea2e895ae5265cf9"
_type = "turtlesim/Pose"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float32 x
float32 y
float32 theta
float32 linear_velocity
float32 angular_velocity"""
__slots__ = ['x','y','theta','linear_velocity','angular_velocity']
_slot_types = ['float32','float32','float32','float32','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
x,y,theta,linear_velocity,angular_velocity
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Pose, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.theta is None:
self.theta = 0.
if self.linear_velocity is None:
self.linear_velocity = 0.
if self.angular_velocity is None:
self.angular_velocity = 0.
else:
self.x = 0.
self.y = 0.
self.theta = 0.
self.linear_velocity = 0.
self.angular_velocity = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_5f().pack(_x.x, _x.y, _x.theta, _x.linear_velocity, _x.angular_velocity))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 20
(_x.x, _x.y, _x.theta, _x.linear_velocity, _x.angular_velocity,) = _get_struct_5f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_5f().pack(_x.x, _x.y, _x.theta, _x.linear_velocity, _x.angular_velocity))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 20
(_x.x, _x.y, _x.theta, _x.linear_velocity, _x.angular_velocity,) = _get_struct_5f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_5f = None
def _get_struct_5f():
global _struct_5f
if _struct_5f is None:
_struct_5f = struct.Struct("<5f")
return _struct_5f
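# Round-trip sketch (illustrative only, not part of the generated message code):
# from io import BytesIO
# p = Pose(x=1.0, y=2.0, theta=0.5, linear_velocity=0.1, angular_velocity=0.0)
# buff = BytesIO()
# p.serialize(buff)
# q = Pose().deserialize(buff.getvalue())   # q.x == 1.0, q.y == 2.0, ...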
|
from __future__ import print_function
import errno
import os
from PIL import Image
import torch
import torch.nn as nn
import re
import json
import pickle as cPickle
import numpy as np
import utils
import h5py
import operator
import functools
from torch._six import string_classes
import torch.nn.functional as F
import collections
#from pycocotools.coco import COCO
# from scipy.sparse import coo_matrix
# from sklearn.metrics.pairwise import cosine_similarity
from torch.utils.data.dataloader import default_collate
EPS = 1e-7
def assert_eq(real, expected):
assert real == expected, '%s (true) vs %s (expected)' % (real, expected)
def assert_array_eq(real, expected):
assert (np.abs(real-expected) < EPS).all(), \
'%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
imgs = []
for f in sorted(os.listdir(folder)):
if f.endswith(suffix):
imgs.append(os.path.join(folder, f))
return imgs
def load_imageid(folder):
images = load_folder(folder, 'jpg')
img_ids = set()
for img in images:
img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])
img_ids.add(img_id)
return img_ids
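# Sketch: for COCO-style filenames such as 'COCO_val2014_000000123456.jpg',
# load_imageid() strips the path, extension and prefix and collects {123456, ...}.
# (The folder layout is an assumption; any '*_<id>.jpg' naming works the same way.)
# img_ids = load_imageid('data/val2014')   # hypothetical folder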
def pil_loader(path):
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def weights_init(m):
"""custom weights initialization."""
cname = m.__class__
if cname == nn.Linear or cname == nn.Conv2d or cname == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
elif cname == nn.BatchNorm2d:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
else:
print('%s is not initialized.' % cname)
def init_net(net, net_file):
if net_file:
net.load_state_dict(torch.load(net_file))
else:
net.apply(weights_init)
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
class Logger(object):
def __init__(self, output_name):
dirname = os.path.dirname(output_name)
if not os.path.exists(dirname):
os.mkdir(dirname)
self.log_file = open(output_name, 'w')
self.infos = {}
def append(self, key, val):
vals = self.infos.setdefault(key, [])
vals.append(val)
def log(self, extra_msg=''):
msgs = [extra_msg]
        for key, vals in self.infos.items():
msgs.append('%s %.6f' % (key, np.mean(vals)))
msg = '\n'.join(msgs)
self.log_file.write(msg + '\n')
self.log_file.flush()
self.infos = {}
return msg
def write(self, msg):
self.log_file.write(msg + '\n')
self.log_file.flush()
print(msg)
def print_model(model, logger):
print(model)
nParams = 0
for w in model.parameters():
nParams += functools.reduce(operator.mul, w.size(), 1)
if logger:
logger.write('nParams=\t'+str(nParams))
def save_model(path, model, epoch, optimizer=None):
model_dict = {
'epoch': epoch,
'model_state': model.state_dict()
}
if optimizer is not None:
model_dict['optimizer_state'] = optimizer.state_dict()
torch.save(model_dict, path)
def rho_select(pad, lengths):
# Index of the last output for each sequence.
idx_ = (lengths-1).view(-1,1).expand(pad.size(0), pad.size(2)).unsqueeze(1)
extracted = pad.gather(1, idx_).squeeze(1)
return extracted
def trim_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
_use_shared_memory = True
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
out = None
if 1 < batch[0].dim(): # image features
max_num_boxes = max([x.size(0) for x in batch])
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = len(batch) * max_num_boxes * batch[0].size(-1)
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
# warning: F.pad returns Variable!
return torch.stack([F.pad(x, (0,0,0,max_num_boxes-x.size(0))).data for x in batch], 0, out=out)
else:
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
            # note: numpy_type_map is provided by older torch.utils.data.dataloader releases
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [trim_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
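# Minimal sketch (assumes scipy is available):
# import scipy.sparse as sp
# t = sparse_mx_to_torch_sparse_tensor(sp.eye(3))
# t.to_dense()   # 3x3 identity as a dense FloatTensor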
def mask_softmax(x, lengths): # , dim=1)
mask = torch.zeros_like(x).to(device=x.device, non_blocking=True)
t_lengths = lengths[:, :, None].expand_as(mask)
arange_id = torch.arange(mask.size(1)).to(device=x.device, non_blocking=True)
arange_id = arange_id[None, :, None].expand_as(mask)
mask[arange_id < t_lengths] = 1
# https://stackoverflow.com/questions/42599498/numercially-stable-softmax
# https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
# exp(x - max(x)) instead of exp(x) is a trick
# to improve the numerical stability while giving
# the same outputs
x2 = torch.exp(x - torch.max(x))
x3 = x2 * mask
epsilon = 1e-5
x3_sum = torch.sum(x3, dim=1, keepdim=True) + epsilon
x4 = x3 / x3_sum.expand_as(x3)
return x4
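# Why the exp(x - max(x)) trick matters (sketch):
# torch.exp(torch.tensor([1000.0]))                  # -> inf (overflow)
# torch.softmax(torch.tensor([1000.0, 1001.0]), 0)   # -> tensor([0.2689, 0.7311])
# Subtracting the max keeps the exponent small while leaving the ratios unchanged.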
class GradReverseMask(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x, mask, weight):
"""
The mask should be composed of 0 or 1.
The '1' will get their gradient reversed..
"""
ctx.save_for_backward(mask)
ctx.weight = weight
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
mask, = ctx.saved_tensors
mask_c = mask.clone().detach().float()
mask_c[mask == 0] = 1.0
mask_c[mask == 1] = - float(ctx.weight)
return grad_output * mask_c[:, None].float(), None, None
def grad_reverse_mask(x, mask, weight=1):
return GradReverseMask.apply(x, mask, weight)
class GradReverse(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
class GradMulConst(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x, const):
ctx.const = const
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output * ctx.const, None
def grad_mul_const(x, const):
return GradMulConst.apply(x, const)
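# Gradient-reversal sketch (illustrative):
# x = torch.randn(4, 8, requires_grad=True)
# grad_reverse(x).sum().backward()
# x.grad is now filled with -1: gradients pass through negated, which is the
# usual building block for adversarial / domain-confusion losses.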
|
# -*- coding: utf-8 -*-
from django.conf import settings
# modify reversions to match our needs if required...
def reversion_register(model_class, fields=None, follow=(), format="json", exclude_fields=None):
"""CMS interface to reversion api - helper function. Registers model for
reversion only if reversion is available.
Auto excludes publisher fields.
"""
# reversion's merely recommended, not required
    if 'reversion' not in settings.INSTALLED_APPS:
return
from reversion.models import VERSION_CHANGE
if fields and exclude_fields:
raise ValueError("Just one of fields, exclude_fields arguments can be passed.")
opts = model_class._meta
local_fields = opts.local_fields + opts.local_many_to_many
if fields is None:
fields = [field.name for field in local_fields]
exclude_fields = exclude_fields or []
    fields = [name for name in fields if name not in exclude_fields]
from cms.utils import reversion_hacks
reversion_hacks.register_draft_only(model_class, fields, follow, format)
def make_revision_with_plugins(obj, user=None, message=None):
    """
    Only add to revision if it is a draft.
    """
    from cms.models.pluginmodel import CMSPlugin
    # we can safely import reversion - calls here always check for
    # reversion in installed_applications first
    import reversion
    from reversion.models import VERSION_CHANGE
    revision_manager = reversion.revision
    revision_context = reversion.revision_context_manager
cls = obj.__class__
if cls in revision_manager._registered_models:
placeholder_relation = find_placeholder_relation(obj)
if revision_context.is_active():
# add toplevel object to the revision
adapter = revision_manager.get_adapter(obj.__class__)
revision_context.add_to_context(revision_manager, obj, adapter.get_version_data(obj, VERSION_CHANGE))
# add plugins and subclasses to the revision
filters = {'placeholder__%s' % placeholder_relation: obj}
for plugin in CMSPlugin.objects.filter(**filters):
plugin_instance, admin = plugin.get_plugin_instance()
if plugin_instance:
padapter = revision_manager.get_adapter(plugin_instance.__class__)
revision_context.add_to_context(revision_manager, plugin_instance, padapter.get_version_data(plugin_instance, VERSION_CHANGE))
bpadapter = revision_manager.get_adapter(plugin.__class__)
revision_context.add_to_context(revision_manager, plugin, bpadapter.get_version_data(plugin, VERSION_CHANGE))
def find_placeholder_relation(obj):
return 'page'
|
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from collections import OrderedDict
import sys
import os
import onnx
from onnx import helper
from onnx import TensorProto
import numpy as np
sys.path.insert(1, os.path.join(sys.path[0], os.path.pardir))
from downloader import getFilePath
class DarkNetParser(object):
"""Definition of a parser for DarkNet-based YOLOv3-608 (only tested for this topology)."""
def __init__(self, supported_layers):
"""Initializes a DarkNetParser object.
Keyword argument:
supported_layers -- a string list of supported layers in DarkNet naming convention,
parameters are only added to the class dictionary if a parsed layer is included.
"""
# A list of YOLOv3 layers containing dictionaries with all layer
# parameters:
self.layer_configs = OrderedDict()
self.supported_layers = supported_layers
self.layer_counter = 0
def parse_cfg_file(self, cfg_file_path):
"""Takes the yolov3.cfg file and parses it layer by layer,
appending each layer's parameters as a dictionary to layer_configs.
Keyword argument:
cfg_file_path -- path to the yolov3.cfg file as string
"""
with open(cfg_file_path) as cfg_file:
remainder = cfg_file.read()
while remainder is not None:
layer_dict, layer_name, remainder = self._next_layer(remainder)
if layer_dict is not None:
self.layer_configs[layer_name] = layer_dict
return self.layer_configs
def _next_layer(self, remainder):
"""Takes in a string and segments it by looking for DarkNet delimiters.
Returns the layer parameters and the remaining string after the last delimiter.
Example for the first Conv layer in yolo.cfg ...
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
... becomes the following layer_dict return value:
{'activation': 'leaky', 'stride': 1, 'pad': 1, 'filters': 32,
'batch_normalize': 1, 'type': 'convolutional', 'size': 3}.
'001_convolutional' is returned as layer_name, and all lines that follow in yolo.cfg
are returned as the next remainder.
Keyword argument:
remainder -- a string with all raw text after the previously parsed layer
"""
remainder = remainder.split('[', 1)
if len(remainder) == 2:
remainder = remainder[1]
else:
return None, None, None
remainder = remainder.split(']', 1)
if len(remainder) == 2:
layer_type, remainder = remainder
else:
return None, None, None
if remainder.replace(' ', '')[0] == '#':
remainder = remainder.split('\n', 1)[1]
layer_param_block, remainder = remainder.split('\n\n', 1)
layer_param_lines = layer_param_block.split('\n')[1:]
layer_name = str(self.layer_counter).zfill(3) + '_' + layer_type
layer_dict = dict(type=layer_type)
if layer_type in self.supported_layers:
for param_line in layer_param_lines:
if param_line[0] == '#':
continue
param_type, param_value = self._parse_params(param_line)
layer_dict[param_type] = param_value
self.layer_counter += 1
return layer_dict, layer_name, remainder
def _parse_params(self, param_line):
"""Identifies the parameters contained in one of the cfg file and returns
them in the required format for each parameter type, e.g. as a list, an int or a float.
Keyword argument:
param_line -- one parsed line within a layer block
"""
param_line = param_line.replace(' ', '')
param_type, param_value_raw = param_line.split('=')
param_value = None
if param_type == 'layers':
layer_indexes = list()
for index in param_value_raw.split(','):
layer_indexes.append(int(index))
param_value = layer_indexes
elif isinstance(param_value_raw, str) and not param_value_raw.isalpha():
condition_param_value_positive = param_value_raw.isdigit()
condition_param_value_negative = param_value_raw[0] == '-' and \
param_value_raw[1:].isdigit()
if condition_param_value_positive or condition_param_value_negative:
param_value = int(param_value_raw)
else:
param_value = float(param_value_raw)
else:
param_value = str(param_value_raw)
return param_type, param_value
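    # Illustrative parses (sketch):
    #   _parse_params('filters=32')       -> ('filters', 32)
    #   _parse_params('activation=leaky') -> ('activation', 'leaky')
    #   _parse_params('layers=-1,36')     -> ('layers', [-1, 36])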
class MajorNodeSpecs(object):
"""Helper class used to store the names of ONNX output names,
corresponding to the output of a DarkNet layer and its output channels.
Some DarkNet layers are not created and there is no corresponding ONNX node,
but we still need to track them in order to set up skip connections.
"""
def __init__(self, name, channels):
""" Initialize a MajorNodeSpecs object.
Keyword arguments:
name -- name of the ONNX node
channels -- number of output channels of this node
"""
self.name = name
self.channels = channels
self.created_onnx_node = False
if name is not None and isinstance(channels, int) and channels > 0:
self.created_onnx_node = True
class ConvParams(object):
"""Helper class to store the hyper parameters of a Conv layer,
including its prefix name in the ONNX graph and the expected dimensions
of weights for convolution, bias, and batch normalization.
Additionally acts as a wrapper for generating safe names for all
weights, checking on feasible combinations.
"""
def __init__(self, node_name, batch_normalize, conv_weight_dims):
"""Constructor based on the base node name (e.g. 101_convolutional), the batch
normalization setting, and the convolutional weights shape.
Keyword arguments:
node_name -- base name of this YOLO convolutional layer
batch_normalize -- bool value if batch normalization is used
conv_weight_dims -- the dimensions of this layer's convolutional weights
"""
self.node_name = node_name
self.batch_normalize = batch_normalize
assert len(conv_weight_dims) == 4
self.conv_weight_dims = conv_weight_dims
def generate_param_name(self, param_category, suffix):
"""Generates a name based on two string inputs,
and checks if the combination is valid."""
assert suffix
assert param_category in ['bn', 'conv']
assert(suffix in ['scale', 'mean', 'var', 'weights', 'bias'])
if param_category == 'bn':
assert self.batch_normalize
assert suffix in ['scale', 'bias', 'mean', 'var']
elif param_category == 'conv':
assert suffix in ['weights', 'bias']
if suffix == 'bias':
assert not self.batch_normalize
param_name = self.node_name + '_' + param_category + '_' + suffix
return param_name
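    # Example (sketch): ConvParams('101_convolutional', True, (256, 128, 3, 3))
    #   .generate_param_name('bn', 'scale')      -> '101_convolutional_bn_scale'
    #   .generate_param_name('conv', 'weights')  -> '101_convolutional_conv_weights'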
class ResizeParams(object):
#Helper class to store the scale parameter for an Resize node.
def __init__(self, node_name, value):
"""Constructor based on the base node name (e.g. 86_Resize),
and the value of the scale input tensor.
Keyword arguments:
node_name -- base name of this YOLO Resize layer
value -- the value of the scale input to the Resize layer as numpy array
"""
self.node_name = node_name
self.value = value
def generate_param_name(self):
"""Generates the scale parameter name for the Resize node."""
param_name = self.node_name + '_' + "scale"
return param_name
def generate_roi_name(self):
"""Generates the roi input name for the Resize node."""
param_name = self.node_name + '_' + "roi"
return param_name
class WeightLoader(object):
"""Helper class used for loading the serialized weights of a binary file stream
and returning the initializers and the input tensors required for populating
the ONNX graph with weights.
"""
def __init__(self, weights_file_path):
"""Initialized with a path to the YOLOv3 .weights file.
Keyword argument:
weights_file_path -- path to the weights file.
"""
self.weights_file = self._open_weights_file(weights_file_path)
def load_resize_scales(self, resize_params):
"""Returns the initializers with the value of the scale input
tensor given by resize_params.
Keyword argument:
resize_params -- a ResizeParams object
"""
initializer = list()
inputs = list()
name = resize_params.generate_param_name()
shape = resize_params.value.shape
data = resize_params.value
scale_init = helper.make_tensor(
name, TensorProto.FLOAT, shape, data)
scale_input = helper.make_tensor_value_info(
name, TensorProto.FLOAT, shape)
initializer.append(scale_init)
inputs.append(scale_input)
# In opset 11 an additional input named roi is required. Create a dummy tensor to satisfy this.
# It is a 1D tensor of size of the rank of the input (4)
rank = 4
roi_name = resize_params.generate_roi_name()
roi_input = helper.make_tensor_value_info(roi_name, TensorProto.FLOAT, [rank])
roi_init = helper.make_tensor(roi_name, TensorProto.FLOAT, [rank], [0,0,0,0])
initializer.append(roi_init)
inputs.append(roi_input)
return initializer, inputs
def load_conv_weights(self, conv_params):
"""Returns the initializers with weights from the weights file and
the input tensors of a convolutional layer for all corresponding ONNX nodes.
Keyword argument:
conv_params -- a ConvParams object
"""
initializer = list()
inputs = list()
if conv_params.batch_normalize:
bias_init, bias_input = self._create_param_tensors(
conv_params, 'bn', 'bias')
bn_scale_init, bn_scale_input = self._create_param_tensors(
conv_params, 'bn', 'scale')
bn_mean_init, bn_mean_input = self._create_param_tensors(
conv_params, 'bn', 'mean')
bn_var_init, bn_var_input = self._create_param_tensors(
conv_params, 'bn', 'var')
initializer.extend(
[bn_scale_init, bias_init, bn_mean_init, bn_var_init])
inputs.extend([bn_scale_input, bias_input,
bn_mean_input, bn_var_input])
else:
bias_init, bias_input = self._create_param_tensors(
conv_params, 'conv', 'bias')
initializer.append(bias_init)
inputs.append(bias_input)
conv_init, conv_input = self._create_param_tensors(
conv_params, 'conv', 'weights')
initializer.append(conv_init)
inputs.append(conv_input)
return initializer, inputs
def _open_weights_file(self, weights_file_path):
"""Opens a YOLOv3 DarkNet file stream and skips the header.
Keyword argument:
weights_file_path -- path to the weights file.
"""
weights_file = open(weights_file_path, 'rb')
length_header = 5
np.ndarray(
shape=(length_header, ), dtype='int32', buffer=weights_file.read(
length_header * 4))
return weights_file
def _create_param_tensors(self, conv_params, param_category, suffix):
"""Creates the initializers with weights from the weights file together with
the input tensors.
Keyword arguments:
conv_params -- a ConvParams object
param_category -- the category of parameters to be created ('bn' or 'conv')
suffix -- a string determining the sub-type of above param_category (e.g.,
'weights' or 'bias')
"""
param_name, param_data, param_data_shape = self._load_one_param_type(
conv_params, param_category, suffix)
initializer_tensor = helper.make_tensor(
param_name, TensorProto.FLOAT, param_data_shape, param_data)
input_tensor = helper.make_tensor_value_info(
param_name, TensorProto.FLOAT, param_data_shape)
return initializer_tensor, input_tensor
def _load_one_param_type(self, conv_params, param_category, suffix):
"""Deserializes the weights from a file stream in the DarkNet order.
Keyword arguments:
conv_params -- a ConvParams object
param_category -- the category of parameters to be created ('bn' or 'conv')
suffix -- a string determining the sub-type of above param_category (e.g.,
'weights' or 'bias')
"""
param_name = conv_params.generate_param_name(param_category, suffix)
channels_out, channels_in, filter_h, filter_w = conv_params.conv_weight_dims
if param_category == 'bn':
param_shape = [channels_out]
elif param_category == 'conv':
if suffix == 'weights':
param_shape = [channels_out, channels_in, filter_h, filter_w]
elif suffix == 'bias':
param_shape = [channels_out]
        param_size = np.prod(np.array(param_shape))
param_data = np.ndarray(
shape=param_shape,
dtype='float32',
buffer=self.weights_file.read(param_size * 4))
param_data = param_data.flatten().astype(float)
return param_name, param_data, param_shape
class GraphBuilderONNX(object):
"""Class for creating an ONNX graph from a previously generated list of layer dictionaries."""
def __init__(self, output_tensors):
"""Initialize with all DarkNet default parameters used creating YOLOv3,
and specify the output tensors as an OrderedDict for their output dimensions
with their names as keys.
Keyword argument:
output_tensors -- the output tensors as an OrderedDict containing the keys'
output dimensions
"""
self.output_tensors = output_tensors
self._nodes = list()
self.graph_def = None
self.input_tensor = None
self.epsilon_bn = 1e-5
self.momentum_bn = 0.99
self.alpha_lrelu = 0.1
self.param_dict = OrderedDict()
self.major_node_specs = list()
self.batch_size = 1
def build_onnx_graph(
self,
layer_configs,
weights_file_path,
verbose=True):
"""Iterate over all layer configs (parsed from the DarkNet representation
of YOLOv3-608), create an ONNX graph, populate it with weights from the weights
file and return the graph definition.
Keyword arguments:
layer_configs -- an OrderedDict object with all parsed layers' configurations
weights_file_path -- location of the weights file
verbose -- toggles if the graph is printed after creation (default: True)
"""
for layer_name in layer_configs.keys():
layer_dict = layer_configs[layer_name]
major_node_specs = self._make_onnx_node(layer_name, layer_dict)
if major_node_specs.name is not None:
self.major_node_specs.append(major_node_specs)
outputs = list()
for tensor_name in self.output_tensors.keys():
output_dims = [self.batch_size, ] + \
self.output_tensors[tensor_name]
output_tensor = helper.make_tensor_value_info(
tensor_name, TensorProto.FLOAT, output_dims)
outputs.append(output_tensor)
inputs = [self.input_tensor]
weight_loader = WeightLoader(weights_file_path)
initializer = list()
# If a layer has parameters, add them to the initializer and input lists.
for layer_name in self.param_dict.keys():
_, layer_type = layer_name.split('_', 1)
params = self.param_dict[layer_name]
if layer_type == 'convolutional':
initializer_layer, inputs_layer = weight_loader.load_conv_weights(
params)
initializer.extend(initializer_layer)
inputs.extend(inputs_layer)
elif layer_type == "upsample":
initializer_layer, inputs_layer = weight_loader.load_resize_scales(
params)
initializer.extend(initializer_layer)
inputs.extend(inputs_layer)
del weight_loader
self.graph_def = helper.make_graph(
nodes=self._nodes,
name='YOLOv3-608',
inputs=inputs,
outputs=outputs,
initializer=initializer
)
if verbose:
print(helper.printable_graph(self.graph_def))
model_def = helper.make_model(self.graph_def,
producer_name='NVIDIA TensorRT sample')
return model_def
def _make_onnx_node(self, layer_name, layer_dict):
"""Take in a layer parameter dictionary, choose the correct function for
creating an ONNX node and store the information important to graph creation
as a MajorNodeSpec object.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
layer_type = layer_dict['type']
if self.input_tensor is None:
if layer_type == 'net':
major_node_output_name, major_node_output_channels = self._make_input_tensor(
layer_name, layer_dict)
major_node_specs = MajorNodeSpecs(major_node_output_name,
major_node_output_channels)
else:
raise ValueError('The first node has to be of type "net".')
else:
node_creators = dict()
node_creators['convolutional'] = self._make_conv_node
node_creators['shortcut'] = self._make_shortcut_node
node_creators['route'] = self._make_route_node
node_creators['upsample'] = self._make_resize_node
if layer_type in node_creators.keys():
major_node_output_name, major_node_output_channels = \
node_creators[layer_type](layer_name, layer_dict)
major_node_specs = MajorNodeSpecs(major_node_output_name,
major_node_output_channels)
else:
print(
'Layer of type %s not supported, skipping ONNX node generation.' %
layer_type)
major_node_specs = MajorNodeSpecs(layer_name,
None)
return major_node_specs
def _make_input_tensor(self, layer_name, layer_dict):
"""Create an ONNX input tensor from a 'net' layer and store the batch size.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
batch_size = layer_dict['batch']
channels = layer_dict['channels']
height = layer_dict['height']
width = layer_dict['width']
self.batch_size = batch_size
input_tensor = helper.make_tensor_value_info(
str(layer_name), TensorProto.FLOAT, [
batch_size, channels, height, width])
self.input_tensor = input_tensor
return layer_name, channels
def _get_previous_node_specs(self, target_index=-1):
"""Get a previously generated ONNX node (skip those that were not generated).
Target index can be passed for jumping to a specific index.
Keyword arguments:
target_index -- optional for jumping to a specific index (default: -1 for jumping
to previous element)
"""
previous_node = None
for node in self.major_node_specs[target_index::-1]:
if node.created_onnx_node:
previous_node = node
break
assert previous_node is not None
return previous_node
def _make_conv_node(self, layer_name, layer_dict):
"""Create an ONNX Conv node with optional batch normalization and
activation nodes.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
previous_node_specs = self._get_previous_node_specs()
inputs = [previous_node_specs.name]
previous_channels = previous_node_specs.channels
kernel_size = layer_dict['size']
stride = layer_dict['stride']
filters = layer_dict['filters']
batch_normalize = False
if 'batch_normalize' in layer_dict.keys(
) and layer_dict['batch_normalize'] == 1:
batch_normalize = True
kernel_shape = [kernel_size, kernel_size]
weights_shape = [filters, previous_channels] + kernel_shape
conv_params = ConvParams(layer_name, batch_normalize, weights_shape)
strides = [stride, stride]
dilations = [1, 1]
weights_name = conv_params.generate_param_name('conv', 'weights')
inputs.append(weights_name)
if not batch_normalize:
bias_name = conv_params.generate_param_name('conv', 'bias')
inputs.append(bias_name)
conv_node = helper.make_node(
'Conv',
inputs=inputs,
outputs=[layer_name],
kernel_shape=kernel_shape,
strides=strides,
auto_pad='SAME_LOWER',
dilations=dilations,
name=layer_name
)
self._nodes.append(conv_node)
inputs = [layer_name]
layer_name_output = layer_name
if batch_normalize:
layer_name_bn = layer_name + '_bn'
bn_param_suffixes = ['scale', 'bias', 'mean', 'var']
for suffix in bn_param_suffixes:
bn_param_name = conv_params.generate_param_name('bn', suffix)
inputs.append(bn_param_name)
batchnorm_node = helper.make_node(
'BatchNormalization',
inputs=inputs,
outputs=[layer_name_bn],
epsilon=self.epsilon_bn,
momentum=self.momentum_bn,
name=layer_name_bn
)
self._nodes.append(batchnorm_node)
inputs = [layer_name_bn]
layer_name_output = layer_name_bn
if layer_dict['activation'] == 'leaky':
layer_name_lrelu = layer_name + '_lrelu'
lrelu_node = helper.make_node(
'LeakyRelu',
inputs=inputs,
outputs=[layer_name_lrelu],
name=layer_name_lrelu,
alpha=self.alpha_lrelu
)
self._nodes.append(lrelu_node)
inputs = [layer_name_lrelu]
layer_name_output = layer_name_lrelu
elif layer_dict['activation'] == 'linear':
pass
else:
print('Activation not supported.')
self.param_dict[layer_name] = conv_params
return layer_name_output, filters
def _make_shortcut_node(self, layer_name, layer_dict):
"""Create an ONNX Add node with the shortcut properties from
the DarkNet-based graph.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
shortcut_index = layer_dict['from']
activation = layer_dict['activation']
assert activation == 'linear'
first_node_specs = self._get_previous_node_specs()
second_node_specs = self._get_previous_node_specs(
target_index=shortcut_index)
assert first_node_specs.channels == second_node_specs.channels
channels = first_node_specs.channels
inputs = [first_node_specs.name, second_node_specs.name]
shortcut_node = helper.make_node(
'Add',
inputs=inputs,
outputs=[layer_name],
name=layer_name,
)
self._nodes.append(shortcut_node)
return layer_name, channels
def _make_route_node(self, layer_name, layer_dict):
"""If the 'layers' parameter from the DarkNet configuration is only one index, continue
node creation at the indicated (negative) index. Otherwise, create an ONNX Concat node
with the route properties from the DarkNet-based graph.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
route_node_indexes = layer_dict['layers']
if len(route_node_indexes) == 1:
split_index = route_node_indexes[0]
assert split_index < 0
# Increment by one because we skipped the YOLO layer:
split_index += 1
self.major_node_specs = self.major_node_specs[:split_index]
layer_name = None
channels = None
else:
inputs = list()
channels = 0
for index in route_node_indexes:
if index > 0:
# Increment by one because we count the input as a node (DarkNet
# does not)
index += 1
route_node_specs = self._get_previous_node_specs(
target_index=index)
inputs.append(route_node_specs.name)
channels += route_node_specs.channels
assert inputs
assert channels > 0
route_node = helper.make_node(
'Concat',
axis=1,
inputs=inputs,
outputs=[layer_name],
name=layer_name,
)
self._nodes.append(route_node)
return layer_name, channels
def _make_resize_node(self, layer_name, layer_dict):
"""Create an ONNX Resize node with the properties from
the DarkNet-based graph.
Keyword arguments:
layer_name -- the layer's name (also the corresponding key in layer_configs)
layer_dict -- a layer parameter dictionary (one element of layer_configs)
"""
resize_scale_factors = float(layer_dict['stride'])
# Create the scale factor array with node parameters
scales=np.array([1.0, 1.0, resize_scale_factors, resize_scale_factors]).astype(np.float32)
previous_node_specs = self._get_previous_node_specs()
inputs = [previous_node_specs.name]
channels = previous_node_specs.channels
assert channels > 0
resize_params = ResizeParams(layer_name, scales)
# roi input is the second input, so append it before scales
roi_name = resize_params.generate_roi_name()
inputs.append(roi_name)
scales_name = resize_params.generate_param_name()
inputs.append(scales_name)
resize_node = helper.make_node(
'Resize',
coordinate_transformation_mode='asymmetric',
mode='nearest',
nearest_mode='floor',
inputs=inputs,
outputs=[layer_name],
name=layer_name,
)
self._nodes.append(resize_node)
self.param_dict[layer_name] = resize_params
return layer_name, channels
def main():
"""Run the DarkNet-to-ONNX conversion for YOLOv3-608."""
cfg_file_path = getFilePath('samples/python/yolov3_onnx/yolov3.cfg')
# These are the only layers DarkNetParser will extract parameters from. The three layers of
# type 'yolo' are not parsed in detail because they are included in the post-processing later:
supported_layers = ['net', 'convolutional', 'shortcut',
'route', 'upsample']
    # Create a DarkNetParser object, and then use it to generate an OrderedDict with all
    # layers' configs from the cfg file:
parser = DarkNetParser(supported_layers)
layer_configs = parser.parse_cfg_file(cfg_file_path)
# We do not need the parser anymore after we got layer_configs:
del parser
    # In the above layer_configs, there are three outputs whose output shapes
    # (in CHW format) we need to know:
output_tensor_dims = OrderedDict()
output_tensor_dims['082_convolutional'] = [255, 19, 19]
output_tensor_dims['094_convolutional'] = [255, 38, 38]
output_tensor_dims['106_convolutional'] = [255, 76, 76]
# Create a GraphBuilderONNX object with the known output tensor dimensions:
builder = GraphBuilderONNX(output_tensor_dims)
weights_file_path = getFilePath('samples/python/yolov3_onnx/yolov3.weights')
# Now generate an ONNX graph with weights from the previously parsed layer configurations
# and the weights file:
yolov3_model_def = builder.build_onnx_graph(
layer_configs=layer_configs,
weights_file_path=weights_file_path,
verbose=True)
# Once we have the model definition, we do not need the builder anymore:
del builder
# Perform a sanity check on the ONNX model definition:
onnx.checker.check_model(yolov3_model_def)
# Serialize the generated ONNX graph to this file:
output_file_path = 'yolov3.onnx'
onnx.save(yolov3_model_def, output_file_path)
if __name__ == '__main__':
main()
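# Optional follow-up (hedged sketch, not part of the original sample): once
# 'yolov3.onnx' has been written, the graph can be exercised with onnxruntime.
# The input name and shape are read from the session rather than assumed.
#
#   import numpy as np
#   import onnxruntime as rt
#   sess = rt.InferenceSession('yolov3.onnx')
#   inp = sess.get_inputs()[0]
#   dummy = np.zeros([d if isinstance(d, int) else 1 for d in inp.shape], dtype=np.float32)
#   outputs = sess.run(None, {inp.name: dummy})
#   print([o.shape for o in outputs])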
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "box"
_path_str = "box.marker"
_valid_props = {"color", "line", "opacity", "outliercolor", "size", "symbol"}
# color
# -----
@property
def color(self):
"""
        Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.box.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
                Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
outliercolor
Sets the border line color of the outlier
sample points. Defaults to marker.color
outlierwidth
Sets the border line width (in px) of the
outlier sample points.
width
Sets the width (in px) of the lines bounding
the marker points.
Returns
-------
plotly.graph_objs.box.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# outliercolor
# ------------
@property
def outliercolor(self):
"""
Sets the color of the outlier sample points.
The 'outliercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outliercolor"]
@outliercolor.setter
def outliercolor(self, val):
self["outliercolor"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, 'circle', 100, 'circle-open', 200, 'circle-dot', 300,
'circle-open-dot', 1, 'square', 101, 'square-open', 201,
'square-dot', 301, 'square-open-dot', 2, 'diamond', 102,
'diamond-open', 202, 'diamond-dot', 302,
'diamond-open-dot', 3, 'cross', 103, 'cross-open', 203,
'cross-dot', 303, 'cross-open-dot', 4, 'x', 104, 'x-open',
204, 'x-dot', 304, 'x-open-dot', 5, 'triangle-up', 105,
'triangle-up-open', 205, 'triangle-up-dot', 305,
'triangle-up-open-dot', 6, 'triangle-down', 106,
'triangle-down-open', 206, 'triangle-down-dot', 306,
'triangle-down-open-dot', 7, 'triangle-left', 107,
'triangle-left-open', 207, 'triangle-left-dot', 307,
'triangle-left-open-dot', 8, 'triangle-right', 108,
'triangle-right-open', 208, 'triangle-right-dot', 308,
'triangle-right-open-dot', 9, 'triangle-ne', 109,
'triangle-ne-open', 209, 'triangle-ne-dot', 309,
'triangle-ne-open-dot', 10, 'triangle-se', 110,
'triangle-se-open', 210, 'triangle-se-dot', 310,
'triangle-se-open-dot', 11, 'triangle-sw', 111,
'triangle-sw-open', 211, 'triangle-sw-dot', 311,
'triangle-sw-open-dot', 12, 'triangle-nw', 112,
'triangle-nw-open', 212, 'triangle-nw-dot', 312,
'triangle-nw-open-dot', 13, 'pentagon', 113,
'pentagon-open', 213, 'pentagon-dot', 313,
'pentagon-open-dot', 14, 'hexagon', 114, 'hexagon-open',
214, 'hexagon-dot', 314, 'hexagon-open-dot', 15,
'hexagon2', 115, 'hexagon2-open', 215, 'hexagon2-dot',
315, 'hexagon2-open-dot', 16, 'octagon', 116,
'octagon-open', 216, 'octagon-dot', 316,
'octagon-open-dot', 17, 'star', 117, 'star-open', 217,
'star-dot', 317, 'star-open-dot', 18, 'hexagram', 118,
'hexagram-open', 218, 'hexagram-dot', 318,
'hexagram-open-dot', 19, 'star-triangle-up', 119,
'star-triangle-up-open', 219, 'star-triangle-up-dot', 319,
'star-triangle-up-open-dot', 20, 'star-triangle-down',
120, 'star-triangle-down-open', 220,
'star-triangle-down-dot', 320,
'star-triangle-down-open-dot', 21, 'star-square', 121,
'star-square-open', 221, 'star-square-dot', 321,
'star-square-open-dot', 22, 'star-diamond', 122,
'star-diamond-open', 222, 'star-diamond-dot', 322,
'star-diamond-open-dot', 23, 'diamond-tall', 123,
'diamond-tall-open', 223, 'diamond-tall-dot', 323,
'diamond-tall-open-dot', 24, 'diamond-wide', 124,
'diamond-wide-open', 224, 'diamond-wide-dot', 324,
'diamond-wide-open-dot', 25, 'hourglass', 125,
'hourglass-open', 26, 'bowtie', 126, 'bowtie-open', 27,
'circle-cross', 127, 'circle-cross-open', 28, 'circle-x',
128, 'circle-x-open', 29, 'square-cross', 129,
'square-cross-open', 30, 'square-x', 130, 'square-x-open',
31, 'diamond-cross', 131, 'diamond-cross-open', 32,
'diamond-x', 132, 'diamond-x-open', 33, 'cross-thin', 133,
'cross-thin-open', 34, 'x-thin', 134, 'x-thin-open', 35,
'asterisk', 135, 'asterisk-open', 36, 'hash', 136,
'hash-open', 236, 'hash-dot', 336, 'hash-open-dot', 37,
'y-up', 137, 'y-up-open', 38, 'y-down', 138,
'y-down-open', 39, 'y-left', 139, 'y-left-open', 40,
'y-right', 140, 'y-right-open', 41, 'line-ew', 141,
'line-ew-open', 42, 'line-ns', 142, 'line-ns-open', 43,
'line-ne', 143, 'line-ne-open', 44, 'line-nw', 144,
'line-nw-open']
Returns
-------
Any
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
            Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
line
:class:`plotly.graph_objects.box.marker.Line` instance
or dict with compatible properties
opacity
Sets the marker opacity.
outliercolor
Sets the color of the outlier sample points.
size
Sets the marker size (in px).
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
"""
def __init__(
self,
arg=None,
color=None,
line=None,
opacity=None,
outliercolor=None,
size=None,
symbol=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.box.Marker`
color
            Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
line
:class:`plotly.graph_objects.box.marker.Line` instance
or dict with compatible properties
opacity
Sets the marker opacity.
outliercolor
Sets the color of the outlier sample points.
size
Sets the marker size (in px).
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.box.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.box.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("outliercolor", None)
_v = outliercolor if outliercolor is not None else _v
if _v is not None:
self["outliercolor"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("symbol", None)
_v = symbol if symbol is not None else _v
if _v is not None:
self["symbol"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
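# Minimal usage sketch (not part of the generated module): box.Marker properties
# are normally set through a box trace, e.g. via plotly.graph_objects.
#
#   import plotly.graph_objects as go
#   fig = go.Figure(
#       go.Box(
#           y=[1, 2, 2, 3, 3, 4, 12],
#           boxpoints="outliers",
#           marker=dict(color="royalblue", outliercolor="red",
#                       size=6, symbol="circle-open"),
#       )
#   )
#   fig.show()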
|
# -*- coding: utf-8 -*-
"""Node grouping utilities."""
from collections import defaultdict
from typing import Callable, Iterable, List, Mapping, Optional, Set, TypeVar
from pybel import BELGraph, BaseEntity
from pybel.constants import ANNOTATIONS, HAS_VARIANT, IS_A, ORTHOLOGOUS, PART_OF, RELATION
from pybel.dsl import BaseConcept
from pybel.struct.filters import concatenate_node_predicates
from pybel.struct.filters.edge_predicates import edge_has_annotation
from pybel.struct.filters.typing import NodePredicates
from ..utils import group_as_sets
__all__ = [
'group_nodes_by_annotation',
'average_node_annotation',
'group_nodes_by_annotation_filtered',
'get_mapped_nodes',
]
X = TypeVar('X')
def group_nodes_by_annotation(
graph: BELGraph,
annotation: str = 'Subgraph',
) -> Mapping[str, Set[BaseEntity]]:
"""Group the nodes occurring in edges by the given annotation."""
result = defaultdict(set)
for u, v, d in graph.edges(data=True):
if not edge_has_annotation(d, annotation):
continue
result[d[ANNOTATIONS][annotation]].add(u)
result[d[ANNOTATIONS][annotation]].add(v)
return dict(result)
def average_node_annotation(
graph: BELGraph,
key: str,
annotation: str = 'Subgraph',
aggregator: Optional[Callable[[Iterable[X]], X]] = None,
) -> Mapping[str, X]:
"""Group a graph into sub-graphs and calculate an aggregate score for all nodes in each.
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data
:param annotation: A BEL annotation to use to group nodes
:param aggregator: A function from list of values -> aggregate value. Defaults to taking the average of a list of
floats.
"""
if aggregator is None:
def aggregator(x: List[float]) -> float:
"""Calculate the average."""
return sum(x) / len(x)
grouped_nodes = group_nodes_by_annotation(graph, annotation)
return {
subgraph: aggregator([
graph.nodes[node][key]
for node in nodes
if key in graph.nodes[node]
])
for subgraph, nodes in grouped_nodes.items()
}
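# Example (hedged sketch): any callable that maps a list of values to a single
# value can be used as the aggregator, e.g. the median instead of the default
# mean. Here `graph` is assumed to be a BELGraph whose nodes carry a numeric
# entry under `key`.
#
#   from statistics import median
#   subgraph_scores = average_node_annotation(
#       graph, key='weight', annotation='Subgraph', aggregator=median,
#   )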
def group_nodes_by_annotation_filtered(
graph: BELGraph,
node_predicates: NodePredicates,
annotation: str = 'Subgraph',
) -> Mapping[str, Set[BaseEntity]]:
"""Group the nodes occurring in edges by the given annotation, with a node filter applied.
:param graph: A BEL graph
:param node_predicates: A predicate or list of predicates (graph, node) -> bool
:param annotation: The annotation to use for grouping
:return: A dictionary of {annotation value: set of nodes}
"""
if node_predicates is None:
raise ValueError('Just use group_nodes_by_annotation() if you do not have a node filter')
node_predicate = concatenate_node_predicates(node_predicates)
return {
key: {
node
for node in nodes
if node_predicate(graph, node)
}
for key, nodes in group_nodes_by_annotation(graph, annotation).items()
}
def get_mapped_nodes(
graph: BELGraph,
namespace: str,
names: Iterable[str],
) -> Mapping[BaseEntity, Set[BaseEntity]]:
"""Get the nodes mapped to this node's concept.
Returns a dict with keys: nodes that match the namespace and in
names and values other nodes (complexes, variants, orthologous...)
or this node.
:param graph: A BEL graph
:param namespace: The namespace to search
:param names: List or set of values from which we want to map nodes from
:return: Main node to variants/groups.
"""
names = {n.lower() for n in names}
namespace = namespace.lower()
return group_as_sets(
(parent_node, mapped_node)
for parent_node, mapped_node, d in graph.edges(data=True)
if (
isinstance(parent_node, BaseConcept)
and parent_node.namespace.lower() == namespace
and parent_node.name.lower() in names
and d[RELATION] in {IS_A, PART_OF, HAS_VARIANT, ORTHOLOGOUS}
)
)
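# Example (hedged sketch): collect the variants, complexes, and orthologs
# grouped under the HGNC nodes named below. `graph` is any BELGraph; names are
# matched case-insensitively.
#
#   mapped = get_mapped_nodes(graph, namespace='hgnc', names={'MAPT', 'APP'})
#   for parent, children in mapped.items():
#       print(parent, len(children))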
|
"""
This file offers the methods to automatically retrieve the graph Yersinia pestis CO92.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:01:41.210736
The undirected graph Yersinia pestis CO92 has 3943 nodes and 448508 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05771 and has 9 connected components, where the component with most
nodes has 3926 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 209, the mean node degree is 227.50, and
the node degree mode is 1. The top 5 most central nodes are 214092.YPO1910
(degree 1531), 214092.YPO3381 (degree 1387), 214092.YPO0017 (degree 1373),
214092.YPO2870 (degree 1260) and 214092.YPO0256 (degree 1122).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import YersiniaPestisCo92
# Then load the graph
graph = YersiniaPestisCo92()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def YersiniaPestisCo92(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Yersinia pestis CO92 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of the Yersinia pestis CO92 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:01:41.210736
The undirected graph Yersinia pestis CO92 has 3943 nodes and 448508 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.05771 and has 9 connected components, where the component with most
nodes has 3926 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 209, the mean node degree is 227.50, and
the node degree mode is 1. The top 5 most central nodes are 214092.YPO1910
(degree 1531), 214092.YPO3381 (degree 1387), 214092.YPO0017 (degree 1373),
214092.YPO2870 (degree 1260) and 214092.YPO0256 (degree 1122).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import YersiniaPestisCo92
# Then load the graph
graph = YersiniaPestisCo92()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="YersiniaPestisCo92",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
import string
def z_array(s):
# NOTE:
# Use Z algorithm (Gusfield theorem 1.4.1) to preprocess s.
assert len(s) > 1
z = [len(s)] + [0] * (len(s) - 1)
# Initial comparison for s[1:] with prefix
for i in range(1, len(s)):
if s[i] == s[i - 1]:
z[1] += 1
else:
break
r, l = 0, 0
    if z[1] > 0:
r, l = z[1], 1
for k in range(2, len(s)):
assert z[k] == 0
if k > r:
# Case 1
for i in range(k, len(s)):
if s[i] == s[i - k]:
z[k] += 1
else:
break
r, l = k + z[k] - 1, k
else:
# Case 2
# Calculate length of Beta
nbeta = r - k + 1
zkp = z[k - l]
if nbeta > zkp:
# Case 2a: Zkp wins
z[k] = zkp
else:
# Case 2b: Compare characters just past r
nmatch = 0
for i in range(r + 1, len(s)):
if s[i] == s[i - k]:
nmatch += 1
else:
break
l, r = k, r + nmatch
z[k] = r - k + 1
return z
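# Quick sanity check for z_array (values verified by hand):
#   z_array('aaaaa') == [5, 4, 3, 2, 1]
#   z_array('abab')  == [4, 0, 2, 0]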
def n_array(s):
# NOTE:
# Compile the N array (Gusfield theorem 2.2.2) from the z array.
return z_array(s[::-1])[::-1]
def big_l_prime_array(p, n):
# NOTE:
    # Compile L' array (Gusfield theorem 2.2.2) using p and N array.
# L'[i] = largest index j less than n such that N[j] = |P[i:]|
lp = [0] * len(p)
for j in range(len(p) - 1):
i = len(n) - n[j]
if i < len(p):
lp[i] = j + 1
return lp
def big_l_array(p, lp):
# NOTE:
# Compile L array (Gusfield theorem 2.2.2) using p and L' array.
# L[i] = largest index j less than n such that N[j] >= |P[i:]|
l = [0] * len(p)
l[1] = lp[1]
for i in range(2, len(p)):
l[i] = max(l[i - 1], lp[i])
return l
def small_l_prime_array(n):
# NOTE:
# Compile lp' array (Gusfield theorem 2.2.4) using N array.
small_lp = [0] * len(n)
for i in range(len(n)):
if n[i] == i + 1: # Prefix matching a suffix
small_lp[len(n) - i - 1] = i + 1
for i in range(len(n) - 2, -1, -1): # "Smear" them out to the left
if small_lp[i] == 0:
small_lp[i] = small_lp[i + 1]
return small_lp
def good_suffix_table(p):
# NOTE:
# Return table needed to apply good suffix rule.
n = n_array(p)
lp = big_l_prime_array(p, n)
return lp, big_l_array(p, lp), small_l_prime_array(n)
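# Example (verified by hand): for p = 'ACA',
#   good_suffix_table('ACA') == ([0, 0, 1], [0, 0, 1], [3, 1, 1])
# i.e. L' = [0, 0, 1], L = [0, 0, 1], l' = [3, 1, 1].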
def good_suffix_mismatch(i, big_l_prime, small_l_prime):
# NOTE:
# Given a mismatch at offset i, and given L/L' and l' arrays,
# return amount to shift as determined by good suffix rule.
length = len(big_l_prime)
assert i < length
if i == length - 1:
return 0
i += 1 # Points to leftmost matching position of P
if big_l_prime[i] > 0:
return length - big_l_prime[i]
return length - small_l_prime[i]
def good_suffix_match(small_l_prime):
# NOTE:
# Given a full match of P to T, return amount to shift as determined by
# good suffix rule.
return len(small_l_prime) - small_l_prime[1]
def dense_bad_char_tab(p, amap):
# NOTE:
# Given pattern string and list with ordered alphabet characters, create
# and return a dense bad character table. Table is indexed by offset then
# by character.
tab = []
nxt = [0] * len(amap)
for i in range(0, len(p)):
c = p[i]
assert c in amap
tab.append(nxt[:])
nxt[amap[c]] = i + 1
return tab
class BoyerMoore(object):
# NOTE:
# Encapsulates pattern and associated Boyer-Moore preprocessing.
def __init__(self, p, alphabet="ACGT"):
self.p = p
self.alphabet = alphabet
# Create map from alphabet characters to integers
self.amap = {}
for i in range(len(self.alphabet)):
self.amap[self.alphabet[i]] = i
# Make bad character rule table
self.bad_char = dense_bad_char_tab(p, self.amap)
# Create good suffix rule table
_, self.big_l, self.small_l_prime = good_suffix_table(p)
def bad_character_rule(self, i, c):
# NOTE:
# Return number of skips given by bad character rule at offset i.
assert c in self.amap
ci = self.amap[c]
assert i > (self.bad_char[i][ci] - 1)
return i - (self.bad_char[i][ci] - 1)
def good_suffix_rule(self, i):
# NOTE:
# Given a mismatch at offset i, return amount to shift as determined
# by (weak) good suffix rule.
length = len(self.big_l)
assert i < length
if i == length - 1:
return 0
i += 1 # i points to leftmost matching position of P
if self.big_l[i] > 0:
return length - self.big_l[i]
return length - self.small_l_prime[i]
def match_skip(self):
# NOTE:
# Return amount to shift in case where P matches T
return len(self.small_l_prime) - self.small_l_prime[1]
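# Added usage sketch (not in the original module): a straightforward matching
# loop that combines the bad character and good suffix rules exposed by the
# BoyerMoore preprocessing object above.
def boyer_moore(p, p_bm, t):
    """Return the offsets of all occurrences of p in t, using BoyerMoore p_bm."""
    i = 0
    occurrences = []
    while i < len(t) - len(p) + 1:
        shift = 1
        mismatched = False
        # Compare p against t right-to-left from the current alignment
        for j in range(len(p) - 1, -1, -1):
            if p[j] != t[i + j]:
                skip_bc = p_bm.bad_character_rule(j, t[i + j])
                skip_gs = p_bm.good_suffix_rule(j)
                shift = max(shift, skip_bc, skip_gs)
                mismatched = True
                break
        if not mismatched:
            occurrences.append(i)
            shift = max(shift, p_bm.match_skip())
        i += shift
    return occurrences

# Example (verified by hand):
#   p_bm = BoyerMoore('ACA', alphabet='ACGT')
#   boyer_moore('ACA', p_bm, 'ACACA') == [0, 2]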
|