hexsha: stringlengths 40..40 | size: int64 1..1.03M | ext: stringclasses 10 values | lang: stringclasses 1 value | max_stars_repo_path: stringlengths 3..239 | max_stars_repo_name: stringlengths 5..130 | max_stars_repo_head_hexsha: stringlengths 40..78 | max_stars_repo_licenses: listlengths 1..10 | max_stars_count: int64 1..191k ⌀ | max_stars_repo_stars_event_min_datetime: stringlengths 24..24 ⌀ | max_stars_repo_stars_event_max_datetime: stringlengths 24..24 ⌀ | max_issues_repo_path: stringlengths 3..239 | max_issues_repo_name: stringlengths 5..130 | max_issues_repo_head_hexsha: stringlengths 40..78 | max_issues_repo_licenses: listlengths 1..10 | max_issues_count: int64 1..67k ⌀ | max_issues_repo_issues_event_min_datetime: stringlengths 24..24 ⌀ | max_issues_repo_issues_event_max_datetime: stringlengths 24..24 ⌀ | max_forks_repo_path: stringlengths 3..239 | max_forks_repo_name: stringlengths 5..130 | max_forks_repo_head_hexsha: stringlengths 40..78 | max_forks_repo_licenses: listlengths 1..10 | max_forks_count: int64 1..105k ⌀ | max_forks_repo_forks_event_min_datetime: stringlengths 24..24 ⌀ | max_forks_repo_forks_event_max_datetime: stringlengths 24..24 ⌀ | content: stringlengths 1..1.03M | avg_line_length: float64 1..958k | max_line_length: int64 1..1.03M | alphanum_fraction: float64 0..1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a165d7edf0cd5d351db2dc85677d2a528c744d9 | 1,058 | py | Python | util/tools.py | damogranlabs/classy_blocks | 5f23cc2d391c6c7487045b1ce9a01ad3c5ea0b55 | ["MIT"] | 32 | 2020-06-16T11:06:11.000Z | 2022-03-19T14:31:24.000Z | util/tools.py | damogranlabs/classy_blocks | 5f23cc2d391c6c7487045b1ce9a01ad3c5ea0b55 | ["MIT"] | 12 | 2020-12-04T08:24:23.000Z | 2022-03-21T06:40:33.000Z | util/tools.py | damogranlabs/classy_blocks | 5f23cc2d391c6c7487045b1ce9a01ad3c5ea0b55 | ["MIT"] | 12 | 2020-11-19T16:18:18.000Z | 2022-03-03T08:37:35.000Z |
import jinja2
import numpy as np
# mesh utils
def template_to_dict(template_path, dict_path, context):
""" renders template with context to product a dictionary (or anything else) """
template_file = open(template_path, "r")
template_text = template_file.read()
template_file.close()
template = jinja2.Template(template_text)
mesh_file = open(dict_path, "w")
mesh_file.write(template.render(context))
mesh_file.close()
def get_count(length, start_thickness, cell_expansion_ratio, tol=1e-6):
""" returns the number of cells required to fill 'length' with cells
of specified start thickness and cell-to-cell expansion ratio """
if abs(cell_expansion_ratio - 1) > tol:
c = np.log(1- length/start_thickness*(1-cell_expansion_ratio))/np.log(cell_expansion_ratio)
else:
c = length/start_thickness
return int(c) + 1
def get_ratio(count, cell_expansion_ratio):
if count <= 1:
raise ValueError("Cell count must be greater than 1")
return cell_expansion_ratio**(count-1)
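# --- Illustrative usage sketch (not part of the original file) ---
# A quick worked example of the grading helpers above: filling a length of
# 1.0 with a first cell of 0.1 and a 1.2 cell-to-cell expansion ratio needs
# 7 cells, and the first-to-last cell ratio is then 1.2**6, roughly 2.99.
if __name__ == "__main__":
    n_cells = get_count(1.0, 0.1, 1.2)     # -> 7
    total_ratio = get_ratio(n_cells, 1.2)  # -> 1.2**6, about 2.99
    print(n_cells, total_ratio)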
| 35.266667 | 99 | 0.710775 |
4a165e45c68e8becdae59b5f3ef5e9739ddb0fac | 145 | py | Python | pyplotlm/tools.py | esmondhkchu/pyplotlm | 23de6f133ef792588964aaa45f08e06dee2e9ff8 | ["MIT"] | null | null | null | pyplotlm/tools.py | esmondhkchu/pyplotlm | 23de6f133ef792588964aaa45f08e06dee2e9ff8 | ["MIT"] | null | null | null | pyplotlm/tools.py | esmondhkchu/pyplotlm | 23de6f133ef792588964aaa45f08e06dee2e9ff8 | ["MIT"] | null | null | null |
class Error(Exception):
""" base class
"""
pass
class DimensionError(Error):
""" raise when dimension mismatch
"""
pass
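# --- Illustrative usage sketch (not part of the original file) ---
# The exceptions above are meant to be raised by the package's helpers; a
# minimal, hypothetical guard using DimensionError could look like this:
def _require_same_length(x, y):
    if len(x) != len(y):
        raise DimensionError("x and y must have the same length")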
| 14.5 | 37 | 0.593103 |
4a165e86577f180dae63bb1ba4ab53126ee6e3a6 | 17,953 | py | Python | yt/frontends/ramses/field_handlers.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | ["BSD-3-Clause-Clear"] | null | null | null | yt/frontends/ramses/field_handlers.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | ["BSD-3-Clause-Clear"] | 8 | 2020-04-02T16:51:49.000Z | 2022-01-11T14:12:44.000Z | yt/frontends/ramses/field_handlers.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | ["BSD-3-Clause-Clear"] | 2 | 2020-08-12T15:46:11.000Z | 2021-02-09T13:09:17.000Z |
import abc
import glob
import os
from yt.config import ytcfg
from yt.funcs import mylog
from yt.utilities.cython_fortran_utils import FortranFile
from .io import _read_fluid_file_descriptor
from .io_utils import read_offset
FIELD_HANDLERS = set()
def get_field_handlers():
return FIELD_HANDLERS
def register_field_handler(ph):
FIELD_HANDLERS.add(ph)
DETECTED_FIELDS = {}
class HandlerMixin:
"""This contains all the shared methods to handle RAMSES files.
This is not supposed to be user-facing.
"""
def setup_handler(self, domain):
"""
Initialize an instance of the class. This automatically sets
the full path to the file. This is not intended to be
overridden in most cases.
If you need more flexibility, rewrite this function to your
need in the inherited class.
"""
self.ds = ds = domain.ds
self.domain = domain
self.domain_id = domain.domain_id
basename = os.path.abspath(ds.root_folder)
iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1])
if ds.num_groups > 0:
igroup = ((domain.domain_id - 1) // ds.group_size) + 1
full_path = os.path.join(
basename,
f"group_{igroup:05d}",
self.fname.format(iout=iout, icpu=domain.domain_id),
)
else:
full_path = os.path.join(
basename, self.fname.format(iout=iout, icpu=domain.domain_id)
)
if os.path.exists(full_path):
self.fname = full_path
else:
raise FileNotFoundError(
f"Could not find {self._file_type} file (type: {self.ftype}). "
f"Tried {full_path}"
)
if self.file_descriptor is not None:
if ds.num_groups > 0:
# The particle file descriptor is *only* in the first group
self.file_descriptor = os.path.join(
basename, "group_00001", self.file_descriptor
)
else:
self.file_descriptor = os.path.join(basename, self.file_descriptor)
@property
def exists(self):
"""
This function should return True if the *file* the instance
represents exists. It is called for each file of the type found on the
disk.
By default, it just returns whether the file exists. Override
it for more complex cases.
"""
return os.path.exists(self.fname)
@property
def has_descriptor(self):
"""
This function should return True if a *file descriptor*
exists.
By default, it just returns whether the file exists. Override
it for more complex cases.
"""
return os.path.exists(self.file_descriptor)
@classmethod
def any_exist(cls, ds):
"""
This function should return True if the kind of particle
represented by the class exists in the dataset. It takes as
argument the class itself -not an instance- and a dataset.
Arguments
---------
* ds: a Ramses Dataset
Note
----
This function is usually called once at the initialization of
the RAMSES Dataset structure to determine if the particle type
(e.g. regular particles) exists.
"""
if ds.unique_identifier in cls._unique_registry:
return cls._unique_registry[ds.unique_identifier]
iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1])
fname = os.path.join(
os.path.split(ds.parameter_filename)[0], cls.fname.format(iout=iout, icpu=1)
)
exists = os.path.exists(fname)
cls._unique_registry[ds.unique_identifier] = exists
return exists
class FieldFileHandler(abc.ABC, HandlerMixin):
"""
Abstract class to handle particles in RAMSES. Each instance
represents a single file (one domain).
To add support to a new particle file, inherit from this class and
implement all functions containing a `NotImplementedError`.
See `SinkParticleFileHandler` for an example implementation."""
_file_type = "field"
# These properties are static properties
ftype = None # The name to give to the field type
fname = None # The name of the file(s)
attrs = None # The attributes of the header
known_fields = None # A list of tuples containing the field name and its type
config_field = None # Name of the config section (if any)
file_descriptor = None # The name of the file descriptor (if any)
# These properties are computed dynamically
field_offsets = None # Mapping from field to offset in file
field_types = (
None # Mapping from field to the type of the data (float, integer, ...)
)
def __init_subclass__(cls, *args, **kwargs):
"""
Registers subclasses at creation.
"""
super().__init_subclass__(*args, **kwargs)
if cls.ftype is not None:
register_field_handler(cls)
cls._unique_registry = {}
return cls
def __init__(self, domain):
self.setup_handler(domain)
@classmethod
@abc.abstractmethod
def detect_fields(cls, ds):
"""
Called once to setup the fields of this type
It should set the following static variables:
* parameters: dictionary
Dictionary containing the variables. The keys should match
those of `cls.attrs`
* field_list: list of (ftype, fname)
The list of the fields present in the file
"""
pass
@classmethod
def get_detected_fields(cls, ds):
"""
Get the detected fields from the registry.
"""
if ds.unique_identifier in DETECTED_FIELDS:
d = DETECTED_FIELDS[ds.unique_identifier]
if cls.ftype in d:
return d[cls.ftype]
return None
@classmethod
def set_detected_fields(cls, ds, fields):
"""
Store the detected fields into the registry.
"""
if ds.unique_identifier not in DETECTED_FIELDS:
DETECTED_FIELDS[ds.unique_identifier] = {}
DETECTED_FIELDS[ds.unique_identifier].update({cls.ftype: fields})
@classmethod
def purge_detected_fields(cls, ds):
"""
Purge the registry.
This should be called on dataset creation to force the field
detection to be called.
"""
if ds.unique_identifier in DETECTED_FIELDS:
DETECTED_FIELDS.pop(ds.unique_identifier)
@property
def level_count(self):
"""
Return the number of cells per level.
"""
if getattr(self, "_level_count", None) is not None:
return self._level_count
self.offset
return self._level_count
@property
def offset(self):
"""
Compute the offsets of the fields.
By default, it skips the header (as defined by `cls.attrs`)
and computes the offset at each level.
It should be generic enough for most of the cases, but if the
*structure* of your fluid file is non-canonical, change this.
"""
if getattr(self, "_offset", None) is not None:
return self._offset
with FortranFile(self.fname) as fd:
# Skip headers
nskip = len(self.attrs)
fd.skip(nskip)
min_level = self.domain.ds.min_level
offset, level_count = read_offset(
fd,
min_level,
self.domain.domain_id,
self.parameters["nvar"],
self.domain.amr_header,
)
self._offset = offset
self._level_count = level_count
return self._offset
class HydroFieldFileHandler(FieldFileHandler):
ftype = "ramses"
fname = "hydro_{iout:05d}.out{icpu:05d}"
file_descriptor = "hydro_file_descriptor.txt"
config_field = "ramses-hydro"
attrs = (
("ncpu", 1, "i"),
("nvar", 1, "i"),
("ndim", 1, "i"),
("nlevelmax", 1, "i"),
("nboundary", 1, "i"),
("gamma", 1, "d"),
)
@classmethod
def detect_fields(cls, ds):
# Try to get the detected fields
detected_fields = cls.get_detected_fields(ds)
if detected_fields:
return detected_fields
num = os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]
testdomain = 1 # Just pick the first domain file to read
basepath = os.path.abspath(os.path.dirname(ds.parameter_filename))
basename = "%s/%%s_%s.out%05i" % (basepath, num, testdomain)
fname = basename % "hydro"
fname_desc = os.path.join(basepath, cls.file_descriptor)
attrs = cls.attrs
with FortranFile(fname) as fd:
hvals = fd.read_attrs(attrs)
cls.parameters = hvals
# Store some metadata
ds.gamma = hvals["gamma"]
nvar = hvals["nvar"]
ok = False
# Either the fields are given by dataset
if ds._fields_in_file is not None:
fields = list(ds._fields_in_file)
ok = True
elif os.path.exists(fname_desc):
# Or there is a hydro file descriptor
mylog.debug("Reading hydro file descriptor.")
# For now, we can only read double precision fields
fields = [e[0] for e in _read_fluid_file_descriptor(fname_desc)]
# We get no fields for old-style hydro file descriptor
ok = len(fields) > 0
elif cls.config_field and ytcfg.has_section(cls.config_field):
# Or this is given by the config
cfg = ytcfg.get(cls.config_field, "fields")
known_fields = []
for field in (_.strip() for _ in cfg.split("\n") if _.strip() != ""):
known_fields.append(field.strip())
fields = known_fields
ok = True
# Else, attempt autodetection
if not ok:
foldername = os.path.abspath(os.path.dirname(ds.parameter_filename))
rt_flag = any(glob.glob(os.sep.join([foldername, "info_rt_*.txt"])))
if rt_flag: # rt run
if nvar < 10:
mylog.info("Detected RAMSES-RT file WITHOUT IR trapping.")
fields = [
"Density",
"x-velocity",
"y-velocity",
"z-velocity",
"Pressure",
"Metallicity",
"HII",
"HeII",
"HeIII",
]
else:
mylog.info("Detected RAMSES-RT file WITH IR trapping.")
fields = [
"Density",
"x-velocity",
"y-velocity",
"z-velocity",
"Pres_IR",
"Pressure",
"Metallicity",
"HII",
"HeII",
"HeIII",
]
else:
if nvar < 5:
mylog.debug(
"nvar=%s is too small! YT doesn't currently "
"support 1D/2D runs in RAMSES", nvar
)
raise ValueError
# Basic hydro runs
if nvar == 5:
fields = [
"Density",
"x-velocity",
"y-velocity",
"z-velocity",
"Pressure",
]
if nvar > 5 and nvar < 11:
fields = [
"Density",
"x-velocity",
"y-velocity",
"z-velocity",
"Pressure",
"Metallicity",
]
# MHD runs - NOTE:
# THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
if nvar == 11:
fields = [
"Density",
"x-velocity",
"y-velocity",
"z-velocity",
"B_x_left",
"B_y_left",
"B_z_left",
"B_x_right",
"B_y_right",
"B_z_right",
"Pressure",
]
if nvar > 11:
fields = [
"Density",
"x-velocity",
"y-velocity",
"z-velocity",
"B_x_left",
"B_y_left",
"B_z_left",
"B_x_right",
"B_y_right",
"B_z_right",
"Pressure",
"Metallicity",
]
mylog.debug(
"No fields specified by user; automatically setting fields array to %s",
fields,
)
# Allow some wiggle room for users to add too many variables
count_extra = 0
while len(fields) < nvar:
fields.append("var" + str(len(fields)))
count_extra += 1
if count_extra > 0:
mylog.debug("Detected %s extra fluid fields.", count_extra)
cls.field_list = [(cls.ftype, e) for e in fields]
cls.set_detected_fields(ds, fields)
return fields
class GravFieldFileHandler(FieldFileHandler):
ftype = "gravity"
fname = "grav_{iout:05d}.out{icpu:05d}"
config_field = "ramses-grav"
attrs = (
("ncpu", 1, "i"),
("nvar", 1, "i"),
("nlevelmax", 1, "i"),
("nboundary", 1, "i"),
)
@classmethod
def detect_fields(cls, ds):
ndim = ds.dimensionality
iout = int(str(ds).split("_")[1])
basedir = os.path.split(ds.parameter_filename)[0]
fname = os.path.join(basedir, cls.fname.format(iout=iout, icpu=1))
with FortranFile(fname) as fd:
cls.parameters = fd.read_attrs(cls.attrs)
nvar = cls.parameters["nvar"]
ndim = ds.dimensionality
if nvar == ndim + 1:
fields = ["potential"] + [f"{k}-acceleration" for k in "xyz"[:ndim]]
ndetected = ndim
else:
fields = [f"{k}-acceleration" for k in "xyz"[:ndim]]
ndetected = ndim
if ndetected != nvar and not ds._warned_extra_fields["gravity"]:
mylog.warning("Detected %s extra gravity fields.", nvar - ndetected)
ds._warned_extra_fields["gravity"] = True
for i in range(nvar - ndetected):
fields.append(f"var{i}")
cls.field_list = [(cls.ftype, e) for e in fields]
return fields
class RTFieldFileHandler(FieldFileHandler):
ftype = "ramses-rt"
fname = "rt_{iout:05d}.out{icpu:05d}"
config_field = "ramses-rt"
attrs = (
("ncpu", 1, "i"),
("nvar", 1, "i"),
("ndim", 1, "i"),
("nlevelmax", 1, "i"),
("nboundary", 1, "i"),
("gamma", 1, "d"),
)
@classmethod
def detect_fields(cls, ds):
# Try to get the detected fields
detected_fields = cls.get_detected_fields(ds)
if detected_fields:
return detected_fields
fname = ds.parameter_filename.replace("info_", "info_rt_")
rheader = {}
def read_rhs(cast):
line = f.readline()
p, v = line.split("=")
rheader[p.strip()] = cast(v)
with open(fname, "r") as f:
# Read nRTvar, nions, ngroups, iions
for _ in range(4):
read_rhs(int)
f.readline()
# Read X and Y fractions
for _ in range(2):
read_rhs(float)
f.readline()
# Read unit_np, unit_pfd
for _ in range(2):
read_rhs(float)
# Read rt_c_frac
# Note: when using variable speed of light, this line will contain multiple
# values corresponding to the velocity at each level
read_rhs(lambda line: [float(e) for e in line.split()])
f.readline()
# Read n star, t2star, g_star
for _ in range(3):
read_rhs(float)
# Touchy part, we have to read the photon group properties
mylog.debug("Not reading photon group properties")
cls.rt_parameters = rheader
ngroups = rheader["nGroups"]
iout = int(str(ds).split("_")[1])
basedir = os.path.split(ds.parameter_filename)[0]
fname = os.path.join(basedir, cls.fname.format(iout=iout, icpu=1))
with FortranFile(fname) as fd:
cls.parameters = fd.read_attrs(cls.attrs)
fields = []
for ng in range(ngroups):
tmp = [
"Photon_density_%s",
"Photon_flux_x_%s",
"Photon_flux_y_%s",
"Photon_flux_z_%s",
]
fields.extend([t % (ng + 1) for t in tmp])
cls.field_list = [(cls.ftype, e) for e in fields]
cls.set_detected_fields(ds, fields)
return fields
@classmethod
def get_rt_parameters(cls, ds):
if cls.rt_parameters:
return cls.rt_parameters
# Call detect fields to get the rt_parameters
cls.detect_fields(ds)
return cls.rt_parameters
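# --- Illustrative sketch (not part of the original file) ---
# Registering a new field file handler only requires subclassing
# FieldFileHandler with a non-None `ftype`; __init_subclass__ above then
# adds it to FIELD_HANDLERS automatically. The handler below is purely
# hypothetical (the file name, attrs and field list do not correspond to a
# real RAMSES output) and detect_fields is reduced to a hard-coded list.
class MyScalarFileHandler(FieldFileHandler):
    ftype = "my-scalar"
    fname = "scalar_{iout:05d}.out{icpu:05d}"
    config_field = "ramses-my-scalar"
    attrs = (("ncpu", 1, "i"), ("nvar", 1, "i"))

    @classmethod
    def detect_fields(cls, ds):
        fields = ["my_scalar"]
        cls.field_list = [(cls.ftype, f) for f in fields]
        cls.set_detected_fields(ds, fields)
        return fields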
| 31.331588 | 88 | 0.526709 |
4a165f815b17b7bc7a2911f9d2de8c31b3135a71 | 5,375 | py | Python | onegeo_api/utils.py | neogeo-technologies/onegeo-api | a6b1801f691098b8213f0c06fd9e3e374c4cf4a0 | ["Apache-2.0"] | null | null | null | onegeo_api/utils.py | neogeo-technologies/onegeo-api | a6b1801f691098b8213f0c06fd9e3e374c4cf4a0 | ["Apache-2.0"] | null | null | null | onegeo_api/utils.py | neogeo-technologies/onegeo-api | a6b1801f691098b8213f0c06fd9e3e374c4cf4a0 | ["Apache-2.0"] | 1 | 2019-10-18T13:52:03.000Z | 2019-10-18T13:52:03.000Z |
# Copyright (c) 2017-2019 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from base64 import b64decode
from collections import deque
from collections.abc import Mapping
from django.contrib.auth import authenticate
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from functools import wraps
from numbers import Number
from onegeo_api.exceptions import ConflictError
from pathlib import Path
import sys
class HttpResponseSeeOther(HttpResponseRedirect):
status_code = 303
class BasicAuth(object):
def view_or_basicauth(self, view, request, test_func, *args, **kwargs):
http_auth = request.META.get('HTTP_AUTHORIZATION', '')
if http_auth not in ('', None):
auth = http_auth.split()
if len(auth) == 2:
if auth[0].lower() == 'basic':
try:
username, password = b64decode(
auth[1]).decode('utf-8').split(':')
except Exception:
pass
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
request.user = user
return view(*args, **kwargs)
return HttpResponse(status=401)
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
request = None
args = list(args)
for arg in args:
if isinstance(arg, WSGIRequest):
request = arg
break
return self.view_or_basicauth(
f, request, lambda u: u.is_authenticated(), *args, **kwargs)
return wrapper
class Singleton(type):
__instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls.__instances:
cls.__instances[cls] = super().__call__(*args, **kwargs)
return cls.__instances[cls]
def clean_my_obj(obj):
if isinstance(obj, (list, tuple, set)):
return type(obj)(clean_my_obj(x) for x in obj if x is not None)
elif isinstance(obj, dict):
return type(obj)(
(clean_my_obj(k), clean_my_obj(v))
for k, v in obj.items() if k is not None and v is not None)
else:
return obj
def merge_two_objs(obj1, obj2, path=None):
"""Merge 'obj1' to 'obj2'."""
if path is None:
path = []
for k in obj2:
if k in obj1:
if isinstance(obj1[k], dict) and isinstance(obj2[k], dict):
merge_two_objs(obj1[k], obj2[k], path + [str(k)])
elif obj1[k] == obj2[k]:
pass
else:
if isinstance(obj2[k], str):
desc = "value '{}' is ambiguous".format(obj2[k])
else:
desc = "values {} are ambiguous".format(
', '.join(["'{}'".format(v) for v
in (set(obj2[k]) - set(obj1[k]))]))
raise ConflictError(
"Conflict error at path: '{0}.{1}': {2}".format(
'.'.join(path), str(k), desc))
else:
obj1[k] = obj2[k]
return obj1
def subdirectories(root):
p = Path(root)
if not p.exists():
raise ConnectionError('Given path does not exist.')
return [x.as_uri() for x in p.iterdir() if x.is_dir()]
def estimate_size(obj):
"""Recursively iterate to sum size of object."""
done = []
def inner(sub):
if id(sub) in done:
return 0
sizeof = sys.getsizeof(sub)
if isinstance(sub, (str, bytes, Number, range, bytearray)):
pass # bypass remaining control flow and return
elif isinstance(sub, (tuple, list, set, deque)):
sizeof += sum(inner(i) for i in sub)
elif isinstance(sub, Mapping) or hasattr(sub, 'items'):
sizeof += sum(inner(k) + inner(v) for k, v in getattr(sub, 'items')())
# Check for custom object instances - may subclass above too
if hasattr(sub, '__dict__'):
sizeof += inner(vars(sub))
if hasattr(sub, '__slots__'): # can have __slots__ with __dict__
sizeof += sum(inner(getattr(sub, s)) for s in sub.__slots__ if hasattr(sub, s))
done.append(id(sub))
return sizeof
return inner(obj)
def pagination_handler(f):
@wraps(f)
def wrapper(*args, **kwargs):
x = kwargs.pop('page_number', None)
y = kwargs.pop('page_size', None)
if isinstance(x, int) and isinstance(y, int) and x > 0 and y > 0:
i = (x * y) - y
j = i + y
kwargs.update({'i': i, 'j': j})
return f(*args, **kwargs)
return wrapper
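# --- Illustrative usage sketch (not part of the original file) ---
# merge_two_objs() merges obj2 into obj1 recursively and raises ConflictError
# on diverging values; pagination_handler() turns (page_number, page_size)
# keyword arguments into slice bounds (i, j). The function below is
# hypothetical and only exercises the decorator.
@pagination_handler
def paginate(items, i=0, j=None):
    return items[i:j]

# merge_two_objs({'a': 1}, {'b': 2})                       -> {'a': 1, 'b': 2}
# paginate(list(range(50)), page_number=2, page_size=10)   -> items 10..19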
| 32.77439 | 91 | 0.571721 |
4a1660f90d1db1bfaa16dfddf4a5ddf2f913db43 | 2,254 | bzl | Python | config/config.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | ["MIT"] | 28 | 2020-08-11T16:22:46.000Z | 2022-03-04T15:41:52.000Z | config/config.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | ["MIT"] | 137 | 2020-08-11T16:07:49.000Z | 2022-02-27T10:59:05.000Z | config/config.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | ["MIT"] | 10 | 2020-09-10T00:01:28.000Z | 2022-03-08T18:00:28.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This dict is imported into `//antlir/bzl:constants.bzl` to provide per
# repository configuration.
do_not_use_repo_cfg = {
"antlir_linux_flavor": "fedora33",
"artifact_key_to_path": " ".join([
(k + " " + v)
for k, v in {
"build_appliance.newest": "//images/appliance:stable-build-appliance",
"extractor.common_deps": "//images/appliance:stable-build-appliance",
"metalos.layer.base": "//images/base:fedora.vm",
"vm.rootfs.btrfs": "//images/base:fedora.vm.btrfs",
"vm.rootfs.layer": "//images/base:fedora.vm",
}.items()
]),
"flavor_available": " ".join(["fedora33"]),
"flavor_default": "fedora33",
# KEEP THIS DICTIONARY SMALL.
#
# For each `feature`, we have to emit as many targets as there are
# elements, because we do not know the version set that the
# including `image.layer` will use. This would be fixable if Buck
# supported providers like Bazel does.
"flavor_to_config": {
# Do NOT put this in `flavor_available`.
"antlir_test": {
"build_appliance": "//antlir/compiler/test_images:build_appliance_testing",
"rpm_installer": "dnf",
},
"fedora33": {
"build_appliance": "//images/appliance:stable-build-appliance",
"rpm_installer": "dnf",
},
},
"host_mounts_allowed_in_targets": " ".join([
"//images/appliance:host-build-appliance",
"//images/appliance:host-build-appliance__layer-feature",
]),
"host_mounts_for_repo_artifacts": [],
# Future: Once we can guarantee `libcap-ng` to be at least 0.8, add
# this in.
#
# Also check this issue to see if this can be detected from
# `cap-ng.h` instead -- once both OSS and FB builds can be
# guaranteed to have this issue fixed, we can move the conditonal
# compilation into the `.c` file and remove this config.
# https://github.com/stevegrubb/libcap-ng/issues/20
#
# "libcap_ng_compiler_flags": "-DCAPNG_SUPPORTS_AMBIENT=1",
}
| 40.981818 | 87 | 0.635759 |
4a16610e990af86131080a26abedf4b4dc18ac62 | 1,731 | py | Python | jsuarez/MPIUtils.py | jarbus/neural-mmo | 7ad02fab50f2781c0a71f7d2afd10c1503110736 | ["MIT"] | 1,450 | 2019-03-04T15:47:38.000Z | 2022-03-30T03:33:35.000Z | jsuarez/MPIUtils.py | jarbus/neural-mmo | 7ad02fab50f2781c0a71f7d2afd10c1503110736 | ["MIT"] | 34 | 2019-03-05T09:50:38.000Z | 2021-08-31T15:20:27.000Z | jsuarez/MPIUtils.py | LaudateCorpus1/neural-mmo | a9a7c34a1fb24fbf252e2958bdb869c213e580a3 | ["MIT"] | 164 | 2019-03-04T16:09:19.000Z | 2022-02-26T15:43:40.000Z |
#Author: Joseph Suarez
from pdb import set_trace as T
import sys, builtins
import numpy as np
from mpi4py import MPI
from mpi4py.MPI import COMM_WORLD as comm
MASTER = 0
SILENT = 1
ALL = 2
class LoadBalancer:
def __init__(self, cores):
self.nCores = len(cores)
self.cores = cores
self.loads = dict((core, 0) for core in cores)
def assignWorker(self):
#core = 1
#self.loads[core] += 1
#return np.random.choice(self.cores)
#return min([len(e) for e in load.values()])
core = min(self.loads, key=self.loads.get)
self.loads[core] += 1
return core
def deleteWorker(self, core):
self.loads[core] -= 1
def print(verbose, *args):
if verbose == ALL or (verbose == MASTER and isMaster()):
builtins.print(*args)
sys.stdout.flush()
def send(data, dst, seq=None, usePar=False):
if not usePar:
seq.inbox = data
return
comm.send(data, dst)
def recv(src, seq=None, usePar=False):
if not usePar:
return seq.inbox
return comm.recv(source=src)
#Returns a req
def isend(data, dst, tag):
return comm.isend(data, dest=dst, tag=tag)
#Returns a req
def irecv(src, tag):
return comm.irecv(source=src, tag=tag)
def gather(dst):
return comm.gather(root=dst)
def assignWorker(clients):
return np.random.choice(clients)
def distributeFunc(f):
if isMaster():
x = f()
else:
x = None
return distribute(x)
def npValMean(val):
meanVal = np.zeros_like(val)
comm.Allreduce(val, meanVal, op=MPI.SUM)
return meanVal / comm.Get_size()
def distribute(x):
return comm.bcast(x, root=MASTER)
def isMaster():
return comm.Get_rank() == MASTER
def core():
return comm.Get_rank()
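# --- Illustrative usage sketch (not part of the original file) ---
# LoadBalancer simply hands out the least-loaded core; the check below only
# exercises that bookkeeping (running this module still requires a working
# mpi4py installation because of the imports at the top).
if __name__ == '__main__':
    lb = LoadBalancer([0, 1, 2])
    first = lb.assignWorker()   # all loads equal -> core 0
    second = lb.assignWorker()  # core 0 is now busier -> core 1
    lb.deleteWorker(first)      # release core 0 again
    assert (first, second) == (0, 1)
    assert lb.loads == {0: 0, 1: 1, 2: 0}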
| 21.109756 | 59 | 0.649913 |
4a16635070e8956ec2175584c1cb3eecef76768d | 2,038 | py | Python | docs/DSDC/miniprez/OLD_MINIPREZ/tests/test_slide.py | thoppe/Presentation_Topics | e9aba07e9ab087b44e6044c6082ba8e873a9b4fd | ["MIT"] | 2 | 2018-12-03T17:03:19.000Z | 2018-12-10T16:42:39.000Z | docs/DSDC/miniprez/OLD_MINIPREZ/tests/test_slide.py | thoppe/Presentation_Topics_in_NLP | e9aba07e9ab087b44e6044c6082ba8e873a9b4fd | ["MIT"] | 1 | 2019-02-19T15:12:19.000Z | 2019-02-19T15:12:19.000Z | docs/DSDC/miniprez/OLD_MINIPREZ/tests/test_slide.py | thoppe/Presentation_Topics_in_NLP | e9aba07e9ab087b44e6044c6082ba8e873a9b4fd | ["MIT"] | 1 | 2019-02-19T12:51:37.000Z | 2019-02-19T12:51:37.000Z |
from miniprez.parser import section
from nose.tools import assert_equal
slide0 = '''
---- .alignright
@h1 .text-data Good morning
@p Time to rise and shine!
'''
slide0_out = '''<section class="alignright"><h1 class="text-data"><text>Good morning<p><text>Time to rise and shine!</text></p></text></h1></section>'''
slide1 = '''
---- .aligncenter
.red Red dogs
.big big red dogs
.small small red dogs
.fat small fat red dogs
'''
slide1_out = '''<section class="aligncenter"><div class="red"><text>Red dogs<div class="big"><text>big red dogs</text></div><div class="small"><text>small red dogs<div class="fat"><text>small fat red dogs</text></div></text></div></text></div></section>'''
slide2 = '''
----
@background(foobar)
@h1 big dogs
'''
slide2_out = '''<section><span class="background" style="background-image:url(\'foobar\')"></span><div class="wrap"><h1><text>big dogs</text></h1></div></section>'''
slide3 = '''
----
Below is a list
@h1
+ item one
+ item **two**
'''
slide3_out = '''<section><text>Below is a list</text><h1><ul class="markdownlist"><li><text>item one</text></li><li><text>item <strong>two</strong></text></li></ul></h1></section>'''
slide4 = '''
----
Below is some code
```
for x in range(20):
print x
```
'''
slide4_out = '''<section><text>Below is some code</text><pre class="prettyprint">for x in range(20):\n print x</pre></section>'''
def test_empty_section():
empty_section = ["----"]
S = section(empty_section)
assert_equal(str(S), "<section></section>")
def test_simple_slide0():
S = section(slide0.split('\n'))
assert_equal(str(S), slide0_out)
def test_simple_slide1():
S = section(slide1.split('\n'))
assert_equal(str(S), slide1_out)
def test_background_div_wrap():
S = section(slide2.split('\n'))
assert_equal(str(S), slide2_out)
def test_markdownlist():
S = section(slide3.split('\n'))
assert_equal(str(S), slide3_out)
def test_codeblock():
S = section(slide4.split('\n'))
assert_equal(str(S), slide4_out)
| 26.467532 | 256 | 0.64475 |
4a166515fbc0b485abfd1a2ec82cbd61084d1dc8 | 434 | py | Python | calamari_ocr/ocr/text_processing/default_text_postprocessor.py | Nesbi/calamari | 25eb872118d15d0740f702ef42ef6f785e1a5858 | ["Apache-2.0"] | 5 | 2021-02-12T16:22:46.000Z | 2021-03-08T13:04:15.000Z | calamari_ocr/ocr/text_processing/default_text_postprocessor.py | Nesbi/calamari | 25eb872118d15d0740f702ef42ef6f785e1a5858 | ["Apache-2.0"] | 1 | 2020-05-06T15:03:17.000Z | 2020-05-07T15:22:01.000Z | calamari_ocr/ocr/text_processing/default_text_postprocessor.py | Nesbi/calamari | 25eb872118d15d0740f702ef42ef6f785e1a5858 | ["Apache-2.0"] | null | null | null |
from calamari_ocr.ocr.text_processing import \
MultiTextProcessor, StripTextProcessor, BidiTextProcessor, \
TextNormalizer, TextRegularizer
class DefaultTextPostprocessor(MultiTextProcessor):
def __init__(self):
super().__init__(
[
TextNormalizer(),
TextRegularizer(),
StripTextProcessor(),
BidiTextProcessor(),
]
)
| 27.125 | 64 | 0.601382 |
4a16659896cf53326603b031f3035dbfc1999eec | 6,755 | py | Python | plugins/modules/oci_waas_policy_custom_protection_rules_facts.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | ["Apache-2.0"] | null | null | null | plugins/modules/oci_waas_policy_custom_protection_rules_facts.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | ["Apache-2.0"] | null | null | null | plugins/modules/oci_waas_policy_custom_protection_rules_facts.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waas_policy_custom_protection_rules_facts
short_description: Fetches details about one or multiple WaasPolicyCustomProtectionRules resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple WaasPolicyCustomProtectionRules resources in Oracle Cloud Infrastructure
- Gets the list of currently configured custom protection rules for a WAAS policy.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
waas_policy_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WAAS policy.
type: str
required: true
mod_security_rule_id:
description:
- Filter rules using a list of ModSecurity rule IDs.
type: list
elements: str
action:
description:
- Filter rules using a list of actions.
type: list
elements: str
choices:
- "DETECT"
- "BLOCK"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_display_name_option ]
"""
EXAMPLES = """
- name: List waas_policy_custom_protection_rules
oci_waas_policy_custom_protection_rules_facts:
waas_policy_id: "ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
waas_policy_custom_protection_rules:
description:
- List of WaasPolicyCustomProtectionRules resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the custom protection rule.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- The user-friendly name of the custom protection rule.
returned: on success
type: str
sample: display_name_example
action:
description:
- "The action to take when the custom protection rule is triggered.
`DETECT` - Logs the request when the criteria of the custom protection rule are met. `BLOCK` - Blocks the request when the criteria of the
custom protection rule are met."
returned: on success
type: str
sample: DETECT
mod_security_rule_ids:
description:
- The list of the ModSecurity rule IDs that apply to this protection rule. For more information about ModSecurity's open source WAF rules, see
L(Mod Security's documentation,https://www.modsecurity.org/CRS/Documentation/index.html).
returned: on success
type: list
sample: []
exclusions:
description:
- ""
returned: on success
type: complex
contains:
target:
description:
- The target of the exclusion.
returned: on success
type: str
sample: REQUEST_COOKIES
exclusions:
description:
- ""
returned: on success
type: list
sample: []
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"action": "DETECT",
"mod_security_rule_ids": [],
"exclusions": [{
"target": "REQUEST_COOKIES",
"exclusions": []
}]
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.waas import WaasClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class WaasPolicyCustomProtectionRulesFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return [
"waas_policy_id",
]
def list_resources(self):
optional_list_method_params = [
"mod_security_rule_id",
"action",
"display_name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_waas_policy_custom_protection_rules,
waas_policy_id=self.module.params.get("waas_policy_id"),
**optional_kwargs
)
WaasPolicyCustomProtectionRulesFactsHelperCustom = get_custom_class(
"WaasPolicyCustomProtectionRulesFactsHelperCustom"
)
class ResourceFactsHelper(
WaasPolicyCustomProtectionRulesFactsHelperCustom,
WaasPolicyCustomProtectionRulesFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
waas_policy_id=dict(type="str", required=True),
mod_security_rule_id=dict(type="list", elements="str"),
action=dict(type="list", elements="str", choices=["DETECT", "BLOCK"]),
display_name=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="waas_policy_custom_protection_rules",
service_client_class=WaasClient,
namespace="waas",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(waas_policy_custom_protection_rules=result)
if __name__ == "__main__":
main()
| 32.166667 | 158 | 0.641599 |
4a166762d595f091ce89d041f648357a21abfdd4 | 4,347 | py | Python | models/wide_resnet_bn_imagenet64.py | shizhouxing/Fast-Certified-Robust-Training | addac383f6fac58d1bae8a231cf0ac9dab405a06 | ["BSD-3-Clause"] | 16 | 2021-04-06T11:57:39.000Z | 2022-03-02T12:18:24.000Z | models/wide_resnet_bn_imagenet64.py | shizhouxing/Fast-Certified-Robust-Training | addac383f6fac58d1bae8a231cf0ac9dab405a06 | ["BSD-3-Clause"] | 1 | 2021-10-30T02:11:57.000Z | 2021-11-12T01:30:59.000Z | models/wide_resnet_bn_imagenet64.py | shizhouxing/Fast-Certified-Robust-Training | addac383f6fac58d1bae8a231cf0ac9dab405a06 | ["BSD-3-Clause"] | 1 | 2022-01-06T07:54:34.000Z | 2022-01-06T07:54:34.000Z |
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import sys
import numpy as np
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv1x1(in_planes, out_planes, stride = 1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=True)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride, downsample, residual=True):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.residual = residual
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
if self.residual:
if self.downsample is not None:
identity = self.downsample(identity)
x += identity
return self.relu(x)
class Wide_ResNet(nn.Module):
def __init__(
self, block, layers, num_classes=1000,
zero_init_residual=False, dense=False, residual=True,
widen_factor=1, base_width=16, pool=True,
in_ch=3, in_dim=32
):
super(Wide_ResNet, self).__init__()
self.residual = residual
self.inplanes = base_width
self.conv1 = conv3x3(in_ch, self.inplanes)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, base_width * widen_factor, layers[0], stride=1)
self.layer2 = self._make_layer(block, base_width*2 * widen_factor, layers[1], stride=2)
self.layer3 = self._make_layer(block, base_width*4 * widen_factor, layers[2], stride=2)
self.pool = pool
if pool:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if dense:
dim_dense = 512
self.dense = nn.Linear(self.inplanes, dim_dense)
self.dense_bn = nn.BatchNorm1d(dim_dense)
self.fc = nn.Linear(dim_dense, num_classes)
else:
self.dense = None
self.fc = nn.Linear(self.inplanes, num_classes)
else:
assert dense
dim_dense = 512
self.dense = nn.Linear(self.inplanes*((in_dim//4)**2), dim_dense)
self.dense_bn = nn.BatchNorm1d(dim_dense)
self.fc = nn.Linear(dim_dense, num_classes)
def _make_layer(self, block, planes, blocks, stride):
if blocks == 0:
return nn.Sequential()
downsample = None
if self.residual and (stride != 1 or self.inplanes != planes):
downsample = nn.Sequential(
conv1x1(self.inplanes, planes, stride),
nn.BatchNorm2d(planes),
)
layers = [block(self.inplanes, planes, stride, downsample, residual=self.residual)]
for _ in range(1, blocks):
layers.append(block(planes, planes, 1, None, residual=self.residual))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
if self.pool:
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if self.dense:
x = F.relu(self.dense_bn(self.dense(x)))
return self.fc(x)
def wide_resnet_bn_imagenet64(in_ch=3, in_dim=56):
model = Wide_ResNet(BasicBlock, [1, 1, 1], num_classes=200, widen_factor=10, base_width=16, pool=False, dense=True, in_ch=in_ch, in_dim=in_dim)
return model
def count_params(model):
cnt = 0
for p in model.parameters():
cnt += p.numel()
return cnt
if __name__ == '__main__':
model = wide_resnet_bn_imagenet64(in_ch=3, in_dim=64)
dummy_in = torch.zeros(2, 3, 64, 64)
print(model(dummy_in).shape)
print(model)
print('wideresnet', count_params(model)/1e6)
exit(0)
| 32.931818 | 147 | 0.600874 |
4a1667c3146b6e8cc76715fbe1b6860e1ea8ad19 | 5,628 | py | Python | core/downloaders/QBittorrent.py | yannickbilcot/Watcher3 | b6d2524efa99a0f59878e0cbb630bc2420e7f1ee | ["Apache-2.0"] | 68 | 2019-07-08T13:46:26.000Z | 2022-03-29T05:35:25.000Z | core/downloaders/QBittorrent.py | yannickbilcot/Watcher3 | b6d2524efa99a0f59878e0cbb630bc2420e7f1ee | ["Apache-2.0"] | 172 | 2019-07-21T18:39:16.000Z | 2022-03-29T09:31:51.000Z | core/downloaders/QBittorrent.py | yannickbilcot/Watcher3 | b6d2524efa99a0f59878e0cbb630bc2420e7f1ee | ["Apache-2.0"] | 31 | 2019-08-25T13:20:04.000Z | 2022-03-29T05:34:28.000Z |
import logging
import json
import core
from core.helpers import Torrent, Url
logging = logging.getLogger(__name__)
cookie = None
def _send(method, post_data=None):
''' Sends API request to QBittorrent
method (str): name of method to call. *must* include category (ie 'query/preferences')
post_data (dict): post data to send with request <optional>
Returns str text response from QBit
'''
global cookie
conf = core.CONFIG['Downloader']['Torrent']['QBittorrent']
if not cookie:
r = _login('{}:{}/'.format(conf['host'], conf['port']), conf['user'], conf['pass'])
if r is not True:
logging.error('Unable to connect to QBittorrent: {}'.format(r))
return False
url = '{}:{}/api/v2/{}'.format(conf['host'], conf['port'], method)
try:
response = Url.open(url, post_data=post_data, headers={'cookie': cookie})
except Exception as e:
logging.error('Unable to contact QBittorrent API.', exc_info=True)
raise APIConnectionError(response.status_code, response.reason)
if response.status_code == 403:
logging.info('QBittorrent request unauthorized.')
cookie = None
u = '{}:{}/'.format(conf['host'], conf['port'])
if _login(u, conf['user'], conf['pass']) is not True:
raise APIConnectionError('403', 'Unable to log in to QBittorrent.')
else:
try:
response = Url.open(url, post_data=post_data, headers={'cookie': cookie})
except Exception as e:
logging.error('Unable to contact QBittorrent API.', exc_info=True)
raise APIConnectionError(response.status_code, response.reason)
elif response.status_code != 200:
logging.error('QBittorrent API call failed: {}'.format(response.reason))
raise APIConnectionError(response.status_code, response.reason)
return response.text
def test_connection(data):
''' Tests connectivity to qbittorrent
data: dict of qbittorrent server information
Return True on success or str error message on failure
'''
logging.info('Testing connection to QBittorrent.')
url = '{}:{}/'.format(data['host'], data['port'])
return _login(url, data['user'], data['pass'])
def add_torrent(data):
''' Adds torrent or magnet to qbittorrent
data: dict of torrent/magnet information
Adds torrents to default/path/<category>
Returns dict {'response': True, 'downloadid': 'id'}
{'response': False, 'error': 'exception'}
'''
logging.info('Sending torrent {} to QBittorrent.'.format(data['title']))
conf = core.CONFIG['Downloader']['Torrent']['QBittorrent']
host = conf['host']
port = conf['port']
base_url = '{}:{}/'.format(host, port)
download_dir = _get_download_dir(base_url)
if download_dir is None:
return {'response': False, 'error': 'Unable to get path information.'}
# if we got download_dir we can connect.
post_data = {'urls': data['torrentfile'],
'savepath': '{}{}'.format(download_dir, conf['category']),
'category': conf['category']
}
try:
_send('torrents/add', post_data=post_data)
downloadid = Torrent.get_hash(data['torrentfile'])
return {'response': True, 'downloadid': downloadid}
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('QBittorrent torrents/add failed.', exc_info=True)
return {'response': False, 'error': str(e)}
def _get_download_dir(base_url):
logging.debug('Getting default download dir for QBittorrent.')
try:
response = _send('app/preferences')
return json.loads(response)['save_path']
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('QBittorrent app/preferences failed.', exc_info=True)
return None
def _login(url, username, password):
global cookie
logging.info('Attempting to log in to QBittorrent.')
post_data = {'username': username, 'password': password}
url = '{}api/v2/auth/login'.format(url)
try:
response = Url.open(url, post_data=post_data)
cookie = response.headers.get('Set-Cookie')
if response.text == 'Ok.':
logging.info('Successfully connected to QBittorrent.')
return True
elif response.text == 'Fails.':
logging.warning('Incorrect username or password for QBittorrent.')
return 'Incorrect username or password'
else:
logging.warning(response.text)
return response.text
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('qbittorrent test_connection', exc_info=True)
return '{}.'.format(str(e))
def cancel_download(downloadid):
''' Cancels download in client
downloadid: int download id
Returns bool
'''
logging.info('Cancelling download # {} in QBittorrent.'.format(downloadid))
try:
_send('torrents/delete', post_data={'hashes': downloadid.lower(), 'deleteFiles': False})
return True
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('QBittorrent torrents/delete failed.', exc_info=True)
return None
class APIConnectionError(Exception):
''' Raised when a QBittorrent API request fails '''
def __init__(self, status_code, reason):
self.msg = 'QBittorrent API request error {}: {}'.format(status_code, reason)
| 32.16 | 96 | 0.637704 |
4a1667d131cb26985f3a94a4b217d982524f7798 | 1,683 | py | Python | robustness-calculation.py | jscohen4/cohen-policy-tree-adaptation | a6efafda591e5850fc264f3756cce2bb8e07719f | ["MIT"] | 1 | 2022-01-05T12:20:18.000Z | 2022-01-05T12:20:18.000Z | robustness-calculation.py | jscohen4/cohen-policy-tree-adaptation | a6efafda591e5850fc264f3756cce2bb8e07719f | ["MIT"] | null | null | null | robustness-calculation.py | jscohen4/cohen-policy-tree-adaptation | a6efafda591e5850fc264f3756cce2bb8e07719f | ["MIT"] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from subprocess import call
import random
import json
import os
from os import listdir
from os.path import isfile, join
import seaborn as sns
import pickle
from matplotlib import cm
seed_policy_adjust = json.load(open('nondom-tracker/seed_policy_adjust.json'))
basline_ind = json.load(open('misc-files/basline_testing_scenarios.json'))
robust_dist = np.array([])
robust_pols_dicts = {}
for seed in range(10): #9 also
robust_seed_pols = []
optrun = 'training_scenarios_seed_%s'%seed
snapshots = pickle.load(open('snapshots/%s.pkl'%optrun, 'rb'))
f = snapshots['best_f'][-1]
P = snapshots['best_P'][-1]
if seed == 0:
f_all = f[seed_policy_adjust['%s'%seed]]
else:
f_all = np.concatenate((f_all, f[seed_policy_adjust['%s'%seed]]))
for j,pol_num in enumerate(seed_policy_adjust['%s'%seed]):
df = pd.DataFrame()
for scset in range(5):
dfscset = pd.read_csv('testing_outputs/ind_sc/seed_%s_pol_%s_scset_%s.csv'%(seed,pol_num,scset), index_col = 0)
df = pd.concat([df, dfscset], axis=1, sort=False)
meets_baseline = 0
for sc in df.columns:
baseline = basline_ind[sc]
opt = df[sc].values
if (-1*opt[1] > baseline[1]*0.9) & (opt[2] < baseline[2]*1.3) & (opt[3] < baseline[3]) & (opt[0] < 900):
meets_baseline +=1
score = meets_baseline/(47*5)
robust_dist = np.append(robust_dist,score)
if score > 0.8:
P[pol_num].graphviz_export('trees/nondom/seed_%s/pol_%s.pdf'%(seed,pol_num))
robust_seed_pols.append(pol_num)
robust_pols_dicts[seed] = robust_seed_pols
with open('nondom-tracker/seed_policy_adjust_robust.json', 'w') as fp:
json.dump(robust_pols_dicts, fp, indent=4)
| 34.346939 | 114 | 0.718954 |
4a1669634ab30d1149054d70a5876b2699ec5866 | 1,448 | py | Python | tests/assignments/test_assignment4.py | acc-cosc-1336/cosc-1336-spring-2018-EricScotty | 80c0249a583dc178cfc7bb95b851d7f3240dc3e9 | ["MIT"] | null | null | null | tests/assignments/test_assignment4.py | acc-cosc-1336/cosc-1336-spring-2018-EricScotty | 80c0249a583dc178cfc7bb95b851d7f3240dc3e9 | ["MIT"] | null | null | null | tests/assignments/test_assignment4.py | acc-cosc-1336/cosc-1336-spring-2018-EricScotty | 80c0249a583dc178cfc7bb95b851d7f3240dc3e9 | ["MIT"] | null | null | null |
import unittest
from src.assignments.assignment4 import sample_function
from src.assignments.assignment4 import factorial
class Test_Assign4(unittest.TestCase):
def test_sample_one(self):
'''
This is an example to guide you in creating test cases.
The sample_function takes an argument and returns the same value. If it takes a 2 it will return a 2.
:return:
'''
self.assertEqual(2, sample_function(2))
def test_sample_two(self):
'''
This is an example to guide you in creating test cases.
The sample_function takes an argument and returns the same value. If it takes a 2 it will return a 2.
In this test case, the test is for no equality.
:return:
'''
self.assertNotEqual (1, sample_function(2))
#create two test cases for the factorial function, one test case with a value of 5 and the other with value of 6
#THE NAME OF THE FUNCTION MUST BEGIN WITH test OTHERWISE THE TestCase suite will not recognize it as a test case.
def test_factorial_one(self):
'''
'''
self.assertEqual(120, factorial(5))
def test_factorial_two(self):
self.assertEqual(720, factorial(6))
#remove the pound sign at the beginning of the next statement to run tests locally.
#unittest.main(verbosity=2)
#add the pound sign back before uploading to Github
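# --- Illustrative sketch (not part of the original file) ---
# A minimal factorial() that would satisfy the two test cases above,
# assuming it lives in src/assignments/assignment4.py (kept as a comment
# here so it does not shadow the imported name):
# def factorial(n):
#     result = 1
#     for i in range(2, n + 1):
#         result *= i
#     return result   # factorial(5) == 120, factorial(6) == 720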
| 28.96 | 114 | 0.665746 |
4a16696e86ee6f894c59fe16bbc9340fe41a149e | 27,669 | py | Python | cinder/scheduler/manager.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | ["Apache-2.0"] | 3 | 2015-04-02T21:44:36.000Z | 2016-04-29T21:19:04.000Z | cinder/scheduler/manager.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | ["Apache-2.0"] | 3 | 2016-04-29T21:45:26.000Z | 2016-05-04T19:41:23.000Z | cinder/scheduler/manager.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | ["Apache-2.0"] | 4 | 2016-01-27T00:25:52.000Z | 2021-03-25T19:54:08.000Z |
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler Service
"""
import collections
from datetime import datetime
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import versionutils
import six
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder import manager
from cinder.message import api as mess_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import rpc
from cinder.scheduler.flows import create_volume
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_utils as vol_utils
scheduler_manager_opts = [
cfg.StrOpt('scheduler_driver',
default='cinder.scheduler.filter_scheduler.'
'FilterScheduler',
help='Default scheduler driver to use'),
cfg.IntOpt('scheduler_driver_init_wait_time',
default=60,
min=1,
help='Maximum time in seconds to wait for the driver to '
'report as ready'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_manager_opts)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
def append_operation_type(name=None):
def _decorator(schedule_function):
@six.wraps(schedule_function)
def inject_operation_decorator(*args, **kwargs):
request_spec = kwargs.get('request_spec', None)
request_spec_list = kwargs.get('request_spec_list', None)
if request_spec:
request_spec['operation'] = name or schedule_function.__name__
if request_spec_list:
for rs in request_spec_list:
rs['operation'] = name or schedule_function.__name__
return schedule_function(*args, **kwargs)
return inject_operation_decorator
return _decorator
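# --- Illustrative sketch (not part of the original file) ---
# append_operation_type() stamps the decorated method's name (or the explicit
# `name` override) into the request_spec before scheduling, e.g.:
#
#     @append_operation_type(name='retype_volume')
#     def retype(self, context, volume, request_spec=None, ...):
#         ...
#
# so a call made with request_spec={} leaves request_spec['operation'] set to
# 'retype_volume'. Note that the decorator only inspects keyword arguments, so
# it sees request_spec only when it is passed by keyword.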
class SchedulerManager(manager.CleanableManager, manager.Manager):
"""Chooses a host to create volumes."""
RPC_API_VERSION = scheduler_rpcapi.SchedulerAPI.RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, scheduler_driver=None, service_name=None,
*args, **kwargs):
if not scheduler_driver:
scheduler_driver = CONF.scheduler_driver
self.driver = importutils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(*args, **kwargs)
self._startup_delay = True
self.backup_api = backup_rpcapi.BackupAPI()
self.volume_api = volume_rpcapi.VolumeAPI()
self.sch_api = scheduler_rpcapi.SchedulerAPI()
self.message_api = mess_api.API()
self.rpc_api_version = versionutils.convert_version_to_int(
self.RPC_API_VERSION)
def init_host_with_rpc(self):
ctxt = context.get_admin_context()
self.request_service_capabilities(ctxt)
for __ in range(CONF.scheduler_driver_init_wait_time):
if self.driver.is_first_receive():
break
eventlet.sleep(1)
self._startup_delay = False
def reset(self):
super(SchedulerManager, self).reset()
self.volume_api = volume_rpcapi.VolumeAPI()
self.sch_api = scheduler_rpcapi.SchedulerAPI()
self.driver.reset()
@periodic_task.periodic_task(spacing=CONF.message_reap_interval,
run_immediately=True)
def _clean_expired_messages(self, context):
self.message_api.cleanup_expired_messages(context)
@periodic_task.periodic_task(spacing=CONF.reservation_clean_interval,
run_immediately=True)
def _clean_expired_reservation(self, context):
QUOTAS.expire(context)
def update_service_capabilities(self, context, service_name=None,
host=None, capabilities=None,
cluster_name=None, timestamp=None,
**kwargs):
"""Process a capability update from a service node."""
if capabilities is None:
capabilities = {}
# If we received the timestamp we have to deserialize it
elif timestamp:
timestamp = datetime.strptime(timestamp,
timeutils.PERFECT_TIME_FORMAT)
self.driver.update_service_capabilities(service_name,
host,
capabilities,
cluster_name,
timestamp)
def notify_service_capabilities(self, context, service_name,
capabilities, host=None, backend=None,
timestamp=None):
"""Process a capability update from a service node."""
# TODO(geguileo): On v4 remove host field.
if capabilities is None:
capabilities = {}
# If we received the timestamp we have to deserialize it
elif timestamp:
timestamp = datetime.strptime(timestamp,
timeutils.PERFECT_TIME_FORMAT)
backend = backend or host
self.driver.notify_service_capabilities(service_name,
backend,
capabilities,
timestamp)
def _wait_for_scheduler(self):
# NOTE(dulek): We're waiting for scheduler to announce that it's ready
# or CONF.scheduler_driver_init_wait_time seconds from service startup
# has passed.
while self._startup_delay and not self.driver.is_ready():
eventlet.sleep(1)
@append_operation_type()
def create_group(self, context, group, group_spec=None,
group_filter_properties=None, request_spec_list=None,
filter_properties_list=None):
self._wait_for_scheduler()
try:
self.driver.schedule_create_group(
context, group,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
except exception.NoValidBackend:
LOG.error("Could not find a backend for group "
"%(group_id)s.",
{'group_id': group.id})
group.status = fields.GroupStatus.ERROR
group.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to create generic group "
"%(group_id)s.",
{'group_id': group.id})
group.status = fields.GroupStatus.ERROR
group.save()
@objects.Volume.set_workers
@append_operation_type()
def create_volume(self, context, volume, snapshot_id=None, image_id=None,
request_spec=None, filter_properties=None,
backup_id=None):
self._wait_for_scheduler()
try:
flow_engine = create_volume.get_flow(context,
self.driver,
request_spec,
filter_properties,
volume,
snapshot_id,
image_id,
backup_id)
except Exception:
msg = _("Failed to create scheduler manager volume flow")
LOG.exception(msg)
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@append_operation_type()
def create_snapshot(self, ctxt, volume, snapshot, backend,
request_spec=None, filter_properties=None):
"""Create snapshot for a volume.
        The main purpose of this method is to check whether the target
        backend (of the volume and snapshot) has sufficient capacity
        to host the to-be-created snapshot.
"""
self._wait_for_scheduler()
try:
tgt_backend = self.driver.backend_passes_filters(
ctxt, backend, request_spec, filter_properties)
tgt_backend.consume_from_volume(
{'size': request_spec['volume_properties']['size']})
except exception.NoValidBackend as ex:
self._set_snapshot_state_and_notify('create_snapshot',
snapshot,
fields.SnapshotStatus.ERROR,
ctxt, ex, request_spec)
else:
volume_rpcapi.VolumeAPI().create_snapshot(ctxt, volume,
snapshot)
def _do_cleanup(self, ctxt, vo_resource):
# We can only receive cleanup requests for volumes, but we check anyway
# We need to cleanup the volume status for cases where the scheduler
# died while scheduling the volume creation.
if (isinstance(vo_resource, objects.Volume) and
vo_resource.status == 'creating'):
vo_resource.status = 'error'
vo_resource.save()
def request_service_capabilities(self, context):
volume_rpcapi.VolumeAPI().publish_service_capabilities(context)
try:
self.backup_api.publish_service_capabilities(context)
except exception.ServiceTooOld as e:
# cinder-backup has publish_service_capabilities starting Stein
# release only.
msg = ("Failed to notify about cinder-backup service "
"capabilities for host %(host)s. This is normal "
"during a live upgrade. Error: %(e)s")
LOG.warning(msg, {'host': self.host, 'e': e})
@append_operation_type()
def migrate_volume(self, context, volume, backend, force_copy,
request_spec, filter_properties):
"""Ensure that the backend exists and can accept the volume."""
self._wait_for_scheduler()
def _migrate_volume_set_error(self, context, ex, request_spec):
if volume.status == 'maintenance':
previous_status = (
volume.previous_status or 'maintenance')
volume_state = {'volume_state': {'migration_status': 'error',
'status': previous_status}}
else:
volume_state = {'volume_state': {'migration_status': 'error'}}
self._set_volume_state_and_notify('migrate_volume_to_host',
volume_state,
context, ex, request_spec)
try:
tgt_backend = self.driver.backend_passes_filters(context, backend,
request_spec,
filter_properties)
except exception.NoValidBackend as ex:
_migrate_volume_set_error(self, context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
_migrate_volume_set_error(self, context, ex, request_spec)
else:
volume_rpcapi.VolumeAPI().migrate_volume(context, volume,
tgt_backend,
force_copy)
# FIXME(geguileo): Remove this in v4.0 of RPC API.
def migrate_volume_to_host(self, context, volume, host, force_host_copy,
request_spec, filter_properties=None):
return self.migrate_volume(context, volume, host, force_host_copy,
request_spec, filter_properties)
@append_operation_type(name='retype_volume')
def retype(self, context, volume, request_spec, filter_properties=None):
"""Schedule the modification of a volume's type.
:param context: the request context
:param volume: the volume object to retype
:param request_spec: parameters for this retype request
:param filter_properties: parameters to filter by
"""
self._wait_for_scheduler()
def _retype_volume_set_error(self, context, ex, request_spec,
volume_ref, reservations, msg=None):
if reservations:
QUOTAS.rollback(context, reservations)
previous_status = (
volume_ref.previous_status or volume_ref.status)
volume_state = {'volume_state': {'status': previous_status}}
self._set_volume_state_and_notify('retype', volume_state,
context, ex, request_spec, msg)
reservations = request_spec.get('quota_reservations')
old_reservations = request_spec.get('old_reservations', None)
new_type = request_spec.get('volume_type')
if new_type is None:
msg = _('New volume type not specified in request_spec.')
ex = exception.ParameterNotFound(param='volume_type')
_retype_volume_set_error(self, context, ex, request_spec,
volume, reservations, msg)
# Default migration policy is 'never'
migration_policy = request_spec.get('migration_policy')
if not migration_policy:
migration_policy = 'never'
try:
tgt_backend = self.driver.find_retype_backend(context,
request_spec,
filter_properties,
migration_policy)
except Exception as ex:
# Not having a valid host is an expected exception, so we don't
# reraise on it.
reraise = not isinstance(ex, exception.NoValidBackend)
with excutils.save_and_reraise_exception(reraise=reraise):
_retype_volume_set_error(self, context, ex, request_spec,
volume, reservations)
else:
volume_rpcapi.VolumeAPI().retype(context, volume,
new_type['id'], tgt_backend,
migration_policy,
reservations,
old_reservations)
@append_operation_type()
def manage_existing(self, context, volume, request_spec,
filter_properties=None):
"""Ensure that the host exists and can accept the volume."""
self._wait_for_scheduler()
def _manage_existing_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'status': 'error_managing'}}
self._set_volume_state_and_notify('manage_existing', volume_state,
context, ex, request_spec)
try:
backend = self.driver.backend_passes_filters(
context, volume.service_topic_queue, request_spec,
filter_properties)
# At the API we didn't have the pool info, so the volume DB entry
# was created without it, now we add it.
volume.host = backend.host
volume.cluster_name = backend.cluster_name
volume.save()
except exception.NoValidBackend as ex:
_manage_existing_set_error(self, context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
_manage_existing_set_error(self, context, ex, request_spec)
else:
volume_rpcapi.VolumeAPI().manage_existing(context, volume,
request_spec.get('ref'))
@append_operation_type()
def manage_existing_snapshot(self, context, volume, snapshot, ref,
request_spec, filter_properties=None):
"""Ensure that the host exists and can accept the snapshot."""
self._wait_for_scheduler()
try:
backend = self.driver.backend_passes_filters(
context, volume.service_topic_queue, request_spec,
filter_properties)
backend.consume_from_volume({'size': volume.size})
except exception.NoValidBackend as ex:
self._set_snapshot_state_and_notify('manage_existing_snapshot',
snapshot,
fields.SnapshotStatus.ERROR,
context, ex, request_spec)
else:
volume_rpcapi.VolumeAPI().manage_existing_snapshot(
context, snapshot, ref,
volume.service_topic_queue)
def get_pools(self, context, filters=None):
"""Get active pools from scheduler's cache.
        NOTE(dulek): There's no self._wait_for_scheduler() because get_pools is
        a blocking RPC call for the c-api. Also, this is an admin-only API
        extension, so it won't hurt the user much to retry the request
        manually.
"""
return self.driver.get_pools(context, filters)
@append_operation_type(name='create_group')
def validate_host_capacity(self, context, backend, request_spec,
filter_properties):
try:
backend_state = self.driver.backend_passes_filters(
context,
backend,
request_spec, filter_properties)
backend_state.consume_from_volume(
{'size': request_spec['volume_properties']['size']})
except exception.NoValidBackend:
LOG.error("Desired host %(host)s does not have enough "
"capacity.", {'host': backend})
return False
return True
@append_operation_type()
def extend_volume(self, context, volume, new_size, reservations,
request_spec=None, filter_properties=None):
def _extend_volume_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'status': volume.previous_status,
'previous_status': None}}
self._set_volume_state_and_notify('extend_volume', volume_state,
context, ex, request_spec)
if not filter_properties:
filter_properties = {}
filter_properties['new_size'] = new_size
try:
backend_state = self.driver.backend_passes_filters(
context,
volume.service_topic_queue,
request_spec, filter_properties)
backend_state.consume_from_volume(
{'size': new_size - volume.size})
volume_rpcapi.VolumeAPI().extend_volume(context, volume, new_size,
reservations)
except exception.NoValidBackend as ex:
QUOTAS.rollback(context, reservations,
project_id=volume.project_id)
_extend_volume_set_error(self, context, ex, request_spec)
self.message_api.create(
context,
message_field.Action.EXTEND_VOLUME,
resource_uuid=volume.id,
exception=ex)
def _set_volume_state_and_notify(self, method, updates, context, ex,
request_spec, msg=None):
# TODO(harlowja): move into a task that just does this later.
if not msg:
msg = ("Failed to schedule_%(method)s: %(ex)s" %
{'method': method, 'ex': six.text_type(ex)})
LOG.error(msg)
volume_state = updates['volume_state']
properties = request_spec.get('volume_properties', {})
volume_id = request_spec.get('volume_id', None)
if volume_id:
db.volume_update(context, volume_id, volume_state)
if volume_state.get('status') == 'error_managing':
volume_state['status'] = 'error'
payload = dict(request_spec=request_spec,
volume_properties=properties,
volume_id=volume_id,
state=volume_state,
method=method,
reason=ex)
rpc.get_notifier("scheduler").error(context,
'scheduler.' + method,
payload)
def _set_snapshot_state_and_notify(self, method, snapshot, state,
context, ex, request_spec,
msg=None):
if not msg:
msg = ("Failed to schedule_%(method)s: %(ex)s" %
{'method': method, 'ex': six.text_type(ex)})
LOG.error(msg)
model_update = dict(status=state)
snapshot.update(model_update)
snapshot.save()
payload = dict(request_spec=request_spec,
snapshot_id=snapshot.id,
state=state,
method=method,
reason=ex)
rpc.get_notifier("scheduler").error(context,
'scheduler.' + method,
payload)
@property
def upgrading_cloud(self):
min_version_str = self.sch_api.determine_rpc_version_cap()
min_version = versionutils.convert_version_to_int(min_version_str)
return min_version < self.rpc_api_version
def _cleanup_destination(self, clusters, service):
"""Determines the RPC method, destination service and name.
The name is only used for logging, and it is the topic queue.
"""
# For the scheduler we don't have a specific destination, as any
# scheduler will do and we know we are up, since we are running this
# code.
if service.binary == 'cinder-scheduler':
cleanup_rpc = self.sch_api.do_cleanup
dest = None
dest_name = service.host
else:
cleanup_rpc = self.volume_api.do_cleanup
# For clustered volume services we try to get info from the cache.
if service.is_clustered:
# Get cluster info from cache
dest = clusters[service.binary].get(service.cluster_name)
# Cache miss forces us to get the cluster from the DB via OVO
if not dest:
dest = service.cluster
clusters[service.binary][service.cluster_name] = dest
dest_name = dest.name
# Non clustered volume services
else:
dest = service
dest_name = service.host
return cleanup_rpc, dest, dest_name
def work_cleanup(self, context, cleanup_request):
"""Process request from API to do cleanup on services.
        Here we retrieve from the DB which services we want to clean up based
        on the request from the user, then send individual cleanup requests to
        each of those services that are up. Finally we return a tuple of the
        services we sent a cleanup request to and those that were not up, so
        the request could not be sent.
"""
if self.upgrading_cloud:
raise exception.UnavailableDuringUpgrade(action='workers cleanup')
LOG.info('Workers cleanup request started.')
filters = dict(service_id=cleanup_request.service_id,
cluster_name=cleanup_request.cluster_name,
host=cleanup_request.host,
binary=cleanup_request.binary,
is_up=cleanup_request.is_up,
disabled=cleanup_request.disabled)
# Get the list of all the services that match the request
services = objects.ServiceList.get_all(context, filters)
until = cleanup_request.until or timeutils.utcnow()
requested = []
not_requested = []
# To reduce DB queries we'll cache the clusters data
clusters = collections.defaultdict(dict)
for service in services:
cleanup_request.cluster_name = service.cluster_name
cleanup_request.service_id = service.id
cleanup_request.host = service.host
cleanup_request.binary = service.binary
cleanup_request.until = until
cleanup_rpc, dest, dest_name = self._cleanup_destination(clusters,
service)
# If it's a scheduler or the service is up, send the request.
if not dest or dest.is_up:
LOG.info('Sending cleanup for %(binary)s %(dest_name)s.',
{'binary': service.binary,
'dest_name': dest_name})
cleanup_rpc(context, cleanup_request)
requested.append(service)
# We don't send cleanup requests when there are no services alive
# to do the cleanup.
else:
LOG.info('No service available to cleanup %(binary)s '
'%(dest_name)s.',
{'binary': service.binary,
'dest_name': dest_name})
not_requested.append(service)
LOG.info('Cleanup requests completed.')
return requested, not_requested
def create_backup(self, context, backup):
volume = self.db.volume_get(context, backup.volume_id)
try:
host = self.driver.get_backup_host(volume)
backup.host = host
backup.save()
self.backup_api.create_backup(context, backup)
except exception.ServiceNotFound:
msg = "Service not found for creating backup."
LOG.error(msg)
vol_utils.update_backup_error(backup, msg)
| 43.098131
| 79
| 0.577614
|
4a166a2e52683ab8718dd3ff17394c00808348c3
| 3,603
|
py
|
Python
|
stackoverflow_highest_votes_questions/superpower/middlewares.py
|
ayang818/-
|
45d3ea9367475285126c5b385a104e702f9b3a05
|
[
"MIT"
] | 3
|
2019-05-24T07:11:52.000Z
|
2020-09-04T06:34:52.000Z
|
stackoverflow_highest_votes_questions/superpower/middlewares.py
|
ayang818/spider_training
|
45d3ea9367475285126c5b385a104e702f9b3a05
|
[
"MIT"
] | null | null | null |
stackoverflow_highest_votes_questions/superpower/middlewares.py
|
ayang818/spider_training
|
45d3ea9367475285126c5b385a104e702f9b3a05
|
[
"MIT"
] | 2
|
2019-11-16T05:42:43.000Z
|
2019-12-13T02:28:03.000Z
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SuperpowerSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class SuperpowerDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
        # Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
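# Illustrative note, not part of the original file: middleware classes like
# the two above only take effect once they are enabled in the project's
# settings.py. A minimal sketch, assuming the module path "superpower"
# implied by this file's location:
#
#   SPIDER_MIDDLEWARES = {
#       'superpower.middlewares.SuperpowerSpiderMiddleware': 543,
#   }
#   DOWNLOADER_MIDDLEWARES = {
#       'superpower.middlewares.SuperpowerDownloaderMiddleware': 543,
#   }
#
# Lower order values run closer to the engine; 543 is the priority used by
# Scrapy's project template.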
| 34.644231
| 78
| 0.666667
|
4a166ae2397328369c92e4ac71f95a7d9f477285
| 410
|
py
|
Python
|
AuE893_spring20_Shubham_Horane/build/assignment2_pkg/catkin_generated/pkg.installspace.context.pc.py
|
shorane/ROS_Autonomous_TurtleBot
|
0da048aa5f8e03f2d5c1c9d686586b59ed7dfec5
|
[
"MIT"
] | null | null | null |
AuE893_spring20_Shubham_Horane/build/assignment2_pkg/catkin_generated/pkg.installspace.context.pc.py
|
shorane/ROS_Autonomous_TurtleBot
|
0da048aa5f8e03f2d5c1c9d686586b59ed7dfec5
|
[
"MIT"
] | null | null | null |
AuE893_spring20_Shubham_Horane/build/assignment2_pkg/catkin_generated/pkg.installspace.context.pc.py
|
shorane/ROS_Autonomous_TurtleBot
|
0da048aa5f8e03f2d5c1c9d686586b59ed7dfec5
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "assignment2_ws"
PROJECT_SPACE_DIR = "/home/shubham/git_space/AuE893_spring20_Shubham_Horane/install"
PROJECT_VERSION = "0.0.0"
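# Illustrative note, not part of the generated file: with a populated install
# space the guarded split idiom above expands the semicolon-separated CMake
# lists, e.g.
#   "/opt/ros/include;/usr/include".split(';') -> ['/opt/ros/include', '/usr/include']
# while the empty-string case falls back to [].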
| 45.555556
| 84
| 0.729268
|
4a166ba54d2e8144d8ca0aa640ee0a6bb946ed7e
| 3,815
|
py
|
Python
|
libnessus/test/test_report.py
|
bmx0r/python-libnessus
|
bc4ed69bf4c2f6bab2f4c3be7e38613f8c344a62
|
[
"CC-BY-3.0"
] | 13
|
2015-12-12T23:23:24.000Z
|
2022-01-21T01:50:05.000Z
|
libnessus/test/test_report.py
|
bmx0r/python-libnessus
|
bc4ed69bf4c2f6bab2f4c3be7e38613f8c344a62
|
[
"CC-BY-3.0"
] | 5
|
2015-08-26T16:00:34.000Z
|
2019-09-25T18:55:12.000Z
|
libnessus/test/test_report.py
|
bmx0r/python-libnessus
|
bc4ed69bf4c2f6bab2f4c3be7e38613f8c344a62
|
[
"CC-BY-3.0"
] | 12
|
2015-02-16T01:41:55.000Z
|
2022-01-21T01:50:07.000Z
|
#!/usr/bin/env python
from libnessus.objects.reporthost import NessusReportHost
from .test_nessus import TestNessus
from datetime import datetime
import copy
class TestNessusReport(TestNessus):
"""Test Report object"""
def test_hosts(self):
""" Check that all obj in this array are NessusReportHost
Check the number of host in a report
Check the attribute is an array
"""
for testfile in self.flist:
self.assertEqual(len(testfile['report'].hosts), testfile['hosts'])
self.assertEqual(
isinstance(testfile['report'].hosts, (
list, tuple)), True)
for host in testfile['report'].hosts:
self.assertEqual(
isinstance(host, NessusReportHost), True)
def test_save(self):
"""Test the save method...
This is done in the plugin test
"""
def test_iscomparable(self):
"""
        test_iscomparable: a TypeError should be raised when comparing with a different type
"""
value = self.forgedreport
# test different type
self.assertRaises(TypeError, value.iscomparable, 5)
def test_eq(self):
""""""
value = self.forgedreport
# test different type
self.assertRaises(TypeError, value.__eq__, 5)
value2 = copy.deepcopy(value)
self.assertEqual((value == value2), True)
def test_ne(self):
""""""
value = self.forgedreport
# test different type
self.assertRaises(TypeError, value.__eq__, "5")
value2 = copy.deepcopy(value)
self.assertEqual((value != value2), False)
def test_started(self):
"""Test the startime of the scan"""
for testfile in self.flist:
rep_start = testfile['report'].started
datefromrep = datetime.strptime(testfile['rep_start'],
'%a %b %d %H:%M:%S %Y')
self.assertEqual(rep_start, datefromrep)
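    # Illustrative note, not part of the original file: the fixtures store the
    # scan times in an asctime-like form, so (with an arbitrary example date)
    #   datetime.strptime('Fri Jun 13 14:31:05 2014', '%a %b %d %H:%M:%S %Y')
    # yields datetime(2014, 6, 13, 14, 31, 5), which is what the two
    # time-related tests compare against.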
def test_endtime(self):
"""Test the endtime of the scan"""
for testfile in self.flist:
rep_end = testfile['report'].endtime
expected = datetime.strptime(testfile['rep_end'],
'%a %b %d %H:%M:%S %Y')
err_msg = "In file %s expected : %s value : %s " % (testfile['file'],
expected,
rep_end)
self.assertEqual(rep_end, expected, err_msg)
def test_summary(self):
""""""
def test_elapsed(self):
"""test the difference between end and start time"""
for testfile in self.flist:
value = testfile['report'].endtime - testfile['report'].started
end = datetime.strptime(testfile['rep_end'], '%a %b %d %H:%M:%S %Y')
start = datetime.strptime(testfile['rep_start'], '%a %b %d %H:%M:%S %Y')
expected = end - start
err_msg = "In file %s expected : %s value : %s " % (testfile['file'],
expected,
value)
self.assertEqual(value, expected, err_msg)
def test_hosts_total(self):
"""Return the number of host in the report"""
for testfile in self.flist:
value = testfile['report'].hosts_total
expected = testfile['hosts']
err_msg = "In file %s expected : %s value : %s " % (testfile['file'],
expected,
value)
self.assertEqual(value, expected, err_msg)
| 39.739583
| 84
| 0.516645
|
4a166c11f5e913adff19743983c9c187190f136b
| 2,950
|
py
|
Python
|
gridnetwork/workers/worker.py
|
shashigharti/PyGridNetwork
|
b69c0988031177c39f555d6e561ecdf6661e5d56
|
[
"Apache-2.0"
] | null | null | null |
gridnetwork/workers/worker.py
|
shashigharti/PyGridNetwork
|
b69c0988031177c39f555d6e561ecdf6661e5d56
|
[
"Apache-2.0"
] | null | null | null |
gridnetwork/workers/worker.py
|
shashigharti/PyGridNetwork
|
b69c0988031177c39f555d6e561ecdf6661e5d56
|
[
"Apache-2.0"
] | 1
|
2021-07-06T04:32:24.000Z
|
2021-07-06T04:32:24.000Z
|
import time
import json
import requests
import re
import asyncio
from ..codes import MSG_FIELD, GRID_EVENTS, NODE_EVENTS, WORKER_PROPERTIES
from ..utils.wrappers import threaded
class Worker(object):
"""Worker class for running PySyft models for training and inference."""
def __init__(self, id: str, socket):
"""
Args:
id: ID of the worker.
socket: Socket descriptor used to send/receive messages.
"""
self._id = id
self._socket = socket
self._ping = 0
self._status = WORKER_PROPERTIES.ONLINE
self.connected_nodes = {}
self.hosted_models = {}
self.hosted_datasets = {}
self.cpu_percent = 0
self.mem_usage = 0
@property
def status(self):
"""str: Return the status of the Worker instance."""
if not self._socket:
return WORKER_PROPERTIES.OFFLINE
elif self._ping < WORKER_PROPERTIES.PING_THRESHOLD:
return WORKER_PROPERTIES.ONLINE
else:
return WORKER_PROPERTIES.BUSY
@property
def address(self):
"""str: Return the address of the Worker instance."""
if self._socket:
addr = self._socket.environ["REMOTE_ADDR"]
return re.sub("[:f]", "", addr)
@property
def location(self):
""":obj:`dict` of :obj:`str`: Return the location of the Worker instance."""
if self.address:
url = "http://ip-api.com/json/{}".format(self.address)
r = requests.get(url)
result = json.loads(r.text)
if result["status"] == "success":
return {
"region": result["regionName"],
"country": result["country"],
"city": result["city"],
}
else:
return {}
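    # Illustrative note, not part of the original file: a successful
    # ip-api.com lookup returns JSON along the lines of
    #   {"status": "success", "country": "...", "regionName": "...",
    #    "city": "...", ...}
    # which is why the property above reads result["regionName"],
    # result["country"] and result["city"]; any other status yields {}.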
def send(self, message):
"""Send a message from the Worker instance."""
self._socket.send(message)
# Run it in a different thread
@threaded
def monitor(self):
"""Monitor the worker and send JSON message across the network."""
while self._socket:
self.__begin = time.time()
self._socket.send(json.dumps({MSG_FIELD.TYPE: NODE_EVENTS.MONITOR}))
time.sleep(WORKER_PROPERTIES.HEALTH_CHECK_INTERVAL)
def update_node_infos(self, message):
"""
Update information for the connected nodes, hosted models and datasets as well as
information on CPU and memory usage.
"""
if self.__begin:
end = time.time()
self._ping = (end - self.__begin) * 1000
self.connected_nodes = message[MSG_FIELD.NODES]
self.hosted_models = message[MSG_FIELD.MODELS]
self.hosted_datasets = message[MSG_FIELD.DATASETS]
self.cpu_percent = message[MSG_FIELD.CPU]
self.mem_usage = message[MSG_FIELD.MEM_USAGE]
| 33.522727
| 89
| 0.586102
|
4a166c35715ae8e7234db657dd25d606b0e56041
| 4,066
|
py
|
Python
|
nuplan/planning/metrics/utils/expert_comparisons.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 128
|
2021-12-06T15:41:14.000Z
|
2022-03-29T13:16:32.000Z
|
nuplan/planning/metrics/utils/expert_comparisons.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 28
|
2021-12-11T08:11:31.000Z
|
2022-03-25T02:35:43.000Z
|
nuplan/planning/metrics/utils/expert_comparisons.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 14
|
2021-12-11T04:12:26.000Z
|
2022-03-24T06:38:30.000Z
|
from typing import List, Optional, Union
import numpy as np
import numpy.typing as npt
from nuplan.common.actor_state.ego_state import EgoState
from nuplan.common.actor_state.state_representation import Point2D, StateSE2
from nuplan.common.geometry.compute import principal_value
from nuplan.planning.metrics.utils.state_extractors import calculate_ego_progress_to_goal
def compute_traj_errors(
ego_traj: Union[List[Point2D], List[StateSE2]],
expert_traj: Union[List[Point2D], List[StateSE2]],
discount_factor: float = 1.0,
heading_diff_weight: float = 1.0,
) -> npt.NDArray: # type:ignore
"""
Compute the errors between the position/position_with_yaw of ego trajectory and expert trajectory
:param ego_traj: a list of Point2D or StateSE2 that describe ego position/position with yaw
:param expert_traj: a list of Point2D or StateSE2 that describe expert position/position with yaw
:param discount_factor: Displacements corresponding to the k^th timestep will
        be discounted by a factor of discount_factor^k, defaults to 1.0
:param heading_diff_weight: factor to weight heading differences if yaw errors are also
considered, defaults to 1.0
:return array of errors.
"""
traj_len = len(ego_traj)
expert_traj_len = len(expert_traj)
assert traj_len != 0, "ego_traj should be a nonempty list"
assert (
traj_len == expert_traj_len or traj_len == expert_traj_len - 1
), "ego and expert have different trajectory lengths"
# Compute the differences
displacements = np.zeros((traj_len, 2))
for i in range(traj_len):
displacements[i, :] = [ego_traj[i].x - expert_traj[i].x, ego_traj[i].y - expert_traj[i].y]
dist_seq = np.hypot(displacements[:, 0], displacements[:, 1])
if isinstance(ego_traj[0], StateSE2) and isinstance(expert_traj[0], StateSE2) and heading_diff_weight != 0:
yaw_displacements: npt.NDArray[np.float32] = np.array(
[ego_traj[i].heading - expert_traj[i].heading for i in range(traj_len)]
)
heading_errors = np.abs(principal_value(yaw_displacements))
weighted_heading_errors = heading_errors * heading_diff_weight
dist_seq = dist_seq + weighted_heading_errors
# Discount the errors in time
if discount_factor != 1:
discount_weights = get_discount_weights(discount_factor=discount_factor, traj_len=traj_len)
dist_seq = np.multiply(dist_seq, discount_weights)
return dist_seq # type:ignore
def get_discount_weights(
discount_factor: float, traj_len: int, num_trajs: int = 1
) -> Optional[npt.NDArray[np.float32]]:
"""
Return the trajectory discount weight array if applicable
:param discount_factor: the discount factor by which the displacements corresponding to the k^th timestep will
be discounted
    :param traj_len: length of the trajectory
    :param num_trajs: optional number of ego trajectories, default 1; generalized in case we need to
        compare multiple ego trajectories with the expert
:return array of discount_weights.
"""
discount_weights = None
if discount_factor != 1.0:
# Compute discount_factors
pow_arr = np.tile(np.arange(traj_len), (num_trajs, 1)) # type:ignore
discount_weights = np.power(discount_factor, pow_arr)
return discount_weights
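# Illustrative note, not part of the original file: for example,
# get_discount_weights(discount_factor=0.9, traj_len=3) returns
# array([[1.0, 0.9, 0.81]]) (shape (1, 3), since num_trajs defaults to 1);
# compute_traj_errors then multiplies the per-timestep errors elementwise by
# these weights whenever discount_factor != 1.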
def calculate_relative_progress_to_goal(
ego_states: List[EgoState], expert_states: List[EgoState], goal: StateSE2, tolerance: float = 0.1
) -> float:
"""
    Ratio of ego's to the expert's progress towards the goal, with both values floored at the tolerance
:param ego_states: A list of ego states
:param expert_states: A list of expert states
:param goal: goal
    :param tolerance: lower bound applied to both progress values (avoids division by zero)
:return Ratio of progress towards goal.
"""
ego_progress_value = calculate_ego_progress_to_goal(ego_states, goal)
expert_progress_value = calculate_ego_progress_to_goal(expert_states, goal)
relative_progress: float = max(tolerance, ego_progress_value) / max(tolerance, expert_progress_value)
return relative_progress
| 43.255319
| 114
| 0.734629
|
4a166dd66943484428f2df3dc9e6663e8e496914
| 1,199
|
py
|
Python
|
packages/ekstep_pipelines_tests/common/audio_commons/transcription_clients_tests/ekstepmodel_transcription_client_tests.py
|
jeevan-revaneppa-hirethanad/audio-to-speech-pipeline
|
a5bd7f0321834507e4157eb1aea8659cd205bf1c
|
[
"MIT"
] | 23
|
2021-03-20T13:24:37.000Z
|
2022-03-26T19:02:33.000Z
|
packages/ekstep_pipelines_tests/common/audio_commons/transcription_clients_tests/ekstepmodel_transcription_client_tests.py
|
jeevan-revaneppa-hirethanad/audio-to-speech-pipeline
|
a5bd7f0321834507e4157eb1aea8659cd205bf1c
|
[
"MIT"
] | 10
|
2021-04-06T14:00:35.000Z
|
2022-03-16T12:27:13.000Z
|
packages/ekstep_pipelines_tests/common/audio_commons/transcription_clients_tests/ekstepmodel_transcription_client_tests.py
|
jeevan-revaneppa-hirethanad/audio-to-speech-pipeline
|
a5bd7f0321834507e4157eb1aea8659cd205bf1c
|
[
"MIT"
] | 16
|
2021-03-30T10:57:34.000Z
|
2022-03-23T01:07:19.000Z
|
import unittest
from unittest import mock
from unittest.mock import Mock
from ekstep_data_pipelines.common.audio_commons.transcription_clients.azure_transcription_client import (
EkstepTranscriptionClient,
)
class TestEkstepTranscriptionClient(unittest.TestCase):
def setUp(self):
super(TestEkstepTranscriptionClient, self).setUp()
config = {"server_host": '127.0.0.1', "port": '50051', "language": "hi"}
self.ekstep_client = EkstepTranscriptionClient(**config)
@mock.patch("pickle.dump")
def test_call_speech_to_text_ekstep(self, mock_dump):
mock_client = Mock()
self.ekstep_client.client = mock_client
mock_new_result = Mock()
mock_client.recognize.return_value = mock_new_result
mock_new_result.transcript = (
" कोरोना के प्रभाव से हमारी मन की बात भी अछूती नहीं रही है।"
)
actual_result = self.ekstep_client.generate_transcription(
"test_language", "input_file_path"
)
self.assertEqual(mock_client.recognize.call_count, 1)
self.assertEqual(
actual_result, " कोरोना के प्रभाव से हमारी मन की बात भी अछूती नहीं रही है।"
)
| 29.243902
| 105
| 0.659716
|
4a166f014aed87519bb8382d268c82a891d8a838
| 32,296
|
py
|
Python
|
papers/alt-ed-prestige/data/analysis_1_vars.py
|
Vandivier/research-dissertation-case-for-alt-ed
|
58907cb10ceadec981beba15077d4c6e939307ec
|
[
"MIT"
] | null | null | null |
papers/alt-ed-prestige/data/analysis_1_vars.py
|
Vandivier/research-dissertation-case-for-alt-ed
|
58907cb10ceadec981beba15077d4c6e939307ec
|
[
"MIT"
] | 2
|
2022-01-13T04:03:23.000Z
|
2022-03-12T01:02:54.000Z
|
papers/alt-ed-prestige/data/analysis_1_vars.py
|
Vandivier/research-dissertation-case-for-alt-ed
|
58907cb10ceadec981beba15077d4c6e939307ec
|
[
"MIT"
] | null | null | null |
# ref: alt-ed-covid-2...analysis_1_vars_and_regression.py
# ref: alt-ed-matching-effects-2...analysis_1_vars_and_regression.py
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
from statsmodels.iolib.summary2 import summary_col
def fsImproveProviderNames(sColName):
sMassagedName = sColName
sMassagedName = sMassagedName.replace('provider_hirability_1', 'provider_hirability_b_nacc_nself_yother')
sMassagedName = sMassagedName.replace('provider_impressed_1', 'provider_impressed_b_nacc_nself_yother')
sMassagedName = sMassagedName.replace('provider_hirability_2', 'provider_hirability_c_nacc_yself_nother')
sMassagedName = sMassagedName.replace('provider_impressed_2', 'provider_impressed_c_nacc_yself_nother')
sMassagedName = sMassagedName.replace('provider_hirability_3', 'provider_hirability_d_nacc_yself_yother')
sMassagedName = sMassagedName.replace('provider_impressed_3', 'provider_impressed_d_nacc_yself_yother')
sMassagedName = sMassagedName.replace('provider_hirability_4', 'provider_hirability_e_yacc_nself_nother')
sMassagedName = sMassagedName.replace('provider_impressed_4', 'provider_impressed_e_yacc_nself_nother')
sMassagedName = sMassagedName.replace('provider_hirability_5', 'provider_hirability_f_yacc_nself_yother')
sMassagedName = sMassagedName.replace('provider_impressed_5', 'provider_impressed_f_yacc_nself_yother')
sMassagedName = sMassagedName.replace('provider_hirability_6', 'provider_hirability_g_yacc_yself_nother')
sMassagedName = sMassagedName.replace('provider_impressed_6', 'provider_impressed_g_yacc_yself_nother')
sMassagedName = sMassagedName.replace('provider_hirability_7', 'provider_hirability_h_yacc_yself_yother')
sMassagedName = sMassagedName.replace('provider_impressed_7', 'provider_impressed_h_yacc_yself_yother')
return sMassagedName
def fsReformatColumnNames(sColName):
sMassagedName = sColName.replace(',', '').replace(
' ', '_').replace('-', '_').replace('>', '').replace('+', '').lower()
sMassagedName = sMassagedName.replace('?', '')
sMassagedName = sMassagedName.replace('(', '')
sMassagedName = sMassagedName.replace(')', '')
sMassagedName = sMassagedName.replace('.', '_')
sMassagedName = sMassagedName.replace('_/_', '_')
sMassagedName = sMassagedName.replace('__', '_')
sMassagedName = sMassagedName.replace('how_impressed_would_you_be_if_you_heard_that_someone_studied_at_this_school', 'provider_impressed')
sMassagedName = sMassagedName.replace('how_impressed_would_you_be_if_you_heard_that_someone_studied_at_', 'provider_impressed_')
sMassagedName = sMassagedName.replace('for_many_professions_learning_at_this_school_can_qualify_a_person_for_an_entry_level_position', 'provider_hirability')
return sMassagedName
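# Illustrative note, not part of the original file: for a hypothetical input,
# fsReformatColumnNames("Household Income?") returns "household_income", and
# the long provider question prefix
# "how_impressed_would_you_be_if_you_heard_that_someone_studied_at_" is
# collapsed to "provider_impressed_" by the dedicated replace above.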
def getData(dropFirstDummy=True):
df = pd.read_csv('prestige-hidden.csv')
# ref: https://stackoverflow.com/a/51428632/3931488
print(df.columns)
# df.replace(to_replace="Not employed at present", value="a", inplace=True)
# df.replace(to_replace="I usually spend more time with customers and external business partners than with coworkers.", value="b", inplace=True)
# df.replace(to_replace="I usually spend at least an hour each day with customers and external business partners.", value="c", inplace=True)
# df.replace(to_replace="I usually spend less than an hour each day in direct contact with customers and external business partners.", value="d", inplace=True)
# df = df.replace("Not employed at present", "a")
# df = df.replace("I usually spend more time with customers and external business partners than with coworkers.", "b")
# df = df.replace("I usually spend at least an hour each day with customers and external business partners.", "c")
# df = df.replace("I usually spend less than an hour each day in direct contact with customers and external business partners.", "d")
df.rename(columns={
"Do you contribute to hiring and firing decisions at your company?": "manager_effects",
"For many professions, alternative credentials can qualify a person for an entry-level position.": "baseline_hirability", # aka favorability
"It will soon become fairly conventional for high school graduates to obtain alternative credentials instead of going to college.": "conventional_alt_creds",
"It will soon become common for high school graduates to obtain alternative credentials instead of going to college.": "conventional_alt_creds",
"When you add up the pros and cons for online education, it's probably a good thing for society overall.": "favor_online_ed",
"Which of these industries most closely matches your profession?": "industry",
"Gender?": "gender",
"Household Income?": "income",
"Age?": "age",
"What is the highest level of education you have completed?": "education",
"Which race/ethnicity best describes you? (Please choose only one.) ": "ethnicity",
"What state do you reside in?": "state",
"What is the name of a reputable certification or non-college credential in your profession? Use “n/a” if nothing comes to mind.": "named_credential",
"I prefer to hire or work with a person that has a college degree rather a person that holds a reputable certification or non-college credential.": "cat_prefer_degree",
"Do you tend to work more closely with coworkers at your company or customers and external business partners?": "work_with_external_partners",
}, inplace=True)
# get dummies ref: https://stackoverflow.com/questions/55738056/using-categorical-variables-in-statsmodels-ols-class
df = pd.get_dummies(df, columns=['manager_effects'])
df = pd.get_dummies(df, columns=['industry']).rename(
fsReformatColumnNames, axis='columns')
df = pd.get_dummies(df, columns=['income'])
df = pd.get_dummies(df, columns=['age'])
df = pd.get_dummies(df, columns=['education'])
df = pd.get_dummies(df, columns=['ethnicity'])
df = pd.get_dummies(df, columns=['state'])
df = pd.get_dummies(df, columns=['gender'])
df = pd.get_dummies(df, columns=['cat_prefer_degree'])
df['cat_work_with_external_partners_a'] = df['work_with_external_partners'].str.contains('Not employed at present')
df['cat_work_with_external_partners_b'] = df['work_with_external_partners'].str.contains('I usually spend more time with customers and external business partners than with coworkers.')
df['cat_work_with_external_partners_c'] = df['work_with_external_partners'].str.contains('I usually spend at least an hour each day with customers and external business partners.')
df['cat_work_with_external_partners_d'] = df['work_with_external_partners'].str.contains('I usually spend less than an hour each day in direct contact with customers and external business partners.')
df = df.rename(fsReformatColumnNames, axis='columns')
if dropFirstDummy:
df.drop(columns=['manager_effects_not_employed_at_present', 'industry_agriculture', 'income_prefer_not_to_answer',
'age_60', 'education_ged', 'ethnicity_american_indian_or_alaskan_native',
'state_alabama', 'gender_other'])
# help build long model formula
print(" + ".join(list(df.columns)))
# custom vars
# ref: analysis_2_summary_prestige.py
df['provider_hirability_a_nacc_nself_nother'] = df['provider_hirability_']
df['provider_impressed_a_nacc_nself_nother'] = df['provider_impressed']
df.drop(columns=['provider_hirability_', 'provider_impressed'])
df = df.rename(fsImproveProviderNames, axis='columns')
df['hireability_concrete_high_prestige'] = (df['provider_impressed_california_institute_of_technology'] + df['provider_impressed_university_of_chicago']
+ df['provider_impressed_app_academy'] + df['provider_impressed_general_assembly'] + df['provider_impressed_google'])/5
df['hireability_concrete_low_prestige'] = (df['provider_impressed_portland_state_university'] + df['provider_impressed_university_of_nebraska_omaha']
+ df['provider_impressed_fvi_school_of_technology'] + df['provider_impressed_bov_academy'])/4
df['hireability_vignette_high_prestige'] = (df['provider_hirability_d_nacc_yself_yother'] + df['provider_hirability_h_yacc_yself_yother'])/2
df['hireability_vignette_low_prestige'] = (df['provider_hirability_a_nacc_nself_nother'] + df['provider_hirability_e_yacc_nself_nother'])/2
df['hireability_total_high_prestige'] = (df['hireability_concrete_high_prestige'] + df['hireability_vignette_high_prestige'])/2
df['hireability_total_low_prestige'] = (df['hireability_concrete_low_prestige'] + df['hireability_vignette_low_prestige'])/2
df['hireability_delta_prestige'] = df['hireability_total_high_prestige'] - df['hireability_total_low_prestige']
df['hireability_concrete_accredited'] = (df['provider_impressed_california_institute_of_technology'] + df['provider_impressed_university_of_chicago']
+ df['provider_impressed_portland_state_university'] + df['provider_impressed_university_of_nebraska_omaha'])/4
df['hireability_concrete_unaccredited'] = (df['provider_impressed_app_academy'] + df['provider_impressed_general_assembly'] + df['provider_impressed_google']
+ df['provider_impressed_fvi_school_of_technology'] + df['provider_impressed_bov_academy'])/5
df['hireability_vignette_accredited'] = (df['provider_hirability_e_yacc_nself_nother'] + df['provider_hirability_h_yacc_yself_yother'])/2
df['hireability_vignette_unaccredited'] = (df['provider_hirability_a_nacc_nself_nother'] + df['provider_hirability_d_nacc_yself_yother'])/2
df['hireability_total_accredited'] = (df['hireability_concrete_accredited'] + df['hireability_vignette_accredited'])/2
df['hireability_total_unaccredited'] = (df['hireability_concrete_unaccredited'] + df['hireability_vignette_unaccredited'])/2
df['hireability_delta_accreditation'] = df['hireability_total_accredited'] - df['hireability_total_unaccredited']
# this factor is same as revealed_preference_cat_prefer_degree but no google
# from a consumer process perspective, the user picks a top-knotch bootcamp on an aggregator site (rating > 4/5 w 100+ reviews)
df['prefer_alt_cred_revealed_high_v_low_no_goog'] = df.eval('False'
+ ' or provider_impressed_app_academy > provider_impressed_portland_state_university'
+ ' or provider_impressed_app_academy > provider_impressed_university_of_nebraska_omaha'
+ ' or provider_impressed_general_assembly > provider_impressed_portland_state_university'
+ ' or provider_impressed_general_assembly > provider_impressed_university_of_nebraska_omaha'
+ '').astype(int)
# # this is a revealed or ecological preference
# # it is more clear than the plain cat_prefer_degree_false
# # they can also cross-reference each other as a robustness check
# # this permissive check is 'if any high-quality alt cred is preferred to any low quality school'
# consumer process perspective, user takes the best creds even outside of an aggregator; more difficult;
# can be accomplished by consulting hiring managers, industry employees, other experts, and extensive research
df['prefer_alt_cred_revealed_high_v_low'] = df.eval('prefer_alt_cred_revealed_high_v_low_no_goog'
+ ' or provider_impressed_google > provider_impressed_portland_state_university'
+ ' or provider_impressed_google > provider_impressed_university_of_nebraska_omaha'
+ '').astype(int)
# high v high demonstrates that some employers, although rare, prefer creds to prestigious universities
# from a functional perspective this supports the 'job search as a numbers game' process
# the candidate must consult a large number of employers and will have a low chance of success, but eventually find high employer support
df['prefer_alt_cred_revealed_high_v_high'] = df.eval('False'
+ ' or provider_impressed_app_academy > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_app_academy > provider_impressed_university_of_chicago'
+ ' or provider_impressed_general_assembly > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_general_assembly > provider_impressed_university_of_chicago'
+ ' or provider_impressed_google > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_google > provider_impressed_university_of_chicago'
+ '').astype(int)
# high v high no goog reflects process just aggregator consult
df['prefer_alt_cred_revealed_high_v_high_no_goog'] = df.eval('False'
+ ' or provider_impressed_app_academy > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_app_academy > provider_impressed_university_of_chicago'
+ ' or provider_impressed_general_assembly > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_general_assembly > provider_impressed_university_of_chicago'
+ '').astype(int)
# # this is even more permissive; if any alt cred is preferred to any school
# # why might this happen?
# # 1. general distaste for accredited education or occasional distaste for a specific school
# # 2. general favorability to alternative credentials or occasional high favor to specific credentials (esp google)
# # 3. improper distorted perception; eg naming effects appear important.
# # eg many ppl highly rated p_fvi_school_of_technology - seems due to naming effects; caltech improperly higher than chicago too
df['prefer_alt_cred_revealed'] = df.eval('False'
+ ' or provider_impressed_google > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_google > provider_impressed_university_of_chicago'
+ ' or provider_impressed_google > provider_impressed_portland_state_university'
+ ' or provider_impressed_google > provider_impressed_university_of_nebraska_omaha'
+ ''
+ ' or provider_impressed_app_academy > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_app_academy > provider_impressed_university_of_chicago'
+ ' or provider_impressed_app_academy > provider_impressed_portland_state_university'
+ ' or provider_impressed_app_academy > provider_impressed_university_of_nebraska_omaha'
+ ''
+ ' or provider_impressed_general_assembly > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_general_assembly > provider_impressed_university_of_chicago'
+ ' or provider_impressed_general_assembly > provider_impressed_portland_state_university'
+ ' or provider_impressed_general_assembly > provider_impressed_university_of_nebraska_omaha'
+ ''
+ ' or provider_impressed_bov_academy > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_bov_academy > provider_impressed_university_of_chicago'
+ ' or provider_impressed_bov_academy > provider_impressed_portland_state_university'
+ ' or provider_impressed_bov_academy > provider_impressed_university_of_nebraska_omaha'
+ ''
+ ' or provider_impressed_fvi_school_of_technology > provider_impressed_california_institute_of_technology'
+ ' or provider_impressed_fvi_school_of_technology > provider_impressed_university_of_chicago'
+ ' or provider_impressed_fvi_school_of_technology > provider_impressed_portland_state_university'
+ ' or provider_impressed_fvi_school_of_technology > provider_impressed_university_of_nebraska_omaha'
+ '').astype(int)
df['prefer_alt_cred_espoused_weakly'] = df.eval('cat_prefer_degree_false == 1').astype(int)
# TODO: maybe booleanize heard of vars and create a sum var
print("---")
print("done getting data")
print("---")
return df
# https://en.wikipedia.org/wiki/Panel_data
# this is basically wrangling a cross-sectional panel
def getPanelizedData():
df = getData()
# section 1 hireability on alternative credentials in general is_unaccredited,
# and this is communicated to respondents.
# i expect/hope is_reiterated_unaccredited is insignificant; that is in section 2, 3
dfNew = df[0:0]
# baseline_hirability = hirability for non-actual & non-vignette (eg, baseline) records
dfNew['hirability'] = []
dfNew['is_concrete'] = []
dfNew['is_vignette'] = []
dfNew['is_accredited'] = []
dfNew['is_reiterated_unaccredited'] = []
dfNew['prestige_other'] = []
dfNew['prestige_own'] = []
dfNew['is_high_other_prestige'] = []
dfNew['is_high_own_prestige'] = []
dfNew['is_high_prestige'] = []
dfNew['is_low_other_prestige'] = []
dfNew['is_low_own_prestige'] = []
dfNew['is_low_prestige'] = []
# TODO: I'm not using name recognition for now, but I will check it out as a follow-on study
# OR, if current study doesn't present expected result (prestige ~=||> accreditation)
# TODO: future study, external quality data; name recognition and quality for when is_concrete
# TODO: future study, internal name recognition (heard of) instead of external name recognition
# TODO: future study, are own prestige, perceived other prestige, and actual other prestige correlated? shouldn't they be in an efficient economy?
# internal_name_recognition
# external_name_recognition
# Each raw response is folded into 18 cross-sectional panel observations.
# TODO: I guess itertouples is preferred but having trouble doing that or seeing perf benefit w named tuples
for index, row in df.iterrows():
observationSectionOne = row.copy()
observationSectionOne.at['is_concrete'] = 0
observationSectionOne.at['is_vignette'] = 0
observationSectionOne.at['is_accredited'] = 0
observationSectionOne.at['is_reiterated_unaccredited'] = 0
observationSectionOne.at['hirability'] = observationSectionOne.at['baseline_hirability']
observationCalTech = row.copy()
observationCalTech.at['is_concrete'] = 1
observationCalTech.at['is_vignette'] = 0
observationCalTech.at['is_accredited'] = 1
observationCalTech.at['is_reiterated_unaccredited'] = 0
observationCalTech.at['is_stipulated_other_impressed'] = 1
observationCalTech.at['is_stipulated_self_impressed'] = 1
observationCalTech.at['prestige_own'] = observationCalTech.at['provider_impressed_california_institute_of_technology']
observationChicago = row.copy()
observationChicago.at['is_concrete'] = 1
observationChicago.at['is_vignette'] = 0
observationChicago.at['is_accredited'] = 1
observationChicago.at['is_reiterated_unaccredited'] = 0
observationChicago.at['is_stipulated_other_impressed'] = 1
observationChicago.at['is_stipulated_self_impressed'] = 1
observationChicago.at['prestige_own'] = observationChicago.at['provider_impressed_university_of_chicago']
observationPsu = row.copy()
observationPsu.at['is_concrete'] = 1
observationPsu.at['is_vignette'] = 0
observationPsu.at['is_accredited'] = 1
observationPsu.at['is_reiterated_unaccredited'] = 0
observationPsu.at['is_stipulated_other_impressed'] = 0
observationPsu.at['is_stipulated_self_impressed'] = 0
observationPsu.at['prestige_own'] = observationPsu.at['provider_impressed_portland_state_university']
observationUno = row.copy()
observationUno.at['is_concrete'] = 1
observationUno.at['is_vignette'] = 0
observationUno.at['is_accredited'] = 1
observationUno.at['is_reiterated_unaccredited'] = 0
observationUno.at['is_stipulated_other_impressed'] = 0
observationUno.at['is_stipulated_self_impressed'] = 0
observationUno.at['prestige_own'] = observationUno.at['provider_impressed_university_of_nebraska_omaha']
observationAppAcademy = row.copy()
observationAppAcademy.at['is_concrete'] = 1
observationAppAcademy.at['is_vignette'] = 0
observationAppAcademy.at['is_accredited'] = 0
observationAppAcademy.at['is_reiterated_unaccredited'] = 1
observationAppAcademy.at['is_stipulated_other_impressed'] = 1
observationAppAcademy.at['is_stipulated_self_impressed'] = 1
observationAppAcademy.at['prestige_own'] = observationAppAcademy.at['provider_impressed_app_academy']
observationGenAssembly = row.copy()
observationGenAssembly.at['is_concrete'] = 1
observationGenAssembly.at['is_vignette'] = 0
observationGenAssembly.at['is_accredited'] = 0
observationGenAssembly.at['is_reiterated_unaccredited'] = 1
observationGenAssembly.at['is_stipulated_other_impressed'] = 1
observationGenAssembly.at['is_stipulated_self_impressed'] = 1
observationGenAssembly.at['prestige_own'] = observationGenAssembly.at['provider_impressed_general_assembly']
observationFviTech = row.copy()
observationFviTech.at['is_concrete'] = 1
observationFviTech.at['is_vignette'] = 0
observationFviTech.at['is_accredited'] = 0
observationFviTech.at['is_reiterated_unaccredited'] = 1
observationFviTech.at['is_stipulated_other_impressed'] = 0
observationFviTech.at['is_stipulated_self_impressed'] = 0
observationFviTech.at['prestige_own'] = observationFviTech.at['provider_impressed_fvi_school_of_technology']
observationBov = row.copy()
observationBov.at['is_concrete'] = 1
observationBov.at['is_vignette'] = 0
observationBov.at['is_accredited'] = 0
observationBov.at['is_reiterated_unaccredited'] = 1
observationBov.at['is_stipulated_other_impressed'] = 0
observationBov.at['is_stipulated_self_impressed'] = 0
observationBov.at['prestige_own'] = observationBov.at['provider_impressed_bov_academy']
observationGoogle = row.copy()
observationGoogle.at['is_concrete'] = 1
observationGoogle.at['is_vignette'] = 0
observationGoogle.at['is_accredited'] = 0
observationGoogle.at['is_reiterated_unaccredited'] = 1
observationGoogle.at['is_stipulated_other_impressed'] = 1
observationGoogle.at['is_stipulated_self_impressed'] = 1
observationGoogle.at['prestige_own'] = observationGoogle.at['provider_impressed_google']
observation_a_nacc_nself_nother = row.copy()
observation_a_nacc_nself_nother.at['is_concrete'] = 0
observation_a_nacc_nself_nother.at['is_vignette'] = 1
observation_a_nacc_nself_nother.at['is_accredited'] = 0
observation_a_nacc_nself_nother.at['is_reiterated_unaccredited'] = 1
observation_a_nacc_nself_nother.at['is_stipulated_other_impressed'] = 0
observation_a_nacc_nself_nother.at['is_stipulated_self_impressed'] = 0
observation_a_nacc_nself_nother.at['prestige_own'] = observation_a_nacc_nself_nother.at['provider_impressed_a_nacc_nself_nother']
observation_a_nacc_nself_nother.at['hirability'] = observation_a_nacc_nself_nother.at['provider_hirability_a_nacc_nself_nother']
observation_b_nacc_nself_yother = row.copy()
observation_b_nacc_nself_yother.at['is_concrete'] = 0
observation_b_nacc_nself_yother.at['is_vignette'] = 1
observation_b_nacc_nself_yother.at['is_accredited'] = 0
observation_b_nacc_nself_yother.at['is_reiterated_unaccredited'] = 1
observation_b_nacc_nself_yother.at['is_stipulated_other_impressed'] = 1
observation_b_nacc_nself_yother.at['is_stipulated_self_impressed'] = 0
observation_b_nacc_nself_yother.at['prestige_own'] = observation_b_nacc_nself_yother.at['provider_impressed_b_nacc_nself_yother']
observation_b_nacc_nself_yother.at['hirability'] = observation_b_nacc_nself_yother.at['provider_hirability_b_nacc_nself_yother']
observation_c_nacc_yself_nother = row.copy()
observation_c_nacc_yself_nother.at['is_concrete'] = 0
observation_c_nacc_yself_nother.at['is_vignette'] = 1
observation_c_nacc_yself_nother.at['is_accredited'] = 0
observation_c_nacc_yself_nother.at['is_reiterated_unaccredited'] = 1
observation_c_nacc_yself_nother.at['is_stipulated_other_impressed'] = 0
observation_c_nacc_yself_nother.at['is_stipulated_self_impressed'] = 1
observation_c_nacc_yself_nother.at['prestige_own'] = observation_c_nacc_yself_nother.at['provider_impressed_c_nacc_yself_nother']
observation_c_nacc_yself_nother.at['hirability'] = observation_c_nacc_yself_nother.at['provider_hirability_c_nacc_yself_nother']
observation_d_nacc_yself_nother = row.copy()
observation_d_nacc_yself_nother.at['is_concrete'] = 0
observation_d_nacc_yself_nother.at['is_vignette'] = 1
observation_d_nacc_yself_nother.at['is_accredited'] = 0
observation_d_nacc_yself_nother.at['is_reiterated_unaccredited'] = 1
observation_d_nacc_yself_nother.at['is_stipulated_other_impressed'] = 1
observation_d_nacc_yself_nother.at['is_stipulated_self_impressed'] = 1
observation_d_nacc_yself_nother.at['prestige_own'] = observation_d_nacc_yself_nother.at['provider_impressed_d_nacc_yself_yother']
observation_d_nacc_yself_nother.at['hirability'] = observation_d_nacc_yself_nother.at['provider_hirability_d_nacc_yself_yother']
observation_e_yacc_nself_nother = row.copy()
observation_e_yacc_nself_nother.at['is_concrete'] = 0
observation_e_yacc_nself_nother.at['is_vignette'] = 1
observation_e_yacc_nself_nother.at['is_accredited'] = 1
observation_e_yacc_nself_nother.at['is_reiterated_unaccredited'] = 0
observation_e_yacc_nself_nother.at['is_stipulated_other_impressed'] = 0
observation_e_yacc_nself_nother.at['is_stipulated_self_impressed'] = 0
observation_e_yacc_nself_nother.at['prestige_own'] = observation_e_yacc_nself_nother.at['provider_impressed_e_yacc_nself_nother']
observation_e_yacc_nself_nother.at['hirability'] = observation_e_yacc_nself_nother.at['provider_hirability_e_yacc_nself_nother']
observation_f_yacc_nself_yother = row.copy()
observation_f_yacc_nself_yother.at['is_concrete'] = 0
observation_f_yacc_nself_yother.at['is_vignette'] = 1
observation_f_yacc_nself_yother.at['is_accredited'] = 1
observation_f_yacc_nself_yother.at['is_reiterated_unaccredited'] = 0
observation_f_yacc_nself_yother.at['is_stipulated_other_impressed'] = 1
observation_f_yacc_nself_yother.at['is_stipulated_self_impressed'] = 0
observation_f_yacc_nself_yother.at['prestige_own'] = observation_f_yacc_nself_yother.at['provider_impressed_f_yacc_nself_yother']
observation_f_yacc_nself_yother.at['hirability'] = observation_f_yacc_nself_yother.at['provider_hirability_f_yacc_nself_yother']
observation_g_yacc_yself_nother = row.copy()
observation_g_yacc_yself_nother.at['is_concrete'] = 0
observation_g_yacc_yself_nother.at['is_vignette'] = 1
observation_g_yacc_yself_nother.at['is_accredited'] = 1
observation_g_yacc_yself_nother.at['is_reiterated_unaccredited'] = 0
observation_g_yacc_yself_nother.at['is_stipulated_other_impressed'] = 0
observation_g_yacc_yself_nother.at['is_stipulated_self_impressed'] = 1
observation_g_yacc_yself_nother.at['prestige_own'] = observation_g_yacc_yself_nother.at['provider_impressed_g_yacc_yself_nother']
observation_g_yacc_yself_nother.at['hirability'] = observation_g_yacc_yself_nother.at['provider_hirability_g_yacc_yself_nother']
observation_h_yacc_yself_yother = row.copy()
observation_h_yacc_yself_yother.at['is_concrete'] = 0
observation_h_yacc_yself_yother.at['is_vignette'] = 1
observation_h_yacc_yself_yother.at['is_accredited'] = 1
observation_h_yacc_yself_yother.at['is_reiterated_unaccredited'] = 0
observation_h_yacc_yself_yother.at['is_stipulated_other_impressed'] = 1
observation_h_yacc_yself_yother.at['is_stipulated_self_impressed'] = 1
observation_h_yacc_yself_yother.at['prestige_own'] = observation_h_yacc_yself_yother.at['provider_impressed_h_yacc_yself_yother']
observation_h_yacc_yself_yother.at['hirability'] = observation_h_yacc_yself_yother.at['provider_hirability_h_yacc_yself_yother']
newRows = [observationSectionOne, observationCalTech, observationChicago, observationPsu, observationUno,
observationAppAcademy, observationGenAssembly, observationFviTech, observationBov, observationGoogle,
observation_a_nacc_nself_nother, observation_b_nacc_nself_yother, observation_c_nacc_yself_nother, observation_d_nacc_yself_yother,
observation_e_yacc_nself_nother, observation_f_yacc_nself_yother, observation_g_yacc_yself_nother, observation_h_yacc_yself_yother]
dfNew = dfNew.append(newRows, ignore_index=True)
# TODO: del column, don't drop https://stackoverflow.com/questions/13411544/delete-column-from-pandas-dataframe
dfNew.drop(columns=[
'provider_impressed_california_institute_of_technology',
'provider_hirability_a_nacc_nself_nother', 'provider_impressed_a_nacc_nself_nother',
'provider_hirability_b_nacc_nself_yother', 'provider_impressed_b_nacc_nself_yother',
'provider_hirability_c_nacc_yself_nother', 'provider_impressed_c_nacc_yself_nother',
'provider_hirability_d_nacc_yself_yother', 'provider_impressed_d_nacc_yself_yother',
'provider_hirability_e_yacc_nself_nother', 'provider_impressed_e_yacc_nself_nother',
'provider_hirability_f_yacc_nself_yother', 'provider_impressed_f_yacc_nself_yother',
'provider_hirability_g_yacc_yself_nother', 'provider_impressed_g_yacc_yself_nother',
'provider_hirability_h_yacc_yself_yother', 'provider_impressed_h_yacc_yself_yother',
])
# dfNew['is_high_prestige'] = dfNew['is_high_other_prestige'] * dfNew['is_high_prestige']
# dfNew['is_low_prestige'] = dfNew['is_low_other_prestige'] * dfNew['is_low_prestige']
dfNew['is_high_prestige'] = dfNew['is_stipulated_other_impressed'] * dfNew['is_stipulated_self_impressed']
# dfNew['is_crude_aggregated_prestige'] = dfNew['is_stipulated_other_impressed'] + dfNew['is_stipulated_self_impressed']
dfNew['is_low_prestige'] = (dfNew['is_stipulated_other_impressed'] == 0) & (dfNew['is_stipulated_self_impressed'] == 0)
dfNew['is_low_context_and_accredited'] = dfNew['is_low_context'] * dfNew['is_accredited']
dfNew['is_low_context_and_fvi'] = dfNew['is_low_context'] * dfNew['provider_impressed_fvi_school_of_technology']
print('dfNew len = ' + str(len(dfNew.index)))
print('---')
return dfNew
def getVignetteData(df: pd.DataFrame = None) -> pd.DataFrame:
if df is None:
df = getPanelizedData()
dfNew = df[df.is_vignette == 1]
dfNew = dfNew[dfNew.hirability > 0]
dfNew = dfNew[dfNew.prestige_own > 0]
print('getVignetteData dfNew len = ' + str(len(dfNew.index)))
print('---')
return dfNew
def getConcreteData(df: pd.DataFrame = None) -> pd.DataFrame:
if df is None:
df = getPanelizedData()
dfNew = df[df.is_concrete == 1]
# uncomment below line to get 0 records; this is good bc concrete has no hirability
# dfNew = dfNew[dfNew.hirability > 0]
dfNew = dfNew[dfNew.prestige_own > 0]
print('getConcreteData dfNew len = ' + str(len(dfNew.index)))
print('---')
return dfNew
# if this file is executed as a script
# dump to file to assist validation
if __name__ == '__main__':
df = getPanelizedData()
df.to_csv('prestige-postprocess-hidden.csv')
| 67.4238
| 203
| 0.753437
|
4a1670262c718dec13595c07b3c6d75c404f93f0
| 375
|
py
|
Python
|
Strings/string_method_quiz_FIXME.py
|
lvolkmann/couch-to-coder-python-exercises
|
afecb696d93eead9ba50613dc0723f2eca92d11a
|
[
"MIT"
] | null | null | null |
Strings/string_method_quiz_FIXME.py
|
lvolkmann/couch-to-coder-python-exercises
|
afecb696d93eead9ba50613dc0723f2eca92d11a
|
[
"MIT"
] | null | null | null |
Strings/string_method_quiz_FIXME.py
|
lvolkmann/couch-to-coder-python-exercises
|
afecb696d93eead9ba50613dc0723f2eca92d11a
|
[
"MIT"
] | null | null | null |
# How do you get rid of the white space around variable animal if animal = " dog "?
animal = " dog "
# I have a long string of numbers in variable x. I want to find the index of the first occurrence of 72. How?
x = "126354186372555"
# I want to figure out how many occurrences of 72 there are in a string. How would I do that?
string_name = "127212721272"
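# One possible set of answers is sketched below. The quiz intentionally leaves these
# as FIXMEs, so treat this as a hint showing one reasonable approach, not the only one.
trimmed_animal = animal.strip()              # "dog" -- removes leading/trailing whitespace
first_72_index = x.find("72")                # index of the first "72", or -1 if absent
occurrences_of_72 = string_name.count("72")  # counts non-overlapping "72" substrings
print(trimmed_animal, first_72_index, occurrences_of_72)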
| 34.090909
| 129
| 0.722667
|
4a16707a376e93dca4a67a6779091eb06bac7d39
| 16,911
|
py
|
Python
|
pandas/core/internals/concat.py
|
r00ta/pandas
|
33f91d8f9f2e84f2b5f3ac3f0481b691c977c427
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2019-04-16T21:03:23.000Z
|
2021-05-08T13:25:44.000Z
|
pandas/core/internals/concat.py
|
chanson90/pandas
|
3f1e5940e3929577f094ea2708f94ee184e7a336
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/internals/concat.py
|
chanson90/pandas
|
3f1e5940e3929577f094ea2708f94ee184e7a336
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# TODO: Needs a better name; too many modules are already called "concat"
from collections import defaultdict
import copy
import numpy as np
from pandas._libs import internals as libinternals, tslibs
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
_get_dtype, is_categorical_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_extension_array_dtype, is_float_dtype,
is_numeric_dtype, is_sparse, is_timedelta64_dtype)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape, save for item axis which will be separate
# for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos,
mgr.nblocks,
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
((ax0_indexer is None and
blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
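# Illustrative note (not part of the original module): the plan built above is what
# ultimately services user-level axis-1 concatenation with misaligned rows, e.g.
#
#     import pandas as pd
#     left = pd.DataFrame({"a": [1, 2]})
#     right = pd.DataFrame({"b": [3.0, 4.0]}, index=[1, 2])
#     pd.concat([left, right], axis=1)
#
# Rows present in only one frame become JoinUnits with a None block, which is why
# the missing cells are filled (here with NaN) during reindexing and concatenation.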
class JoinUnit(object):
def __init__(self, block, shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self):
return '{name}({block!r}, {indexers})'.format(
name=self.__class__.__name__, block=self.block,
indexers=self.indexers)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return _get_dtype(maybe_promote(self.block.dtype,
self.block.fill_value)[0])
@cache_readonly
def is_na(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
# Usually it's enough to check only a small fraction of values to see
# whether a block is NOT null; working in chunks helps in such cases. The
# 1000-value chunk size was chosen rather arbitrarily.
values = self.block.values
if self.block.is_categorical:
values_flat = values.categories
elif is_sparse(self.block.values.dtype):
return False
elif self.block.is_extension:
values_flat = values
else:
values_flat = values.ravel(order='K')
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isna(values_flat[i:i + chunk_len]).all():
return False
return True
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_na:
if getattr(self.block, 'is_object', False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order='K')
if len(values) and values[0] is None:
fill_value = None
if (getattr(self.block, 'is_datetimetz', False) or
is_datetime64tz_dtype(empty_dtype)):
if self.block is None:
array = empty_dtype.construct_array_type()
return array(np.full(self.shape[1], fill_value),
dtype=empty_dtype)
pass
elif getattr(self.block, 'is_categorical', False):
pass
elif getattr(self.block, 'is_sparse', False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool and not self.block.is_categorical:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
elif self.block.is_extension:
values = self.block.values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax,
fill_value=fill_value)
return values
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
else:
concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
if is_uniform_reindex(join_units):
# XXX: integrate property
empty_dtype = join_units[0].block.dtype
upcasted_na = join_units[0].block.fill_value
return empty_dtype, upcasted_na
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
upcast_classes = defaultdict(list)
null_upcast_classes = defaultdict(list)
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if is_categorical_dtype(dtype):
upcast_cls = 'category'
elif is_datetime64tz_dtype(dtype):
upcast_cls = 'datetimetz'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
elif is_sparse(dtype):
upcast_cls = dtype.subtype.name
elif is_extension_array_dtype(dtype):
upcast_cls = 'object'
elif is_float_dtype(dtype) or is_numeric_dtype(dtype):
upcast_cls = dtype.name
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_na:
null_upcast_classes[upcast_cls].append(dtype)
else:
upcast_classes[upcast_cls].append(dtype)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'datetimetz' in upcast_classes:
dtype = upcast_classes['datetimetz']
return dtype[0], tslibs.iNaT
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslibs.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslibs.iNaT
else: # pragma
try:
g = np.find_common_type(upcast_classes, [])
except TypeError:
# At least one is an ExtensionArray
return np.dtype(np.object_), np.nan
else:
if is_float_dtype(g):
return g, g.type(np.nan)
elif is_numeric_dtype(g):
if has_none_blocks:
return np.float64, np.nan
else:
return g, None
msg = "invalid dtype determination in get_concat_dtype"
raise AssertionError(msg)
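# Illustrative note (not part of the original module): the choices above are what a
# user observes when concatenating frames with missing columns, e.g.
#
#     import pandas as pd
#     ints = pd.DataFrame({"a": [1, 2]})
#     other = pd.DataFrame({"b": [True, False]})
#     pd.concat([ints, other])    # "a" upcasts to float64, "b" to object
#
# because the all-NA join units force numeric columns to float64 (NaN fill) and
# bool columns with missing values to object.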
def is_uniform_join_units(join_units):
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
concatenate_join_units (which uses `_concat._concat_compat`).
"""
return (
# all blocks need to have the same type
all(type(ju.block) is type(join_units[0].block) for ju in join_units) and # noqa
# no blocks that would get missing values (can lead to type upcasts)
# unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units) and
# no blocks with indexers (as then the dimensions do not fit)
all(not ju.indexers for ju in join_units) and
# disregard Panels
all(ju.block.ndim <= 2 for ju in join_units) and
# only use this path when there is something to concatenate
len(join_units) > 1)
def is_uniform_reindex(join_units):
return (
# TODO: should this be ju.block._can_hold_na?
all(ju.block and ju.block.is_extension for ju in join_units) and
len({ju.block.dtype.name for ju in join_units}) == 1
)
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
| 35.085062
| 89
| 0.57779
|
4a1670b127aeebe2125fb62a73f870aca422b118
| 8,819
|
py
|
Python
|
.history/pages/intro_20220303154518.py
|
rypaik/Streamlit_Ref
|
5ce11cecbe8307238463c126b88b3beed66c99fa
|
[
"MIT"
] | null | null | null |
.history/pages/intro_20220303154518.py
|
rypaik/Streamlit_Ref
|
5ce11cecbe8307238463c126b88b3beed66c99fa
|
[
"MIT"
] | null | null | null |
.history/pages/intro_20220303154518.py
|
rypaik/Streamlit_Ref
|
5ce11cecbe8307238463c126b88b3beed66c99fa
|
[
"MIT"
] | null | null | null |
"""
Off Multipage Cheatsheet
https://github.com/daniellewisDL/streamlit-cheat-sheet
@daniellewisDL : https://github.com/daniellewisDL
"""
import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
# Initial page config
st.set_page_config(
page_title='Code Compendium Intro Page',
layout="wide",
# initial_sidebar_state="expanded",
)
# col2.title("Table of contents")
# col2.write("http://localhost:8502/#display-progress-and-status")
# toc.header("Header 1")
# toc.header("Header 2")
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# Thanks to streamlitopedia for the following code snippet
def img_to_bytes(img_path):
img_bytes = Path(img_path).read_bytes()
encoded = base64.b64encode(img_bytes).decode()
return encoded
# sidebar
# def cs_sidebar():
# st.sidebar.markdown('''[<img src='data:image/png;base64,{}' class='img-fluid' width=32 height=32>](https://streamlit.io/)'''.format(img_to_bytes("logomark_website.png")), unsafe_allow_html=True)
# st.sidebar.header('Streamlit cheat sheet')
# st.sidebar.markdown('''
# <small>Summary of the [docs](https://docs.streamlit.io/en/stable/api.html), as of [Streamlit v1.0.0](https://www.streamlit.io/).</small>
# ''', unsafe_allow_html=True)
# st.sidebar.markdown('__How to install and import__')
# st.sidebar.code('$ pip install streamlit')
# st.sidebar.markdown('Import convention')
# st.sidebar.code('>>> import streamlit as st')
# st.sidebar.markdown('__Add widgets to sidebar__')
# st.sidebar.code('''
# st.sidebar.<widget>
# >>> a = st.sidebar.radio(\'R:\',[1,2])
# ''')
# st.sidebar.markdown('__Command line__')
# st.sidebar.code('''
# $ streamlit --help
# $ streamlit run your_script.py
# $ streamlit hello
# $ streamlit config show
# $ streamlit cache clear
# $ streamlit docs
# $ streamlit --version
# ''')
# st.sidebar.markdown('__Pre-release features__')
# st.sidebar.markdown('[Beta and experimental features](https://docs.streamlit.io/en/stable/api.html#beta-and-experimental-features)')
# st.sidebar.code('''
# pip uninstall streamlit
# pip install streamlit-nightly --upgrade
# ''')
# st.sidebar.markdown('''<small>[st.cheat_sheet v1.0.0](https://github.com/daniellewisDL/streamlit-cheat-sheet) | Oct 2021</small>''', unsafe_allow_html=True)
# return None
##########################
# Main body of cheat sheet
##########################
def cs_body():
(col1,) = st.columns(1)
col1.header('Ryan Paik')
col1.markdown(
'''
*"You don't learn to walk by following rules. You learn by doing, and by falling over."*
-Richard Branson
-----
''')
col1.subheader("Welcome to my Code Compendium.")
col1.markdown('''
This website/webapp is my personal cheatsheet for all the code snippets I have needed over the past 2 years. It ended up being a quick detour into Streamlit, which I fell in love with while building Flask APIs.
-----
**Programming is only as deep as you want to dive in.**
This webapp features the basic code snippets from all the "googling" I have done while programming.
I have taken the plunge and have created my own markdown notebooks organizing information from quick solution tidbits to documentation for programming languages.
Please visit my github for practical code and my research notebooks:
*[rypaik (Ryan Paik) · GitHub](https://github.com/rypaik)*
If you would like access to my Gist please email me.
ryanpaik@protonmail.com
-----
**Bio:**
Currently a Sophomore at University of Illinois at Urbana-Champaign
Working Nights on my degree from the System Engineering Program
**Hobbies:**
Trying to become a real guitar hero minus the game system, playing Valorant with the St Mark's crew, getting interesting eats no matter where I am, and playing toss with my baseball field rat of a cousin.
The newest hobby is figuring out what I can build with all the new breakthroughs in technology.
**Currently Working On**
Frameworks and Languages:
- Flask, Django, FastAPI, PyTorch, Streamlit, OpenCV, shell scripting, Python, C++
Databases:
- Postgres, Redis, MongoDB, and applicable ORMs
When I can get up for Air:
- React, swift(ios), Rust, GO!!
- Find a team to get a paper into arXiv
**This site will be constantly updated as long as I program. Feel free to pass on the URL.**
''')
# col2.subheader('Display interactive widgets')
# col2.code('''
# st.button('Hit me')
# st.download_button('On the dl', data)
# st.checkbox('Check me out')
# st.radio('Radio', [1,2,3])
# st.selectbox('Select', [1,2,3])
# st.multiselect('Multiselect', [1,2,3])
# st.slider('Slide me', min_value=0, max_value=10)
# st.select_slider('Slide to select', options=[1,'2'])
# st.text_input('Enter some text')
# st.number_input('Enter a number')
# st.text_area('Area for textual entry')
# st.date_input('Date input')
# st.time_input('Time entry')
# st.file_uploader('File uploader')
# st.color_picker('Pick a color')
# ''')
# col2.write('Use widgets\' returned values in variables:')
# col2.code('''
# >>> for i in range(int(st.number_input('Num:'))): foo()
# >>> if st.sidebar.selectbox('I:',['f']) == 'f': b()
# >>> my_slider_val = st.slider('Quinn Mallory', 1, 88)
# >>> st.write(slider_val)
# ''')
# # Control flow
# col2.subheader('Control flow')
# col2.code('''
# st.stop()
# ''')
# # Lay out your app
# col2.subheader('Lay out your app')
# col2.code('''
# st.form('my_form_identifier')
# st.form_submit_button('Submit to me')
# st.container()
# st.columns(spec)
# >>> col1, col2 = st.columns(2)
# >>> col1.subheader('Columnisation')
# st.expander('Expander')
# >>> with st.expander('Expand'):
# >>> st.write('Juicy deets')
# ''')
# col2.write('Batch widgets together in a form:')
# col2.code('''
# >>> with st.form(key='my_form'):
# >>> text_input = st.text_input(label='Enter some text')
# >>> submit_button = st.form_submit_button(label='Submit')
# ''')
# # Display code
# col2.subheader('Display code')
# col2.code('''
# st.echo()
# >>> with st.echo():
# >>> st.write('Code will be executed and printed')
# ''')
# # Display progress and status
# col2.subheader('Display progress and status')
# col2.code('''
# st.progress(progress_variable_1_to_100)
# st.spinner()
# >>> with st.spinner(text='In progress'):
# >>> time.sleep(5)
# >>> st.success('Done')
# st.balloons()
# st.error('Error message')
# st.warning('Warning message')
# st.info('Info message')
# st.success('Success message')
# st.exception(e)
# ''')
# # Placeholders, help, and options
# col2.subheader('Placeholders, help, and options')
# col2.code('''
# st.empty()
# >>> my_placeholder = st.empty()
# >>> my_placeholder.text('Replaced!')
# st.help(pandas.DataFrame)
# st.get_option(key)
# st.set_option(key, value)
# st.set_page_config(layout='wide')
# ''')
# # Mutate data
# col2.subheader('Mutate data')
# col2.code('''
# DeltaGenerator.add_rows(data)
# >>> my_table = st.table(df1)
# >>> my_table.add_rows(df2)
# >>> my_chart = st.line_chart(df1)
# >>> my_chart.add_rows(df2)
# ''')
# # Optimize performance
# col2.subheader('Optimize performance')
# col2.code('''
# @st.cache
# >>> @st.cache
# ... def fetch_and_clean_data(url):
# ... # Mutate data at url
# ... return data
# >>> # Executes d1 as first time
# >>> d1 = fetch_and_clean_data(ref1)
# >>> # Does not execute d1; returns cached value, d1==d2
# >>> d2 = fetch_and_clean_data(ref1)
# >>> # Different arg, so function d1 executes
# >>> d3 = fetch_and_clean_data(ref2)
# ''')
# col2.subheader('Other key parts of the API')
# col2.markdown('''
# <small>[State API](https://docs.streamlit.io/en/stable/session_state_api.html)</small><br>
# <small>[Theme option reference](https://docs.streamlit.io/en/stable/theme_options.html)</small><br>
# <small>[Components API reference](https://docs.streamlit.io/en/stable/develop_streamlit_components.html)</small><br>
# <small>[API cheat sheet](https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py)</small><br>
# ''', unsafe_allow_html=True)
# Column 3 TOC Generator
# col3.subheader('test')
# toc = Toc(col3)
# # col2.title("Table of contents")
# col3.write("http://localhost:8502/#display-progress-and-status", unsafe_allow_html=True)
# toc.header("Header 1")
# toc.header("Header 2")
# toc.generate()
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# return None
# Run main()
# if __name__ == '__main__':
# main()
# def main():
def app():
# cs_sidebar()
cs_body()
return None
| 27.135385
| 228
| 0.660392
|
4a1671ca249faaeaa3480c87b129f6244898f043
| 2,700
|
py
|
Python
|
supports/integration-test/test_stress_sync_10MB.py
|
ypang2017/Test
|
6f3cb231b2030f5c02f030730153e2a8516df2b2
|
[
"Apache-2.0"
] | 2
|
2018-02-02T08:18:20.000Z
|
2020-11-16T11:03:23.000Z
|
supports/integration-test/test_stress_sync_10MB.py
|
ypang2017/Compression
|
6f3cb231b2030f5c02f030730153e2a8516df2b2
|
[
"Apache-2.0"
] | null | null | null |
supports/integration-test/test_stress_sync_10MB.py
|
ypang2017/Compression
|
6f3cb231b2030f5c02f030730153e2a8516df2b2
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import time
from util import *
# 10MB
FILE_SIZE = 10 * 1024 * 1024
DEST_DIR = "hdfs://datanode3:9000/dest"
class TestStressDR(unittest.TestCase):
def test_sync_rule_10000(self):
# file : every 1s | path matches "/1MB/*" | sync -dest
# file_path = create_random_file(10 * 1024 * 1024)
# submit rule
max_number = 10000
file_paths = []
cids = []
# create random directory
source_dir = "/" + random_string() + "/"
# create 10K random files in random directory
for i in range(max_number):
file_paths.append(create_random_file_parallel(source_dir, FILE_SIZE))
time.sleep(1)
rule_str = "file : every 1s | path matches " + \
"\"" + source_dir + "*\" | sync -dest " + DEST_DIR
rid = submit_rule(rule_str)
# Activate rule
start_rule(rid)
# Submit read action to trigger rule
# Read three times
time.sleep(1)
# Status check
while True:
time.sleep(1)
rule = get_rule(rid)
if rule['numCmdsGen'] >= max_number:
break
time.sleep(5)
delete_rule(rid)
# delete all random files
for i in range(max_number):
cids.append(delete_file(file_paths[i]))
wait_for_cmdlets(cids)
def test_sync_rule_100000(self):
# file : every 1s | path matches "/1MB/*" | sync -dest
# file_path = create_random_file(10 * 1024 * 1024)
# submit rule
max_number = 100000
file_paths = []
cids = []
# create random directory
source_dir = "/" + random_string() + "/"
# create 100K random files in random directory
for i in range(max_number):
file_paths.append(create_random_file_parallel(source_dir, FILE_SIZE))
time.sleep(1)
rule_str = "file : every 1s | path matches " + \
"\"" + source_dir + "*\" | sync -dest " + DEST_DIR
rid = submit_rule(rule_str)
# Activate rule
start_rule(rid)
# Submit read action to trigger rule
# Read three times
time.sleep(1)
# Status check
while True:
time.sleep(1)
rule = get_rule(rid)
if rule['numCmdsGen'] >= max_number:
break
time.sleep(5)
delete_rule(rid)
# delete all random files
for i in range(max_number):
cids.append(delete_file(file_paths[i]))
wait_for_cmdlets(cids)
if __name__ == '__main__':
requests.adapters.DEFAULT_RETRIES = 5
s = requests.session()
s.keep_alive = False
unittest.main()
| 31.395349
| 81
| 0.568519
|
4a1672f6d7034131ab41f544f7c74cc0b0396a5e
| 5,182
|
py
|
Python
|
orcid_api_v3/models/services_v30_rc1.py
|
tenet-ac-za/NZ-ORCID-Hub
|
f1183fbb94509b102fa58d7812ed33d8f35c5d4d
|
[
"MIT"
] | 15
|
2017-02-06T01:41:57.000Z
|
2021-07-22T08:53:40.000Z
|
orcid_api_v3/models/services_v30_rc1.py
|
tenet-ac-za/NZ-ORCID-Hub
|
f1183fbb94509b102fa58d7812ed33d8f35c5d4d
|
[
"MIT"
] | 82
|
2017-03-23T00:30:04.000Z
|
2022-02-01T00:10:34.000Z
|
orcid_api_v3/models/services_v30_rc1.py
|
tenet-ac-za/NZ-ORCID-Hub
|
f1183fbb94509b102fa58d7812ed33d8f35c5d4d
|
[
"MIT"
] | 6
|
2017-03-23T07:26:05.000Z
|
2021-02-23T11:20:21.000Z
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.affiliation_group_v30_rc1_service_summary_v30_rc1 import AffiliationGroupV30Rc1ServiceSummaryV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc1 import LastModifiedDateV30Rc1 # noqa: F401,E501
class ServicesV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_modified_date': 'LastModifiedDateV30Rc1',
'affiliation_group': 'list[AffiliationGroupV30Rc1ServiceSummaryV30Rc1]',
'path': 'str'
}
attribute_map = {
'last_modified_date': 'last-modified-date',
'affiliation_group': 'affiliation-group',
'path': 'path'
}
def __init__(self, last_modified_date=None, affiliation_group=None, path=None): # noqa: E501
"""ServicesV30Rc1 - a model defined in Swagger""" # noqa: E501
self._last_modified_date = None
self._affiliation_group = None
self._path = None
self.discriminator = None
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if affiliation_group is not None:
self.affiliation_group = affiliation_group
if path is not None:
self.path = path
@property
def last_modified_date(self):
"""Gets the last_modified_date of this ServicesV30Rc1. # noqa: E501
:return: The last_modified_date of this ServicesV30Rc1. # noqa: E501
:rtype: LastModifiedDateV30Rc1
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this ServicesV30Rc1.
:param last_modified_date: The last_modified_date of this ServicesV30Rc1. # noqa: E501
:type: LastModifiedDateV30Rc1
"""
self._last_modified_date = last_modified_date
@property
def affiliation_group(self):
"""Gets the affiliation_group of this ServicesV30Rc1. # noqa: E501
:return: The affiliation_group of this ServicesV30Rc1. # noqa: E501
:rtype: list[AffiliationGroupV30Rc1ServiceSummaryV30Rc1]
"""
return self._affiliation_group
@affiliation_group.setter
def affiliation_group(self, affiliation_group):
"""Sets the affiliation_group of this ServicesV30Rc1.
:param affiliation_group: The affiliation_group of this ServicesV30Rc1. # noqa: E501
:type: list[AffiliationGroupV30Rc1ServiceSummaryV30Rc1]
"""
self._affiliation_group = affiliation_group
@property
def path(self):
"""Gets the path of this ServicesV30Rc1. # noqa: E501
:return: The path of this ServicesV30Rc1. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this ServicesV30Rc1.
:param path: The path of this ServicesV30Rc1. # noqa: E501
:type: str
"""
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ServicesV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ServicesV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
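# Illustrative usage sketch (not part of the generated file): the model is a plain
# value object, so it can be built and serialised without an ORCID connection, e.g.
#
#     services = ServicesV30Rc1(path="/0000-0001-2345-6789/services")
#     services.to_dict()   # {'last_modified_date': None, 'affiliation_group': None, 'path': '/0000-...'}
#
# The ORCID iD in the path above is a made-up placeholder.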
| 31.216867
| 143
| 0.620996
|
4a167362dabebe28d03e306344265a6919d5d1e2
| 905
|
py
|
Python
|
barriers/views/assessments/base.py
|
felix781/market-access-python-frontend
|
3b0e49feb4fdf0224816326938a46002aa4a2b1c
|
[
"MIT"
] | 1
|
2021-12-15T04:14:03.000Z
|
2021-12-15T04:14:03.000Z
|
barriers/views/assessments/base.py
|
felix781/market-access-python-frontend
|
3b0e49feb4fdf0224816326938a46002aa4a2b1c
|
[
"MIT"
] | 19
|
2019-12-11T11:32:47.000Z
|
2022-03-29T15:40:57.000Z
|
barriers/views/assessments/base.py
|
felix781/market-access-python-frontend
|
3b0e49feb4fdf0224816326938a46002aa4a2b1c
|
[
"MIT"
] | 2
|
2021-02-09T09:38:45.000Z
|
2021-03-29T19:07:09.000Z
|
from django.urls import reverse
from django.views.generic import FormView
from ..mixins import BarrierMixin
class ArchiveAssessmentBase(BarrierMixin, FormView):
template_name = "barriers/assessments/archive.html"
title = "Archive assessment"
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
context_data["title"] = self.title
return context_data
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["id"] = self.kwargs["assessment_id"]
kwargs["token"] = self.request.session.get("sso_token")
return kwargs
def form_valid(self, form):
form.save()
return super().form_valid(form)
def get_success_url(self):
return reverse(
"barriers:assessment_detail",
kwargs={"barrier_id": self.kwargs.get("barrier_id")},
)
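# Illustrative sketch (not part of the original module): a concrete view would
# normally just plug in a form class; the names below are hypothetical.
#
#     from ..forms.assessments import ArchiveAssessmentForm   # hypothetical import
#
#     class ArchiveAssessment(ArchiveAssessmentBase):
#         form_class = ArchiveAssessmentForm
#
# The base class supplies the assessment id and SSO token to the form and redirects
# back to the assessment detail page on success.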
| 29.193548
| 65
| 0.661878
|
4a167383cab41e128a748c824b552c2e1372bd01
| 9,250
|
py
|
Python
|
pymongo/errors.py
|
blink1073/mongo-python-driver
|
98d393336411b7cd5ad4e184ca45192f76fb48e8
|
[
"Apache-2.0"
] | null | null | null |
pymongo/errors.py
|
blink1073/mongo-python-driver
|
98d393336411b7cd5ad4e184ca45192f76fb48e8
|
[
"Apache-2.0"
] | null | null | null |
pymongo/errors.py
|
blink1073/mongo-python-driver
|
98d393336411b7cd5ad4e184ca45192f76fb48e8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by PyMongo."""
from typing import Any, Iterable, List, Mapping, Optional, Sequence, Tuple, Union
from bson.errors import InvalidDocument
try:
# CPython 3.7+
from ssl import SSLCertVerificationError as _CertificateError
except ImportError:
try:
from ssl import CertificateError as _CertificateError
except ImportError:
class _CertificateError(ValueError): # type: ignore
pass
class PyMongoError(Exception):
"""Base class for all PyMongo exceptions."""
def __init__(self, message: str = "", error_labels: Optional[Iterable[str]] = None) -> None:
super(PyMongoError, self).__init__(message)
self._message = message
self._error_labels = set(error_labels or [])
def has_error_label(self, label: str) -> bool:
"""Return True if this error contains the given label.
.. versionadded:: 3.7
"""
return label in self._error_labels
def _add_error_label(self, label):
"""Add the given label to this error."""
self._error_labels.add(label)
def _remove_error_label(self, label):
"""Remove the given label from this error."""
self._error_labels.discard(label)
class ProtocolError(PyMongoError):
"""Raised for failures related to the wire protocol."""
class ConnectionFailure(PyMongoError):
"""Raised when a connection to the database cannot be made or is lost."""
class AutoReconnect(ConnectionFailure):
"""Raised when a connection to the database is lost and an attempt to
auto-reconnect will be made.
In order to auto-reconnect you must handle this exception, recognizing that
the operation which caused it has not necessarily succeeded. Future
operations will attempt to open a new connection to the database (and
will continue to raise this exception until the first successful
connection is made).
Subclass of :exc:`~pymongo.errors.ConnectionFailure`.
"""
errors: Union[Mapping[str, Any], Sequence]
details: Union[Mapping[str, Any], Sequence]
def __init__(
self, message: str = "", errors: Optional[Union[Mapping[str, Any], Sequence]] = None
) -> None:
error_labels = None
if errors is not None:
if isinstance(errors, dict):
error_labels = errors.get("errorLabels")
super(AutoReconnect, self).__init__(message, error_labels)
self.errors = self.details = errors or []
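# Illustrative note (not part of the original module): callers are expected to treat
# this error as "retry once the topology has refreshed", e.g.
#
#     from pymongo import MongoClient
#     from pymongo.errors import AutoReconnect
#
#     client = MongoClient()               # assumes a reachable deployment
#     for attempt in range(3):
#         try:
#             client.db.coll.insert_one({"n": attempt})
#             break
#         except AutoReconnect:
#             continue                      # the write may or may not have applied
#
# Because the original operation may have succeeded before the connection dropped,
# naive retries of non-idempotent writes can apply them twice.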
class NetworkTimeout(AutoReconnect):
"""An operation on an open connection exceeded socketTimeoutMS.
The remaining connections in the pool stay open. In the case of a write
operation, you cannot know whether it succeeded or failed.
Subclass of :exc:`~pymongo.errors.AutoReconnect`.
"""
def _format_detailed_error(message, details):
if details is not None:
message = "%s, full error: %s" % (message, details)
return message
class NotPrimaryError(AutoReconnect):
"""The server responded "not primary" or "node is recovering".
These errors result from a query, write, or command. The operation failed
because the client thought it was using the primary but the primary has
stepped down, or the client thought it was using a healthy secondary but
the secondary is stale and trying to recover.
The client launches a refresh operation on a background thread, to update
its view of the server as soon as possible after throwing this exception.
Subclass of :exc:`~pymongo.errors.AutoReconnect`.
.. versionadded:: 3.12
"""
def __init__(
self, message: str = "", errors: Optional[Union[Mapping[str, Any], List]] = None
) -> None:
super(NotPrimaryError, self).__init__(
_format_detailed_error(message, errors), errors=errors
)
class ServerSelectionTimeoutError(AutoReconnect):
"""Thrown when no MongoDB server is available for an operation
If there is no suitable server for an operation PyMongo tries for
``serverSelectionTimeoutMS`` (default 30 seconds) to find one, then
throws this exception. For example, it is thrown after attempting an
operation when PyMongo cannot connect to any server, or if you attempt
an insert into a replica set that has no primary and does not elect one
within the timeout window, or if you attempt to query with a Read
Preference that the replica set cannot satisfy.
"""
class ConfigurationError(PyMongoError):
"""Raised when something is incorrectly configured."""
class OperationFailure(PyMongoError):
"""Raised when a database operation fails.
.. versionadded:: 2.7
The :attr:`details` attribute.
"""
def __init__(
self,
error: str,
code: Optional[int] = None,
details: Optional[Mapping[str, Any]] = None,
max_wire_version: Optional[int] = None,
) -> None:
error_labels = None
if details is not None:
error_labels = details.get("errorLabels")
super(OperationFailure, self).__init__(
_format_detailed_error(error, details), error_labels=error_labels
)
self.__code = code
self.__details = details
self.__max_wire_version = max_wire_version
@property
def _max_wire_version(self):
return self.__max_wire_version
@property
def code(self) -> Optional[int]:
"""The error code returned by the server, if any."""
return self.__code
@property
def details(self) -> Optional[Mapping[str, Any]]:
"""The complete error document returned by the server.
Depending on the error that occurred, the error document
may include useful information beyond just the error
message. When connected to a mongos the error document
may contain one or more subdocuments if errors occurred
on multiple shards.
"""
return self.__details
class CursorNotFound(OperationFailure):
"""Raised while iterating query results if the cursor is
invalidated on the server.
.. versionadded:: 2.7
"""
class ExecutionTimeout(OperationFailure):
"""Raised when a database operation times out, exceeding the $maxTimeMS
set in the query or command option.
.. note:: Requires server version **>= 2.6.0**
.. versionadded:: 2.7
"""
class WriteConcernError(OperationFailure):
"""Base exception type for errors raised due to write concern.
.. versionadded:: 3.0
"""
class WriteError(OperationFailure):
"""Base exception type for errors raised during write operations.
.. versionadded:: 3.0
"""
class WTimeoutError(WriteConcernError):
"""Raised when a database operation times out (i.e. wtimeout expires)
before replication completes.
With newer versions of MongoDB the `details` attribute may include
write concern fields like 'n', 'updatedExisting', or 'writtenTo'.
.. versionadded:: 2.7
"""
class DuplicateKeyError(WriteError):
"""Raised when an insert or update fails due to a duplicate key error."""
class BulkWriteError(OperationFailure):
"""Exception class for bulk write errors.
.. versionadded:: 2.7
"""
details: Mapping[str, Any]
def __init__(self, results: Mapping[str, Any]) -> None:
super(BulkWriteError, self).__init__("batch op errors occurred", 65, results)
def __reduce__(self) -> Tuple[Any, Any]:
return self.__class__, (self.details,)
class InvalidOperation(PyMongoError):
"""Raised when a client attempts to perform an invalid operation."""
class InvalidName(PyMongoError):
"""Raised when an invalid name is used."""
class CollectionInvalid(PyMongoError):
"""Raised when collection validation fails."""
class InvalidURI(ConfigurationError):
"""Raised when trying to parse an invalid mongodb URI."""
class DocumentTooLarge(InvalidDocument):
"""Raised when an encoded document is too large for the connected server."""
pass
class EncryptionError(PyMongoError):
"""Raised when encryption or decryption fails.
This error always wraps another exception which can be retrieved via the
:attr:`cause` property.
.. versionadded:: 3.9
"""
def __init__(self, cause: Exception) -> None:
super(EncryptionError, self).__init__(str(cause))
self.__cause = cause
@property
def cause(self) -> Exception:
"""The exception that caused this encryption or decryption error."""
return self.__cause
class _OperationCancelled(AutoReconnect):
"""Internal error raised when a socket operation is cancelled."""
pass
| 30.730897
| 96
| 0.691243
|
4a1673e20407147a9f33e507c8ac5f941efffe95
| 49,002
|
py
|
Python
|
tests/db_interaction/test_postgresql.py
|
TSPereira/support_toolkit
|
d9b0488d69dccc38b73cd67ea33f4f53983cf77f
|
[
"MIT"
] | 4
|
2021-01-05T14:03:54.000Z
|
2021-01-29T14:48:09.000Z
|
tests/db_interaction/test_postgresql.py
|
TSPereira/support_toolkit
|
d9b0488d69dccc38b73cd67ea33f4f53983cf77f
|
[
"MIT"
] | null | null | null |
tests/db_interaction/test_postgresql.py
|
TSPereira/support_toolkit
|
d9b0488d69dccc38b73cd67ea33f4f53983cf77f
|
[
"MIT"
] | null | null | null |
import logging
import os
import unittest
import pandas as pd
from pandas.io.sql import DatabaseError
import psycopg2
from ml_toolkit.db_interaction.api import PostgreSQLManager
from ml_toolkit.utils.io_utl import get_decorators
print({k: v for k, v in os.environ.items() if k.startswith('POSTGRES')})
CFG = dict(user=os.environ.get('POSTGRES_USER', 'postgres'),
password=os.environ.get('POSTGRES_PASSWORD', ''),
host=os.environ.get('POSTGRES_HOST', 'localhost'),
port=os.environ.get('POSTGRES_PORT', '5432'),
database=os.environ.get('POSTGRES_DB', None))
def open_db():
db = PostgreSQLManager(**CFG)
db.logger.setLevel(logging.ERROR + 1)
db.logger.setFormattersIsColored(False)
db.set_exception_handling('raise')
return db
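# Illustrative note (not part of the original tests): the same helper can be used
# interactively against a scratch database, e.g.
#
#     db = open_db()
#     db.create_empty_table('public.scratch', {'a': 'integer'})
#     db.upload_table('public.scratch', pd.DataFrame({'a': [1, 2]}), on_conflict='drop')
#     print(db.get_table('public.scratch'))
#     db.drop_table('public.scratch')
#
# assuming the POSTGRES_* environment variables point at a server you can write to.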
class DBTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.db = open_db()
cls.test_table = 'public.test_postgres'
@classmethod
def tearDownClass(cls):
del cls.db
class ConnectionCase(unittest.TestCase):
def test_connection(self):
# to run locally you might need to edit the pg_hba.conf file to use "method" "trust" for local connections
db = open_db()
self.assertEqual(db.name, CFG.get('database') or CFG.get('user'))
self.assertEqual(db.user, CFG.get('user'))
def test_connection_fail(self):
cfg = CFG.copy()
cfg['user'] = 'lskdjfl'
self.assertRaises(psycopg2.Error, PostgreSQLManager, **cfg)
class SchemaCase(DBTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.test_schemas = ('test1', 'test2', 'test3')
def test_create_schema(self):
for schema in self.test_schemas:
self.db.create_schema(schema)
self.db.create_schema(self.test_schemas[0], True)
self.assertRaises(psycopg2.Error, self.db.create_schema, *(self.test_schemas[0], False))
def test_get_schemas(self):
self.assertIn('public', self.db.get_schemas())
def test_drop_schemas(self):
self.db.execute("CREATE TABLE test1.test ()")
self.db.execute("CREATE TABLE test2.test ()")
self.assertRaises(psycopg2.Error, self.db.drop_schema, *('test1', False))
self.assertRaises(psycopg2.Error, self.db.drop_schema, *('smth', True, False))
self.db.drop_schema('test1', True)
self.assertRaises(psycopg2.Error, self.db.drop_schema, *(['test2', 'test3'], False))
self.db.drop_schema(['test2', 'test3'], True)
def test_set_active_schema(self):
self.db.set_active_schema()
self.assertEqual('public', self.db.active_schema)
self.db.create_schema(self.test_schemas[0])
self.db.set_active_schema(self.test_schemas[0])
self.assertEqual(self.test_schemas[0], self.db.active_schema)
self.db.drop_schema(self.test_schemas[0])
self.db.set_active_schema('smth')
self.assertEqual('public', self.db.active_schema)
class DropTableCase(DBTestCase):
def setUp(self):
self.db.execute(f'CREATE TABLE IF NOT EXISTS {self.test_table} ()')
self.db.refresh()
def tearDown(self):
self.db.execute(f'DROP TABLE IF EXISTS {self.test_table} CASCADE')
self.db.refresh()
def test_drop(self):
self.db.drop_table(self.test_table)
self.assertNotIn(self.test_table, self.db.tables())
def test_drop_multiple(self):
self.db.execute('CREATE TABLE IF NOT EXISTS public.test_postgres1 ()')
self.db.refresh()
self.db.drop_table([self.test_table, 'public.test_postgres1'])
self.assertNotIn(self.test_table, self.db.tables())
self.assertNotIn('public.test_postgres1', self.db.tables())
def test_if_not_exists(self):
self.db.drop_table(self.test_table)
self.assertRaises(psycopg2.Error, self.db.drop_table, self.test_table, if_exists=False)
class CreateEmptyTableCase(DBTestCase):
def tearDown(self):
self.db.drop_table(self.test_table)
def test_new_table(self):
self.db.create_empty_table(self.test_table, if_not_exists=True)
self.assertIn(self.test_table, self.db.tables())
self.assertTrue(self.db.query(f"SELECT * FROM {self.test_table}").empty)
def test_if_not_exists(self):
self.db.create_empty_table(self.test_table)
self.assertRaises(psycopg2.Error, self.db.create_empty_table, self.test_table)
self.db.create_empty_table(self.test_table, if_not_exists=True)
def test_types_and_columns(self):
params = dict()
params['schema'], params['table_name'] = self.test_table.split('.')
# types
test_types = {'a': 'int', 'b': 'float', 'c': 'object'}
types_query = f"""SELECT column_name,
CASE
WHEN domain_name IS NOT NULL THEN domain_name
WHEN data_type='character varying' THEN 'varchar('||character_maximum_length||')'
WHEN data_type='numeric' THEN 'numeric('||numeric_precision||','||numeric_scale||')'
ELSE data_type
END AS data_type
FROM information_schema.columns
WHERE table_schema = %(schema)s AND table_name = %(table_name)s """
self.db.create_empty_table(self.test_table, types=test_types)
self.assertEqual(self.db.query(types_query, params=params)['data_type'].tolist(),
['integer', 'double precision', 'text'])
# columns
cols_query = f"""SELECT column_name FROM information_schema.columns
WHERE table_schema = %(schema)s AND table_name = %(table_name)s """
self.assertEqual(self.db.query(cols_query, params=params)['column_name'].to_list(), list(test_types))
def test_types_from_df(self):
params = dict()
params['schema'], params['table_name'] = self.test_table.split('.')
test_df = pd.DataFrame({'a': [1, 2, 3, 4, 5],
'b': [1.1, 2, 3.3, 4.4, 5.5],
'c': [1.1, 2, '3', 4, None]})
test_types = {'b': 'object'}
types_query = f"""SELECT column_name,
CASE
WHEN domain_name IS NOT NULL THEN domain_name
WHEN data_type='character varying' THEN 'varchar('||character_maximum_length||')'
WHEN data_type='numeric' THEN 'numeric('||numeric_precision||','||numeric_scale||')'
ELSE data_type
END AS data_type
FROM information_schema.columns
WHERE table_schema = %(schema)s AND table_name = %(table_name)s """
self.db.create_empty_table(self.test_table, test_types, test_df)
self.assertEqual(self.db.query(types_query, params=params)['data_type'].tolist(),
['integer', 'text', 'text'])
class GetTableCase(DBTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.db.execute(f"CREATE TABLE {cls.test_table} (a integer, b text, c float)")
cls.db.execute(f"INSERT INTO {cls.test_table} VALUES (1, 'b', 1), (1, 'a', 2.0), (2, 'c', null)")
cls.db.refresh()
@classmethod
def tearDownClass(cls):
cls.db.drop_table(cls.test_table)
super().tearDownClass()
def test_select_all(self):
df = self.db.get_table(self.test_table)
self.assertEqual(df.shape[0], 3)
self.assertEqual(df.shape[1], 3)
def test_select_columns(self):
cols_sets = (['a', 'b'], ['a', 'c'], ['b'], 'b')
for cols in cols_sets:
df = self.db.get_table(self.test_table, columns=cols)
if isinstance(cols, str):
cols = [cols]
self.assertEqual(df.shape[0], 3)
self.assertEqual(df.shape[1], len(cols))
self.assertEqual(df.columns.to_list(), cols)
def test_limit(self):
limits_sets = (0, 1, 2, 3)
for limit in limits_sets:
df = self.db.get_table(self.test_table, limit=limit)
self.assertEqual(df.shape[0], limit)
def test_select_where(self):
test_set = (dict(where="a = 1", result_set=[[1, 'b', 1], [1, 'a', 2.0]], shape=2),
dict(where="b = 'b'", result_set=[[1, 'b', 1]], shape=1),
dict(where="c is Null", result_set=[[2, 'c', None]], shape=1),
dict(where="a = 1 and b = 'b'", result_set=[[1, 'b', 1]], shape=1))
for test in test_set:
df = self.db.get_table(self.test_table, where=test['where'])
self.assertEqual(df.shape[0], test['shape'])
self.assertEqual(df.to_numpy(na_value=None).tolist(), test['result_set'])
def test_where_safety(self):
test_set = (f"a = 1; SELECT * FROM {self.test_table}",
f"'; SELECT * FROM {self.test_table} --")
for test in test_set:
self.assertRaises(DatabaseError, self.db.get_table, self.test_table, where=test)
def test_order_and_sort(self):
test_set = (('a', 'asc', [[1, 'b', 1], [1, 'a', 2.0], [2, 'c', None]]),
('b', 'asc', [[1, 'a', 2.0], [1, 'b', 1], [2, 'c', None]]),
(['a', 'b'], 'asc', [[1, 'a', 2.0], [1, 'b', 1], [2, 'c', None]]),
# sort dir
('a', 'desc', [[2, 'c', None], [1, 'b', 1], [1, 'a', 2.0]]),
('b', 'desc', [[2, 'c', None], [1, 'b', 1], [1, 'a', 2.0]]),
(['a', 'b'], 'desc', [[2, 'c', None], [1, 'b', 1], [1, 'a', 2.0]]))
for order, sort_dir, result in test_set:
df = self.db.get_table(self.test_table, order_by=order, sort_dir=sort_dir)
self.assertEqual(df.to_numpy(na_value=None).tolist(), result)
class UploadTableCase(DBTestCase):
def tearDown(self):
self.db.drop_table(self.test_table)
def test__commit_table(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
self.db.create_empty_table(self.test_table, from_df=values)
self.db._commit_table(self.test_table, values.to_numpy().tolist(), values.columns.to_list())
table = self.db.get_table(self.test_table)
self.assertTrue(values.equals(table))
def test_upload_columns(self):
values = [[1, 2], [4, 5]]
# raise error from creating a table without column definitions
self.assertRaises(TypeError, self.db.upload_table, *(self.test_table, values, ['a', 'b']))
# creates table without columns which results in error uploading data
self.assertRaises(KeyError, self.db.upload_table, *(self.test_table, values, None))
def test_upload_df(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
self.db.upload_table(self.test_table, values)
table = self.db.get_table(self.test_table)
self.assertTrue(values.equals(table))
def test_upload_values(self):
values = [[1, 2], [4, 5]]
columns = {'a': 'integer', 'b': 'float'}
self.db.upload_table(self.test_table, values, columns)
table = self.db.get_table(self.test_table)
self.assertEqual(values, table.to_numpy().tolist())
def test_upload_conflict(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
self.db.upload_table(self.test_table, values)
self.assertRaises(KeyError, self.db.upload_table, self.test_table, values, on_conflict='raise')
self.db.upload_table(self.test_table, values, on_conflict='drop')
class ColumnsCase(DBTestCase):
def setUp(self):
self.db.execute(f"CREATE TABLE {self.test_table} (a integer, b text, c float)")
self.db.refresh()
def tearDown(self) -> None:
self.db.drop_table(self.test_table)
def test_add_columns(self):
self.db.add_columns(self.test_table, 'd')
self.assertRaises(psycopg2.Error, self.db.add_columns, *(self.test_table, 'd'))
self.db.add_columns(self.test_table, ['e', 'f'])
self.db.add_columns(self.test_table, {'g': 'int', 'h': 'string'})
def test_add_columns_not_null(self):
schema, table_name = self.test_table.split('.')
query = f"""SELECT column_name FROM information_schema.columns
WHERE table_schema = '{schema}'
AND table_name = '{table_name}'
AND is_nullable = 'YES';"""
self.db.add_columns(self.test_table, 'i', True)
self.assertNotIn('i', self.db.query(query)['column_name'].to_list())
self.db.add_columns(self.test_table, ['j', 'k'], True)
self.assertNotIn('j', self.db.query(query)['column_name'].to_list())
self.assertNotIn('k', self.db.query(query)['column_name'].to_list())
self.db.add_columns(self.test_table, ['l', 'm'], [True, False])
self.assertNotIn('l', self.db.query(query)['column_name'].to_list())
self.assertIn('m', self.db.query(query)['column_name'].to_list())
self.assertRaises(AssertionError, self.db.add_columns, *(self.test_table, ['n', 'o'], [True]))
def test_alter_columns(self):
self.db.execute(f"""INSERT INTO {self.test_table} VALUES (1, 'a', 1.0), (2, 'b', 4)""")
self.db.alter_columns(self.test_table, {'a': 'float'})
self.assertEqual(self.db.get_dtypes(self.test_table).to_dict(),
{'a': 'double precision', 'b': 'text', 'c': 'double precision'})
def test_alter_columns_using(self):
self.db.execute(f"""INSERT INTO {self.test_table} VALUES (1, '1', 1.0), ('2', '3', '4')""")
# not using
self.assertRaises(psycopg2.Error, self.db.alter_columns, *(self.test_table, {'b': 'integer'}))
# using
self.db.alter_columns(self.test_table, {'b': 'integer'}, using='integer')
self.assertEqual(self.db.get_dtypes(self.test_table).to_dict(),
{'a': 'integer', 'b': 'integer', 'c': 'double precision'})
# using multiple
# setup all as text
self.db.alter_columns(self.test_table, {'a': 'text', 'b': 'text', 'c': 'text'})
# fail
self.assertRaises(AssertionError, self.db.alter_columns,
*(self.test_table, {'a': 'integer', 'c': 'integer'}, ['integer']))
self.assertRaises(psycopg2.Error, self.db.alter_columns,
*(self.test_table, {'a': 'integer', 'b': 'integer'}, ['integer', 'timestamp']))
# convert multiple
self.db.alter_columns(self.test_table, {'a': 'integer', 'b': 'integer'}, ['integer', 'integer'])
self.assertEqual(self.db.get_dtypes(self.test_table).to_dict(),
{'a': 'integer', 'b': 'integer', 'c': 'text'})
self.db.alter_columns(self.test_table, {'a': 'integer', 'b': 'integer', 'c': 'integer'}, 'integer')
self.assertEqual(self.db.get_dtypes(self.test_table).to_dict(),
{'a': 'integer', 'b': 'integer', 'c': 'integer'})
def test_drop_columns(self):
self.assertRaises(psycopg2.Error, self.db.drop_columns, self.test_table, ['c', 'd'], if_exists=False)
self.db.drop_columns(self.test_table, ['c', 'd'])
self.assertNotIn('c', self.db.get_columns(self.test_table))
self.db.drop_columns(self.test_table, 'a')
self.assertNotIn('a', self.db.get_columns(self.test_table))
def test_drop_columns_cascade(self):
self.assertRaises(AssertionError, self.db.drop_columns, *(self.test_table, ['b', 'c'], [True]))
def test_rename_column(self):
self.db.rename_column(self.test_table, 'a', 'd')
self.assertIn('d', self.db.get_columns(self.test_table))
self.assertNotIn('a', self.db.get_columns(self.test_table))
self.assertRaises(psycopg2.Error, self.db.rename_column, self.test_table, 'e', 'f')
self.assertRaises(psycopg2.Error, self.db.rename_column, self.test_table, 'd', 'b')
class IndexCase(DBTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.db.create_schema('test1')
cls.db.create_schema('test2')
@classmethod
def tearDownClass(cls):
cls.db.drop_schema(['test1', 'test2'])
super().tearDownClass()
def setUp(self):
self.db.create_empty_table('test1.test', {'a': 'integer', 'b': 'float'})
self.db.create_empty_table('test1.test1', {'a': 'integer', 'b': 'float'})
self.db.create_empty_table('test2.test', {'a': 'integer', 'b': 'float'})
def tearDown(self):
self.db.drop_table(['test1.test', 'test1.test1', 'test2.test'])
def test_create(self):
self.db.create_index('test1.test', 'a')
self.db.create_index('test1.test1', ['a', 'b'])
self.assertRaises(psycopg2.Error, self.db.create_index, 'test2.test', 'c')
def test_create_with_name(self):
custom_index = 'custom_name'
query = """SELECT * FROM pg_indexes WHERE schemaname != 'pg_catalog'
AND schemaname = 'test1' AND tablename = 'test' """
self.assertEqual(self.db.query(query).shape[0], 0)
self.db.create_index('test1.test', 'a', custom_index)
self.assertEqual(self.db.query(query).shape[0], 1)
self.assertIn(custom_index, self.db.query(query)['indexname'].to_list())
def test_create_unique(self):
query = """SELECT * FROM pg_indexes WHERE schemaname != 'pg_catalog'
AND schemaname = 'test1' AND tablename = 'test' """
self.db.create_index('test1.test', 'a', unique=True)
self.assertIn('unique', self.db.query(query).loc[0, 'indexdef'].lower())
def test_create_non_unique(self):
query = """SELECT * FROM pg_indexes WHERE schemaname != 'pg_catalog'
AND schemaname = 'test1' AND tablename = 'test' """
self.db.create_index('test1.test', 'a', unique=False)
self.assertNotIn('unique', self.db.query(query).loc[0, 'indexdef'].lower())
def test_create_on_conflict(self):
self.db.create_index('test1.test', 'a')
self.db.create_index('test1.test', ['a', 'b']) # no conflict, works fine
self.assertRaises(IndexError, self.db.create_index, 'test1.test', ['a', 'b'])
self.db.create_index('test1.test', ['a', 'b'], on_conflict='drop')
def test_drop(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.drop_index('test1.custom_name')
def test_drop_cascade(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.drop_index('test1.custom_name', cascade=True)
def test_drop_return_query(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.assertEqual(self.db.drop_index('test1.custom_name'), None)
self.db.create_index('test1.test', 'a', 'custom_name')
self.assertIsInstance(self.db.drop_index('test1.custom_name', return_query=True), str)
def test_drop_no_schema(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.drop_index('custom_name')
def test_drop_no_schema_multiple_same_name(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test2.test', 'a', 'custom_name')
self.assertRaises(IndexError, self.db.drop_index, 'custom_name')
self.db.drop_index('test2.custom_name')
def test_get(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.assertIn('custom_name', self.db.get_index('custom_name')['indexname'].to_list())
def test_get_on_schema(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test2.test', 'a', 'custom_name')
self.assertEqual(self.db.get_index('custom_name').shape[0], 2)
self.assertEqual(self.db.get_index('custom_name', 'test1').shape[0], 1)
def test_get_all(self):
idxs = (['test1', 'test', 'custom_name'],
['test1', 'test1', 'custom_name1'],
['test2', 'test', 'custom_name'])
for schema, table, idx_name in idxs:
self.db.create_index(f'{schema}.{table}', 'a', idx_name)
idxs_read = self.db.get_indexes()[['schemaname', 'tablename', 'indexname']].to_numpy().tolist()
for test in idxs:
self.assertIn(test, idxs_read)
def test_get_all_on_table_name(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test1.test1', 'a', 'custom_name1')
self.db.create_index('test2.test', 'a', 'custom_name')
self.assertEqual(self.db.get_indexes(table_name='test').shape[0], 2)
self.assertEqual(self.db.get_indexes(table_name='test1').shape[0], 1)
self.assertEqual(self.db.get_indexes(table_name='test3.test').shape[0], 0)
self.assertEqual(self.db.get_indexes(table_name='test1.test').shape[0], 1)
def test_get_all_on_schema(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test1.test1', 'a', 'custom_name1')
self.db.create_index('test2.test', 'a', 'custom_name')
self.assertEqual(self.db.get_indexes(schema='test1').shape[0], 2)
self.assertEqual(self.db.get_indexes(schema='test2').shape[0], 1)
def test_get_all_on_schema_and_table(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test1.test1', 'a', 'custom_name1')
self.db.create_index('test2.test', 'a', 'custom_name')
self.assertEqual(self.db.get_indexes(table_name='test', schema='test1').shape[0], 1)
self.assertEqual(self.db.get_indexes(table_name='test1', schema='test1').shape[0], 1)
self.assertEqual(self.db.get_indexes(table_name='test', schema='test2').shape[0], 1)
self.assertEqual(self.db.get_indexes(table_name='test1.test', schema='test3').shape[0], 1)
self.assertEqual(self.db.get_indexes(table_name='test3.test', schema='test1').shape[0], 0)
def test_get_indexes_columns_by_name(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
self.assertEqual(self.db.get_indexes_columns('custom_name').values.tolist(), [['a']])
self.assertEqual(self.db.get_indexes_columns('custom_name1').values.tolist(), [['a', 'b']])
self.assertEqual(self.db.get_indexes_columns(['custom_name', 'custom_name1']).values.tolist(),
[['a'], ['a', 'b']])
self.assertTrue(self.db.get_indexes_columns('some_other_name').empty)
def test_get_indexes_columns_by_table(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test2.test', 'b', 'custom_name')
self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
self.assertEqual(self.db.get_indexes_columns(table_name='test').values.tolist(), [['a'], ['b']])
self.assertEqual(self.db.get_indexes_columns(table_name='test1').values.tolist(), [['a', 'b']])
self.assertTrue(self.db.get_indexes_columns(table_name='test2').empty)
def test_get_indexes_columns_by_schema(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test2.test', 'b', 'custom_name')
self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
self.assertTrue(self.db.get_indexes_columns(schema='test').empty)
self.assertEqual(self.db.get_indexes_columns(schema='test1').values.tolist(), [['a'], ['a', 'b']])
self.assertEqual(self.db.get_indexes_columns(schema='test2').values.tolist(), [['b']])
def test_get_indexes_columns_by_name_table(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test2.test', 'b', 'custom_name')
self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
self.assertEqual(self.db.get_indexes_columns('custom_name', 'test').values.tolist(), [['a'], ['b']])
self.assertEqual(self.db.get_indexes_columns('custom_name1', 'test1').values.tolist(), [['a', 'b']])
self.assertEqual(self.db.get_indexes_columns(['custom_name', 'custom_name1'], 'test1').values.tolist(),
[['a', 'b']])
self.assertTrue(self.db.get_indexes_columns('custom_name', 'test1').empty)
self.assertTrue(self.db.get_indexes_columns(['custom_name', 'custom_name1'], 'test3').empty)
def test_get_indexes_columns_by_name_schema(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test2.test', 'b', 'custom_name')
self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
self.assertEqual(self.db.get_indexes_columns('custom_name', schema='test1').values.tolist(), [['a']])
self.assertEqual(self.db.get_indexes_columns('custom_name1', schema='test1').values.tolist(), [['a', 'b']])
self.assertEqual(self.db.get_indexes_columns(['custom_name', 'custom_name1'], schema='test2').values.tolist(),
[['b']])
self.assertTrue(self.db.get_indexes_columns('custom_name', schema='test').empty)
self.assertTrue(self.db.get_indexes_columns(['custom_name', 'custom_name1'], schema='test').empty)
def test_get_indexes_columns_by_name_table_schema(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
self.assertEqual(self.db.get_indexes_columns('custom_name', table_name='test',
schema='test1').values.tolist(), [['a']])
self.assertEqual(self.db.get_indexes_columns('custom_name1', table_name='test1',
schema='test1').values.tolist(), [['a', 'b']])
self.assertTrue(self.db.get_indexes_columns('custom_name2', table_name='test', schema='test1').empty)
def test_get_indexes_columns_by_table_schema(self):
self.db.create_index('test1.test', 'a', 'custom_name')
self.db.create_index('test2.test', 'b', 'custom_name')
self.db.create_index('test1.test1', ['a', 'b'], 'custom_name1')
self.assertEqual(self.db.get_indexes_columns(table_name='test', schema='test1').values.tolist(), [['a']])
self.assertEqual(self.db.get_indexes_columns(table_name='test', schema='test2').values.tolist(), [['b']])
self.assertEqual(self.db.get_indexes_columns(table_name='test1', schema='test1').values.tolist(), [['a', 'b']])
self.assertTrue(self.db.get_indexes_columns(table_name='test1', schema='test').empty)
class PrimaryKeysCase(DBTestCase):
def setUp(self) -> None:
self.db.execute(f"CREATE TABLE {self.test_table} (a integer, b text PRIMARY KEY, c float)")
self.db.execute(f"INSERT INTO {self.test_table} VALUES (1, 'b', 1), (3, 'a', 2.0), (2, 'c', null)")
self.db.refresh()
def tearDown(self) -> None:
self.db.drop_table(self.test_table)
def test_drop_primary_key(self):
self.assertEqual(self.db.get_constraints(self.test_table, 'p').shape[0], 1)
self.db.drop_primary_key(self.test_table)
self.db.drop_primary_key(self.test_table) # test it doesn't raise an error if it doesn't exist
def test_get_primary_key(self):
self.assertEqual(self.db.get_primary_key(self.test_table).shape[0], 1)
self.db.drop_primary_key(self.test_table)
self.assertEqual(self.db.get_primary_key(self.test_table).shape[0], 0)
def test_get_primary_key_columns(self):
self.assertEqual(self.db.get_primary_key_columns(self.test_table), ['b'])
self.assertEqual(self.db.get_primary_key_columns(self.test_table, idx=True), [2])
self.db.drop_primary_key(self.test_table)
self.assertEqual(self.db.get_primary_key_columns(self.test_table), [])
self.assertEqual(self.db.get_primary_key_columns(self.test_table, idx=True), [])
def test_set_primary_key(self):
self.db.drop_primary_key(self.test_table)
self.assertEqual(self.db.get_primary_key(self.test_table).shape[0], 0)
# set with one column
self.db.set_primary_key(self.test_table, 'b')
self.assertEqual(self.db.get_primary_key_columns(self.test_table), ['b'])
# try to set another and catch error
self.assertRaises(psycopg2.Error, self.db.set_primary_key, *(self.test_table, ['a', 'b']))
# set with on_conflict='drop'
self.db.set_primary_key(self.test_table, ['a', 'b'], on_conflict='drop')
self.assertEqual(self.db.get_primary_key_columns(self.test_table), ['a', 'b'])
def test_temporary_primary_key(self):
keys = (['a'], ['a', 'b'])
for key in keys:
existing_key = self.db.get_primary_key_columns(self.test_table)
with self.db._temporary_primary_key(key, self.test_table) as new_key:
self.assertEqual(self.db.get_primary_key_columns(self.test_table), key)
self.assertEqual(new_key, key)
self.assertEqual(self.db.get_primary_key_columns(self.test_table), existing_key)
def test_temporary_primary_key_no_existing_key(self):
self.db.drop_primary_key(self.test_table)
key = ['a']
with self.db._temporary_primary_key(key, self.test_table) as new_key:
self.assertEqual(self.db.get_primary_key_columns(self.test_table), key)
self.assertEqual(new_key, key)
self.assertEqual(self.db.get_primary_key_columns(self.test_table), [])
def test_temporary_primary_key_conflict(self):
key = ['a']
existing_key = self.db.get_primary_key_columns(self.test_table)
self.db.set_exception_handling('ignore')
with self.db._temporary_primary_key(['a'], self.test_table) as new_key:
self.db.execute(f"UPDATE {self.test_table} SET b = 'a' WHERE a = 1")
self.assertEqual(self.db.get_primary_key_columns(self.test_table), key)
self.assertEqual(new_key, key)
self.assertNotEqual(self.db.get_primary_key_columns(self.test_table), existing_key)
self.assertEqual(self.db.get_primary_key_columns(self.test_table), key)
self.db.set_exception_handling('raise')
class MiscTableCase(DBTestCase):
def setUp(self):
self.db.execute(f"CREATE TABLE {self.test_table} (a integer, b text, c float)")
self.db.execute(f"INSERT INTO {self.test_table} VALUES (1, 'b', 1), (1, 'a', 2.0), (2, 'c', null)")
self.db.refresh()
def tearDown(self):
self.db.drop_table(self.test_table)
def test_analyse_and_get_shape(self):
self.assertEqual(self.db.get_shape(self.test_table, True), (3, 3))
self.assertEqual(self.db.get_shape(self.test_table, False), (0, 3))
# check that analyse is working and that the "exact" now gets the correct number of rows
self.db.analyse(self.test_table)
self.assertEqual(self.db.get_shape(self.test_table, False), (3, 3))
def test_get_columns(self):
self.assertEqual(self.db.get_columns(self.test_table), ['a', 'b', 'c'])
def test_get_constraints(self):
# set constraints
self.db.execute(f"ALTER TABLE {self.test_table} ADD PRIMARY KEY (b)")
self.db.execute(f"ALTER TABLE {self.test_table} ADD UNIQUE (c)")
self.assertEqual(self.db.get_constraints(self.test_table).shape[0], 2)
self.assertEqual(self.db.get_constraints(self.test_table)['contype'].to_list(), ['p', 'u'])
self.assertEqual(self.db.get_constraints(self.test_table, 'primary').shape[0], 1)
self.assertNotIn('u', self.db.get_constraints(self.test_table, 'p')['contype'].to_list())
def test_get_dtypes(self):
test_set = ((None, {'a': 'integer', 'b': 'text', 'c': 'double precision'}),
(['a', 'b'], {'a': 'integer', 'b': 'text'}),
(['a', 'b', 'd'], {'a': 'integer', 'b': 'text'}),
(['a'], {'a': 'integer'}),
('a', {'a': 'integer'}))
for columns, expected in test_set:
self.assertEqual(self.db.get_dtypes(self.test_table, columns=columns).to_dict(), expected)
def test_get_na(self):
self.db.analyse(self.test_table)
self.assertEqual(self.db.get_na(self.test_table).to_dict(), {'a': 0, 'b': 0, 'c': 1})
self.assertEqual(self.db.get_na(self.test_table, ['a', 'b']).to_dict(), {'a': 0, 'b': 0})
self.assertEqual(self.db.get_na(self.test_table, 'a').to_dict(), {'a': 0})
self.assertEqual(self.db.get_na(self.test_table, ['a', 'b', 'd']).to_dict(), {'a': 0, 'b': 0})
na = self.db.get_na(self.test_table, relative=True).round(5)
expected = pd.Series({'a': 0.0, 'b': 0.0, 'c': 1/3}).round(5)
self.assertTrue(na.equals(expected))
def test_get_nunique(self):
self.assertEqual(self.db.get_nunique(self.test_table).to_dict(), {'a': 2, 'b': 3, 'c': 2})
self.assertEqual(self.db.get_nunique(self.test_table, count_null=True).to_dict(), {'a': 2, 'b': 3, 'c': 3})
self.assertEqual(self.db.get_nunique(self.test_table, ['a', 'b']).to_dict(), {'a': 2, 'b': 3})
self.assertEqual(self.db.get_nunique(self.test_table, 'a').to_dict(), {'a': 2})
self.assertEqual(self.db.get_nunique(self.test_table, ['a', 'b', 'd']).to_dict(), {'a': 2, 'b': 3})
def test_get_summary(self):
self.db.analyse(self.test_table)
summary = self.db.get_summary(self.test_table, count_null=True).round(5)
expected = pd.DataFrame([['integer', 2, 0, 0.0],
['text', 3, 0, 0.0],
['double precision', 3, 1, 1/3]],
columns=['type', 'distinct', 'missing_values', 'missing_values_per'],
index=['a', 'b', 'c']).round(5)
self.assertTrue(summary.equals(expected))
summary = self.db.get_summary(self.test_table, count_null=False).round(5)
expected = pd.DataFrame([['integer', 2, 0, 0.0],
['text', 3, 0, 0.0],
['double precision', 2, 1, 1/3]],
columns=['type', 'distinct', 'missing_values', 'missing_values_per'],
index=['a', 'b', 'c']).round(5)
self.assertTrue(summary.equals(expected))
def test_rename_table(self):
new_name = 'public.test_postgres_new'
self.db.drop_table(new_name)
self.db.rename_table(self.test_table, new_name)
self.assertIn(new_name, self.db.tables())
# check if exists
self.assertRaises(psycopg2.Error, self.db.rename_table, 'smth', 'smth_new')
self.db.rename_table('smth', 'smth_new', True)
self.db.rename_table(new_name, self.test_table)
self.assertNotIn(new_name, self.db.tables())
class DeleteRowsCase(DBTestCase):
def setUp(self):
self.db.execute(f"CREATE TABLE {self.test_table} (a integer, b text, c float)")
self.db.execute(f"INSERT INTO {self.test_table} VALUES (1, 'b', 1), (1, 'a', 2.0), (2, 'c', null)")
self.db.refresh()
def tearDown(self):
self.db.drop_table(self.test_table)
def test_delete_all(self):
self.db.delete_rows(self.test_table)
self.assertEqual(self.db.get_shape(self.test_table)[0], 0)
def test_delete_where_single(self):
test_sets = (dict(where="b = 'a'", col='b', result='a', shape=2),
dict(where="b = 'a'", col='b', result='a', shape=2), # repetition. shouldn't do anything
dict(where="c is Null", col='c', result=None, shape=1),
dict(where="a = 1", col='a', result='a', shape=0))
for test in test_sets:
self.db.delete_rows(self.test_table, where=test['where'])
self.assertNotIn(test['result'], self.db.get_table(self.test_table)[test['col']].to_list())
self.assertEqual(self.db.get_table(self.test_table).shape[0], test['shape'])
def test_delete_where_multiple(self):
self.db.delete_rows(self.test_table, where="a = 1")
self.assertNotIn(1, self.db.get_table(self.test_table)['a'].to_list())
self.assertEqual(self.db.get_table(self.test_table).shape[0], 1)
def test_delete_where_multiple_complex(self):
self.db.delete_rows(self.test_table, where="a = 1 and b = 'a' ")
self.assertNotIn([1, 'a'], self.db.get_table(self.test_table)[['a', 'b']].to_numpy().tolist())
self.assertEqual(self.db.get_table(self.test_table).shape[0], 2)
def test_check_where_safety(self):
test_set = (f"a = 1; SELECT * FROM {self.test_table}",
f"'; SELECT * FROM {self.test_table} --")
for test in test_set:
self.assertRaises(DatabaseError, self.db.delete_rows, self.test_table, where=test)
class CopyTableCase(DBTestCase):
def setUp(self):
self.db.upload_table(self.test_table, pd.DataFrame({'a': [1, 3, 2], 'b': [4, 5, 6], 'c': [0, 0, 0]}))
self.new_table_name = 'public.test_postgres1'
def tearDown(self):
self.db.drop_table(self.test_table)
self.db.drop_table(self.new_table_name)
def test_copy_all(self):
self.db.copy_table(self.test_table, self.new_table_name)
self.assertTrue(self.db.get_table(self.test_table).equals(self.db.get_table(self.new_table_name)))
def test_columns(self):
self.db.copy_table(self.test_table, self.new_table_name, columns=['a', 'b'])
old = self.db.get_table(self.test_table, columns=['a', 'b'])
new = self.db.get_table(self.new_table_name)
self.assertTrue(new.equals(old))
def test_where(self):
self.db.copy_table(self.test_table, self.new_table_name, where="a in (1, 2)")
old = self.db.get_table(self.test_table, where="a in (1, 2)")
new = self.db.get_table(self.new_table_name)
self.assertTrue(new.equals(old))
def test_structure_only(self):
self.db.copy_table(self.test_table, self.new_table_name, structure_only=True)
old_columns = self.db.get_columns(self.test_table)
new_columns = self.db.get_columns(self.new_table_name)
self.assertEqual(old_columns, new_columns)
old_dtypes = self.db.get_dtypes(self.test_table)
new_dtypes = self.db.get_dtypes(self.new_table_name)
self.assertTrue(old_dtypes.equals(new_dtypes))
self.assertTrue(self.db.get_table(self.new_table_name).empty)
def test_another_schema(self):
self.db.create_schema('test1')
self.db.copy_table(self.test_table, 'test1', destination_schema='test1')
self.assertIn('test1.test1', self.db.tables('test1'))
self.assertTrue(self.db.get_table(self.test_table).equals(self.db.get_table('test1.test1')))
self.db.drop_schema('test1', cascade=True)
class AppendTableCase(DBTestCase):
def setUp(self):
self.db.create_empty_table(self.test_table, {'a': 'integer', 'b': 'float'})
def tearDown(self):
self.db.drop_table(self.test_table)
def test__check_integrity(self):
# DataFrame checks
self.db._check_integrity(pd.DataFrame({'a': [1, 2], 'b': [4, 5]}), ['a', 'b'])
self.db._check_integrity(pd.DataFrame({'a': [1, 2], 'b': [4, 5]}), ['a'])
self.db._check_integrity(pd.DataFrame({'a': [1, 2], 'b': [4, 5]}), None)
# list of lists checks
self.db._check_integrity([[1, 2], [4, 5]], ['a', 'b'])
self.assertRaises(ValueError, self.db._check_integrity, [[1, 2], [4, 5]], ['a'])
self.assertRaises(ValueError, self.db._check_integrity, [[1, 2], [4, 5]], 'a')
def test__update_table_schema_df_columns(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
column_sets = (['a'], ['a', 'b'], ['a', 'b', 'c'], {'a': 'integer', 'd': 'text'})
for columns in column_sets:
_values, _columns = self.db._update_table_schema(self.test_table, values, columns)
self.assertEqual(values.to_numpy().tolist(), _values)
self.assertEqual(values.columns.to_list(), _columns)
def test__update_table_schema_df_on_new_columns(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5], 'c': [6, 7]})
# 'raise' doesn't filter the dataframe and the error will be raised when the stmt is executed
_values, _columns = self.db._update_table_schema(self.test_table, values, [], 'raise')
self.assertEqual(values.to_numpy().tolist(), _values)
self.assertEqual(values.columns.to_list(), _columns)
_values, _columns = self.db._update_table_schema(self.test_table, values, [], 'ignore')
self.assertEqual(values[['a', 'b']].to_numpy().tolist(), _values)
self.assertEqual(['a', 'b'], _columns)
_values, _columns = self.db._update_table_schema(self.test_table, values, [], 'add')
self.assertEqual(values.to_numpy().tolist(), _values)
self.assertEqual(values.columns.to_list(), _columns)
self.assertIn('c', self.db.get_columns(self.test_table))
def test__update_table_schema_sequence_columns(self):
values = [[1, 2], [4, 5]]
column_sets = (['a', 'b'], {'a': 'integer', 'd': 'text'})
for columns in column_sets:
_values, _columns = self.db._update_table_schema(self.test_table, values, columns)
self.assertEqual(values, _values)
self.assertEqual(list(columns), _columns)
def test__update_table_schema_sequence_on_new_columns(self):
values = [[1, 2], [4, 5]]
# no new column definition
self.assertRaises(ValueError, self.db._update_table_schema, self.test_table, values, ['a', 'c'], 'add')
# new column - raise
columns = {'a': 'integer', 'd': 'text'}
_values, _columns = self.db._update_table_schema(self.test_table, values, columns, 'raise')
self.assertEqual(values, _values)
self.assertEqual(list(columns), _columns)
# new column - ignore
columns = {'a': 'integer', 'd': 'text'}
_values, _columns = self.db._update_table_schema(self.test_table, values, columns, 'ignore')
self.assertEqual(values, _values)
self.assertEqual(list(columns), _columns)
# new column - add
columns = {'a': 'integer', 'd': 'text'}
_values, _columns = self.db._update_table_schema(self.test_table, values, columns, 'add')
self.assertEqual(values, _values)
self.assertEqual(list(columns), _columns)
self.assertIn('d', self.db.get_columns(self.test_table))
def test_append_new_table(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
self.db.append_to_table('public.test_postgres1', values)
table = self.db.get_table('public.test_postgres1')
self.assertTrue(values.equals(table))
self.db.drop_table('public.test_postgres1')
def test_append_new_table_no_column_definition(self):
values = [[1, 2], [4, 5]]
self.assertRaises(TypeError, self.db.append_to_table, 'public.test_postgres1', values, ['a', 'b'])
self.assertRaises(TypeError, self.db.append_to_table, 'public.test_postgres2', values, None)
def test_append_to_table(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 5]})
self.db.append_to_table(self.test_table, values)
class UpdateTableCase(DBTestCase):
def setUp(self):
self.db.upload_table(self.test_table, pd.DataFrame({'a': [1, 3, 2], 'b': [4, 5, 6], 'c': [0, 0, 0]}))
def tearDown(self):
self.db.drop_table(self.test_table)
def test_table_existence(self):
self.assertRaises(psycopg2.Error, self.db.update_table, 'public.test_smth', ['b'], 1)
self.db.update_table(self.test_table, ['b'], 1)
def test_integrity(self):
self.assertRaises(IndexError, self.db.update_table, self.test_table, ['b', 'a'], 1)
self.assertRaises(IndexError, self.db.update_table, self.test_table, ['b', 'a'], [2])
self.assertRaises(IndexError, self.db.update_table, self.test_table, 'b', [1, 2])
self.assertRaises(ValueError, self.db.update_table, self.test_table, [], [])
def test_update(self):
self.db.update_table(self.test_table, 'b', 1)
self.assertEqual(self.db.get_table(self.test_table, 'b')['b'].unique().tolist(), [1])
self.db.update_table(self.test_table, ['b'], 3)
self.assertEqual(self.db.get_table(self.test_table, 'b')['b'].unique().tolist(), [3])
def test_with_expressions(self):
self.db.update_table(self.test_table, ['b'], ['b+3'])
self.assertEqual(self.db.get_table(self.test_table, 'b')['b'].to_list(), [7, 8, 9])
self.db.update_table(self.test_table, ['b', 'c'], [2, 'a+b'])
self.assertEqual(self.db.get_table(self.test_table, ['b', 'c']).to_numpy().tolist(), [[2, 8], [2, 11], [2, 11]])
def test_where(self):
self.db.update_table(self.test_table, 'b', 1, where='a=1')
self.assertEqual(self.db.get_table(self.test_table).to_numpy().tolist(), [[3, 5, 0], [2, 6, 0], [1, 1, 0]])
self.db.update_table(self.test_table, ['b', 'c'], [3, 5], where='a != 1')
self.assertEqual(self.db.get_table(self.test_table).to_numpy().tolist(), [[1, 1, 0], [3, 3, 5], [2, 3, 5]])
def test_safety(self):
injection = "SELECT * FROM public.test --"
self.assertRaises(DatabaseError, self.db.update_table, self.test_table, 'a', 1, where=f"b=4; {injection}")
self.assertRaises(DatabaseError, self.db.update_table, self.test_table, 'a', 1, where=f"'; {injection}")
self.assertRaises(DatabaseError, self.db.update_table, self.test_table, 'a', f"1; {injection}")
self.assertRaises(DatabaseError, self.db.update_table, self.test_table, ['a', 'b'], [f"1; {injection}", 2])
class UpsertTableCase(DBTestCase):
def setUp(self):
self.db.upload_table(self.test_table, pd.DataFrame({'a': [1, 3, 2], 'b': [4, 5, 6], 'c': [0, 0, 0]}))
def tearDown(self):
self.db.drop_table(self.test_table)
def test_upsert_new_table(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 6], 'c': [1, 3]})
self.db.upsert_table('public.test_postgres1', values)
self.db.drop_table('public.test_postgres1')
def test_upsert_no_pkey(self):
values = pd.DataFrame({'a': [1, 2], 'b': [4, 6], 'c': [1, 3]})
self.assertRaises(KeyError, self.db.upsert_table, self.test_table, values)
def test_upsert_no_pkey_existing_pkey(self):
values = pd.DataFrame({'a': [7, 1, 4], 'b': [5, 6, 7], 'c': [1, 3, 4]})
self.db.set_primary_key(self.test_table, 'b')
self.db.upsert_table(self.test_table, values)
expected = pd.DataFrame({'a': [1, 7, 1, 4], 'b': [4, 5, 6, 7], 'c': [0, 1, 3, 4]})
self.assertTrue(expected.equals(self.db.get_table(self.test_table)))
def test_upsert_new_pkey(self):
values = pd.DataFrame({'a': [7, 1, 3], 'b': [5, 6, 7], 'c': [1, 3, 4]})
self.db.upsert_table(self.test_table, values, id_column_pkey='a')
expected = pd.DataFrame({'a': [2, 7, 1, 3], 'b': [6, 5, 6, 7], 'c': [0, 1, 3, 4]})
self.assertTrue(expected.equals(self.db.get_table(self.test_table)))
values = pd.DataFrame({'a': [5, 1, 4], 'b': [5, 6, 7], 'c': [1, 3, 4]})
self.db.upsert_table(self.test_table, values, id_column_pkey=['c'])
expected = pd.DataFrame({'a': [2, 5, 1, 4], 'b': [6, 5, 6, 7], 'c': [0, 1, 3, 4]})
self.assertTrue(expected.equals(self.db.get_table(self.test_table)))
class MiscCase(DBTestCase):
def test_methods_parse_schema_table(self):
methods = ['analyse', 'add_columns', 'alter_columns', 'drop_columns', 'rename_column', 'create_index',
'drop_primary_key', 'get_primary_key', 'get_primary_key_columns', 'set_primary_key',
'append_to_table', 'delete_rows', 'copy_table', 'create_empty_table', 'get_table',
'rename_table', 'update_table', 'upload_table', 'upsert_table', 'get_columns', 'get_constraints',
'get_dtypes', 'get_na', 'get_nunique', 'get_shape', 'get_summary', '_commit_table',
'_update_table_schema']
decorators = get_decorators(PostgreSQLManager)
methods_registered = [k for k, v in decorators.items() if 'parse_schema_table' in v]
self.assertEqual(sorted(methods), sorted(methods_registered))
def test_get_transactions(self):
self.assertTrue(not self.db.get_transactions().empty)
def test_get_transactions_state(self):
states = set(self.db.get_transactions('active')['state'].to_list())
self.assertEqual({'active'}, states)
if __name__ == '__main__':
unittest.main()
| 47.026871
| 120
| 0.62226
|
4a1673e5433e5323a9fa05cabaf816a05f2ee0c1
| 5,559
|
py
|
Python
|
test/unit/test_laziness.py
|
tj-sun/PyOP2
|
72f49dd562e34efaf9482c3638572879fcb7921d
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/test_laziness.py
|
tj-sun/PyOP2
|
72f49dd562e34efaf9482c3638572879fcb7921d
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/test_laziness.py
|
tj-sun/PyOP2
|
72f49dd562e34efaf9482c3638572879fcb7921d
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is part of PyOP2
#
# PyOP2 is Copyright (c) 2012, Imperial College London and
# others. Please see the AUTHORS file in the main source directory for
# a full list of copyright holders. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Imperial College London or that of other
# contributors may not be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTERS
# ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Lazy evaluation unit tests.
"""
import pytest
import numpy
from pyop2 import op2, base
nelems = 42
class TestLaziness:
@pytest.fixture
def iterset(cls):
return op2.Set(nelems, name="iterset")
def test_stable(self, skip_greedy, iterset):
a = op2.Global(1, 0, numpy.uint32, "a")
kernel = """
void
count(unsigned int* x)
{
(*x) += 1;
}
"""
op2.par_loop(op2.Kernel(kernel, "count"), iterset, a(op2.INC))
assert a._data[0] == 0
assert a.data[0] == nelems
assert a.data[0] == nelems
def test_reorder(self, skip_greedy, iterset):
a = op2.Global(1, 0, numpy.uint32, "a")
b = op2.Global(1, 0, numpy.uint32, "b")
kernel = """
void
count(unsigned int* x)
{
(*x) += 1;
}
"""
op2.par_loop(op2.Kernel(kernel, "count"), iterset, a(op2.INC))
op2.par_loop(op2.Kernel(kernel, "count"), iterset, b(op2.INC))
assert a._data[0] == 0
assert b._data[0] == 0
assert b.data[0] == nelems
assert a._data[0] == 0
assert a.data[0] == nelems
def test_ro_accessor(self, skip_greedy, iterset):
"""Read-only access to a Dat should force computation that writes to it."""
base._trace.clear()
d = op2.Dat(iterset, numpy.zeros(iterset.total_size), dtype=numpy.float64)
k = op2.Kernel('void k(double *x) { *x = 1.0; }', 'k')
op2.par_loop(k, iterset, d(op2.WRITE))
assert all(d.data_ro == 1.0)
assert len(base._trace._trace) == 0
def test_rw_accessor(self, skip_greedy, iterset):
"""Read-write access to a Dat should force computation that writes to it,
and any pending computations that read from it."""
base._trace.clear()
d = op2.Dat(iterset, numpy.zeros(iterset.total_size), dtype=numpy.float64)
d2 = op2.Dat(iterset, numpy.empty(iterset.total_size), dtype=numpy.float64)
k = op2.Kernel('void k(double *x) { *x = 1.0; }', 'k')
k2 = op2.Kernel('void k2(double *x, double *y) { *x = *y; }', 'k2')
op2.par_loop(k, iterset, d(op2.WRITE))
op2.par_loop(k2, iterset, d2(op2.WRITE), d(op2.READ))
assert all(d.data == 1.0)
assert len(base._trace._trace) == 0
def test_chain(self, skip_greedy, iterset):
a = op2.Global(1, 0, numpy.uint32, "a")
x = op2.Dat(iterset, numpy.zeros(nelems), numpy.uint32, "x")
y = op2.Dat(iterset, numpy.zeros(nelems), numpy.uint32, "y")
kernel_add_one = """
void
add_one(unsigned int* x)
{
(*x) += 1;
}
"""
kernel_copy = """
void
copy(unsigned int* dst, unsigned int* src)
{
(*dst) = (*src);
}
"""
kernel_sum = """
void
sum(unsigned int* sum, unsigned int* x)
{
(*sum) += (*x);
}
"""
pl_add = op2.par_loop(op2.Kernel(kernel_add_one, "add_one"), iterset, x(op2.RW))
pl_copy = op2.par_loop(op2.Kernel(kernel_copy, "copy"), iterset, y(op2.WRITE), x(op2.READ))
pl_sum = op2.par_loop(op2.Kernel(kernel_sum, "sum"), iterset, a(op2.INC), x(op2.READ))
# check everything is zero at first
assert sum(x._data) == 0
assert sum(y._data) == 0
assert a._data[0] == 0
assert base._trace.in_queue(pl_add)
assert base._trace.in_queue(pl_copy)
assert base._trace.in_queue(pl_sum)
# force computation affecting 'a' (1st and 3rd par_loop)
assert a.data[0] == nelems
assert not base._trace.in_queue(pl_add)
assert base._trace.in_queue(pl_copy)
assert not base._trace.in_queue(pl_sum)
assert sum(x.data) == nelems
# force the last par_loop remaining (2nd)
assert sum(y.data) == nelems
assert not base._trace.in_queue(pl_copy)
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
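# In short, the behaviour these tests rely on: op2.par_loop() only enqueues work
# on base._trace; reading Global.data or Dat.data / Dat.data_ro forces exactly
# the enqueued loops that the read depends on, while the underscored
# Global._data / Dat._data peek at the buffer without triggering computation.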
| 33.690909
| 99
| 0.648678
|
4a1674d9fa96065214786f2dcc8f898a77e14fc4
| 1,319
|
py
|
Python
|
analyzer/settings_analyzer/migrations/0002_auto_20171105_0501.py
|
LaSoftRepo/analyzer
|
320536cc86df7514c8dea979585ea70665ff83eb
|
[
"MIT"
] | null | null | null |
analyzer/settings_analyzer/migrations/0002_auto_20171105_0501.py
|
LaSoftRepo/analyzer
|
320536cc86df7514c8dea979585ea70665ff83eb
|
[
"MIT"
] | null | null | null |
analyzer/settings_analyzer/migrations/0002_auto_20171105_0501.py
|
LaSoftRepo/analyzer
|
320536cc86df7514c8dea979585ea70665ff83eb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-05 03:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('settings_analyzer', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='date_from',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='settings',
name='date_to',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='settings',
name='price_hrn_from',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='settings',
name='price_hrn_to',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='settings',
name='price_usd_from',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='settings',
name='price_usd_to',
field=models.IntegerField(blank=True, null=True),
),
]
| 28.673913
| 61
| 0.570887
|
4a1674f5b43882d4a6cc396fca0125df626a8738
| 2,286
|
py
|
Python
|
api_test.py
|
Narutlih/Api
|
5835c8cffaa5d719898cb0e69348675c81de7525
|
[
"BSD-3-Clause"
] | null | null | null |
api_test.py
|
Narutlih/Api
|
5835c8cffaa5d719898cb0e69348675c81de7525
|
[
"BSD-3-Clause"
] | null | null | null |
api_test.py
|
Narutlih/Api
|
5835c8cffaa5d719898cb0e69348675c81de7525
|
[
"BSD-3-Clause"
] | null | null | null |
import vk
import time
import collections
import xlsxwriter
session = vk.Session(
access_token='a59e5aaa3a3775c46c9a7a522ec899a4c26dc533b97e507fd4fa194a3eea4aa7836c78fc11b429235c23d')
vk_api = vk.API(session)
deq = collections.deque(maxlen=4)
def pause_request():
deq.appendleft(time.time())
if len(deq) == 4:
time.sleep(max(2 + deq[3] - deq[0], 0))
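# A worked example of the throttle above (illustrative numbers, not from the
# script): deq[0] is the newest timestamp (appendleft) and deq[3] the oldest of
# the last four calls. If those calls happened at t = 10.0, 10.3, 10.6 and 10.9,
# then 2 + deq[3] - deq[0] = 2 + 10.0 - 10.9 = 1.1, so the next request sleeps
# 1.1 s. Any four consecutive requests are therefore spread over at least ~2 s,
# presumably to stay under the VK API rate limit.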
group = 2976989
count = vk_api.groups.getMembers(group_id=group, v='5.103')['count']
members = []
if count > 1000:
for i in range(0, 1 + count // 1000):
pause_request()
members.extend(
vk_api.groups.getMembers(group_id=group, offset=i * 1000, count=1000, fields=('sex', 'city', 'bdate'),
lang=3,
v='5.103')['items'])
else:
members = vk_api.groups.getMembers(group_id=group, fields=('sex', 'city', 'bdate'), lang=3, v='5.103')['items']
users = []
for x in members:
if 'deactivated' not in x:
users.append(x)
workbook = xlsxwriter.Workbook('group_info.xlsx')
worksheet = workbook.add_worksheet()
row = 0
col = 0
worksheet.write(row, col, 'Group')
worksheet.write(row, col + 1, 'id_person')
worksheet.write(row, col + 2, 'group_id')
worksheet.write(row, col + 3, 'group_name')
row += 1
count = 0
for x in range(10000, 15000):
if vk_api.users.get(user_id=users[x]['id'], fields='blacklisted', v='5.103')[0]['blacklisted'] == 0:
if not users[x]['is_closed']:
pause_request()
print(users[x]['id'])
groups = vk_api.groups.get(user_id=users[x]['id'], extended=1, fields='members_count', offset=0, v='5.103')['items']
users[x]['groups'] = groups
if 'groups' in users[x]:
for y in users[x]['groups']:
if 'members_count' in y and y['members_count'] > 5000:
worksheet.write(row, col, 'Fantastica')
worksheet.write(row, col + 1, users[x]['id'])
worksheet.write(row, col + 2, y['id'])
worksheet.write(row, col + 3, y['name'])
row += 1
count += 1
print(count)
workbook.close()
| 32.657143
| 129
| 0.555556
|
4a167582011af7acbf30c0298f015353262aa38c
| 2,062
|
py
|
Python
|
joker/server/connection_utils.py
|
PuNkYsHuNgRy/joker-blockchain
|
e49d6b9aa46e6097e216561bd7563b50519aae13
|
[
"Apache-2.0"
] | 4
|
2022-03-04T06:08:15.000Z
|
2022-03-17T19:14:22.000Z
|
joker/server/connection_utils.py
|
zcomputerwiz/joker-blockchain
|
72cf94708acd49e0cbcc63c74d5ddb1e1045b8a5
|
[
"Apache-2.0"
] | 1
|
2022-03-29T13:20:11.000Z
|
2022-03-29T13:20:11.000Z
|
joker/server/connection_utils.py
|
zcomputerwiz/joker-blockchain
|
72cf94708acd49e0cbcc63c74d5ddb1e1045b8a5
|
[
"Apache-2.0"
] | 3
|
2022-03-10T22:26:44.000Z
|
2022-03-15T08:46:15.000Z
|
import asyncio
import random
from typing import Any, List, Optional, Tuple
from joker.server.ws_connection import WSJokerConnection
async def send_all_first_reply(
func: str, arg: Any, peers: List[WSJokerConnection], timeout=15
) -> Optional[Tuple[Any, WSJokerConnection]]:
"""performs an API request to peers and returns the result of the first response and the peer that sent it."""
async def do_func(peer_x: WSJokerConnection, func_x: str, arg_x: Any):
method_to_call = getattr(peer_x, func_x)
result_x = await method_to_call(arg_x)
if result_x is not None:
return result_x, peer_x
else:
await asyncio.sleep(timeout)
return None
tasks = []
for peer in peers:
tasks.append(do_func(peer, func, arg))
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
if len(done) > 0:
d = done.pop()
result = d.result()
if result is None:
return None
response, peer = result
return response, peer
else:
return None
async def send_to_random(func: str, arg: Any, peers: List[WSJokerConnection]) -> Optional[Tuple[Any, WSJokerConnection]]:
"""performs an API request to peers and returns the result of the first response and the peer that sent it."""
async def do_func(peer_x: WSJokerConnection, func_x: str, arg_x: Any):
method_to_call = getattr(peer_x, func_x)
result_x = await method_to_call(arg_x)
if result_x is not None:
return result_x, peer_x
else:
await asyncio.sleep(15)
return None
tasks = []
random_peer = random.choice(peers)
tasks.append(do_func(random_peer, func, arg))
done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
if len(done) > 0:
d = done.pop()
result = d.result()
if result is None:
return None
response, peer = result
return response, peer
else:
return None
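# Usage sketch (peer list and API method name are hypothetical, not part of this
# module): both helpers are intended to be awaited from other server code, e.g.
#
#   result = await send_all_first_reply("request_block", request, full_node_peers)
#   if result is not None:
#       response, peer = result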
| 30.776119
| 121
| 0.64355
|
4a1675bdee4bf9b2bd87d5c8f5285017e3d88e9d
| 482
|
py
|
Python
|
tests/test_tutorial/test_response_change_status_code/test_tutorial001.py
|
jfunez/fastapi
|
7372f6ba11abb515a7f11814dba52a1d1c0925f0
|
[
"MIT"
] | 2
|
2020-04-09T07:11:28.000Z
|
2020-12-12T14:04:35.000Z
|
tests/test_tutorial/test_response_change_status_code/test_tutorial001.py
|
jfunez/fastapi
|
7372f6ba11abb515a7f11814dba52a1d1c0925f0
|
[
"MIT"
] | 1
|
2021-03-27T18:37:32.000Z
|
2021-05-25T15:08:24.000Z
|
tests/test_tutorial/test_response_change_status_code/test_tutorial001.py
|
jfunez/fastapi
|
7372f6ba11abb515a7f11814dba52a1d1c0925f0
|
[
"MIT"
] | 1
|
2021-02-03T00:43:04.000Z
|
2021-02-03T00:43:04.000Z
|
from fastapi.testclient import TestClient
from response_change_status_code.tutorial001 import app
client = TestClient(app)
def test_path_operation():
response = client.put("/get-or-create-task/foo")
print(response.content)
assert response.status_code == 200
assert response.json() == "Listen to the Bar Fighters"
response = client.put("/get-or-create-task/bar")
assert response.status_code == 201
assert response.json() == "This didn't exist before"
| 30.125
| 58
| 0.732365
|
4a1675f3675bc773b04679b34bdd50a37fe73af3
| 761
|
py
|
Python
|
spark_auto_mapper_fhir/value_sets/condition_cause_codes.py
|
imranq2/SparkAutoMapper.FHIR
|
dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2
|
[
"Apache-2.0"
] | 1
|
2020-10-31T23:25:07.000Z
|
2020-10-31T23:25:07.000Z
|
spark_auto_mapper_fhir/value_sets/condition_cause_codes.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
spark_auto_mapper_fhir/value_sets/condition_cause_codes.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ConditionCauseCodesCode(GenericTypeCode):
"""
ConditionCauseCodes
From: http://hl7.org/fhir/ValueSet/condition-cause in valuesets.xml
Example value set for Cause of Condition codes
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://snomed.info/sct
"""
codeset: FhirUri = "http://snomed.info/sct"
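# Usage sketch (the SNOMED concept id below is illustrative, not taken from this
# file): instances simply pair a coded value with the code system URI declared in
# `codeset` above.
#
#   cause = ConditionCauseCodesCode("404684003")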
| 30.44
| 84
| 0.767411
|
4a16760b02edae0c2ecc99b2909230a1bfd30a71
| 3,038
|
py
|
Python
|
terrascript/vsphere/r.py
|
vfoucault/python-terrascript
|
fe82b3d7e79ffa72b7871538f999828be0a115d0
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/vsphere/r.py
|
vfoucault/python-terrascript
|
fe82b3d7e79ffa72b7871538f999828be0a115d0
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/vsphere/r.py
|
vfoucault/python-terrascript
|
fe82b3d7e79ffa72b7871538f999828be0a115d0
|
[
"BSD-2-Clause"
] | null | null | null |
from terrascript import _resource
class vsphere_custom_attribute(_resource): pass
custom_attribute = vsphere_custom_attribute
class vsphere_datastore_cluster(_resource): pass
datastore_cluster = vsphere_datastore_cluster
class vsphere_resource_pool(_resource): pass
resource_pool = vsphere_resource_pool
class vsphere_storage_drs_vm_override(_resource): pass
storage_drs_vm_override = vsphere_storage_drs_vm_override
class vsphere_datacenter(_resource): pass
datacenter = vsphere_datacenter
class vsphere_compute_cluster(_resource): pass
compute_cluster = vsphere_compute_cluster
class vsphere_compute_cluster_vm_affinity_rule(_resource): pass
compute_cluster_vm_affinity_rule = vsphere_compute_cluster_vm_affinity_rule
class vsphere_compute_cluster_vm_anti_affinity_rule(_resource): pass
compute_cluster_vm_anti_affinity_rule = vsphere_compute_cluster_vm_anti_affinity_rule
class vsphere_compute_cluster_vm_dependency_rule(_resource): pass
compute_cluster_vm_dependency_rule = vsphere_compute_cluster_vm_dependency_rule
class vsphere_compute_cluster_vm_group(_resource): pass
compute_cluster_vm_group = vsphere_compute_cluster_vm_group
class vsphere_compute_cluster_vm_host_rule(_resource): pass
compute_cluster_vm_host_rule = vsphere_compute_cluster_vm_host_rule
class vsphere_drs_vm_override(_resource): pass
drs_vm_override = vsphere_drs_vm_override
class vsphere_dpm_host_override(_resource): pass
dpm_host_override = vsphere_dpm_host_override
class vsphere_ha_vm_override(_resource): pass
ha_vm_override = vsphere_ha_vm_override
class vsphere_compute_cluster_host_group(_resource): pass
compute_cluster_host_group = vsphere_compute_cluster_host_group
class vsphere_datastore_cluster_vm_anti_affinity_rule(_resource): pass
datastore_cluster_vm_anti_affinity_rule = vsphere_datastore_cluster_vm_anti_affinity_rule
class vsphere_distributed_port_group(_resource): pass
distributed_port_group = vsphere_distributed_port_group
class vsphere_distributed_virtual_switch(_resource): pass
distributed_virtual_switch = vsphere_distributed_virtual_switch
class vsphere_file(_resource): pass
file = vsphere_file
class vsphere_folder(_resource): pass
folder = vsphere_folder
class vsphere_host_port_group(_resource): pass
host_port_group = vsphere_host_port_group
class vsphere_host_virtual_switch(_resource): pass
host_virtual_switch = vsphere_host_virtual_switch
class vsphere_license(_resource): pass
license = vsphere_license
class vsphere_tag(_resource): pass
tag = vsphere_tag
class vsphere_tag_category(_resource): pass
tag_category = vsphere_tag_category
class vsphere_virtual_disk(_resource): pass
virtual_disk = vsphere_virtual_disk
class vsphere_virtual_machine(_resource): pass
virtual_machine = vsphere_virtual_machine
class vsphere_nas_datastore(_resource): pass
nas_datastore = vsphere_nas_datastore
class vsphere_vmfs_datastore(_resource): pass
vmfs_datastore = vsphere_vmfs_datastore
class vsphere_virtual_machine_snapshot(_resource): pass
virtual_machine_snapshot = vsphere_virtual_machine_snapshot
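# Usage sketch (resource arguments are hypothetical; the short aliases above are
# interchangeable with the vsphere_-prefixed class names):
#
#   import terrascript
#   ts = terrascript.Terrascript()
#   ts += datacenter('dc1', name='dc1')   # or ts.add(...), depending on version
#   ts += virtual_machine('vm1', name='vm1', num_cpus=2)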
| 33.384615
| 89
| 0.890718
|
4a1677625406ca42701a0f8b37f4bb6373b89704
| 205
|
py
|
Python
|
basic/yield_t.py
|
by46/geek
|
04b08d0dff80c524bd471ead3fe524423eebf123
|
[
"MIT"
] | null | null | null |
basic/yield_t.py
|
by46/geek
|
04b08d0dff80c524bd471ead3fe524423eebf123
|
[
"MIT"
] | null | null | null |
basic/yield_t.py
|
by46/geek
|
04b08d0dff80c524bd471ead3fe524423eebf123
|
[
"MIT"
] | null | null | null |
class Demo(object):
@property
def names(self):
for i in range(10):
yield i
if __name__ == '__main__':
demo = Demo()
for name in demo.names:
print(name)
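# Note on the pattern above: `names` is a property that returns a fresh generator
# on every attribute access, so each `for name in demo.names` loop starts from 0
# again. Illustration (not part of the original script):
#
#   demo = Demo()
#   assert list(demo.names) == list(range(10))
#   assert list(demo.names) == list(range(10))  # a new generator each access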
| 20.5
| 28
| 0.521951
|
4a1677f5ca54e31bb5b52bad8636d0cd99dfd126
| 830
|
py
|
Python
|
almoxarifado/migrations/0028_auto_20171020_2006.py
|
rvmoura96/projeto-almoxarifado
|
4ca5e5d00f449a940f7c601479bb3fe14c54f012
|
[
"MIT"
] | 1
|
2019-05-24T17:39:01.000Z
|
2019-05-24T17:39:01.000Z
|
almoxarifado/migrations/0028_auto_20171020_2006.py
|
rvmoura96/projeto-almoxarifado
|
4ca5e5d00f449a940f7c601479bb3fe14c54f012
|
[
"MIT"
] | null | null | null |
almoxarifado/migrations/0028_auto_20171020_2006.py
|
rvmoura96/projeto-almoxarifado
|
4ca5e5d00f449a940f7c601479bb3fe14c54f012
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-20 22:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('almoxarifado', '0027_auto_20171020_2006'),
]
operations = [
migrations.AlterField(
model_name='modelo',
name='modelo',
field=models.CharField(max_length=170, unique=True),
),
migrations.AlterField(
model_name='tipo',
name='tipo',
field=models.CharField(max_length=50, unique=True),
),
migrations.AlterField(
model_name='tipo_itens',
name='tipo',
field=models.CharField(max_length=50, unique=True),
),
]
| 26.774194
| 65
| 0.563855
|
4a1679b8c5212e88aa0275d51cbea986b1984d4b
| 7,977
|
py
|
Python
|
sdk/datalake/azure-mgmt-datalake-store/azure/mgmt/datalake/store/operations/_locations_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/datalake/azure-mgmt-datalake-store/azure/mgmt/datalake/store/operations/_locations_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/datalake/azure-mgmt-datalake-store/azure/mgmt/datalake/store/operations/_locations_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LocationsOperations(object):
"""LocationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datalake.store.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_capability(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.CapabilityInformation"]
"""Gets subscription-level properties and limits for Data Lake Store specified by resource
location.
:param location: The resource location without whitespace.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CapabilityInformation, or the result of cls(response)
:rtype: ~azure.mgmt.datalake.store.models.CapabilityInformation or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.CapabilityInformation"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-11-01"
accept = "application/json"
# Construct URL
url = self.get_capability.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CapabilityInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_capability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeStore/locations/{location}/capability'} # type: ignore
def get_usage(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.UsageListResult"]
"""Gets the current usage count and the limit for the resources of the location under the
subscription.
:param location: The resource location without whitespace.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.datalake.store.models.UsageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_usage.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UsageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_usage.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeStore/locations/{location}/usages'} # type: ignore
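# Usage sketch (client construction is illustrative and not part of this
# generated module):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.datalake.store import DataLakeStoreAccountManagementClient
#
#   client = DataLakeStoreAccountManagementClient(DefaultAzureCredential(), subscription_id)
#   print(client.locations.get_capability("eastus2"))
#   for usage in client.locations.get_usage("eastus2"):
#       print(usage)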
| 45.067797
| 154
| 0.655384
|
4a167b558344719984a31126be631fbbb5e0110c
| 2,740
|
py
|
Python
|
app.py
|
certanet/quipto
|
e70a7021bb0859f707af7d162e295583d6178d48
|
[
"MIT"
] | null | null | null |
app.py
|
certanet/quipto
|
e70a7021bb0859f707af7d162e295583d6178d48
|
[
"MIT"
] | null | null | null |
app.py
|
certanet/quipto
|
e70a7021bb0859f707af7d162e295583d6178d48
|
[
"MIT"
] | null | null | null |
import base64
import os
from getpass import getpass
from Crypto import Random
from Crypto.Cipher import AES
from pbkdf2 import PBKDF2
class Quipto:
    """AES-256-CBC helper: a 32-byte key is derived with PBKDF2 from a passphrase
    and a module-level `salt` (set up in the __main__ block below)."""
    def __init__(self):
        pass
    def pad_data(self, data):
        # Pad to a 16-byte AES block boundary: append a 0x80 marker byte followed by
        # zero bytes (e.g. a 5-byte input becomes input + b'\x80' + 10 * b'\x00' = 16 bytes).
        # Inputs already on a block boundary are returned unchanged.
        if len(data) % 16 == 0:
            return data
        databytes = bytearray(data)
        padding_required = 15 - (len(databytes) % 16)
        databytes.extend(b'\x80')
        databytes.extend(b'\x00' * padding_required)
        return bytes(databytes)
    def unpad_data(self, data):
        # Reverse of pad_data: strip trailing zero bytes, then the 0x80 marker if present.
        if not data:
            return data
        data = data.rstrip(b'\x00')
        if data[-1] == 128:
            return data[:-1]
        else:
            return data
    def Encrypt(self, pt, secret):
        # Derive a 32-byte key from the passphrase with PBKDF2, encrypt the padded
        # plaintext in CBC mode with a random IV, prepend the IV and base64-encode.
        secret_enc = secret.encode('utf-8')
        pt_enc = self.pad_data(pt.encode('utf-8'))
        key = PBKDF2(secret_enc, salt).read(32)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        ct = iv + cipher.encrypt(pt_enc)
        ct_enc = base64.b64encode(ct)
        return ct_enc
    def Decrypt(self, ct, secret):
        # Reverse of Encrypt: base64-decode, split off the prepended IV, decrypt and unpad.
        secret_enc = secret.encode('utf-8')
        try:
            ct_dec = base64.b64decode(ct.decode())
        except Exception:
            return "ERROR: CT looks invalid"
        key = PBKDF2(secret_enc, salt).read(32)
        iv = ct_dec[:AES.block_size]
        try:
            cipher = AES.new(key, AES.MODE_CBC, iv)
        except Exception:
            return "ERROR: Decryption error, check CT"
        pt = cipher.decrypt(ct_dec[AES.block_size:])
        try:
            pt_dec = self.unpad_data(pt).decode('utf-8')
        except Exception:
            return "ERROR: Decryption error, check secret or salt"
        return pt_dec
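# A minimal non-interactive roundtrip sketch (assumes a module-level `salt`, as the
# __main__ block below sets up; the passphrase string here is purely illustrative):
#
#     salt = os.urandom(8)
#     q = Quipto()
#     token = q.Encrypt("hello world", "my passphrase")
#     assert q.Decrypt(token, "my passphrase") == "hello world"
#     # A wrong passphrase or salt typically surfaces as one of the "ERROR: ..." strings.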
if __name__ == '__main__':
if "QUIPTO_SALT" in os.environ:
print("Stored salt detected!")
salt = eval(os.environ['QUIPTO_SALT'])
else:
print("No salt found, generating salt")
salt = os.urandom(8)
os.environ['QUIPTO_SALT'] = str(salt)
salt = eval(os.environ['QUIPTO_SALT'])
print("ENV SALT = " + str(salt))
if "QUIPTO_SECRET" in os.environ:
print("Stored secret detected!")
secret = os.environ['QUIPTO_SECRET']
else:
print("No secret found...")
secret = str(getpass("Enter secret: "))
enigma = Quipto()
while True:
go = input("(e)ncrypt / (d)ecrypt / (q)uit?: ")
if go == "e":
pt = str(input("Enter PT: "))
ct = enigma.Encrypt(pt, secret)
print("CT data is: " + str(ct)[1:])
elif go == "d":
ct = str.encode(input("Enter CT: "))
print(str(enigma.Decrypt(ct, secret)))
elif go == 'q':
break
| 26.862745
| 66
| 0.55146
|
4a167bacb330ca50b453df079b6c6582d6b34b0d
| 89,159
|
py
|
Python
|
test/test_collection.py
|
infinite-skx/mongo-python-driver
|
bdafc357331813222b1e677b66041dad1fc852a5
|
[
"Apache-2.0"
] | null | null | null |
test/test_collection.py
|
infinite-skx/mongo-python-driver
|
bdafc357331813222b1e677b66041dad1fc852a5
|
[
"Apache-2.0"
] | 1
|
2021-12-24T11:32:17.000Z
|
2021-12-24T11:32:17.000Z
|
test/test_collection.py
|
Surfndez/mongo-python-driver
|
51691246e9b2ef8446f3716c9ba7bab1a9f4e1ad
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the collection module."""
import contextlib
import re
import sys
from codecs import utf_8_decode
from collections import defaultdict
sys.path[0:0] = [""]
from bson import encode
from bson.raw_bson import RawBSONDocument
from bson.regex import Regex
from bson.codec_options import CodecOptions
from bson.objectid import ObjectId
from bson.son import SON
from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT
from pymongo.bulk import BulkWriteError
from pymongo.collection import Collection, ReturnDocument
from pymongo.command_cursor import CommandCursor
from pymongo.cursor import CursorType
from pymongo.errors import (ConfigurationError,
DocumentTooLarge,
DuplicateKeyError,
ExecutionTimeout,
InvalidDocument,
InvalidName,
InvalidOperation,
OperationFailure,
WriteConcernError)
from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command
from pymongo.mongo_client import MongoClient
from pymongo.operations import *
from pymongo.read_concern import DEFAULT_READ_CONCERN
from pymongo.read_preferences import ReadPreference
from pymongo.results import (InsertOneResult,
InsertManyResult,
UpdateResult,
DeleteResult)
from pymongo.write_concern import WriteConcern
from test import client_context, unittest
from test.test_client import IntegrationTest
from test.utils import (get_pool, is_mongos,
rs_or_single_client, single_client,
wait_until, EventListener,
IMPOSSIBLE_WRITE_CONCERN)
class TestCollectionNoConnect(unittest.TestCase):
"""Test Collection features on a client that does not connect.
"""
@classmethod
def setUpClass(cls):
cls.db = MongoClient(connect=False).pymongo_test
def test_collection(self):
self.assertRaises(TypeError, Collection, self.db, 5)
def make_col(base, name):
return base[name]
self.assertRaises(InvalidName, make_col, self.db, "")
self.assertRaises(InvalidName, make_col, self.db, "te$t")
self.assertRaises(InvalidName, make_col, self.db, ".test")
self.assertRaises(InvalidName, make_col, self.db, "test.")
self.assertRaises(InvalidName, make_col, self.db, "tes..t")
self.assertRaises(InvalidName, make_col, self.db.test, "")
self.assertRaises(InvalidName, make_col, self.db.test, "te$t")
self.assertRaises(InvalidName, make_col, self.db.test, ".test")
self.assertRaises(InvalidName, make_col, self.db.test, "test.")
self.assertRaises(InvalidName, make_col, self.db.test, "tes..t")
self.assertRaises(InvalidName, make_col, self.db.test, "tes\x00t")
def test_getattr(self):
coll = self.db.test
self.assertTrue(isinstance(coll['_does_not_exist'], Collection))
with self.assertRaises(AttributeError) as context:
coll._does_not_exist
# Message should be:
# "AttributeError: Collection has no attribute '_does_not_exist'. To
# access the test._does_not_exist collection, use
# database['test._does_not_exist']."
self.assertIn("has no attribute '_does_not_exist'",
str(context.exception))
coll2 = coll.with_options(write_concern=WriteConcern(w=0))
self.assertEqual(coll2.write_concern, WriteConcern(w=0))
self.assertNotEqual(coll.write_concern, coll2.write_concern)
coll3 = coll2.subcoll
self.assertEqual(coll2.write_concern, coll3.write_concern)
coll4 = coll2["subcoll"]
self.assertEqual(coll2.write_concern, coll4.write_concern)
def test_iteration(self):
self.assertRaises(TypeError, next, self.db)
class TestCollection(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestCollection, cls).setUpClass()
cls.w = client_context.w
@classmethod
def tearDownClass(cls):
cls.db.drop_collection("test_large_limit")
def setUp(self):
self.db.test.drop()
def tearDown(self):
self.db.test.drop()
@contextlib.contextmanager
def write_concern_collection(self):
if client_context.is_rs:
with self.assertRaises(WriteConcernError):
# Unsatisfiable write concern.
yield Collection(
self.db, 'test',
write_concern=WriteConcern(w=len(client_context.nodes) + 1))
else:
yield self.db.test
def test_equality(self):
self.assertTrue(isinstance(self.db.test, Collection))
self.assertEqual(self.db.test, self.db["test"])
self.assertEqual(self.db.test, Collection(self.db, "test"))
self.assertEqual(self.db.test.mike, self.db["test.mike"])
self.assertEqual(self.db.test["mike"], self.db["test.mike"])
def test_hashable(self):
self.assertIn(self.db.test.mike, {self.db["test.mike"]})
def test_create(self):
# No Exception.
db = client_context.client.pymongo_test
db.create_test_no_wc.drop()
wait_until(
lambda: 'create_test_no_wc' not in db.list_collection_names(),
'drop create_test_no_wc collection')
Collection(db, name='create_test_no_wc', create=True)
wait_until(
lambda: 'create_test_no_wc' in db.list_collection_names(),
'create create_test_no_wc collection')
# SERVER-33317
if (not client_context.is_mongos or not
client_context.version.at_least(3, 7, 0)):
with self.assertRaises(OperationFailure):
Collection(
db, name='create-test-wc',
write_concern=IMPOSSIBLE_WRITE_CONCERN,
create=True)
def test_drop_nonexistent_collection(self):
self.db.drop_collection('test')
self.assertFalse('test' in self.db.list_collection_names())
# No exception
self.db.drop_collection('test')
def test_create_indexes(self):
db = self.db
self.assertRaises(TypeError, db.test.create_indexes, 'foo')
self.assertRaises(TypeError, db.test.create_indexes, ['foo'])
self.assertRaises(TypeError, IndexModel, 5)
self.assertRaises(ValueError, IndexModel, [])
db.test.drop_indexes()
db.test.insert_one({})
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_indexes([IndexModel("hello")])
db.test.create_indexes([IndexModel([("hello", DESCENDING),
("world", ASCENDING)])])
# Tuple instead of list.
db.test.create_indexes([IndexModel((("world", ASCENDING),))])
self.assertEqual(len(db.test.index_information()), 4)
db.test.drop_indexes()
names = db.test.create_indexes([IndexModel([("hello", DESCENDING),
("world", ASCENDING)],
name="hello_world")])
self.assertEqual(names, ["hello_world"])
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_indexes([IndexModel("hello")])
self.assertTrue("hello_1" in db.test.index_information())
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
names = db.test.create_indexes([IndexModel([("hello", DESCENDING),
("world", ASCENDING)]),
IndexModel("hello")])
info = db.test.index_information()
for name in names:
self.assertTrue(name in info)
db.test.drop()
db.test.insert_one({'a': 1})
db.test.insert_one({'a': 1})
self.assertRaises(
DuplicateKeyError,
db.test.create_indexes,
[IndexModel('a', unique=True)])
with self.write_concern_collection() as coll:
coll.create_indexes([IndexModel('hello')])
@client_context.require_version_max(4, 3, -1)
def test_create_indexes_commitQuorum_requires_44(self):
db = self.db
with self.assertRaisesRegex(
ConfigurationError,
                r'Must be connected to MongoDB 4\.4\+ to use the commitQuorum '
                r'option for createIndexes'):
db.coll.create_indexes([IndexModel('a')], commitQuorum="majority")
@client_context.require_no_standalone
@client_context.require_version_min(4, 4, -1)
def test_create_indexes_commitQuorum(self):
self.db.coll.create_indexes([IndexModel('a')], commitQuorum="majority")
def test_create_index(self):
db = self.db
self.assertRaises(TypeError, db.test.create_index, 5)
self.assertRaises(TypeError, db.test.create_index, {"hello": 1})
self.assertRaises(ValueError, db.test.create_index, [])
db.test.drop_indexes()
db.test.insert_one({})
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_index("hello")
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)])
# Tuple instead of list.
db.test.create_index((("world", ASCENDING),))
self.assertEqual(len(db.test.index_information()), 4)
db.test.drop_indexes()
ix = db.test.create_index([("hello", DESCENDING),
("world", ASCENDING)], name="hello_world")
self.assertEqual(ix, "hello_world")
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_index("hello")
self.assertTrue("hello_1" in db.test.index_information())
db.test.drop_indexes()
self.assertEqual(len(db.test.index_information()), 1)
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)])
self.assertTrue("hello_-1_world_1" in db.test.index_information())
db.test.drop()
db.test.insert_one({'a': 1})
db.test.insert_one({'a': 1})
self.assertRaises(
DuplicateKeyError, db.test.create_index, 'a', unique=True)
with self.write_concern_collection() as coll:
coll.create_index([('hello', DESCENDING)])
def test_drop_index(self):
db = self.db
db.test.drop_indexes()
db.test.create_index("hello")
name = db.test.create_index("goodbye")
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual(name, "goodbye_1")
db.test.drop_index(name)
# Drop it again.
with self.assertRaises(OperationFailure):
db.test.drop_index(name)
self.assertEqual(len(db.test.index_information()), 2)
self.assertTrue("hello_1" in db.test.index_information())
db.test.drop_indexes()
db.test.create_index("hello")
name = db.test.create_index("goodbye")
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual(name, "goodbye_1")
db.test.drop_index([("goodbye", ASCENDING)])
self.assertEqual(len(db.test.index_information()), 2)
self.assertTrue("hello_1" in db.test.index_information())
with self.write_concern_collection() as coll:
coll.drop_index('hello_1')
@client_context.require_no_mongos
@client_context.require_test_commands
def test_index_management_max_time_ms(self):
coll = self.db.test
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="alwaysOn")
try:
self.assertRaises(
ExecutionTimeout, coll.create_index, "foo", maxTimeMS=1)
self.assertRaises(
ExecutionTimeout,
coll.create_indexes,
[IndexModel("foo")],
maxTimeMS=1)
self.assertRaises(
ExecutionTimeout, coll.drop_index, "foo", maxTimeMS=1)
self.assertRaises(
ExecutionTimeout, coll.drop_indexes, maxTimeMS=1)
finally:
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="off")
def test_list_indexes(self):
db = self.db
db.test.drop()
db.test.insert_one({}) # create collection
def map_indexes(indexes):
return dict([(index["name"], index) for index in indexes])
indexes = list(db.test.list_indexes())
self.assertEqual(len(indexes), 1)
self.assertTrue("_id_" in map_indexes(indexes))
db.test.create_index("hello")
indexes = list(db.test.list_indexes())
self.assertEqual(len(indexes), 2)
self.assertEqual(map_indexes(indexes)["hello_1"]["key"],
SON([("hello", ASCENDING)]))
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)],
unique=True)
indexes = list(db.test.list_indexes())
self.assertEqual(len(indexes), 3)
index_map = map_indexes(indexes)
self.assertEqual(index_map["hello_-1_world_1"]["key"],
SON([("hello", DESCENDING), ("world", ASCENDING)]))
self.assertEqual(True, index_map["hello_-1_world_1"]["unique"])
# List indexes on a collection that does not exist.
indexes = list(db.does_not_exist.list_indexes())
self.assertEqual(len(indexes), 0)
# List indexes on a database that does not exist.
indexes = list(self.client.db_does_not_exist.coll.list_indexes())
self.assertEqual(len(indexes), 0)
def test_index_info(self):
db = self.db
db.test.drop()
db.test.insert_one({}) # create collection
self.assertEqual(len(db.test.index_information()), 1)
self.assertTrue("_id_" in db.test.index_information())
db.test.create_index("hello")
self.assertEqual(len(db.test.index_information()), 2)
self.assertEqual(db.test.index_information()["hello_1"]["key"],
[("hello", ASCENDING)])
db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)],
unique=True)
self.assertEqual(db.test.index_information()["hello_1"]["key"],
[("hello", ASCENDING)])
self.assertEqual(len(db.test.index_information()), 3)
self.assertEqual([("hello", DESCENDING), ("world", ASCENDING)],
db.test.index_information()["hello_-1_world_1"]["key"]
)
self.assertEqual(
True, db.test.index_information()["hello_-1_world_1"]["unique"])
def test_index_geo2d(self):
db = self.db
db.test.drop_indexes()
self.assertEqual('loc_2d', db.test.create_index([("loc", GEO2D)]))
index_info = db.test.index_information()['loc_2d']
self.assertEqual([('loc', '2d')], index_info['key'])
# geoSearch was deprecated in 4.4 and removed in 5.0
@client_context.require_version_max(4, 5)
@client_context.require_no_mongos
def test_index_haystack(self):
db = self.db
db.test.drop()
_id = db.test.insert_one({
"pos": {"long": 34.2, "lat": 33.3},
"type": "restaurant"
}).inserted_id
db.test.insert_one({
"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"
})
db.test.insert_one({
"pos": {"long": 59.1, "lat": 87.2}, "type": "office"
})
db.test.create_index(
[("pos", "geoHaystack"), ("type", ASCENDING)],
bucketSize=1
)
results = db.command(SON([
("geoSearch", "test"),
("near", [33, 33]),
("maxDistance", 6),
("search", {"type": "restaurant"}),
("limit", 30),
]))['results']
self.assertEqual(2, len(results))
self.assertEqual({
"_id": _id,
"pos": {"long": 34.2, "lat": 33.3},
"type": "restaurant"
}, results[0])
@client_context.require_no_mongos
def test_index_text(self):
db = self.db
db.test.drop_indexes()
self.assertEqual("t_text", db.test.create_index([("t", TEXT)]))
index_info = db.test.index_information()["t_text"]
self.assertTrue("weights" in index_info)
db.test.insert_many([
{'t': 'spam eggs and spam'},
{'t': 'spam'},
{'t': 'egg sausage and bacon'}])
# MongoDB 2.6 text search. Create 'score' field in projection.
cursor = db.test.find(
{'$text': {'$search': 'spam'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
results = list(cursor)
self.assertTrue(results[0]['score'] >= results[1]['score'])
db.test.drop_indexes()
def test_index_2dsphere(self):
db = self.db
db.test.drop_indexes()
self.assertEqual("geo_2dsphere",
db.test.create_index([("geo", GEOSPHERE)]))
for dummy, info in db.test.index_information().items():
field, idx_type = info['key'][0]
if field == 'geo' and idx_type == '2dsphere':
break
else:
self.fail("2dsphere index not found.")
poly = {"type": "Polygon",
"coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]}
query = {"geo": {"$within": {"$geometry": poly}}}
# This query will error without a 2dsphere index.
db.test.find(query)
db.test.drop_indexes()
def test_index_hashed(self):
db = self.db
db.test.drop_indexes()
self.assertEqual("a_hashed",
db.test.create_index([("a", HASHED)]))
for dummy, info in db.test.index_information().items():
field, idx_type = info['key'][0]
if field == 'a' and idx_type == 'hashed':
break
else:
self.fail("hashed index not found.")
db.test.drop_indexes()
def test_index_sparse(self):
db = self.db
db.test.drop_indexes()
db.test.create_index([('key', ASCENDING)], sparse=True)
self.assertTrue(db.test.index_information()['key_1']['sparse'])
def test_index_background(self):
db = self.db
db.test.drop_indexes()
db.test.create_index([('keya', ASCENDING)])
db.test.create_index([('keyb', ASCENDING)], background=False)
db.test.create_index([('keyc', ASCENDING)], background=True)
self.assertFalse('background' in db.test.index_information()['keya_1'])
self.assertFalse(db.test.index_information()['keyb_1']['background'])
self.assertTrue(db.test.index_information()['keyc_1']['background'])
def _drop_dups_setup(self, db):
db.drop_collection('test')
db.test.insert_one({'i': 1})
db.test.insert_one({'i': 2})
db.test.insert_one({'i': 2}) # duplicate
db.test.insert_one({'i': 3})
def test_index_dont_drop_dups(self):
# Try *not* dropping duplicates
db = self.db
self._drop_dups_setup(db)
# There's a duplicate
def test_create():
db.test.create_index(
[('i', ASCENDING)],
unique=True,
dropDups=False
)
self.assertRaises(DuplicateKeyError, test_create)
# Duplicate wasn't dropped
self.assertEqual(4, db.test.count_documents({}))
# Index wasn't created, only the default index on _id
self.assertEqual(1, len(db.test.index_information()))
# Get the plan dynamically because the explain format will change.
def get_plan_stage(self, root, stage):
if root.get('stage') == stage:
return root
elif "inputStage" in root:
return self.get_plan_stage(root['inputStage'], stage)
elif "inputStages" in root:
for i in root['inputStages']:
stage = self.get_plan_stage(i, stage)
if stage:
return stage
elif "queryPlan" in root:
# queryPlan (and slotBasedPlan) are new in 5.0.
return self.get_plan_stage(root["queryPlan"], stage)
elif "shards" in root:
for i in root['shards']:
stage = self.get_plan_stage(i['winningPlan'], stage)
if stage:
return stage
return {}
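    # Rough, illustrative shape of the explain() output that get_plan_stage recurses
    # over (exact fields vary by server version and topology):
    #   {'stage': 'FETCH',
    #    'inputStage': {'stage': 'IXSCAN', 'indexName': 'x_1', 'isPartial': True}}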
def test_index_filter(self):
db = self.db
db.drop_collection("test")
# Test bad filter spec on create.
self.assertRaises(OperationFailure, db.test.create_index, "x",
partialFilterExpression=5)
self.assertRaises(OperationFailure, db.test.create_index, "x",
partialFilterExpression={"x": {"$asdasd": 3}})
self.assertRaises(OperationFailure, db.test.create_index, "x",
partialFilterExpression={"$and": 5})
self.assertEqual("x_1", db.test.create_index(
[('x', ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}}))
db.test.insert_one({"x": 5, "a": 2})
db.test.insert_one({"x": 6, "a": 1})
# Operations that use the partial index.
explain = db.test.find({"x": 6, "a": 1}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'IXSCAN')
self.assertEqual("x_1", stage.get('indexName'))
self.assertTrue(stage.get('isPartial'))
explain = db.test.find({"x": {"$gt": 1}, "a": 1}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'IXSCAN')
self.assertEqual("x_1", stage.get('indexName'))
self.assertTrue(stage.get('isPartial'))
explain = db.test.find({"x": 6, "a": {"$lte": 1}}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'IXSCAN')
self.assertEqual("x_1", stage.get('indexName'))
self.assertTrue(stage.get('isPartial'))
# Operations that do not use the partial index.
explain = db.test.find({"x": 6, "a": {"$lte": 1.6}}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'COLLSCAN')
self.assertNotEqual({}, stage)
explain = db.test.find({"x": 6}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'COLLSCAN')
self.assertNotEqual({}, stage)
# Test drop_indexes.
db.test.drop_index("x_1")
explain = db.test.find({"x": 6, "a": 1}).explain()
stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'],
'COLLSCAN')
self.assertNotEqual({}, stage)
def test_field_selection(self):
db = self.db
db.drop_collection("test")
doc = {"a": 1, "b": 5, "c": {"d": 5, "e": 10}}
db.test.insert_one(doc)
# Test field inclusion
doc = next(db.test.find({}, ["_id"]))
self.assertEqual(list(doc), ["_id"])
doc = next(db.test.find({}, ["a"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "a"])
doc = next(db.test.find({}, ["b"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "b"])
doc = next(db.test.find({}, ["c"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "c"])
doc = next(db.test.find({}, ["a"]))
self.assertEqual(doc["a"], 1)
doc = next(db.test.find({}, ["b"]))
self.assertEqual(doc["b"], 5)
doc = next(db.test.find({}, ["c"]))
self.assertEqual(doc["c"], {"d": 5, "e": 10})
# Test inclusion of fields with dots
doc = next(db.test.find({}, ["c.d"]))
self.assertEqual(doc["c"], {"d": 5})
doc = next(db.test.find({}, ["c.e"]))
self.assertEqual(doc["c"], {"e": 10})
doc = next(db.test.find({}, ["b", "c.e"]))
self.assertEqual(doc["c"], {"e": 10})
doc = next(db.test.find({}, ["b", "c.e"]))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "b", "c"])
doc = next(db.test.find({}, ["b", "c.e"]))
self.assertEqual(doc["b"], 5)
# Test field exclusion
doc = next(db.test.find({}, {"a": False, "b": 0}))
l = list(doc)
l.sort()
self.assertEqual(l, ["_id", "c"])
doc = next(db.test.find({}, {"_id": False}))
l = list(doc)
self.assertFalse("_id" in l)
def test_options(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=4096)
result = db.test.options()
self.assertEqual(result, {"capped": True, 'size': 4096})
db.drop_collection("test")
def test_insert_one(self):
db = self.db
db.test.drop()
document = {"_id": 1000}
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertTrue(isinstance(result.inserted_id, int))
self.assertEqual(document["_id"], result.inserted_id)
self.assertTrue(result.acknowledged)
self.assertIsNotNone(db.test.find_one({"_id": document["_id"]}))
self.assertEqual(1, db.test.count_documents({}))
document = {"foo": "bar"}
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertTrue(isinstance(result.inserted_id, ObjectId))
self.assertEqual(document["_id"], result.inserted_id)
self.assertTrue(result.acknowledged)
self.assertIsNotNone(db.test.find_one({"_id": document["_id"]}))
self.assertEqual(2, db.test.count_documents({}))
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertTrue(isinstance(result.inserted_id, ObjectId))
self.assertEqual(document["_id"], result.inserted_id)
self.assertFalse(result.acknowledged)
# The insert failed duplicate key...
wait_until(lambda: 2 == db.test.count_documents({}),
'forcing duplicate key error')
document = RawBSONDocument(
encode({'_id': ObjectId(), 'foo': 'bar'}))
result = db.test.insert_one(document)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertEqual(result.inserted_id, None)
def test_insert_many(self):
db = self.db
db.test.drop()
docs = [{} for _ in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertTrue(isinstance(result.inserted_ids, list))
self.assertEqual(5, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, ObjectId))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count_documents({'_id': _id}))
self.assertTrue(result.acknowledged)
docs = [{"_id": i} for i in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertTrue(isinstance(result.inserted_ids, list))
self.assertEqual(5, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, int))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count_documents({"_id": _id}))
self.assertTrue(result.acknowledged)
docs = [RawBSONDocument(encode({"_id": i + 5}))
for i in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertTrue(isinstance(result.inserted_ids, list))
self.assertEqual([], result.inserted_ids)
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
docs = [{} for _ in range(5)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
self.assertFalse(result.acknowledged)
self.assertEqual(20, db.test.count_documents({}))
def test_insert_many_generator(self):
coll = self.db.test
coll.delete_many({})
def gen():
yield {'a': 1, 'b': 1}
yield {'a': 1, 'b': 2}
yield {'a': 2, 'b': 3}
yield {'a': 3, 'b': 5}
yield {'a': 5, 'b': 8}
result = coll.insert_many(gen())
self.assertEqual(5, len(result.inserted_ids))
def test_insert_many_invalid(self):
db = self.db
with self.assertRaisesRegex(
TypeError, "documents must be a non-empty list"):
db.test.insert_many({})
with self.assertRaisesRegex(
TypeError, "documents must be a non-empty list"):
db.test.insert_many([])
with self.assertRaisesRegex(
TypeError, "documents must be a non-empty list"):
db.test.insert_many(1)
with self.assertRaisesRegex(
TypeError, "documents must be a non-empty list"):
db.test.insert_many(RawBSONDocument(encode({'_id': 2})))
def test_delete_one(self):
self.db.test.drop()
self.db.test.insert_one({"x": 1})
self.db.test.insert_one({"y": 1})
self.db.test.insert_one({"z": 1})
result = self.db.test.delete_one({"x": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertEqual(1, result.deleted_count)
self.assertTrue(result.acknowledged)
self.assertEqual(2, self.db.test.count_documents({}))
result = self.db.test.delete_one({"y": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertEqual(1, result.deleted_count)
self.assertTrue(result.acknowledged)
self.assertEqual(1, self.db.test.count_documents({}))
db = self.db.client.get_database(self.db.name,
write_concern=WriteConcern(w=0))
result = db.test.delete_one({"z": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertRaises(InvalidOperation, lambda: result.deleted_count)
self.assertFalse(result.acknowledged)
wait_until(lambda: 0 == db.test.count_documents({}), 'delete 1 documents')
def test_delete_many(self):
self.db.test.drop()
self.db.test.insert_one({"x": 1})
self.db.test.insert_one({"x": 1})
self.db.test.insert_one({"y": 1})
self.db.test.insert_one({"y": 1})
result = self.db.test.delete_many({"x": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertEqual(2, result.deleted_count)
self.assertTrue(result.acknowledged)
self.assertEqual(0, self.db.test.count_documents({"x": 1}))
db = self.db.client.get_database(self.db.name,
write_concern=WriteConcern(w=0))
result = db.test.delete_many({"y": 1})
self.assertTrue(isinstance(result, DeleteResult))
self.assertRaises(InvalidOperation, lambda: result.deleted_count)
self.assertFalse(result.acknowledged)
wait_until(
lambda: 0 == db.test.count_documents({}), 'delete 2 documents')
def test_command_document_too_large(self):
large = '*' * (client_context.max_bson_size + _COMMAND_OVERHEAD)
coll = self.db.test
self.assertRaises(
DocumentTooLarge, coll.insert_one, {'data': large})
# update_one and update_many are the same
self.assertRaises(
DocumentTooLarge, coll.replace_one, {}, {'data': large})
self.assertRaises(
DocumentTooLarge, coll.delete_one, {'data': large})
def test_write_large_document(self):
max_size = client_context.max_bson_size
half_size = int(max_size / 2)
max_str = "x" * max_size
half_str = "x" * half_size
self.assertEqual(max_size, 16777216)
self.assertRaises(OperationFailure, self.db.test.insert_one,
{"foo": max_str})
self.assertRaises(OperationFailure, self.db.test.replace_one,
{}, {"foo": max_str}, upsert=True)
self.assertRaises(OperationFailure, self.db.test.insert_many,
[{"x": 1}, {"foo": max_str}])
self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}])
self.db.test.insert_one({"bar": "x"})
# Use w=0 here to test legacy doc size checking in all server versions
unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0))
self.assertRaises(DocumentTooLarge, unack_coll.replace_one,
{"bar": "x"}, {"bar": "x" * (max_size - 14)})
self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)})
def test_insert_bypass_document_validation(self):
db = self.db
db.test.drop()
db.create_collection("test", validator={"a": {"$exists": True}})
db_w0 = self.db.client.get_database(
self.db.name, write_concern=WriteConcern(w=0))
# Test insert_one
self.assertRaises(OperationFailure, db.test.insert_one,
{"_id": 1, "x": 100})
result = db.test.insert_one({"_id": 1, "x": 100},
bypass_document_validation=True)
self.assertTrue(isinstance(result, InsertOneResult))
self.assertEqual(1, result.inserted_id)
result = db.test.insert_one({"_id":2, "a":0})
self.assertTrue(isinstance(result, InsertOneResult))
self.assertEqual(2, result.inserted_id)
db_w0.test.insert_one({"y": 1}, bypass_document_validation=True)
wait_until(lambda: db_w0.test.find_one({"y": 1}),
"find w:0 inserted document")
# Test insert_many
docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)]
self.assertRaises(OperationFailure, db.test.insert_many, docs)
result = db.test.insert_many(docs, bypass_document_validation=True)
self.assertTrue(isinstance(result, InsertManyResult))
        self.assertEqual(97, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, int))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count_documents({"x": doc["x"]}))
self.assertTrue(result.acknowledged)
docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)]
result = db.test.insert_many(docs)
self.assertTrue(isinstance(result, InsertManyResult))
        self.assertEqual(100, len(result.inserted_ids))
for doc in docs:
_id = doc["_id"]
self.assertTrue(isinstance(_id, int))
self.assertTrue(_id in result.inserted_ids)
self.assertEqual(1, db.test.count_documents({"a": doc["a"]}))
self.assertTrue(result.acknowledged)
self.assertRaises(OperationFailure, db_w0.test.insert_many,
[{"x": 1}, {"x": 2}],
bypass_document_validation=True)
def test_replace_bypass_document_validation(self):
db = self.db
db.test.drop()
db.create_collection("test", validator={"a": {"$exists": True}})
db_w0 = self.db.client.get_database(
self.db.name, write_concern=WriteConcern(w=0))
# Test replace_one
db.test.insert_one({"a": 101})
self.assertRaises(OperationFailure, db.test.replace_one,
{"a": 101}, {"y": 1})
self.assertEqual(0, db.test.count_documents({"y": 1}))
self.assertEqual(1, db.test.count_documents({"a": 101}))
db.test.replace_one({"a": 101}, {"y": 1},
bypass_document_validation=True)
self.assertEqual(0, db.test.count_documents({"a": 101}))
self.assertEqual(1, db.test.count_documents({"y": 1}))
db.test.replace_one({"y": 1}, {"a": 102})
self.assertEqual(0, db.test.count_documents({"y": 1}))
self.assertEqual(0, db.test.count_documents({"a": 101}))
self.assertEqual(1, db.test.count_documents({"a": 102}))
db.test.insert_one({"y": 1}, bypass_document_validation=True)
self.assertRaises(OperationFailure, db.test.replace_one,
{"y": 1}, {"x": 101})
self.assertEqual(0, db.test.count_documents({"x": 101}))
self.assertEqual(1, db.test.count_documents({"y": 1}))
db.test.replace_one({"y": 1}, {"x": 101},
bypass_document_validation=True)
self.assertEqual(0, db.test.count_documents({"y": 1}))
self.assertEqual(1, db.test.count_documents({"x": 101}))
db.test.replace_one({"x": 101}, {"a": 103},
bypass_document_validation=False)
self.assertEqual(0, db.test.count_documents({"x": 101}))
self.assertEqual(1, db.test.count_documents({"a": 103}))
db.test.insert_one({"y": 1}, bypass_document_validation=True)
db_w0.test.replace_one({"y": 1}, {"x": 1},
bypass_document_validation=True)
wait_until(lambda: db_w0.test.find_one({"x": 1}),
"find w:0 replaced document")
def test_update_bypass_document_validation(self):
db = self.db
db.test.drop()
db.test.insert_one({"z": 5})
db.command(SON([("collMod", "test"),
("validator", {"z": {"$gte": 0}})]))
db_w0 = self.db.client.get_database(
self.db.name, write_concern=WriteConcern(w=0))
# Test update_one
self.assertRaises(OperationFailure, db.test.update_one,
{"z": 5}, {"$inc": {"z": -10}})
self.assertEqual(0, db.test.count_documents({"z": -5}))
self.assertEqual(1, db.test.count_documents({"z": 5}))
db.test.update_one({"z": 5}, {"$inc": {"z": -10}},
bypass_document_validation=True)
self.assertEqual(0, db.test.count_documents({"z": 5}))
self.assertEqual(1, db.test.count_documents({"z": -5}))
db.test.update_one({"z": -5}, {"$inc": {"z": 6}},
bypass_document_validation=False)
self.assertEqual(1, db.test.count_documents({"z": 1}))
self.assertEqual(0, db.test.count_documents({"z": -5}))
db.test.insert_one({"z": -10},
bypass_document_validation=True)
self.assertRaises(OperationFailure, db.test.update_one,
{"z": -10}, {"$inc": {"z": 1}})
self.assertEqual(0, db.test.count_documents({"z": -9}))
self.assertEqual(1, db.test.count_documents({"z": -10}))
db.test.update_one({"z": -10}, {"$inc": {"z": 1}},
bypass_document_validation=True)
self.assertEqual(1, db.test.count_documents({"z": -9}))
self.assertEqual(0, db.test.count_documents({"z": -10}))
db.test.update_one({"z": -9}, {"$inc": {"z": 9}},
bypass_document_validation=False)
self.assertEqual(0, db.test.count_documents({"z": -9}))
self.assertEqual(1, db.test.count_documents({"z": 0}))
db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True)
db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}},
bypass_document_validation=True)
wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}),
"find w:0 updated document")
# Test update_many
db.test.insert_many([{"z": i} for i in range(3, 101)])
db.test.insert_one({"y": 0},
bypass_document_validation=True)
self.assertRaises(OperationFailure, db.test.update_many, {},
{"$inc": {"z": -100}})
self.assertEqual(100, db.test.count_documents({"z": {"$gte": 0}}))
self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}}))
self.assertEqual(0, db.test.count_documents({"y": 0, "z": -100}))
db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}},
bypass_document_validation=True)
self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}}))
self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}}))
db.test.update_many({"z": {"$gt": -50}}, {"$inc": {"z": 100}},
bypass_document_validation=False)
self.assertEqual(50, db.test.count_documents({"z": {"$gt": 0}}))
self.assertEqual(50, db.test.count_documents({"z": {"$lt": 0}}))
db.test.insert_many([{"z": -i} for i in range(50)],
bypass_document_validation=True)
self.assertRaises(OperationFailure, db.test.update_many,
{}, {"$inc": {"z": 1}})
self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}}))
self.assertEqual(50, db.test.count_documents({"z": {"$gt": 1}}))
db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}},
bypass_document_validation=True)
self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}}))
self.assertEqual(150, db.test.count_documents({"z": {"$lte": 0}}))
db.test.update_many({"z": {"$lte": 0}}, {"$inc": {"z": 100}},
bypass_document_validation=False)
self.assertEqual(150, db.test.count_documents({"z": {"$gte": 0}}))
self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}}))
db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True)
db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True)
db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}},
bypass_document_validation=True)
wait_until(
lambda: db_w0.test.count_documents({"m": 1, "x": 1}) == 2,
"find w:0 updated documents")
def test_bypass_document_validation_bulk_write(self):
db = self.db
db.test.drop()
db.create_collection("test", validator={"a": {"$gte": 0}})
db_w0 = self.db.client.get_database(
self.db.name, write_concern=WriteConcern(w=0))
ops = [InsertOne({"a": -10}),
InsertOne({"a": -11}),
InsertOne({"a": -12}),
UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}),
UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}),
ReplaceOne({"a": {"$lte": -10}}, {"a": -1})]
db.test.bulk_write(ops, bypass_document_validation=True)
self.assertEqual(3, db.test.count_documents({}))
self.assertEqual(1, db.test.count_documents({"a": -11}))
self.assertEqual(1, db.test.count_documents({"a": -1}))
self.assertEqual(1, db.test.count_documents({"a": -9}))
# Assert that the operations would fail without bypass_doc_val
for op in ops:
self.assertRaises(BulkWriteError, db.test.bulk_write, [op])
self.assertRaises(OperationFailure, db_w0.test.bulk_write, ops,
bypass_document_validation=True)
def test_find_by_default_dct(self):
db = self.db
db.test.insert_one({'foo': 'bar'})
dct = defaultdict(dict, [('foo', 'bar')])
self.assertIsNotNone(db.test.find_one(dct))
self.assertEqual(dct, defaultdict(dict, [('foo', 'bar')]))
def test_find_w_fields(self):
db = self.db
db.test.delete_many({})
db.test.insert_one({"x": 1, "mike": "awesome",
"extra thing": "abcdefghijklmnopqrstuvwxyz"})
self.assertEqual(1, db.test.count_documents({}))
doc = next(db.test.find({}))
self.assertTrue("x" in doc)
doc = next(db.test.find({}))
self.assertTrue("mike" in doc)
doc = next(db.test.find({}))
self.assertTrue("extra thing" in doc)
doc = next(db.test.find({}, ["x", "mike"]))
self.assertTrue("x" in doc)
doc = next(db.test.find({}, ["x", "mike"]))
self.assertTrue("mike" in doc)
doc = next(db.test.find({}, ["x", "mike"]))
self.assertFalse("extra thing" in doc)
doc = next(db.test.find({}, ["mike"]))
self.assertFalse("x" in doc)
doc = next(db.test.find({}, ["mike"]))
self.assertTrue("mike" in doc)
doc = next(db.test.find({}, ["mike"]))
self.assertFalse("extra thing" in doc)
def test_fields_specifier_as_dict(self):
db = self.db
db.test.delete_many({})
db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"})
self.assertEqual([1, 2, 3], db.test.find_one()["x"])
self.assertEqual([2, 3],
db.test.find_one(
projection={"x": {"$slice": -2}})["x"])
self.assertTrue("x" not in db.test.find_one(projection={"x": 0}))
self.assertTrue("mike" in db.test.find_one(projection={"x": 0}))
def test_find_w_regex(self):
db = self.db
db.test.delete_many({})
db.test.insert_one({"x": "hello_world"})
db.test.insert_one({"x": "hello_mike"})
db.test.insert_one({"x": "hello_mikey"})
db.test.insert_one({"x": "hello_test"})
self.assertEqual(len(list(db.test.find())), 4)
self.assertEqual(len(list(db.test.find({"x":
re.compile("^hello.*")}))), 4)
self.assertEqual(len(list(db.test.find({"x":
re.compile("ello")}))), 4)
self.assertEqual(len(list(db.test.find({"x":
re.compile("^hello$")}))), 0)
self.assertEqual(len(list(db.test.find({"x":
re.compile("^hello_mi.*$")}))), 2)
def test_id_can_be_anything(self):
db = self.db
db.test.delete_many({})
auto_id = {"hello": "world"}
db.test.insert_one(auto_id)
self.assertTrue(isinstance(auto_id["_id"], ObjectId))
numeric = {"_id": 240, "hello": "world"}
db.test.insert_one(numeric)
self.assertEqual(numeric["_id"], 240)
obj = {"_id": numeric, "hello": "world"}
db.test.insert_one(obj)
self.assertEqual(obj["_id"], numeric)
for x in db.test.find():
self.assertEqual(x["hello"], "world")
self.assertTrue("_id" in x)
def test_unique_index(self):
db = self.db
db.drop_collection("test")
db.test.create_index("hello")
# No error.
db.test.insert_one({"hello": "world"})
db.test.insert_one({"hello": "world"})
db.drop_collection("test")
db.test.create_index("hello", unique=True)
with self.assertRaises(DuplicateKeyError):
db.test.insert_one({"hello": "world"})
db.test.insert_one({"hello": "world"})
def test_duplicate_key_error(self):
db = self.db
db.drop_collection("test")
db.test.create_index("x", unique=True)
db.test.insert_one({"_id": 1, "x": 1})
with self.assertRaises(DuplicateKeyError) as context:
db.test.insert_one({"x": 1})
self.assertIsNotNone(context.exception.details)
with self.assertRaises(DuplicateKeyError) as context:
db.test.insert_one({"x": 1})
self.assertIsNotNone(context.exception.details)
self.assertEqual(1, db.test.count_documents({}))
def test_write_error_text_handling(self):
db = self.db
db.drop_collection("test")
db.test.create_index("text", unique=True)
# Test workaround for SERVER-24007
data = (b'a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83'
b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83')
        text = utf_8_decode(data, None, True)[0]
db.test.insert_one({"text": text})
# Should raise DuplicateKeyError, not InvalidBSON
self.assertRaises(DuplicateKeyError,
db.test.insert_one,
{"text": text})
self.assertRaises(DuplicateKeyError,
db.test.replace_one,
{"_id": ObjectId()},
{"text": text},
upsert=True)
# Should raise BulkWriteError, not InvalidBSON
self.assertRaises(BulkWriteError,
db.test.insert_many,
[{"text": text}])
def test_write_error_unicode(self):
coll = self.db.test
self.addCleanup(coll.drop)
coll.create_index('a', unique=True)
coll.insert_one({'a': 'unicode \U0001f40d'})
with self.assertRaisesRegex(
DuplicateKeyError,
'E11000 duplicate key error') as ctx:
coll.insert_one({'a': 'unicode \U0001f40d'})
# Once more for good measure.
self.assertIn('E11000 duplicate key error',
str(ctx.exception))
def test_wtimeout(self):
# Ensure setting wtimeout doesn't disable write concern altogether.
# See SERVER-12596.
collection = self.db.test
collection.drop()
collection.insert_one({'_id': 1})
coll = collection.with_options(
write_concern=WriteConcern(w=1, wtimeout=1000))
self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1})
coll = collection.with_options(
write_concern=WriteConcern(wtimeout=1000))
self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1})
def test_error_code(self):
try:
self.db.test.update_many({}, {"$thismodifierdoesntexist": 1})
except OperationFailure as exc:
self.assertTrue(exc.code in (9, 10147, 16840, 17009))
# Just check that we set the error document. Fields
# vary by MongoDB version.
self.assertTrue(exc.details is not None)
else:
self.fail("OperationFailure was not raised")
def test_index_on_subfield(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"hello": {"a": 4, "b": 5}})
db.test.insert_one({"hello": {"a": 7, "b": 2}})
db.test.insert_one({"hello": {"a": 4, "b": 10}})
db.drop_collection("test")
db.test.create_index("hello.a", unique=True)
db.test.insert_one({"hello": {"a": 4, "b": 5}})
db.test.insert_one({"hello": {"a": 7, "b": 2}})
self.assertRaises(DuplicateKeyError,
db.test.insert_one,
{"hello": {"a": 4, "b": 10}})
def test_replace_one(self):
db = self.db
db.drop_collection("test")
self.assertRaises(ValueError,
lambda: db.test.replace_one({}, {"$set": {"x": 1}}))
id1 = db.test.insert_one({"x": 1}).inserted_id
result = db.test.replace_one({"x": 1}, {"y": 1})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count_documents({"y": 1}))
self.assertEqual(0, db.test.count_documents({"x": 1}))
self.assertEqual(db.test.find_one(id1)["y"], 1)
replacement = RawBSONDocument(encode({"_id": id1, "z": 1}))
result = db.test.replace_one({"y": 1}, replacement, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count_documents({"z": 1}))
self.assertEqual(0, db.test.count_documents({"y": 1}))
self.assertEqual(db.test.find_one(id1)["z"], 1)
result = db.test.replace_one({"x": 2}, {"y": 2}, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(0, result.matched_count)
self.assertTrue(result.modified_count in (None, 0))
self.assertTrue(isinstance(result.upserted_id, ObjectId))
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count_documents({"y": 2}))
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.replace_one({"x": 0}, {"y": 0})
self.assertTrue(isinstance(result, UpdateResult))
self.assertRaises(InvalidOperation, lambda: result.matched_count)
self.assertRaises(InvalidOperation, lambda: result.modified_count)
self.assertRaises(InvalidOperation, lambda: result.upserted_id)
self.assertFalse(result.acknowledged)
def test_update_one(self):
db = self.db
db.drop_collection("test")
self.assertRaises(ValueError,
lambda: db.test.update_one({}, {"x": 1}))
id1 = db.test.insert_one({"x": 5}).inserted_id
result = db.test.update_one({}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(db.test.find_one(id1)["x"], 6)
id2 = db.test.insert_one({"x": 1}).inserted_id
result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(db.test.find_one(id1)["x"], 7)
self.assertEqual(db.test.find_one(id2)["x"], 1)
result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(0, result.matched_count)
self.assertTrue(result.modified_count in (None, 0))
self.assertTrue(isinstance(result.upserted_id, ObjectId))
self.assertTrue(result.acknowledged)
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertRaises(InvalidOperation, lambda: result.matched_count)
self.assertRaises(InvalidOperation, lambda: result.modified_count)
self.assertRaises(InvalidOperation, lambda: result.upserted_id)
self.assertFalse(result.acknowledged)
def test_update_many(self):
db = self.db
db.drop_collection("test")
self.assertRaises(ValueError,
lambda: db.test.update_many({}, {"x": 1}))
db.test.insert_one({"x": 4, "y": 3})
db.test.insert_one({"x": 5, "y": 5})
db.test.insert_one({"x": 4, "y": 4})
result = db.test.update_many({"x": 4}, {"$set": {"y": 5}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(2, result.matched_count)
self.assertTrue(result.modified_count in (None, 2))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(3, db.test.count_documents({"y": 5}))
result = db.test.update_many({"x": 5}, {"$set": {"y": 6}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(1, result.matched_count)
self.assertTrue(result.modified_count in (None, 1))
self.assertIsNone(result.upserted_id)
self.assertTrue(result.acknowledged)
self.assertEqual(1, db.test.count_documents({"y": 6}))
result = db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True)
self.assertTrue(isinstance(result, UpdateResult))
self.assertEqual(0, result.matched_count)
self.assertTrue(result.modified_count in (None, 0))
self.assertTrue(isinstance(result.upserted_id, ObjectId))
self.assertTrue(result.acknowledged)
db = db.client.get_database(db.name,
write_concern=WriteConcern(w=0))
result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}})
self.assertTrue(isinstance(result, UpdateResult))
self.assertRaises(InvalidOperation, lambda: result.matched_count)
self.assertRaises(InvalidOperation, lambda: result.modified_count)
self.assertRaises(InvalidOperation, lambda: result.upserted_id)
self.assertFalse(result.acknowledged)
def test_update_check_keys(self):
self.db.drop_collection("test")
self.assertTrue(self.db.test.insert_one({"hello": "world"}))
# Modify shouldn't check keys...
self.assertTrue(self.db.test.update_one({"hello": "world"},
{"$set": {"foo.bar": "baz"}},
upsert=True))
# I know this seems like testing the server but I'd like to be notified
# by CI if the server's behavior changes here.
doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")])
self.assertRaises(OperationFailure, self.db.test.update_one,
{"hello": "world"}, doc, upsert=True)
# This is going to cause keys to be checked and raise InvalidDocument.
# That's OK assuming the server's behavior in the previous assert
# doesn't change. If the behavior changes checking the first key for
# '$' in update won't be good enough anymore.
doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})])
self.assertRaises(OperationFailure, self.db.test.replace_one,
{"hello": "world"}, doc, upsert=True)
# Replace with empty document
self.assertNotEqual(0,
self.db.test.replace_one(
{"hello": "world"}, {}).matched_count)
def test_acknowledged_delete(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=1000)
db.test.insert_one({"x": 1})
self.assertEqual(1, db.test.count_documents({}))
# Can't remove from capped collection.
self.assertRaises(OperationFailure, db.test.delete_one, {"x": 1})
db.drop_collection("test")
db.test.insert_one({"x": 1})
db.test.insert_one({"x": 1})
self.assertEqual(2, db.test.delete_many({}).deleted_count)
self.assertEqual(0, db.test.delete_many({}).deleted_count)
@client_context.require_version_max(4, 9)
def test_manual_last_error(self):
coll = self.db.get_collection("test", write_concern=WriteConcern(w=0))
coll.insert_one({"x": 1})
self.db.command("getlasterror", w=1, wtimeout=1)
def test_count_documents(self):
db = self.db
db.drop_collection("test")
self.addCleanup(db.drop_collection, "test")
self.assertEqual(db.test.count_documents({}), 0)
db.wrong.insert_many([{}, {}])
self.assertEqual(db.test.count_documents({}), 0)
db.test.insert_many([{}, {}])
self.assertEqual(db.test.count_documents({}), 2)
db.test.insert_many([{'foo': 'bar'}, {'foo': 'baz'}])
self.assertEqual(db.test.count_documents({'foo': 'bar'}), 1)
self.assertEqual(
db.test.count_documents({'foo': re.compile(r'ba.*')}), 2)
def test_estimated_document_count(self):
db = self.db
db.drop_collection("test")
self.addCleanup(db.drop_collection, "test")
self.assertEqual(db.test.estimated_document_count(), 0)
db.wrong.insert_many([{}, {}])
self.assertEqual(db.test.estimated_document_count(), 0)
db.test.insert_many([{}, {}])
self.assertEqual(db.test.estimated_document_count(), 2)
def test_aggregate(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({'foo': [1, 2]})
self.assertRaises(TypeError, db.test.aggregate, "wow")
pipeline = {"$project": {"_id": False, "foo": True}}
result = db.test.aggregate([pipeline])
self.assertTrue(isinstance(result, CommandCursor))
self.assertEqual([{'foo': [1, 2]}], list(result))
# Test write concern.
with self.write_concern_collection() as coll:
coll.aggregate([{'$out': 'output-collection'}])
def test_aggregate_raw_bson(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({'foo': [1, 2]})
self.assertRaises(TypeError, db.test.aggregate, "wow")
pipeline = {"$project": {"_id": False, "foo": True}}
coll = db.get_collection(
'test',
codec_options=CodecOptions(document_class=RawBSONDocument))
result = coll.aggregate([pipeline])
self.assertTrue(isinstance(result, CommandCursor))
first_result = next(result)
self.assertIsInstance(first_result, RawBSONDocument)
self.assertEqual([1, 2], list(first_result['foo']))
def test_aggregation_cursor_validation(self):
db = self.db
projection = {'$project': {'_id': '$_id'}}
cursor = db.test.aggregate([projection], cursor={})
self.assertTrue(isinstance(cursor, CommandCursor))
def test_aggregation_cursor(self):
db = self.db
if client_context.has_secondaries:
# Test that getMore messages are sent to the right server.
db = self.client.get_database(
db.name,
read_preference=ReadPreference.SECONDARY,
write_concern=WriteConcern(w=self.w))
for collection_size in (10, 1000):
db.drop_collection("test")
db.test.insert_many([{'_id': i} for i in range(collection_size)])
expected_sum = sum(range(collection_size))
# Use batchSize to ensure multiple getMore messages
cursor = db.test.aggregate(
[{'$project': {'_id': '$_id'}}],
batchSize=5)
self.assertEqual(
expected_sum,
sum(doc['_id'] for doc in cursor))
# Test that batchSize is handled properly.
cursor = db.test.aggregate([], batchSize=5)
self.assertEqual(5, len(cursor._CommandCursor__data))
# Force a getMore
cursor._CommandCursor__data.clear()
next(cursor)
# batchSize - 1
self.assertEqual(4, len(cursor._CommandCursor__data))
# Exhaust the cursor. There shouldn't be any errors.
for doc in cursor:
pass
def test_aggregation_cursor_alive(self):
self.db.test.delete_many({})
self.db.test.insert_many([{} for _ in range(3)])
self.addCleanup(self.db.test.delete_many, {})
cursor = self.db.test.aggregate(pipeline=[], cursor={'batchSize': 2})
n = 0
while True:
cursor.next()
n += 1
if 3 == n:
self.assertFalse(cursor.alive)
break
self.assertTrue(cursor.alive)
def test_large_limit(self):
db = self.db
db.drop_collection("test_large_limit")
db.test_large_limit.create_index([('x', 1)])
my_str = "mongomongo" * 1000
db.test_large_limit.insert_many(
{"x": i, "y": my_str} for i in range(2000))
i = 0
y = 0
for doc in db.test_large_limit.find(limit=1900).sort([('x', 1)]):
i += 1
y += doc["x"]
self.assertEqual(1900, i)
self.assertEqual((1900 * 1899) / 2, y)
def test_find_kwargs(self):
db = self.db
db.drop_collection("test")
db.test.insert_many({"x": i} for i in range(10))
self.assertEqual(10, db.test.count_documents({}))
total = 0
for x in db.test.find({}, skip=4, limit=2):
total += x["x"]
self.assertEqual(9, total)
def test_rename(self):
db = self.db
db.drop_collection("test")
db.drop_collection("foo")
self.assertRaises(TypeError, db.test.rename, 5)
self.assertRaises(InvalidName, db.test.rename, "")
self.assertRaises(InvalidName, db.test.rename, "te$t")
self.assertRaises(InvalidName, db.test.rename, ".test")
self.assertRaises(InvalidName, db.test.rename, "test.")
self.assertRaises(InvalidName, db.test.rename, "tes..t")
self.assertEqual(0, db.test.count_documents({}))
self.assertEqual(0, db.foo.count_documents({}))
db.test.insert_many({"x": i} for i in range(10))
self.assertEqual(10, db.test.count_documents({}))
db.test.rename("foo")
self.assertEqual(0, db.test.count_documents({}))
self.assertEqual(10, db.foo.count_documents({}))
x = 0
for doc in db.foo.find():
self.assertEqual(x, doc["x"])
x += 1
db.test.insert_one({})
self.assertRaises(OperationFailure, db.foo.rename, "test")
db.foo.rename("test", dropTarget=True)
with self.write_concern_collection() as coll:
coll.rename('foo')
def test_find_one(self):
db = self.db
db.drop_collection("test")
_id = db.test.insert_one({"hello": "world", "foo": "bar"}).inserted_id
self.assertEqual("world", db.test.find_one()["hello"])
self.assertEqual(db.test.find_one(_id), db.test.find_one())
self.assertEqual(db.test.find_one(None), db.test.find_one())
self.assertEqual(db.test.find_one({}), db.test.find_one())
self.assertEqual(db.test.find_one({"hello": "world"}),
db.test.find_one())
self.assertTrue("hello" in db.test.find_one(projection=["hello"]))
self.assertTrue("hello" not in db.test.find_one(projection=["foo"]))
self.assertTrue("hello" in db.test.find_one(projection=("hello",)))
self.assertTrue("hello" not in db.test.find_one(projection=("foo",)))
self.assertTrue("hello" in db.test.find_one(projection=set(["hello"])))
self.assertTrue("hello" not in db.test.find_one(projection=set(["foo"])))
self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"])))
self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"])))
self.assertEqual(["_id"], list(db.test.find_one(projection={'_id':
True})))
self.assertTrue("hello" in list(db.test.find_one(projection={})))
self.assertTrue("hello" in list(db.test.find_one(projection=[])))
self.assertEqual(None, db.test.find_one({"hello": "foo"}))
self.assertEqual(None, db.test.find_one(ObjectId()))
def test_find_one_non_objectid(self):
db = self.db
db.drop_collection("test")
db.test.insert_one({"_id": 5})
self.assertTrue(db.test.find_one(5))
self.assertFalse(db.test.find_one(6))
def test_find_one_with_find_args(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{"x": i} for i in range(1, 4)])
self.assertEqual(1, db.test.find_one()["x"])
self.assertEqual(2, db.test.find_one(skip=1, limit=2)["x"])
def test_find_with_sort(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{"x": 2}, {"x": 1}, {"x": 3}])
self.assertEqual(2, db.test.find_one()["x"])
self.assertEqual(1, db.test.find_one(sort=[("x", 1)])["x"])
self.assertEqual(3, db.test.find_one(sort=[("x", -1)])["x"])
def to_list(things):
return [thing["x"] for thing in things]
self.assertEqual([2, 1, 3], to_list(db.test.find()))
self.assertEqual([1, 2, 3], to_list(db.test.find(sort=[("x", 1)])))
self.assertEqual([3, 2, 1], to_list(db.test.find(sort=[("x", -1)])))
self.assertRaises(TypeError, db.test.find, sort=5)
self.assertRaises(TypeError, db.test.find, sort="hello")
self.assertRaises(ValueError, db.test.find, sort=["hello", 1])
# TODO doesn't actually test functionality, just that it doesn't blow up
def test_cursor_timeout(self):
list(self.db.test.find(no_cursor_timeout=True))
list(self.db.test.find(no_cursor_timeout=False))
def test_exhaust(self):
if is_mongos(self.db.client):
self.assertRaises(InvalidOperation,
self.db.test.find,
cursor_type=CursorType.EXHAUST)
return
# Limit is incompatible with exhaust.
self.assertRaises(InvalidOperation,
self.db.test.find,
cursor_type=CursorType.EXHAUST,
limit=5)
cur = self.db.test.find(cursor_type=CursorType.EXHAUST)
self.assertRaises(InvalidOperation, cur.limit, 5)
cur = self.db.test.find(limit=5)
self.assertRaises(InvalidOperation, cur.add_option, 64)
cur = self.db.test.find()
cur.add_option(64)
self.assertRaises(InvalidOperation, cur.limit, 5)
self.db.drop_collection("test")
# Insert enough documents to require more than one batch
self.db.test.insert_many([{'i': i} for i in range(150)])
client = rs_or_single_client(maxPoolSize=1)
self.addCleanup(client.close)
pool = get_pool(client)
# Make sure the socket is returned after exhaustion.
cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST)
next(cur)
self.assertEqual(0, len(pool.sockets))
for _ in cur:
pass
self.assertEqual(1, len(pool.sockets))
# Same as previous but don't call next()
for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST):
pass
self.assertEqual(1, len(pool.sockets))
# If the Cursor instance is discarded before being completely iterated
# and the socket has pending data (more_to_come=True) we have to close
# and discard the socket.
cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST,
batch_size=2)
if client_context.version.at_least(4, 2):
# On 4.2+ we use OP_MSG which only sets more_to_come=True after the
# first getMore.
for _ in range(3):
next(cur)
else:
next(cur)
self.assertEqual(0, len(pool.sockets))
if sys.platform.startswith('java') or 'PyPy' in sys.version:
# Don't wait for GC or use gc.collect(), it's unreliable.
cur.close()
cur = None
# Wait until the background thread returns the socket.
wait_until(lambda: pool.active_sockets == 0, 'return socket')
# The socket should be discarded.
self.assertEqual(0, len(pool.sockets))
def test_distinct(self):
self.db.drop_collection("test")
test = self.db.test
test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}])
distinct = test.distinct("a")
distinct.sort()
self.assertEqual([1, 2, 3], distinct)
distinct = test.find({'a': {'$gt': 1}}).distinct("a")
distinct.sort()
self.assertEqual([2, 3], distinct)
distinct = test.distinct('a', {'a': {'$gt': 1}})
distinct.sort()
self.assertEqual([2, 3], distinct)
self.db.drop_collection("test")
test.insert_one({"a": {"b": "a"}, "c": 12})
test.insert_one({"a": {"b": "b"}, "c": 12})
test.insert_one({"a": {"b": "c"}, "c": 12})
test.insert_one({"a": {"b": "c"}, "c": 12})
distinct = test.distinct("a.b")
distinct.sort()
self.assertEqual(["a", "b", "c"], distinct)
def test_query_on_query_field(self):
self.db.drop_collection("test")
self.db.test.insert_one({"query": "foo"})
self.db.test.insert_one({"bar": "foo"})
self.assertEqual(1,
self.db.test.count_documents({"query": {"$ne": None}}))
self.assertEqual(1,
len(list(self.db.test.find({"query": {"$ne": None}})))
)
def test_min_query(self):
self.db.drop_collection("test")
self.db.test.insert_many([{"x": 1}, {"x": 2}])
self.db.test.create_index("x")
cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}},
hint="x_1")
docs = list(cursor)
self.assertEqual(1, len(docs))
self.assertEqual(2, docs[0]["x"])
def test_numerous_inserts(self):
# Ensure we don't exceed the server's maxWriteBatchSize limit.
self.db.test.drop()
n_docs = client_context.max_write_batch_size + 100
self.db.test.insert_many([{} for _ in range(n_docs)])
self.assertEqual(n_docs, self.db.test.count_documents({}))
self.db.test.drop()
def test_insert_many_large_batch(self):
# Tests legacy insert.
db = self.client.test_insert_large_batch
self.addCleanup(self.client.drop_database, 'test_insert_large_batch')
max_bson_size = client_context.max_bson_size
# Write commands are limited to 16MB + 16k per batch
big_string = 'x' * int(max_bson_size / 2)
# Batch insert that requires 2 batches.
successful_insert = [{'x': big_string}, {'x': big_string},
{'x': big_string}, {'x': big_string}]
db.collection_0.insert_many(successful_insert)
self.assertEqual(4, db.collection_0.count_documents({}))
db.collection_0.drop()
# Test that inserts fail after first error.
insert_second_fails = [{'_id': 'id0', 'x': big_string},
{'_id': 'id0', 'x': big_string},
{'_id': 'id1', 'x': big_string},
{'_id': 'id2', 'x': big_string}]
with self.assertRaises(BulkWriteError):
db.collection_1.insert_many(insert_second_fails)
self.assertEqual(1, db.collection_1.count_documents({}))
db.collection_1.drop()
# 2 batches, 2nd insert fails, unacknowledged, ordered.
unack_coll = db.collection_2.with_options(
write_concern=WriteConcern(w=0))
unack_coll.insert_many(insert_second_fails)
wait_until(lambda: 1 == db.collection_2.count_documents({}),
'insert 1 document', timeout=60)
db.collection_2.drop()
# 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are
# dupes. Acknowledged, unordered.
insert_two_failures = [{'_id': 'id0', 'x': big_string},
{'_id': 'id0', 'x': big_string},
{'_id': 'id1', 'x': big_string},
{'_id': 'id1', 'x': big_string}]
with self.assertRaises(OperationFailure) as context:
db.collection_3.insert_many(insert_two_failures, ordered=False)
self.assertIn('id1', str(context.exception))
# Only the first and third documents should be inserted.
self.assertEqual(2, db.collection_3.count_documents({}))
db.collection_3.drop()
# 2 batches, 2 errors, unacknowledged, unordered.
unack_coll = db.collection_4.with_options(
write_concern=WriteConcern(w=0))
unack_coll.insert_many(insert_two_failures, ordered=False)
# Only the first and third documents are inserted.
wait_until(lambda: 2 == db.collection_4.count_documents({}),
'insert 2 documents', timeout=60)
db.collection_4.drop()
def test_messages_with_unicode_collection_names(self):
db = self.db
db["Employés"].insert_one({"x": 1})
db["Employés"].replace_one({"x": 1}, {"x": 2})
db["Employés"].delete_many({})
db["Employés"].find_one()
list(db["Employés"].find())
def test_drop_indexes_non_existent(self):
self.db.drop_collection("test")
self.db.test.drop_indexes()
# This is really a bson test but easier to just reproduce it here...
# (Shame on me)
def test_bad_encode(self):
c = self.db.test
c.drop()
self.assertRaises(InvalidDocument, c.insert_one, {"x": c})
class BadGetAttr(dict):
def __getattr__(self, name):
pass
bad = BadGetAttr([('foo', 'bar')])
c.insert_one({'bad': bad})
self.assertEqual('bar', c.find_one()['bad']['foo'])
def test_array_filters_validation(self):
# array_filters must be a list.
c = self.db.test
with self.assertRaises(TypeError):
c.update_one({}, {'$set': {'a': 1}}, array_filters={})
with self.assertRaises(TypeError):
c.update_many({}, {'$set': {'a': 1}}, array_filters={})
with self.assertRaises(TypeError):
c.find_one_and_update({}, {'$set': {'a': 1}}, array_filters={})
def test_array_filters_unacknowledged(self):
c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0))
with self.assertRaises(ConfigurationError):
c_w0.update_one({}, {'$set': {'y.$[i].b': 5}},
array_filters=[{'i.b': 1}])
with self.assertRaises(ConfigurationError):
c_w0.update_many({}, {'$set': {'y.$[i].b': 5}},
array_filters=[{'i.b': 1}])
with self.assertRaises(ConfigurationError):
c_w0.find_one_and_update({}, {'$set': {'y.$[i].b': 5}},
array_filters=[{'i.b': 1}])
def test_find_one_and(self):
c = self.db.test
c.drop()
c.insert_one({'_id': 1, 'i': 1})
self.assertEqual({'_id': 1, 'i': 1},
c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}}))
self.assertEqual({'_id': 1, 'i': 3},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
return_document=ReturnDocument.AFTER))
self.assertEqual({'_id': 1, 'i': 3},
c.find_one_and_delete({'_id': 1}))
self.assertEqual(None, c.find_one({'_id': 1}))
self.assertEqual(None,
c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}}))
self.assertEqual({'_id': 1, 'i': 1},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
return_document=ReturnDocument.AFTER,
upsert=True))
self.assertEqual({'_id': 1, 'i': 2},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
return_document=ReturnDocument.AFTER))
self.assertEqual({'_id': 1, 'i': 3},
c.find_one_and_replace(
{'_id': 1}, {'i': 3, 'j': 1},
projection=['i'],
return_document=ReturnDocument.AFTER))
self.assertEqual({'i': 4},
c.find_one_and_update(
{'_id': 1}, {'$inc': {'i': 1}},
projection={'i': 1, '_id': 0},
return_document=ReturnDocument.AFTER))
c.drop()
for j in range(5):
c.insert_one({'j': j, 'i': 0})
sort = [('j', DESCENDING)]
self.assertEqual(4, c.find_one_and_update({},
{'$inc': {'i': 1}},
sort=sort)['j'])
def test_find_one_and_write_concern(self):
listener = EventListener()
db = single_client(event_listeners=[listener])[self.db.name]
# non-default WriteConcern.
c_w0 = db.get_collection(
'test', write_concern=WriteConcern(w=0))
# default WriteConcern.
c_default = db.get_collection('test', write_concern=WriteConcern())
results = listener.results
# Authenticate the client and throw out auth commands from the listener.
db.command('ping')
results.clear()
c_w0.find_one_and_update(
{'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertEqual(
{'w': 0}, results['started'][0].command['writeConcern'])
results.clear()
c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'})
self.assertEqual(
{'w': 0}, results['started'][0].command['writeConcern'])
results.clear()
c_w0.find_one_and_delete({'_id': 1})
self.assertEqual(
{'w': 0}, results['started'][0].command['writeConcern'])
results.clear()
# Test write concern errors.
if client_context.is_rs:
c_wc_error = db.get_collection(
'test',
write_concern=WriteConcern(
w=len(client_context.nodes) + 1))
self.assertRaises(
WriteConcernError,
c_wc_error.find_one_and_update,
{'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertRaises(
WriteConcernError,
c_wc_error.find_one_and_replace,
{'w': 0}, results['started'][0].command['writeConcern'])
self.assertRaises(
WriteConcernError,
c_wc_error.find_one_and_delete,
{'w': 0}, results['started'][0].command['writeConcern'])
results.clear()
c_default.find_one_and_update({'_id': 1}, {'$set': {'foo': 'bar'}})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
c_default.find_one_and_replace({'_id': 1}, {'foo': 'bar'})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
c_default.find_one_and_delete({'_id': 1})
self.assertNotIn('writeConcern', results['started'][0].command)
results.clear()
def test_find_with_nested(self):
c = self.db.test
c.drop()
c.insert_many([{'i': i} for i in range(5)]) # [0, 1, 2, 3, 4]
self.assertEqual(
[2],
[i['i'] for i in c.find({
'$and': [
{
# This clause gives us [1,2,4]
'$or': [
{'i': {'$lte': 2}},
{'i': {'$gt': 3}},
],
},
{
# This clause gives us [2,3]
'$or': [
{'i': 2},
{'i': 3},
]
},
]
})]
)
self.assertEqual(
[0, 1, 2],
[i['i'] for i in c.find({
'$or': [
{
# This clause gives us [2]
'$and': [
{'i': {'$gte': 2}},
{'i': {'$lt': 3}},
],
},
{
# This clause gives us [0,1]
'$and': [
{'i': {'$gt': -100}},
{'i': {'$lt': 2}},
]
},
]
})]
)
def test_find_regex(self):
c = self.db.test
c.drop()
c.insert_one({'r': re.compile('.*')})
self.assertTrue(isinstance(c.find_one()['r'], Regex))
for doc in c.find():
self.assertTrue(isinstance(doc['r'], Regex))
def test_find_command_generation(self):
cmd = _gen_find_command('coll', {'$query': {'foo': 1}, '$dumb': 2},
None, 0, 0, 0, None, DEFAULT_READ_CONCERN,
None, None)
self.assertEqual(
cmd.to_dict(),
SON([('find', 'coll'),
('$dumb', 2),
('filter', {'foo': 1})]).to_dict())
def test_bool(self):
with self.assertRaises(NotImplementedError):
bool(Collection(self.db, 'test'))
@client_context.require_version_min(5, 0, 0)
def test_helpers_with_let(self):
c = self.db.test
helpers = [(c.delete_many, ({}, {})), (c.delete_one, ({}, {})),
(c.find, ({})), (c.update_many, ({}, {'$inc': {'x': 3}})),
(c.update_one, ({}, {'$inc': {'x': 3}})),
(c.find_one_and_delete, ({}, {})),
(c.find_one_and_replace, ({}, {})),
(c.aggregate, ([], {}))]
for let in [10, "str"]:
for helper, args in helpers:
with self.assertRaisesRegex(TypeError,
"let must be an instance of dict"):
helper(*args, let=let)
for helper, args in helpers:
helper(*args, let={})
if __name__ == "__main__":
unittest.main()
| 40.545248
| 87
| 0.566236
|
4a167c22faac175ab381de646ded195c3ce51750
| 3,155
|
py
|
Python
|
pic.py
|
JettChenT/photoship
|
4da7ad7275b29a07b8cf64ba2aa66c5dd14cc3a0
|
[
"MIT"
] | null | null | null |
pic.py
|
JettChenT/photoship
|
4da7ad7275b29a07b8cf64ba2aa66c5dd14cc3a0
|
[
"MIT"
] | null | null | null |
pic.py
|
JettChenT/photoship
|
4da7ad7275b29a07b8cf64ba2aa66c5dd14cc3a0
|
[
"MIT"
] | null | null | null |
import pygame, sys, random, math
SCREEN_W, SCREEN_H = 1024, 768
class CLS_pic(object):
def __init__(self,fileName):
img = pygame.image.load(fileName)
self.img = pygame.transform.scale(img,(SCREEN_W,SCREEN_H))
self.x,self.y = 0,0
self.w,self.h = self.img.get_size()
def draw(self,scr, effNum =0, spd = 5):
if effNum == 0 or spd == 0:
scr.blit(self.img,(0,0))
elif effNum == 1:
for x in range(-SCREEN_W,0,spd):
scr.blit(self.img,(x,0))
pygame.display.update()
elif effNum == 2:
for y in range(-SCREEN_W,0,spd):
scr.blit(self.img,(0,y))
pygame.display.update()
elif effNum == 3:
for x in range(-SCREEN_W,0,spd*10):
for y in range(-SCREEN_H,0,spd):
scr.blit(self.img,(x,y))
pygame.display.update()
elif effNum == 4:
for i in range(1,100,spd):
scr.blit(self.img,(SCREEN_W/i,SCREEN_H/i))
pygame.display.update()
elif effNum == 5:
for y in range(-SCREEN_H*4,0,spd):
scr.blit(self.img,(0,SCREEN_H%y))
pygame.display.update()
elif effNum == 6:
for w in range(1,SCREEN_W,spd):
h = int(w*SCREEN_H/SCREEN_W)
img = pygame.transform.scale(self.img,(w,h))
x = (SCREEN_W-w)/2
y = (SCREEN_H-h)/2
scr.blit(img,(x,y))
pygame.display.update()
elif effNum == 7:
w,h= spd*8,int(SCREEN_H/SCREEN_W*spd*8)
xNum,yNum = SCREEN_W//w,SCREEN_H//h
mList = []
for i in range(xNum*yNum):
mList.append(i)
for i in range(xNum*yNum):
num = random.choice(mList)
mList.remove(num)
x = num%yNum*w
y = num//yNum*h
scr.blit(self.img,(x,y),area=(x,y,w,h))
pygame.display.update()
elif effNum == 10:
oldImg = scr.copy()
for x in range(-SCREEN_W,0,spd):
scr.blit(self.img,(x,0))
scr.blit(oldImg,(x+SCREEN_W,0))
pygame.display.update()
elif effNum == 11:
oldImg = scr.copy()
for x in range(SCREEN_W,0,-spd):
scr.blit(self.img,(x,0))
scr.blit(oldImg,(x-SCREEN_W,0))
pygame.display.update()
elif effNum == 12:
for w in range(1,SCREEN_W,spd):
h = int(w * SCREEN_H/SCREEN_W)
img = pygame.transform.scale(self.img,(w,h))
scr.blit(img,((SCREEN_W-w)/2,(SCREEN_H-h)/2))
pygame.display.update()
elif effNum == 13:
oldImg = scr.copy()
for w in range(SCREEN_W,0,-spd):
h = int(w*SCREEN_H/SCREEN_W)
img = pygame.transform.scale(self.img,(w,h))
scr.blit(img,((SCREEN_W-w)/2,(SCREEN_H-h)/2))
pygame.display.update()
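# Usage sketch (illustrative, not part of the original file): the file name and
# surface below are hypothetical placeholders; draw() blits the scaled image
# onto the screen using the transition selected by effNum at speed spd.
def _transition_example(screen):
    pic = CLS_pic("background.jpg")    # any image path readable by pygame
    pic.draw(screen, effNum=6, spd=8)  # zoom-in transition (effNum 6 above)
    pic.draw(screen, effNum=0)         # instant blit, no transition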
| 38.012048
| 66
| 0.475119
|
4a167d633b45b95930a06f6a297d0ee4b527e622
| 2,794
|
py
|
Python
|
data/pre-processing scripts/german_data_categorizing.py
|
UCLA-StarAI/LearnFairNB
|
f922d885399955737bd9f16a104f700004cd3846
|
[
"Fair"
] | 3
|
2019-07-07T17:29:49.000Z
|
2021-03-20T18:52:28.000Z
|
data/pre-processing scripts/german_data_categorizing.py
|
UCLA-StarAI/LearnFairNB
|
f922d885399955737bd9f16a104f700004cd3846
|
[
"Fair"
] | 1
|
2019-11-23T22:26:05.000Z
|
2019-11-23T22:26:05.000Z
|
data/pre-processing scripts/german_data_categorizing.py
|
UCLA-StarAI/LearnFairNB
|
f922d885399955737bd9f16a104f700004cd3846
|
[
"Fair"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
# In[2]:
df = pd.read_csv('../data/german.data',delimiter=r"\s+")
df.columns = [
"Attribute1", "Attribute2", "Attribute3", "Attribute4", "Attribute5",
"Attribute6", "Attribute7", "Attribute8", "Attribute9", "Attribute10",
"Attribute11", "Attribute12", "Attribute13", "Attribute14", "Attribute15",
"Attribute16", "Attribute17", "Attribute18", "Attribute19", "Attribute20",
"Decision"
]
# In[3]:
df.Attribute1 = pd.Categorical(df.Attribute1)
df['Attribute1'] = df.Attribute1.cat.codes
df.Attribute3 = pd.Categorical(df.Attribute3)
df['Attribute3'] = df.Attribute3.cat.codes
df.Attribute4 = pd.Categorical(df.Attribute4)
df['Attribute4'] = df.Attribute4.cat.codes
df.Attribute6 = pd.Categorical(df.Attribute6)
df['Attribute6'] = df.Attribute6.cat.codes
df.Attribute7 = pd.Categorical(df.Attribute7)
df['Attribute7'] = df.Attribute7.cat.codes
df.Attribute9 = pd.Categorical(df.Attribute9)
df['Attribute9'] = df.Attribute9.cat.codes
df.Attribute10 = pd.Categorical(df.Attribute10)
df['Attribute10'] = df.Attribute10.cat.codes
df.Attribute12 = pd.Categorical(df.Attribute12)
df['Attribute12'] = df.Attribute12.cat.codes
df.Attribute14 = pd.Categorical(df.Attribute14)
df['Attribute14'] = df.Attribute14.cat.codes
df.Attribute15 = pd.Categorical(df.Attribute15)
df['Attribute15'] = df.Attribute15.cat.codes
df.Attribute17 = pd.Categorical(df.Attribute17)
df['Attribute17'] = df.Attribute17.cat.codes
df.Attribute19 = pd.Categorical(df.Attribute19)
df['Attribute19'] = df.Attribute19.cat.codes
df.Attribute20 = pd.Categorical(df.Attribute20)
df['Attribute20'] = df.Attribute20.cat.codes
df.Attribute8 = pd.Categorical(df.Attribute8)
df['Attribute8'] = df.Attribute8.cat.codes
df.Attribute11 = pd.Categorical(df.Attribute11)
df['Attribute11'] = df.Attribute11.cat.codes
df.Attribute16 = pd.Categorical(df.Attribute16)
df['Attribute16'] = df.Attribute16.cat.codes
df.Attribute18 = pd.Categorical(df.Attribute18)
df['Attribute18'] = df.Attribute18.cat.codes
df["Decisione"] = df["Decision"].map({ "1": 0 , "2": 1 })
# In[4]:
bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
labels = [1,2,3,4,5,6,7,8,9,10]
df['Attribute13'] = pd.cut(df['Attribute13'], bins=bins, labels=labels, include_lowest=True)
df['Attribute2'] = pd.cut(df['Attribute2'], bins=bins, labels=labels, include_lowest=True)
bins = [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000, 15000, 16000, 17000, 18000, 19000]
labels = [1,2,3,4,5,6,7,8,9,10, 11,12,13,14,15,16,17,18,19]
df['Attribute5'] = pd.cut(df['Attribute5'], bins=bins, labels=labels, include_lowest=True)
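# Illustrative sketch (not part of the original notebook): how pd.cut maps raw
# values onto the decade labels used above. The sample values are hypothetical;
# only the bins/labels mirror the ones defined for Attribute2 and Attribute13.
def _decade_binning_example():
    ages = pd.Series([19, 27, 35, 63])
    decade_bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
    decade_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    # 19 -> 2 (falls in (10, 20]), 27 -> 3, 35 -> 4, 63 -> 7
    return pd.cut(ages, bins=decade_bins, labels=decade_labels, include_lowest=True)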
# In[5]:
df
# In[6]:
df.to_csv('./german_categorized.data')
# In[ ]:
| 24.508772
| 134
| 0.710809
|
4a167e3834f30c1e27497d347339cd88d8c0d4fa
| 1,617
|
py
|
Python
|
tests/tensorflow/test_helpers.py
|
MaximProshin/nncf
|
2290d2f4cebcf6749e419dc76850e7bd8b7d8da1
|
[
"Apache-2.0"
] | 136
|
2020-06-01T14:03:31.000Z
|
2020-10-28T06:10:50.000Z
|
tests/tensorflow/test_helpers.py
|
MaximProshin/nncf
|
2290d2f4cebcf6749e419dc76850e7bd8b7d8da1
|
[
"Apache-2.0"
] | 133
|
2020-05-26T13:48:04.000Z
|
2020-10-28T05:25:55.000Z
|
tests/tensorflow/test_helpers.py
|
MaximProshin/nncf
|
2290d2f4cebcf6749e419dc76850e7bd8b7d8da1
|
[
"Apache-2.0"
] | 36
|
2020-05-28T08:18:39.000Z
|
2020-10-27T14:46:58.000Z
|
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
from tests.tensorflow.helpers import get_basic_conv_test_model
from tests.tensorflow.helpers import TFTensorListComparator
def test_basic_model_has_expected_params():
default_weight = tf.constant([[[[0., -1.],
[-1., 0.]]], [[[0., -1.],
[-1., 0.]]]])
default_weight = tf.transpose(default_weight, (2, 3, 1, 0))
default_bias = tf.constant([-2., -2.])
model = get_basic_conv_test_model()
act_weights = model.layers[1].weights[0]
ref_weights = default_weight
act_bias = model.layers[1].weights[1]
ref_bias = default_bias
TFTensorListComparator.check_equal(act_bias, ref_bias)
TFTensorListComparator.check_equal(act_weights, ref_weights)
def test_basic_model_is_valid():
model = get_basic_conv_test_model()
input_ = tf.ones([1, 4, 4, 1])
ref_output = tf.ones((1, 3, 3, 2)) * (-4)
act_output = model(input_)
TFTensorListComparator.check_equal(ref_output, act_output)
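# Sanity-check sketch (not part of the original tests): why -4 is the expected
# activation above. Each 2x2 default kernel sums to 0 + (-1) + (-1) + 0 = -2 and
# the default bias is -2, so convolving an all-ones input gives -2 + (-2) = -4
# in every output position, matching ref_output = tf.ones((1, 3, 3, 2)) * (-4).
def _expected_activation_value():
    kernel_sum = 0.0 + (-1.0) + (-1.0) + 0.0  # entries of one default kernel
    bias = -2.0
    return kernel_sum + bias  # == -4.0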
| 38.5
| 73
| 0.693878
|
4a167e5b2fda2df432243f281d34abb7f126f3d8
| 813
|
py
|
Python
|
maple/envs/wrappers/discretize_env.py
|
UT-Austin-RPL/maple
|
aef9fe9869945df5bbd1b02fd40813aac135cf5a
|
[
"MIT"
] | 9
|
2021-10-08T23:54:58.000Z
|
2022-03-24T08:51:53.000Z
|
maple/envs/wrappers/discretize_env.py
|
UT-Austin-RPL/maple
|
aef9fe9869945df5bbd1b02fd40813aac135cf5a
|
[
"MIT"
] | 1
|
2022-02-14T02:32:27.000Z
|
2022-02-21T15:02:00.000Z
|
maple/envs/wrappers/discretize_env.py
|
UT-Austin-RPL/maple
|
aef9fe9869945df5bbd1b02fd40813aac135cf5a
|
[
"MIT"
] | 1
|
2022-01-05T11:47:19.000Z
|
2022-01-05T11:47:19.000Z
|
import itertools
import numpy as np
from gym import Env
from gym.spaces import Discrete
from maple.envs.proxy_env import ProxyEnv
class DiscretizeEnv(ProxyEnv, Env):
def __init__(self, wrapped_env, num_bins):
super().__init__(wrapped_env)
low = self.wrapped_env.action_space.low
high = self.wrapped_env.action_space.high
action_ranges = [
np.linspace(low[i], high[i], num_bins)
for i in range(len(low))
]
self.idx_to_continuous_action = [
np.array(x) for x in itertools.product(*action_ranges)
]
self.action_space = Discrete(len(self.idx_to_continuous_action))
def step(self, action):
continuous_action = self.idx_to_continuous_action[action]
return super().step(continuous_action)
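# Usage sketch (illustrative, not part of the original wrapper): with a 2-dim
# Box action space and num_bins=3 the wrapper exposes Discrete(9); each index
# selects one element of the Cartesian product of the per-dimension bins. The
# wrapped_env argument is any continuous-action gym env (hypothetical here).
def _discretize_example(wrapped_env):
    env = DiscretizeEnv(wrapped_env, num_bins=3)
    n_dims = len(wrapped_env.action_space.low)
    assert env.action_space.n == 3 ** n_dims
    # env.step(i) forwards idx_to_continuous_action[i] to the wrapped env.
    return env.idx_to_continuous_action[0], env.idx_to_continuous_action[-1]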
| 28.034483
| 72
| 0.672817
|
4a167eeae6075630d74f69178dfbfed15abe2f90
| 23,366
|
py
|
Python
|
att_SVHN_1hop.py
|
Yu-Hyun/Hopfield-ODE
|
24a3bdc70ddefd69e69e04332c2ea51fb5d1b5a2
|
[
"MIT"
] | null | null | null |
att_SVHN_1hop.py
|
Yu-Hyun/Hopfield-ODE
|
24a3bdc70ddefd69e69e04332c2ea51fb5d1b5a2
|
[
"MIT"
] | null | null | null |
att_SVHN_1hop.py
|
Yu-Hyun/Hopfield-ODE
|
24a3bdc70ddefd69e69e04332c2ea51fb5d1b5a2
|
[
"MIT"
] | 1
|
2021-07-19T05:36:56.000Z
|
2021-07-19T05:36:56.000Z
|
import os
import argparse
import logging
import time
import numpy as np
import scipy.linalg
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.autograd import Function
import torch.jit
from torch.nn.parameter import Parameter
import math
from torch.autograd import Variable
import torch.nn.utils.weight_norm as weight_norm
parser = argparse.ArgumentParser()
parser.add_argument('--network', type=str, choices=['resnet', 'odenet'], default='odenet')
parser.add_argument('--tol', type=float, default=1e-3)
parser.add_argument('--adjoint', type=eval, default=False, choices=[True, False])
parser.add_argument('--downsampling-method', type=str, default='conv', choices=['conv', 'res'])
parser.add_argument('--nepochs', type=int, default=160)
parser.add_argument('--data_aug', type=eval, default=False, choices=[True, False])
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--save', type=str, default='./att_SVHN_1HOP')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--epsilon', type=float, default = 0.01)
parser.add_argument('--tau', type=float, default = 1.0)
parser.add_argument('--run', type=int, default = 1)
args = parser.parse_args()
#from torchdiffeq import odeint_adjoint as odeint
from torchdiffeq import odeint
import torch.nn.functional as F
from torch.nn.modules.module import Module
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ResBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU()
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, x):
shortcut = x
out = self.relu(self.norm1(x))
if self.downsample is not None:
shortcut = self.downsample(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out + shortcut
class ConcatConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(
dim_in + 1, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias
)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
class NoiseBlock(nn.Module):
def __init__(self, sigma):
super(NoiseBlock, self).__init__()
self.sigma = sigma
def forward(self, x): # sigma is noise power (standard deviation)
out = x + self.sigma * torch.randn_like(x)
return out
def set_sigma(self, x):
self.sigma = x
return 1
class ODEfunc(nn.Module):
def __init__(self, dim):
super(ODEfunc, self).__init__()
self.norm1 = norm(dim)
self.relu = nn.ReLU()
self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm2 = norm(dim)
self.conv2 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm3 = norm(dim)
self.nfe = 0
def forward(self, t, x):
self.nfe += 1
out = self.norm1(x)
out = self.relu(out)
out = self.conv1(t, out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(t, out)
out = self.norm3(out)
return out
def concat_zero(x, additional_dim):
B,C,H,W = x.size()
zeros = torch.zeros(B,additional_dim,H,W).to(device)
out = torch.cat((x,zeros),dim=1)
return out
class concatBlock(nn.Module):
def __init__(self):
super(concatBlock, self).__init__()
def forward(self, x):
B,C,H,W = x.size()
zeros = torch.zeros(B,6,H,W).to(device)
out = torch.cat((x,zeros),dim=1)
return out
class ODEBlock(nn.Module):
def __init__(self, odefunc, t):
super(ODEBlock, self).__init__()
self.odefunc = odefunc
self.integration_time = torch.tensor([0, t]).float()
def forward(self, x):
self.integration_time = self.integration_time.type_as(x)
out = odeint(self.odefunc, x, self.integration_time, rtol=args.tol, atol=args.tol)#, method='fixed_adams')
#, method = 'tsit5')
#, method = 'fixed_adams')
return out[1]
@property
def nfe(self):
return self.odefunc.nfe
@nfe.setter
def nfe(self, value):
self.odefunc.nfe = value
class ODEBlock_t(nn.Module):
def __init__(self, odefunc, t):
super(ODEBlock_t, self).__init__()
self.odefunc = odefunc
self.integration_time = torch.tensor([0,t]).float()
def forward(self, x):
self.odefunc.set_x0(x)
self.integration_time = self.integration_time.type_as(x)
out = odeint(self.odefunc, x, self.integration_time, rtol=args.tol, atol=args.tol, method='fixed_adams')
return out[1]
class ODEfunc_single_conv_nonConcat(nn.Module): # applies a convolution followed by its weight-tied transposed convolution
def __init__(self, dim, N, epsilon):
super(ODEfunc_single_conv_nonConcat, self).__init__()
self.dim = dim
self.relu = nn.ReLU()
module = nn.Conv2d
moduleT = nn.ConvTranspose2d
self.conv1 = weight_norm( module(6+dim, 6+dim, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True) )
self.Tconv1 = weight_norm( moduleT(6+dim, 6+dim, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True) )
self.Tconv1.weight_v = self.conv1.weight_v
self.Tconv1.weight_g = self.conv1.weight_g
self.epsilon = epsilon
self.nfe = 0
self.x0 = 0
self.conv_0 = weight_norm( module(6+dim, 6+dim, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=True) )
self.input_noisy = 0
self.running_epsilon = 0
#self.momentum = 0.1
#self.norm02 = norm(dim+6)
def forward(self, t, x):
self.nfe += 1
out_0 = self.x0
out = x
out_e = x
out_0 = self.relu(out_0)
out_0 = self.conv_0(out_0)
#out_0 = self.norm02(out_0)
#out = self.relu(out)
out = self.conv1(out)
out = self.relu(out)
out = self.Tconv1(-out)
#out = self.norm3(out)
#out_e = self.norm1(out_e)
out_e = -self.epsilon * torch.sum(self.conv_0.weight_g) * (self.dim + 6) * out_e
#out_e = self.norm_e(out_e)
out = out + out_e + out_0
self.running_epsilon = 0.9*self.running_epsilon + 0.1*self.epsilon*torch.sum(self.conv_0.weight_g.data)*(self.dim+6)
return out
def del_x0(self):
del self.x0
def set_x0(self,x0):
self.x0 = x0
class ODEfunc_double_conv(nn.Module): # applies two convolutions with the same weights
def __init__(self, dim, N):
super(ODEfunc_double_conv, self).__init__()
self.norm1 = norm(dim)
self.relu = nn.ReLU()
self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm2 = norm(dim)
#self.conv2 = module(dim, dim, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.norm3 = norm(dim)
self.nfe = 0
def forward(self, t, x):
self.nfe += 1
out = self.norm1(x)
out = self.relu(out)
out = self.conv1(t, out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv1(t, out)
out = self.norm3(out)
return out
class ODEfunc_single_conv(nn.Module): # applies a single convolution
def __init__(self, dim, N):
super(ODEfunc_single_conv, self).__init__()
self.norm1 = norm(dim)
self.relu = nn.ReLU()
self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)
self.norm2 = norm(dim)
self.nfe = 0
def forward(self, t, x):
self.nfe += 1
out = self.norm1(x)
out = self.relu(out)
out = self.conv1(t, out)
out = self.norm2(out)
return out
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
shape = torch.prod(torch.tensor(x.shape[1:])).item()
return x.view(-1, shape)
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
def get_cifar10_loaders(data_aug=False, batch_size=128, test_batch_size = 1000, perc=1.0):
if data_aug:
transform_train = transforms.Compose([
transforms.RandomCrop(28, padding=4),
transforms.ToTensor(),
])
else:
transform_train = transforms.Compose([
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
train_loader = DataLoader(
datasets.CIFAR10(root='.data/cifar10', train=True, download=True, transform=transform_train),
batch_size=batch_size, shuffle=True, num_workers=3, drop_last=True
)
train_eval_loader = DataLoader(
datasets.CIFAR10(root='.data/cifar10', train=True, download=True, transform=transform_test),
batch_size=test_batch_size, shuffle=True, num_workers=3, drop_last=True
)
test_loader = DataLoader(
datasets.CIFAR10(root='.data/cifar10', train=False, download=True, transform=transform_test),
batch_size=test_batch_size, shuffle=True, num_workers=3, drop_last=True
)
return train_loader, test_loader, train_eval_loader
def get_svhn_loaders(data_aug=False, batch_size=128, test_batch_size = 1000, perc=1.0):
if data_aug:
transform_train = transforms.Compose([
transforms.RandomCrop(28, padding=4),
transforms.ToTensor(),
])
else:
transform_train = transforms.Compose([
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
train_loader = DataLoader(
datasets.SVHN(root='.data/svhn', split='train', download=True, transform=transform_train),
batch_size=batch_size, shuffle=True, num_workers=3, drop_last=True
)
train_eval_loader = DataLoader(
datasets.SVHN(root='.data/svhn', split='train', download=True, transform=transform_test),
batch_size=test_batch_size, shuffle=True, num_workers=3, drop_last=True
)
test_loader = DataLoader(
datasets.SVHN(root='.data/svhn', split='test', download=True, transform=transform_test),
batch_size=test_batch_size, shuffle=True, num_workers=3, drop_last=True
)
return train_loader, test_loader, train_eval_loader
def inf_generator(iterable):
"""Allows training with DataLoaders in a single infinite loop:
for i, (x, y) in enumerate(inf_generator(train_loader)):
"""
iterator = iterable.__iter__()
while True:
try:
yield iterator.__next__()
except StopIteration:
iterator = iterable.__iter__()
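# Minimal usage sketch for inf_generator (illustrative only, not part of the
# original script): the generator restarts the DataLoader whenever it is
# exhausted, so one flat loop over iterations can span many epochs, exactly as
# the training loop in __main__ below does with data_gen.__next__().
def _inf_generator_example(loader, num_iters):
    gen = inf_generator(loader)
    batches = []
    for _ in range(num_iters):
        batches.append(next(gen))  # never raises StopIteration; wraps around
    return batches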
def learning_rate_with_decay(batch_size, batch_denom, batches_per_epoch, boundary_epochs, decay_rates):
initial_learning_rate = args.lr * batch_size / batch_denom
boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
vals = [initial_learning_rate * decay for decay in decay_rates]
def learning_rate_fn(itr):
lt = [itr < b for b in boundaries] + [True]
i = np.argmax(lt)
return vals[i]
return learning_rate_fn
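# Worked example (illustrative, not part of the original script): with the
# values used in __main__ below (batch_size=256, batch_denom=128,
# boundary_epochs=[60, 100, 140, 200], decay_rates=[1, 0.5, 0.3, 0.1, 0.03]),
# the initial rate is args.lr * 256 / 128 = 2 * args.lr and it is multiplied by
# the next decay factor each time a boundary epoch is passed.
def _lr_schedule_example(batches_per_epoch):
    fn = learning_rate_with_decay(
        batch_size=256, batch_denom=128, batches_per_epoch=batches_per_epoch,
        boundary_epochs=[60, 100, 140, 200],
        decay_rates=[1, 0.5, 0.3, 0.1, 0.03])
    # Before epoch 60 the full rate applies; just after epoch 60 it is halved.
    return fn(0), fn(61 * batches_per_epoch)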
def one_hot(x, K):
return np.array(x[:, None] == np.arange(K)[None, :], dtype=int)
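# Tiny worked example for one_hot (illustrative, not part of the original
# script): class indices become rows of a K-wide indicator matrix.
def _one_hot_example():
    out = one_hot(np.array([2, 0]), 4)
    assert out.tolist() == [[0, 0, 1, 0], [1, 0, 0, 0]]
    return out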
def accuracy(model, dataset_loader):
total_correct = 0
for x, y in dataset_loader:
x = x.to(device)
y = one_hot(np.array(y.numpy()), 10)
target_class = np.argmax(y, axis=1)
predicted_class = np.argmax(model(x).cpu().detach().numpy(), axis=1)
total_correct += np.sum(predicted_class == target_class)
return total_correct / len(dataset_loader.dataset)
def accuracy_reif(model, dataset_loader):
total_correct = 0
# original_sigma = model[].sigma
# model[].set_sigma(0)
for x, y in dataset_loader:
x = x.to(device)
y = one_hot(np.array(y.numpy()), 10)
target_class = np.argmax(y, axis=1)
predicted_class = np.argmax(model(x).cpu().detach().numpy(), axis=1)
total_correct += np.sum(predicted_class == target_class)
# model[].set_sigma(original_sigma)
return total_correct / len(dataset_loader.dataset)
def accuracy_reif_noisy(model, dataset_loader, sigma):
total_correct = 0
# original_sigma = model[].sigma
# model[].set_sigma(0)
for x, y in dataset_loader:
x = x.to(device) + (torch.randn_like(x) * math.sqrt(sigma)).to(device)
y = one_hot(np.array(y.numpy()), 10)
target_class = np.argmax(y, axis=1)
predicted_class = np.argmax(model(x).cpu().detach().numpy(), axis=1)
total_correct += np.sum(predicted_class == target_class)
# model[].set_sigma(original_sigma)
return total_correct / len(dataset_loader.dataset)
def loss_reif(model, dataset_loader, criterion):
loss = 0
for x, y in dataset_loader:
x = x.to(device)
y = y.to(device)
logits = model(x)
loss = loss + criterion(logits, y)
return loss
def loss_reif_noisy(model, dataset_loader, sigma, criterion):
loss = 0
for x, y in dataset_loader:
x = x.to(device) + (torch.randn_like(x) * math.sqrt(sigma)).to(device)
y = y.to(device)
logits = model(x)
loss = loss + criterion(logits, y)
return loss
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
def get_logger(logpath, filepath, package_files=[], displaying=True, saving=True, debug=False):
logger = logging.getLogger()
if debug:
level = logging.DEBUG
else:
level = logging.INFO
logger.setLevel(level)
if saving:
info_file_handler = logging.FileHandler(logpath, mode="a")
info_file_handler.setLevel(level)
logger.addHandler(info_file_handler)
if displaying:
console_handler = logging.StreamHandler()
console_handler.setLevel(level)
logger.addHandler(console_handler)
logger.info(filepath)
with open(filepath, "r") as f:
logger.info(f.read())
for f in package_files:
logger.info(f)
with open(f, "r") as package_f:
logger.info(package_f.read())
return logger
if __name__ == '__main__':
args.save = args.save + str(args.tau) +"_sec_" + str(args.epsilon) + "_epsilon" +"_run"+ str(args.run)
makedirs(args.save)
logger = get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
logger.propagate = False
logger.info(args)
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
logger.info('designated gpu id by user : '+ str(args.gpu))
if torch.cuda.is_available() :
logger.info('GPU availability : True' )
else :
logger.info('\n!!!GPU NOT available!!!\n')
#device = torch.device('cpu')
is_odenet = args.network == 'odenet'
NC = 16 # number of channels
noise_power = 0.01
reif_time = args.tau
lambda_reif = 0.01
epoch_start = 0
epoch_end = 240
downsampling_layers = [
nn.Conv2d(3, NC, 3, 1), # cifar10 : 32X32 -> 30X30 # mnist : 28X28 -> 26X26
norm(NC),
nn.ReLU(),
nn.Conv2d(NC, NC, 4, 2, 1), # cifar10 : 30X30 -> 15X15 # mnist : 26X26 -> 13X13
norm(NC),
nn.ReLU(),
nn.Conv2d(NC, NC, 4, 2, 1), # cifar10 : 15X15 -> 7X7 # mnist : 13X13 -> 6X6
]
concat_layer = [concatBlock()]
feature_layers = [ODEBlock(ODEfunc(NC), reif_time)]
norm_layer_before_reif = [norm(NC+6)]
reification_layers = [ODEBlock_t(ODEfunc_single_conv_nonConcat(dim = NC, N = 7, epsilon = args.epsilon), reif_time)] # 6 for mnist, 7 for cifar10 ###
# 6 is correct for default setting, which is data_aug = TRUE.
fc_layers = [norm(NC+6), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)), Flatten(), nn.Linear(NC+6, 10)]
criterion = nn.CrossEntropyLoss().to(device)
train_loader, test_loader, train_eval_loader = get_svhn_loaders(
args.data_aug, args.batch_size, args.test_batch_size
)
data_gen = inf_generator(train_loader)
batches_per_epoch = len(train_loader)
model = nn.Sequential(*downsampling_layers, *feature_layers, *concat_layer, *norm_layer_before_reif, *reification_layers, *fc_layers).to(device)
lr_fn = learning_rate_with_decay(
args.batch_size, batch_denom=128, batches_per_epoch=batches_per_epoch, boundary_epochs=[60, 100 , 140, 200],
decay_rates=[1, 0.5, 0.3, 0.1, 0.03]
)
model_optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
best_acc = 0
best_noisy_acc = 0
best_ce_loss = math.inf
best_ce_t_loss = math.inf
best_sum_loss = math.inf
batch_time_meter = RunningAverageMeter()
f_nfe_meter = RunningAverageMeter()
b_nfe_meter = RunningAverageMeter()
CE_loss_meter = RunningAverageMeter()
CE_loss_t_meter = RunningAverageMeter()
end = time.time()
for itr in range(1+ epoch_start * batches_per_epoch,1+ epoch_end * batches_per_epoch):
for param_group in model_optimizer.param_groups:
param_group['lr'] = lr_fn(itr)
model_optimizer.zero_grad()
x, y = data_gen.__next__()
x = x.to(device)
x_t = torch.add(x, math.sqrt(noise_power)*torch.randn_like(x).to(device))
y = y.to(device)
'''
state_without_noise = model(x)
#state_without_noise_concat = concat_zero(state_without_noise, additional_dim = 6).to(device)
orig_state_reified = model_reif(state_without_noise)
logits = model_fc( orig_state_reified )
'''
logits = model(x)
CE_loss = criterion(logits, y)
CE_loss.backward()
'''
state_noised = model(x_t)
#state_noised_concat = concat_zero(state_noised, additional_dim = 6).to(device)
state_reified = model_reif(state_noised)
logits_t = model_fc( state_reified )
'''
logits_t = model(x_t)
CE_loss_t = criterion(logits_t, y)
CE_loss_t.backward()
#REIF_loss = torch.norm(state_without_noise_concat[:,0:NC,:,:] - state_reified[:,0:NC,:,:], 2)/(128) #128 is batch size
#REIF_loss = REIF_loss + torch.norm(state_without_noise_concat[:,0:NC,:,:] - orig_state_reified[:,0:NC,:,:], 2)/(128)
#REIF_loss = REIF_loss / 2
#loss = CE_loss +CE_loss_t
#loss_with_reif = CE_loss +CE_loss_t + lambda_reif * REIF_loss /reif_time
CE_loss_meter.update(CE_loss.data)
CE_loss_t_meter.update(CE_loss_t.data)
model_optimizer.step()
batch_time_meter.update(time.time() - end)
end = time.time()
if itr %( batches_per_epoch) == 0:
with torch.no_grad():
train_acc = accuracy_reif(model, train_eval_loader)
val_acc = accuracy_reif(model, test_loader)
noisy_acc = accuracy_reif_noisy(model, test_loader, noise_power)
vloss = loss_reif(model, test_loader, criterion)
vtloss = loss_reif_noisy(model, test_loader, noise_power, criterion)
if val_acc > best_acc :
torch.save(model, os.path.join(args.save, 'model.pth'))
best_acc = val_acc
if noisy_acc > best_noisy_acc :
torch.save(model, os.path.join(args.save, 'model_noisy.pth'))
best_noisy_acc = noisy_acc
if vloss < best_ce_loss :
torch.save(model, os.path.join(args.save, 'model_vloss.pth'))
best_ce_loss = vloss
if vtloss < best_ce_t_loss :
torch.save(model, os.path.join(args.save, 'model_vtloss.pth'))
best_ce_t_loss = vtloss
if (vloss + vtloss) < best_sum_loss :
torch.save(model, os.path.join(args.save, 'model_sumloss.pth'))
best_sum_loss = vloss + vtloss
logger.info(
"Epoch {:04d} | Time ({:.3f}) | "
"Train Acc {:.4f} | Test Acc {:.4f} | Noisy Acc {:.4f}| CEloss {:.4f} | CEtloss {:.4f} | "
"sumloss {:.4f}| vloss {:.4f} | vtloss {:.4f} | vsumloss {:.4f} | running_eps {:.4f}".format(
itr // batches_per_epoch, batch_time_meter.avg,
train_acc, val_acc, noisy_acc, CE_loss_meter.avg, CE_loss_t_meter.avg, CE_loss_meter.avg+CE_loss_t_meter.avg,
vloss, vtloss, vloss + vtloss, model[10].odefunc.running_epsilon
)
)
| 33.765896
| 154
| 0.598904
|
4a168102d14a7aabe149a24b76d2a7ab0b4c621f
| 2,058
|
py
|
Python
|
PythonBaseDemo/GraphicInterfaceProgrammin/11.5/Listbox_test3.py
|
CypHelp/TestNewWorldDemo
|
ee6f73df05756f191c1c56250fa290461fdd1b9a
|
[
"Apache-2.0"
] | null | null | null |
PythonBaseDemo/GraphicInterfaceProgrammin/11.5/Listbox_test3.py
|
CypHelp/TestNewWorldDemo
|
ee6f73df05756f191c1c56250fa290461fdd1b9a
|
[
"Apache-2.0"
] | null | null | null |
PythonBaseDemo/GraphicInterfaceProgrammin/11.5/Listbox_test3.py
|
CypHelp/TestNewWorldDemo
|
ee6f73df05756f191c1c56250fa290461fdd1b9a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟 (Crazy Java League)</a>   #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
from tkinter import *
# Import ttk
from tkinter import ttk
class App:
def __init__(self, master):
self.master = master
self.initWidgets()
def initWidgets(self):
topF = Frame(self.master)
topF.pack(fill=Y, expand=YES)
# Create the Listbox widget
self.lb = Listbox(topF)
self.lb.pack(side=LEFT, fill=Y, expand=YES)
for item in range(20):
self.lb.insert(END, str(item))
# Create a Scrollbar widget and link it to self.lb's vertical scrolling
scroll = Scrollbar(topF, command=self.lb.yview)
scroll.pack(side=RIGHT, fill=Y)
# Let self.lb's vertical scrolling update the scroll bar
self.lb.configure(yscrollcommand=scroll.set)
# Bind a handler for the double-click event
self.lb.bind("<Double-1>", self.click)
def click(self, event):
from tkinter import messagebox
# Get the currently selected Listbox item
messagebox.showinfo(title=None, message=str(self.lb.curselection()))
root = Tk()
root.title("Listbox测试")
# Change the window icon
root.iconbitmap('images/fklogo.ico')
App(root)
root.mainloop()
| 42.875
| 76
| 0.392128
|
4a16812120954745ef61049ecc31aad183f3ae71
| 5,592
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
Zoras2/CryptoDezireCash
|
e7c108f4dab1289459e2cfc0651836c1a4769996
|
[
"MIT"
] | 1
|
2019-01-25T16:46:28.000Z
|
2019-01-25T16:46:28.000Z
|
contrib/seeds/makeseeds.py
|
Zoras2/CryptoDezireCash
|
e7c108f4dab1289459e2cfc0651836c1a4769996
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
Zoras2/CryptoDezireCash
|
e7c108f4dab1289459e2cfc0651836c1a4769996
|
[
"MIT"
] | 1
|
2019-02-26T20:43:54.000Z
|
2019-02-26T20:43:54.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Copyright (c) 2018 The Crypto Dezire Cash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Crypto-Dezire-CashCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
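# Behaviour sketch for filtermultiport (illustrative only, not part of the
# original script): entries sharing a sortkey (same host seen on several ports)
# are dropped entirely; the addresses and ports below are made up.
def _filtermultiport_example():
    a = {'ip': '192.0.2.1', 'port': 8333, 'sortkey': 0xC0000201}
    b = {'ip': '192.0.2.1', 'port': 18333, 'sortkey': 0xC0000201}
    c = {'ip': '198.51.100.7', 'port': 8333, 'sortkey': 0xC6336407}
    return filtermultiport([a, b, c])  # -> [c]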
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple cryptodezirecash ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.323699
| 186
| 0.570458
|
4a168121c92bbeee725e722dc396427f5761f7fe
| 685
|
py
|
Python
|
stai/types/announcement.py
|
STATION-I/staicoin-blockchain
|
b8686c75dd5fe7883115d9613858c9c8cadfc4a7
|
[
"Apache-2.0"
] | 10
|
2021-10-02T18:33:56.000Z
|
2021-11-14T17:10:48.000Z
|
stai/types/announcement.py
|
STATION-I/staicoin-blockchain
|
b8686c75dd5fe7883115d9613858c9c8cadfc4a7
|
[
"Apache-2.0"
] | 14
|
2021-10-07T22:10:15.000Z
|
2021-12-21T09:13:49.000Z
|
stai/types/announcement.py
|
STATION-I/staicoin-blockchain
|
b8686c75dd5fe7883115d9613858c9c8cadfc4a7
|
[
"Apache-2.0"
] | 6
|
2021-10-29T19:36:59.000Z
|
2021-12-19T19:52:57.000Z
|
from dataclasses import dataclass
from typing import Optional
from stai.types.blockchain_format.sized_bytes import bytes32
from stai.util.hash import std_hash
@dataclass(frozen=True)
class Announcement:
origin_info: bytes32
message: bytes
morph_bytes: Optional[bytes] = None # CATs morph their announcements and other puzzles may choose to do so too
def name(self) -> bytes32:
if self.morph_bytes is not None:
message: bytes = std_hash(self.morph_bytes + self.message)
else:
message = self.message
return std_hash(bytes(self.origin_info + message))
def __str__(self):
return self.name().decode("utf-8")
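# Usage sketch (illustrative, not part of the original module): an
# Announcement's name is the hash of its origin info plus its (optionally
# morphed) message. The origin bytes below are a placeholder, not a real coin id.
def _announcement_name_example() -> bytes32:
    origin = bytes32(b"\x00" * 32)  # hypothetical origin coin id
    plain = Announcement(origin, b"hello")
    morphed = Announcement(origin, b"hello", morph_bytes=b"\xcc")
    assert plain.name() != morphed.name()  # morphing changes the computed name
    return plain.name()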
| 29.782609
| 115
| 0.705109
|
4a16822d175607267baef3fff301104566fc7f12
| 3,271
|
py
|
Python
|
audit_web_client/flask/api/user_db.py
|
levon003/wiki-ores-feedback
|
29e7f1a41b16a7c57448d5bbc5801653debbc115
|
[
"MIT"
] | 2
|
2022-03-27T19:24:30.000Z
|
2022-03-29T16:15:31.000Z
|
audit_web_client/flask/api/user_db.py
|
levon003/wiki-ores-feedback
|
29e7f1a41b16a7c57448d5bbc5801653debbc115
|
[
"MIT"
] | 1
|
2021-04-23T21:03:45.000Z
|
2021-04-23T21:03:45.000Z
|
audit_web_client/flask/api/user_db.py
|
levon003/wiki-ores-feedback
|
29e7f1a41b16a7c57448d5bbc5801653debbc115
|
[
"MIT"
] | null | null | null |
import click
from flask import current_app, g
from flask.cli import with_appcontext
import sqlalchemy as sa
from sqlalchemy import create_engine, Table, Column, Integer, SmallInteger, String, MetaData, ForeignKey, Text, Boolean, Float, Index, bindparam
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.dialects.mysql import TINYINT
import os
import json
import logging
from collections import defaultdict
from tqdm import tqdm
from datetime import datetime
import pytz
import numpy as np
from . import db
def get_metadata():
if 'oidb_user_metadata' in g:
return g.oidb_user_metadata
g.oidb_user_metadata = MetaData()
Table('activity_log', g.oidb_user_metadata,
Column('activity_id', Integer, primary_key=True, autoincrement=True),
Column('timestamp', Integer, nullable=False),
Column('user_token', Text, nullable=False),
Column('activity_type', Text, nullable=False),
Column('new_state', Text, nullable=False),
)
Table('rev_annotation', g.oidb_user_metadata,
Column('annotation_id', Integer, primary_key=True, autoincrement=True),
Column('timestamp', Integer, nullable=False),
Column('user_token', Text, nullable=False),
Column('rev_id', Integer, nullable=False),
Column('annotation_type', Text, nullable=False),
Column('annotation_data', Text, nullable=False),
Index('rev_annotation_multiindex', 'user_token', 'rev_id', 'annotation_type', 'timestamp'),
)
return g.oidb_user_metadata
def get_table(table_name):
metadata = get_metadata()
return metadata.tables[table_name]
def get_activity_log_table():
return get_table('activity_log')
def get_rev_annotation_table():
return get_table('rev_annotation')
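# Usage sketch (illustrative, not part of the original module): inserting one
# annotation row through the table object above. Connection handling is
# simplified and all values are placeholders; an application context is assumed
# so that get_rev_annotation_table() can reach flask.g.
def _insert_annotation_example(conn, now_ts, user_token, rev_id):
    rev_annotation = get_rev_annotation_table()
    conn.execute(rev_annotation.insert().values(
        timestamp=now_ts,
        user_token=user_token,
        rev_id=rev_id,
        annotation_type='note',
        annotation_data='{}',
    ))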
def create_tables(engine):
metadata = get_metadata()
metadata.create_all(engine, checkfirst=True)
@click.command('create-user-db')
@with_appcontext
def create_user_db_command():
logger = logging.getLogger('cli.create-user-db.main')
logger.info("Creating and populating user tables in Tools OIDB database.")
start = datetime.now()
engine = db.get_oidb_engine()
create_tables(engine)
logger.info(f"Finished creating tables after {datetime.now() - start}.")
@click.command('drop-user-db')
@click.option('--all', 'drop_all', default=False, is_flag=True)
@with_appcontext
def drop_user_db_command(drop_all):
logger = logging.getLogger('cli.drop-user-db.main')
logger.info("Dropping user tables in Tools OIDB database.")
start = datetime.now()
engine = db.get_oidb_engine()
metadata = MetaData(bind=engine)
metadata.reflect()
logger.info(f"Existing tables ({len(metadata.tables)} total):")
for key, value in metadata.tables.items():
logger.info(f"{key}")
if drop_all:
logger.info("Dropping all user tables identified via reflection.")
metadata.drop_all(tables=[metadata.tables['rev_annotation'], metadata.tables['activity_log'], ])
else:
logger.info("Only listing tables without --all.")
logger.info(f"Finished dropping table data after {datetime.now() - start}.")
def init_app(app):
app.cli.add_command(create_user_db_command)
app.cli.add_command(drop_user_db_command)
| 33.721649
| 144
| 0.722409
|
4a16824ad0a8aea2f4a61f86f7cebca56fabe7b7
| 9,402
|
py
|
Python
|
main.py
|
hacker1383/id-bot
|
233a88b2f2c2c97fc2f260494c00458b3aa6b5df
|
[
"MIT"
] | null | null | null |
main.py
|
hacker1383/id-bot
|
233a88b2f2c2c97fc2f260494c00458b3aa6b5df
|
[
"MIT"
] | null | null | null |
main.py
|
hacker1383/id-bot
|
233a88b2f2c2c97fc2f260494c00458b3aa6b5df
|
[
"MIT"
] | null | null | null |
#info bot created by negative
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import telebot
from telebot import types
import json
import os
import config
import random
import requests as req
bot = telebot.TeleBot(config.token)
@bot.message_handler(commands=['start', 'help'])
def welcome(m):
cid = m.chat.id
markup = types.InlineKeyboardMarkup()
a = types.InlineKeyboardButton("Tele King Team \xE2\x9C\x8C", url="https://telegram.me/KING_CH")
c = types.InlineKeyboardButton("Add group \xE2\x9C\x8C", url="https://telegram.me/KINGIDBOT?startgroup=new")
markup.add(a, c)
b = types.InlineKeyboardButton("Developer ID Bot \xE2\x9C\x8C", url="https://telegram.me/XSUDOX")
markup.add(b)
nn = types.InlineKeyboardButton("Inline Mode", switch_inline_query='')
markup.add(nn)
ret_msg = bot.send_message(cid, "Hello I'm ID bot \n\n Send : \n /id or /me or /info \n\n\n get your id : \n /idme (just pv) \nsend Your feedback : /feedback [msg]\n\n\n list inline mod : \ntype @KINGIDBOT\n\nBot version 3", disable_notification=True, reply_markup=markup)
assert ret_msg.message_id
@bot.message_handler(commands=['id', 'ids', 'info', 'me'])
def id(m): # info menu
cid = m.chat.id
title = m.chat.title
usr = m.chat.username
f = m.chat.first_name
l = m.chat.last_name
t = m.chat.type
d = m.date
text = m.text
p = m.pinned_message
fromm = m.forward_from
markup = types.InlineKeyboardMarkup()
markup.add(types.InlineKeyboardButton("\xF0\x9F\x98\x8A Tele King Team \xF0\x9F\x98\x8A", url="https://telegram.me/king_ch"))
#info text
bot.send_chat_action(cid, "typing")
bot.reply_to(m, "*ID from* : ```{}``` \n\n *Chat name* : ```{}``` \n\n\n *Your Username* : ```{}``` \n\n *Your First Name* : ```{}```\n\n *Your Last Name* : ```{}```\n\n *Type From* : ```{}``` \n\n *Msg data* : ```{}```\n\n *Your Msg* : ```{}```\n\n* pind msg * : ```{}```\n\n *from* : ```{}```".format(cid,title,usr,f,l,t,d,text,p,fromm), parse_mode="Markdown", reply_markup=markup)
@bot.message_handler(commands=['contavbct'])
def c(m):
uid = m.chat.id
bot.send_chat_action(uid, 'typing')
bot.send_contact(uid, phone_number="+98 937 909 7344", first_name="Negative")
@bot.message_handler(commands=['abouyghght']) # copy right Taylor Team
def p(m):
uid = m.chat.id
markup = types.InlineKeyboardMarkup()
v = types.InlineKeyboardButton('\xF0\x9F\x91\x87 \xF0\x9F\x91\xA5 Thanks to \xF0\x9F\x91\xA5 \xF0\x9F\x91\x87', callback_data='Team')
a = types.InlineKeyboardButton('Negative', url='https://telegram.me/negative_officiall')
b = types.InlineKeyboardButton('Parham', url='https://telegram.me/UnFriendlly')
c = types.InlineKeyboardButton('Arsalan', url='https://telegram.me/mute_all')
n = types.InlineKeyboardButton('Amircc_CreeD', url='https://telegram.me/Amircc_CreeD')
m = types.InlineKeyboardButton('sorblack', url='https://telegram.me/sorblack')
k = types.InlineKeyboardButton('MrJacki', url='https://telegram.me/MrJacki')
j = types.InlineKeyboardButton('allwen', url='https://telegram.me/allwen')
o = types.InlineKeyboardButton('Randall', url='https://telegram.me/Xx_Randall_Xx')
p = types.InlineKeyboardButton('NeonGame', url='https://telegram.me/pokr_face')
y = types.InlineKeyboardButton('\xF0\x9F\x92\x8E End \xF0\x9F\x92\x8E', callback_data='Team')
ch = types.InlineKeyboardButton('Channel', url='https://telegram.me/idbot_channel')
git = types.InlineKeyboardButton('Github', url='https://github.com/taylor-team')
markup.add(v)
markup.add(a, j)
markup.add(b, c)
markup.add(n, m)
markup.add(k, o, p)
markup.add(y)
markup.add(ch, git)
bot.send_chat_action(uid, 'typing')
bot.send_message(uid, "Taylor Team development Telegram bot and web mastering \n\n developers : \n [King](https://telegram.me/XSUDOX) \n [Parham](https://telegram.me/UnFriendlly)", parse_mode="Markdown")
bot.send_photo(uid, open('taylor.jpg'), caption="@Taylor_Team", reply_markup=markup)
@bot.message_handler(commands=['idbot'])
def handler(m):
cid = m.chat.id
bot.send_message(cid, "My Name is ID bot \n creator and developer : [king](https://telegram.me/xsudox) \n development channel : [Tele King Team](https://telegram.me/king_ch)", parse_mode="Markdown")
bot.send_chat_action(cid, "upload_photo")
bot.send_photo(cid, open('slackbot-story1-582x436.jpg'), caption="@ID_bot_robot \xF0\x9F\x98\x9C")
@bot.message_handler(commands=['idme'])
def test_handler(m):
cid = m.from_user.id
fl = m.from_user.first_name
bot.send_message(cid, "*{}* Your ID = ```{}```".format(fl,cid), parse_mode="Markdown")
@bot.message_handler(commands=['feedback'])
def feedback(m):
senderid = m.chat.id
first = m.from_user.first_name
usr = m.from_user.username
    text = m.text
    txt = text.replace('/feedback', '')
    bot.send_message(senderid, "_Thanks, your message has been sent to the admin_", parse_mode="Markdown")
bot.send_message(config.is_sudo, "msg : {}\nid : {}\nname : {}\nUsername : @{}".format(txt,senderid,first,usr))
@bot.message_handler(commands=['j'])
def j(m):
sudo = config
tmt = m.from_user.id
idA, cid = m.chat.id, m.chat.id
if str(tmt) not in config.is_sudo:
bot.send_message(cid, "Just for admin", parse_mode="Markdown")
return
to_id = m.text.split()[1]
txt = m.text.split()[2:]
text = ' '.join(txt)
bot.send_message(to_id, "<b>\xD8\xAF\xD8\xB1\x20\xD8\xAC\xD9\x88\xD8\xA7\xD8\xA8\x20\xD8\xB4\xD9\x85\xD8\xA7 :</b>\n <code>{}</code>".format(text), parse_mode="HTML")
@bot.inline_handler(lambda query: len(query.query) == 0)
def query_text(query):
user = query.from_user.username
name = query.from_user.first_name
lname = query.from_user.last_name
uid = query.from_user.id
markup = types.InlineKeyboardMarkup()
markup.add(types.InlineKeyboardButton('\xE2\x9C\x85 {} \xE2\x9C\x85'.format(user), url="https://telegram.me/{}".format(user)))
thumb_url = 'http://millingtonlibrary.info/wp-content/uploads/2015/02/Info-I-Logo.png'
info = types.InlineQueryResultArticle('1',
'\xF0\x9F\x8C\x8E Your Info \xF0\x9F\x8C\x8E',
types.InputTextMessageContent('*Username : @{}\nYour First Name : {}\nYour Last Name : {}\nYour ID : {}*'.format(user,name,lname,uid), parse_mode="Markdown"),
reply_markup=markup,
thumb_url=thumb_url)
#pic = types.InlineQueryResultPhoto('2',
#'http://vip.opload.ir/vipdl/95/3/negative23/photo-2016-06-09-01-09-41.jpg',
#'http://vip.opload.ir/vipdl/95/3/negative23/photo-2016-06-09-01-09-41.jpg',
#input_message_content=types.InputTextMessageContent('@Taylor_Team')
#gif = types.InlineQueryResultGif('2',
# 'http://andrewtrimmer.com/wp-content/uploads/2014/09/Coming-Soon_Light-Bulbs_Cropped-Animation-Set_03c.gif',
#'http://andrewtrimmer.com/wp-content/uploads/2014/09/Coming-Soon_Light-Bulbs_Cropped-Animation-Set_03c.gif',
#gif_width=70,
#gif_height=40,
#title="Soon Update",
# input_message_content=types.InputTextMessageContent('New Update #Soon'))
tumsss = 'http://images.clipartpanda.com/contact-clipart-contact-phone-md.png'
random_text = random.randint(1, 100)
tmpp = 'http://sugartin.info/wp-content/uploads/2013/11/logo.png'
randowm = types.InlineQueryResultArticle('2', '\xD8\xB9\xD8\xAF\xD8\xAF\x20\xD8\xB4\xD8\xA7\xD9\x86\xD8\xB3\xDB\x8C\x20\xF0\x9F\x99\x88',
types.InputTextMessageContent('\xD8\xB9\xD8\xAF\xD8\xAF\x20\xD8\xB4\xD8\xA7\xD9\x86\xD8\xB3\xDB\x8C : {}'.format(random_text)), thumb_url=tmpp)
url = req.get('http://api.gpmod.ir/time/')
data = url.json()
EN = data['ENtime']
time_tmp = 'http://prek-8.com//images/time21.jpg'
timesend = types.InlineQueryResultArticle('3', 'Time / \xD8\xB3\xD8\xA7\xD8\xB9\xD8\xAA', types.InputTextMessageContent('`Tehran` : *{}*'.format(EN), parse_mode='Markdown'), thumb_url=time_tmp)
bot.answer_inline_query(query.id, [info, randowm, timesend], cache_time=5, switch_pm_text='Start bot')
@bot.message_handler(commands=['uptime'])
def ss(m):
cc = os.popen("uptime").read()
bot.send_message(m.chat.id, '{}'.format(cc))
@bot.message_handler(commands=['leave'])
def leavehandler(m):
if m.from_user.id == config.is_sudo:
bot.leave_chat(m.chat.id)
@bot.message_handler(commands=['whois'])
def whois(m):
text = m.text
repll = text.replace('/whois', '')
    import pipes  # stdlib in Python 2; quotes the user-supplied value before it reaches the shell
    whois = os.popen('whois {}'.format(pipes.quote(repll.strip()))).read()
bot.send_message(m.chat.id, '{}'.format(whois))
bot.polling(True)
#end
# _____ _ _____
#|_ _|_ _ _ _| | ___ _ __ |_ _|__ __ _ _ __ ___
# | |/ _` | | | | |/ _ \| '__| | |/ _ \/ _` | '_ ` _ \
# | | (_| | |_| | | (_) | | | | __/ (_| | | | | | |
# |_|\__,_|\__, |_|\___/|_| |_|\___|\__,_|_| |_| |_|
# |___/
#Copy right 2016 Negative - Taylor Team
#MIT license
| 50.278075
| 387
| 0.634546
|
4a16829e5944d3dbd9d0bcb1eae537fa5c910b25
| 6,522
|
py
|
Python
|
grin-py/grinbase/model/blocks.py
|
hunternsk/grin-pool
|
06cbf33acedc3cff63ed05aedda1191b2312d9a4
|
[
"Apache-2.0"
] | 130
|
2018-06-05T21:45:29.000Z
|
2022-01-13T22:37:20.000Z
|
grin-py/grinbase/model/blocks.py
|
hunternsk/grin-pool
|
06cbf33acedc3cff63ed05aedda1191b2312d9a4
|
[
"Apache-2.0"
] | 30
|
2018-07-10T22:23:50.000Z
|
2022-01-22T11:01:49.000Z
|
grin-py/grinbase/model/blocks.py
|
hunternsk/grin-pool
|
06cbf33acedc3cff63ed05aedda1191b2312d9a4
|
[
"Apache-2.0"
] | 78
|
2018-07-05T00:32:22.000Z
|
2021-05-30T07:57:50.000Z
|
#!/usr/bin/python3
import datetime
import operator
from sqlalchemy import Column, Integer, String, BigInteger, SmallInteger, Boolean, DateTime, func, asc, and_
from sqlalchemy.orm import relationship
from grinbase.dbaccess import database
from grinbase.model import Base
# This table contains a record for each block as it is first seen by our pool
class Blocks(Base):
__tablename__ = 'blocks'
# id = Column(Integer, primary_key=True)
height = Column(BigInteger, index=True, primary_key=True, autoincrement=False)
hash = Column(String(64))
version = Column(SmallInteger)
previous = Column(String(64))
timestamp = Column(DateTime, nullable=False, index=True)
output_root = Column(String(64))
range_proof_root = Column(String(64))
kernel_root = Column(String(64))
nonce = Column(String(20), nullable=False)
edge_bits = Column(SmallInteger)
total_difficulty = Column(BigInteger)
secondary_scaling = Column(BigInteger)
num_inputs = Column(Integer)
num_outputs = Column(Integer)
num_kernels = Column(Integer)
fee = Column(BigInteger)
lock_height = Column(BigInteger)
total_kernel_offset = Column(String(64))
state = Column(String(64))
def __repr__(self):
return "{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}".format(
self.hash,
self.version,
self.height,
self.previous,
self.timestamp,
self.output_root,
self.range_proof_root,
self.kernel_root,
self.nonce,
self.edge_bits,
self.total_difficulty,
self.secondary_scaling,
self.num_inputs,
self.num_outputs,
self.num_kernels,
self.fee,
self.lock_height,
self.total_kernel_offset,
self.state)
def __init__(self, hash, version, height, previous, timestamp, output_root, range_proof_root, kernel_root, nonce, edge_bits, total_difficulty, secondary_scaling, num_inputs, num_outputs, num_kernels, fee, lock_height, total_kernel_offset, state):
self.hash = hash
self.version = version
self.height = height
self.previous = previous
self.timestamp = timestamp
self.output_root = output_root
self.range_proof_root = range_proof_root
self.kernel_root = kernel_root
self.nonce = nonce
self.edge_bits = edge_bits
self.total_difficulty = total_difficulty
self.secondary_scaling = secondary_scaling
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.num_kernels = num_kernels
self.fee = fee
self.lock_height = lock_height
self.total_kernel_offset = total_kernel_offset
self.state = state
def to_json(self, fields=None):
obj = { 'hash': self.hash,
'version': self.version,
'height': self.height,
'previous': self.previous,
'timestamp': self.timestamp.timestamp(),
'output_root': self.output_root,
'range_proof_root': self.range_proof_root,
'kernel_root': self.kernel_root,
'nonce': self.nonce,
'edge_bits': self.edge_bits,
'total_difficulty': self.total_difficulty,
'secondary_scaling': self.secondary_scaling,
'num_inputs': self.num_inputs,
'num_outputs': self.num_outputs,
'num_kernels': self.num_kernels,
'fee': self.fee,
'lock_height': self.lock_height,
'total_kernel_offset': self.total_kernel_offset,
'state': self.state
}
# Filter by field(s)
if fields != None:
for k in list(obj.keys()):
if k not in fields:
del obj[k]
return obj
# Get a list of all records in the table
    # XXX Please don't call this except in testing
@classmethod
def getAll(cls):
return list(database.db.getSession().query(Blocks))
# Get the count of the number of blocks we have
@classmethod
def count(cls):
return database.db.getSession().query(func.count(Blocks.height)).scalar()
    # Get the first block we have that's > 0
@classmethod
def get_earliest(cls):
lowest = database.db.getSession().query(func.min(Blocks.height)).scalar()
return database.db.getSession().query(Blocks).filter(Blocks.height == lowest).first()
# Get a single record by nonce
@classmethod
def get_by_nonce(cls, nonce):
return database.db.getSession().query(Blocks).filter(Blocks.nonce == nonce).first()
# Get the latest block record
@classmethod
def get_latest(cls, n=None):
highest = database.db.getSession().query(func.max(Blocks.height)).scalar()
if n == None:
return database.db.getSession().query(Blocks).filter(Blocks.height == highest).first()
else:
return list(database.db.getSession().query(Blocks).filter(Blocks.height >= highest-n).order_by(asc(Blocks.height)))
# Get record(s) by height and range
@classmethod
def get_by_height(cls, height, range=None):
if range == None:
return database.db.getSession().query(Blocks).filter(Blocks.height == height).first()
else:
h_start = height-(range-1)
h_end = height
return list(database.db.getSession().query(Blocks).filter(and_(Blocks.height >= h_start, Blocks.height <= h_end)).order_by(asc(Blocks.height)))
# Get stats records falling within requested range
@classmethod
def get_by_time(cls, ts, range=None):
if range == None:
# XXX TODO: Test this
return database.db.getSession().query(Blocks).filter(Blocks.timestamp <= ts).first()
else:
ts_start = ts-range
ts_end = ts
return list(database.db.getSession().query(Blocks).filter(and_(Blocks.timestamp >= ts_start, Blocks.timestamp <= ts_end)).order_by(asc(Blocks.height)))
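# Illustrative usage sketch (not part of the original model). It assumes the pool's
# database layer has already been initialized (e.g. via lib.get_db(), as hinted by
# the commented-out main() below) so that database.db sessions are usable.
def _example_recent_blocks():
    latest = Blocks.get_latest()                      # newest block record, or None
    last_heights = Blocks.get_latest(n=10)            # blocks from the last 10 heights, ascending
    window = Blocks.get_by_height(latest.height, range=5) if latest else []
    return latest, last_heights, window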
# def main():
# PROCESS = "GrinPoolBaseModelBlockTest"
# from grinlib import lib
# config = lib.get_config()
# logger = lib.get_logger(PROCESS)
# logger.error("test")
# database = lib.get_db()
#
#
# if __name__ == "__main__":
# main()
| 37.918605
| 250
| 0.613615
|
4a1682a09eb0d7deb4c9a68617db0269db947f72
| 698
|
py
|
Python
|
utils/api_utils.py
|
samujjwaal/multilingual-chatbot
|
c7d75a1d60a8e2b726012ba87b8a3813f5e92e35
|
[
"MIT"
] | null | null | null |
utils/api_utils.py
|
samujjwaal/multilingual-chatbot
|
c7d75a1d60a8e2b726012ba87b8a3813f5e92e35
|
[
"MIT"
] | null | null | null |
utils/api_utils.py
|
samujjwaal/multilingual-chatbot
|
c7d75a1d60a8e2b726012ba87b8a3813f5e92e35
|
[
"MIT"
] | null | null | null |
# import libraries
import json
import requests
# huggingface API key
API_TOKEN = "Replace with your API"
API_URL = "https://api-inference.huggingface.co/models"
headers = {"Authorization": f"Bearer {API_TOKEN}"}
# to generate payload for API call
def query(payload, model_name):
data = json.dumps(payload)
response = requests.request(
"POST", f"{API_URL}/{model_name}", headers=headers, data=data
)
return json.loads(response.content.decode("utf-8"))
# to execute the huggingface inference API
def translate(text, model_name):
payload = {"inputs": text}
translation = query(payload, model_name)[0]["translation_text"]
return translation
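# Illustrative usage sketch (not part of the original module). The model id below is
# an assumption -- any translation model served by the Hugging Face Inference API
# could be substituted -- and API_TOKEN above must hold a valid token.
if __name__ == "__main__":
    example = translate("Bonjour le monde", "Helsinki-NLP/opus-mt-fr-en")
    print(example)  # expected to print an English translation such as "Hello world"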
| 29.083333
| 70
| 0.691977
|
4a1684474ad19e2b204ad283780350ea17014d66
| 309
|
py
|
Python
|
181/order.py
|
xtakacsx/bitesofpy
|
91487cbf58af5eb58a0343ff2231a90c1032acb0
|
[
"MIT"
] | 1
|
2020-01-10T00:05:34.000Z
|
2020-01-10T00:05:34.000Z
|
181/order.py
|
xtakacsx/bitesofpy
|
91487cbf58af5eb58a0343ff2231a90c1032acb0
|
[
"MIT"
] | null | null | null |
181/order.py
|
xtakacsx/bitesofpy
|
91487cbf58af5eb58a0343ff2231a90c1032acb0
|
[
"MIT"
] | null | null | null |
import bisect
class OrderedList:
def __init__(self):
self._numbers = []
def add(self, num):
# self._numbers.insert(bisect.bisect(self._numbers, num), num)
bisect.insort(self._numbers, num)
def __str__(self):
return ', '.join(str(num) for num in self._numbers)
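# Illustrative usage sketch (not part of the original bite solution): numbers are
# kept sorted as they are added, so printing gives them in ascending order.
if __name__ == "__main__":
    ordered = OrderedList()
    for n in (3, 1, 2):
        ordered.add(n)
    print(ordered)  # -> 1, 2, 3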
| 20.6
| 70
| 0.624595
|
4a1684723da1c91143c40c40ac5e80127ef5154b
| 10,412
|
py
|
Python
|
neural_compressor/ux/utils/parser.py
|
mdfaijul/neural-compressor
|
e1d59da3790f9ff9647a21d2ff23da98c78a9682
|
[
"Apache-2.0"
] | 100
|
2020-12-01T02:40:12.000Z
|
2021-09-09T08:14:22.000Z
|
neural_compressor/ux/utils/parser.py
|
mdfaijul/neural-compressor
|
e1d59da3790f9ff9647a21d2ff23da98c78a9682
|
[
"Apache-2.0"
] | 25
|
2021-01-05T00:16:17.000Z
|
2021-09-10T03:24:01.000Z
|
neural_compressor/ux/utils/parser.py
|
mdfaijul/neural-compressor
|
e1d59da3790f9ff9647a21d2ff23da98c78a9682
|
[
"Apache-2.0"
] | 25
|
2020-12-01T19:07:08.000Z
|
2021-08-30T14:20:07.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsers for log files."""
import re
from abc import ABC
from typing import Any, Dict, List, Union
from neural_compressor.ux.components.benchmark import Benchmarks
from neural_compressor.ux.utils.exceptions import InternalException
from neural_compressor.ux.utils.logger import log
from neural_compressor.ux.utils.templates.metric import Metric
ROUND_PRECISION = 2
class Parser(ABC):
"""Parser abstract class."""
def __init__(self, logs: list) -> None:
"""Initialize parser."""
self._logs = logs
self.metric = Metric()
def process(self) -> Dict[str, Any]:
"""Process log files."""
raise NotImplementedError
@property
def patterns(self) -> dict:
"""Set patterns to get metrics from lines."""
raise NotImplementedError
class OptimizationParser(Parser):
"""Parser class is responsible for parsing optimization log files."""
def process(self) -> Dict[str, Any]:
"""Process files."""
for log_file in self._logs:
log.debug(f"Read from {log_file}")
with open(log_file) as f:
for line in f:
for key in self.patterns:
prog = re.compile(self.patterns[key])
match = prog.search(line)
if match and match.groupdict().get(key):
requested_value = str(match.groupdict().get(key))
self.metric.insert_data(key, requested_value)
parsed_data: Dict[str, Any] = self.metric.serialize() # type: ignore
return parsed_data
@property
def patterns(self) -> dict:
"""Set patterns to get metrics from lines."""
return {
"acc_input_model": r".*FP32 baseline is:\s+\[("
r"(Accuracy:\s+(?P<acc_input_model>(\d+(\.\d+)?)))?"
r"(Duration\s+\(seconds\):\s+(?P<duration>(\d+(\.\d+)?)))?"
r"(Memory footprint\s+\(MB\):\s+(?P<mem_footprint>(\d+(\.\d+)?)))?(,\s+)?"
r")*\]",
"acc_optimized_model": r".*Best tune result is:\s+\[("
r"(Accuracy:\s+(?P<acc_optimized_model>(\d+(\.\d+)?)))?"
r"(Duration\s+\(seconds\):\s+(?P<duration>(\d+(\.\d+)?)))?"
r"(Memory footprint\s+\(MB\):\s+(?P<mem_footprint>(\d+(\.\d+)?)))?(,\s+)?"
r")*\]",
"path_optimized_model": r".*Save quantized model to (?P<path_optimized_model>.*)\.",
}
class PerformanceParser(Parser):
"""Parser class is responsible for parsing performance benchmark log files."""
def process(self) -> Dict[str, Any]:
"""Process files."""
partial: Dict[str, List] = {}
for log_file in self._logs:
log.debug(f"Read from {log_file}")
with open(log_file) as f:
for line in f:
for key in self.patterns:
prog = re.compile(self.patterns[key])
match = prog.search(line)
if not match:
continue
metric_name = f"perf_{key}_input_model"
self.metric.insert_data(metric_name, match.group(1))
converted_value = getattr(self.metric, metric_name)
parse_result = {
key: converted_value,
}
partial = self.update_partial(partial, parse_result)
return self.summarize_partial(partial)
@staticmethod
def update_partial(
partial: Dict[str, List],
parsed_result: Dict[str, Union[float, int]],
) -> Dict[str, List]:
"""Update partial entries."""
for key, value in parsed_result.items():
if key not in partial:
partial[key] = []
partial[key].append(value)
return partial
def summarize_partial(self, partial: dict) -> dict:
"""Calculate final values."""
summary = {}
for key, value in partial.items():
summarized_value = self.summarize_value(key, value)
for precision in ["input_model", "optimized_model"]:
metric_name = f"perf_{key}_{precision}"
summary[metric_name] = summarized_value
return summary
@staticmethod
def summarize_value(key: str, value: list) -> Union[float, int]:
"""Calculate final value."""
if key == "latency":
return round(sum(value) / len(value), 4)
if key == "throughput":
return round(sum(value), 4)
return value[0]
@property
def patterns(self) -> dict:
"""Set patterns to get metrics from lines."""
return {
"throughput": r"Throughput:\s+(\d+(\.\d+)?)",
"latency": r"Latency:\s+(\d+(\.\d+)?)",
}
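# Illustrative sketch (not part of the original parser): how partial benchmark results
# are folded together. Latency samples are averaged, throughput samples are summed,
# and any other metric keeps its first observed value. The numbers are made up.
def _example_performance_summary():
    latency = PerformanceParser.summarize_value("latency", [12.0, 14.0])        # -> 13.0
    throughput = PerformanceParser.summarize_value("throughput", [50.0, 55.0])  # -> 105.0
    return latency, throughput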
class AccuracyParser(Parser):
"""Parser class is responsible for parsing accuracy benchmark log files."""
def process(self) -> Dict[str, Any]:
"""Process accuracy logs."""
for log_file in self._logs:
log.debug(f"Read from {log_file}")
with open(log_file) as f:
for line in f:
for key in self.patterns:
prog = re.compile(self.patterns[key])
match = prog.search(line)
if match:
for precision in ["input_model", "optimized_model"]:
metric_name = f"acc_{precision}"
self.metric.insert_data(metric_name, match.group(1))
parsed_data: Dict[str, Any] = self.metric.serialize() # type: ignore
return parsed_data
@property
def patterns(self) -> dict:
"""Set patterns to get metrics from lines."""
return {
Benchmarks.ACC: r"Accuracy is (\d+(\.\d+)?)",
}
class BenchmarkParserFactory:
"""Benchmark parser factory."""
@staticmethod
def get_parser(benchmark_mode: str, logs: List[str]) -> Parser:
"""Get benchmark parser for specified mode."""
parser_map = {
Benchmarks.PERF: PerformanceParser,
Benchmarks.ACC: AccuracyParser,
}
parser = parser_map.get(benchmark_mode, None)
if parser is None:
raise InternalException(f"Could not find optimization class for {benchmark_mode}")
return parser(logs)
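# Illustrative usage sketch (not part of the original module). The log path is a
# hypothetical placeholder; in practice the UX layer supplies real benchmark logs.
def _example_benchmark_parsing():
    parser = BenchmarkParserFactory.get_parser(Benchmarks.PERF, ["/tmp/benchmark.log"])
    return parser.process()  # e.g. {"perf_throughput_input_model": ..., "perf_latency_input_model": ...}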
class ProfilingParser(Parser):
"""Parser class is responsible for parsing profiling log files."""
@staticmethod
def unify_time(string_value: str) -> float:
"""
Unify time with unit to micro seconds float value.
:param string_value: time value with unit, e.g. 125.6ms
:type string_value: str
:return: time value in microseconds
:rtype: float
"""
search = re.search(r"(\d+(\.\d+)?)\s*(\w+)", string_value)
if not search:
raise Exception(f"Coud not parse {string_value}")
value = round(float(search.group(1)), ROUND_PRECISION)
unit = search.group(3)
unit_map = {"s": 10e5, "sec": 10e5, "ms": 10e2, "us": 1, "ns": 10e-4}
unit_modifier = unit_map.get(unit, None)
if unit_modifier is None:
raise Exception(f'Unit "{unit}" not recognized.')
return value * unit_modifier
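    # Illustrative examples (not part of the original parser) of the conversion above;
    # every value is normalized to microseconds:
    #   unify_time("1.5ms") -> 1500.0
    #   unify_time("2s")    -> 2000000.0
    #   unify_time("800ns") -> 0.8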
def process(self) -> Dict[str, Any]:
"""Process profiling logs."""
parsed_data: List[dict] = []
for log_file in self._logs:
log.debug(f"Read from {log_file}")
with open(log_file) as file:
lines = file.readlines()
for line in reversed(lines):
header_search: Any = re.search(self.patterns.get("profiling_header"), line)
if header_search:
break
entry_search: Any = re.search(self.patterns.get("profiling_entry"), line)
if entry_search:
node_name = str(entry_search.group(1))
total_execution_time = str(entry_search.group(2))
accelerator_execution_time = str(entry_search.group(8))
cpu_execution_time = str(entry_search.group(14))
op_occurence = {
"run": int(entry_search.group(20)),
"defined": int(entry_search.group(21)),
}
parsed_data.append(
{
"node_name": node_name,
"total_execution_time": self.unify_time(total_execution_time),
"accelerator_execution_time": self.unify_time(
accelerator_execution_time,
),
"cpu_execution_time": self.unify_time(cpu_execution_time),
"op_occurence": op_occurence,
},
)
parsed_data.reverse()
self.metric.profiling_data = parsed_data
performance_data: Dict[str, Any] = self.metric.serialize() # type: ignore
return performance_data
@property
def patterns(self) -> dict:
"""Set patterns to get metrics from lines."""
return {
"profiling_entry": r"^(\S+)\s+(\d+(\.\d+)?\w+)\s\((\d+(\.\d+)?)%,"
r"\s(\d+(\.\d+)?)%\),\s+(\d+(\.\d+)?\w+)\s\((\d+(\.\d+)?)%,"
r"\s(\d+(\.\d+)?)%\),\s+(\d+(\.\d+)?\w+)\s\((\d+(\.\d+)?)%,"
r"\s(\d+(\.\d+)?)%\),\s+(\d+)\|(\d+)$",
"profiling_header": r"node name \| total execution time \| accelerator execution time "
r"\| cpu execution time \| op occurrence \(run\|defined\)",
}
| 37.861818
| 99
| 0.543892
|
4a16868c7faaf29ad09f2bee65cf07fa5d05fc9f
| 15,964
|
py
|
Python
|
gpt2generator.py
|
sokonashi/gensokyo-bot
|
cd96cc18635ff7888b10ffcd4c07966d0f65eba9
|
[
"MIT"
] | null | null | null |
gpt2generator.py
|
sokonashi/gensokyo-bot
|
cd96cc18635ff7888b10ffcd4c07966d0f65eba9
|
[
"MIT"
] | null | null | null |
gpt2generator.py
|
sokonashi/gensokyo-bot
|
cd96cc18635ff7888b10ffcd4c07966d0f65eba9
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from typing import Union
import torch
import torch.nn.functional as F
import re
from gpt2 import GPT2LMHeadModelExperimental
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from getconfig import settings, logger
from utils import cut_trailing_sentence, output, clear_lines, format_result, use_ptoolkit
if not settings.getboolean('force-cpu') and not torch.cuda.is_available():
logger.warning('CUDA is not available, you are limited to CPU only.')
DTYPE = torch.float32 if ((not torch.cuda.is_available()) or settings.getboolean('force-cpu')) else torch.float32
#DTYPE = torch.float32
logger.info('Cuda Available: {} Force CPU: {} Precision: {}'.format(torch.cuda.is_available(),
settings.getboolean('force-cpu'),
'32-bit' if DTYPE == torch.float32 else '16-bit'))
# warnings.filterwarnings("ignore")
MODEL_CLASSES = {
"gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
"gpt2-experimental": (GPT2LMHeadModelExperimental, GPT2Tokenizer),
}
def getTokens(tokenizer, l):
    # small helper; simply tokenizes the given text
    return tokenizer.encode(l)
# the tokenizer does not preserve white space at the front of the string.
# so we will append something else to the front of the string and then remove it after tokenization
def hackyEncode(tokenizer, s):
return tokenizer.encode('====\n ' + s)[2:]
def hackyWhiteSpaceCutter(prompt):
return re.search(r'\s*$', prompt).group(0)
def memory_merge(prompt, context, tokenizer, maxHistory=1024):
assert (prompt + context)
# print(prompt+context)
# logger.debug('RAW TEXT INPUT IS:`%r`', context)
# the tokenizer is kind of broken for the first input, especially if it includes white space. Same with any trailing white space on the last output.
# I'm going with the add prefix option but I'm not sure it's quite right
prompt_tokens = tokenizer.encode(prompt, add_special_tokens=False, add_prefix_space=True)
context_tokens = hackyEncode(tokenizer, hackyWhiteSpaceCutter(prompt) + context)
context_tokens = context_tokens[-(maxHistory - len(prompt_tokens)):]
# logger.debug('DECODED CONTEXT TOKENS: `%r`', tokenizer.convert_ids_to_tokens(context_tokens))
prompt_tokens.extend(context_tokens)
context_tokens = prompt_tokens
# logger.debug('DECODED OUTPUT IS: `%r`', tokenizer.decode(context_tokens, clean_up_tokenization_spaces=False))
# this is a hack and it should be up to the sampler to deal with max size
if len(context_tokens) > maxHistory:
logger.error("CONTEXT IS TOO LONG ERROR")
context_tokens = context_tokens[-maxHistory:]
return context_tokens
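# Illustrative sketch (not part of the original module): memory_merge keeps the full
# prompt tokens and only as many trailing context tokens as still fit in maxHistory.
# The tokenizer argument is an assumption; any loaded GPT2Tokenizer instance works.
def _example_memory_merge(tokenizer):
    merged = memory_merge("You are an adventurer.", " You enter the cave.", tokenizer, maxHistory=32)
    return tokenizer.decode(merged, clean_up_tokenization_spaces=False)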
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size x vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(
dim=-1, index=sorted_indices, src=sorted_indices_to_remove
)
logits[indices_to_remove] = filter_value
return logits
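# Illustrative sketch (not part of the original module): applying the filter to a single
# fake distribution. With top_k=2 only the two largest logits survive; everything else
# is pushed to -inf before sampling.
def _example_top_k_filtering():
    logits = torch.tensor([1.0, 3.0, 0.5, 2.0])
    filtered = top_k_top_p_filtering(logits.clone(), top_k=2)
    return filtered  # tensor([-inf, 3.0000, -inf, 2.0000])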
# length should be max length, other settings should be removed, device should not be set
# we could possibly optimize this by having larger batch sizes but it would likely double or more the memory requirements
def sample_sequence(
model,
length,
context,
temperature=1,
top_k=0,
top_p=0.9,
repetition_penalty=1.0,
device="cpu",
stop_tokens=None,
tokenizer=None
):
"""Actually generate the tokens"""
logger.debug(
'temp: {} top_k: {} top_p: {} rep-pen: {}'.format(temperature, top_k, top_p, repetition_penalty))
context_tokens = context
context = torch.tensor(context, dtype=torch.long, device=device)
# context = context.repeat(num_samples, 1)
generated = context
USE_PAST = True
next_token = context
pasts = None
clines = 0
with torch.no_grad():
for j in range(length):
# why would we ever not use past?
# is generated and next_token always same thing?
if not USE_PAST:
input_ids_next = generated
pasts = None
else:
input_ids_next = next_token
# Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states)
logits, pasts = model(input_ids=input_ids_next, past=pasts)
logits = logits[-1, :].float()
# Originally the order was Temperature, Repetition Penalty, then top-k/p
if settings.getboolean('top-p-first'):
logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
logits = logits / (temperature if temperature > 0 else 1.0)
# repetition penalty from CTRL (https://arxiv.org/abs/1909.05858)
for k in set(generated.tolist()):
logits[k] /= repetition_penalty
if not settings.getboolean('top-p-first'):
logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
if temperature == 0: # greedy sampling:
next_token = torch.argmax(logits, dim=-1).unsqueeze(-1)
else:
next_token = torch.multinomial(
F.softmax(logits, dim=-1), num_samples=1
)
generated = torch.cat((generated, next_token), dim=-1)
# Decode into plain text
o = generated[len(context_tokens):].tolist()
generated.text = tokenizer.decode(
o, clean_up_tokenization_spaces=False, skip_special_tokens=True
)
if use_ptoolkit():
clear_lines(clines)
generated.text = format_result(generated.text)
clines = output(generated.text, "ai-text")
if (
(stop_tokens is not None)
and (j > 4)
and (next_token[0] in stop_tokens)
):
                # Why the minimum tokens, j>X? Because sometimes the model starts with whitespace, which we will strip away anyway. Having a minimum amount of tokens before we stop usually means we don't just stop because of "\n " or similar
logger.debug(
"Stopping generation as we found stop tokens. One of `%s`, in '%s'. token generated `%s`",
stop_tokens,
next_token,
j,
)
break
clear_lines(clines)
return generated
def truncate_multiple_sequences(seqs, max_len=100):
"""Truncate multiple sequences, longest first, removing first."""
while sum(len(s) for s in seqs) > max_len:
longest = sorted(seqs, key=len, reverse=True)[0]
longest.pop(0)
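# Illustrative sketch (not part of the original module): the longest sequence is trimmed
# from the front, one token at a time, until the combined length fits under max_len.
def _example_truncate():
    seqs = [[1, 2, 3, 4, 5, 6], [7, 8, 9]]
    truncate_multiple_sequences(seqs, max_len=6)
    return seqs  # -> [[4, 5, 6], [7, 8, 9]]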
class GPT2Generator:
def __init__(
self, generate_num=60, temperature=0.4, top_k=40, top_p=0.9, dtype=DTYPE,
model_path: Union[str, Path] = Path('models', 'pytorch-gpt2-xl-aid2-v5'), repetition_penalty=1,
):
self.generate_num = generate_num
self.temp = temperature
self.top_k = top_k
self.top_p = top_p
self.samples = 1
self.dtype = dtype
self.repetition_penalty = repetition_penalty
self.batch_size = 1
self.max_history_tokens = 1024 - generate_num
self.stop_token = "<|endoftext|>"
if isinstance(model_path, str):
self.checkpoint_path = model_path
logger.warning(
f"Using DEBUG MODE! This will load one of the generic (non-finetuned) GPT2 models. "
f"Selected: {model_path}")
elif isinstance(model_path, Path):
self.checkpoint_path = model_path
if not self.checkpoint_path.exists():
raise FileNotFoundError(
"Could not find {} Make sure to download a pytorch model and put it in the models directory!".format(
str(self.checkpoint_path)))
else:
raise ValueError(f"model_path must be either str or Path, got {type(model_path)}")
self.device = torch.device("cuda" if self.dtype == torch.float16 else "cuda")
logger.info(
"Using device={}, checkpoint={}, dtype={}".format(self.device, str(self.checkpoint_path), self.dtype))
# Load tokenizer and model
model_class, tokenizer_class = MODEL_CLASSES["gpt2-experimental"] if settings.getboolean(
"gpt2_experimental") else MODEL_CLASSES["gpt2"]
self.tokenizer = tokenizer_class.from_pretrained(str(self.checkpoint_path))
self.model = model_class.from_pretrained(str(self.checkpoint_path))
self.model.to(self.dtype).to(self.device)
self.model.eval()
def sample_sequence(
self, context_tokens=None, top_k=None, top_p=None, repetition_penalty=None, generate_num=None,
temperature=None, stop_tokens=None
):
assert (top_k is not None)
assert (temperature is not None)
assert (top_p)
assert (repetition_penalty)
generate_num = generate_num if (generate_num is not None) else self.generate_num
temperature = temperature if (temperature is not None) else self.temp
top_k = top_k if top_k is not None else self.top_k
top_p = top_p if top_p is not None else self.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.repetition_penalty
out = sample_sequence(
model=self.model,
context=context_tokens,
length=generate_num,
# context=self.context,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
device=self.device,
stop_tokens=stop_tokens,
tokenizer=self.tokenizer
# batch_size=self.batch_size,
)
return out
def result_replace(self, result, allow_action=False):
# logger.debug("BEFORE RESULT_REPLACE: `%s`", repr(result))
result = cut_trailing_sentence(result, allow_action=allow_action)
if len(result) == 0:
return ""
first_letter_capitalized = result[0].isupper()
result = result.replace('."', '".')
result = result.replace("#", "")
result = result.replace("*", "")
# TODO look at this I think blank lines should be fine or blacklisted at generation time
result = result.replace("\n\n", "\n")
# result = first_to_second_person(result)
if not first_letter_capitalized:
result = result[0].lower() + result[1:]
# this is annoying since we can already see the AIs output
# logger.debug( "AFTER RESULT_REPLACE: `%r`. allow_action=%r", repr(result), allow_action)
return result
def generate_raw(
self, context, prompt='', generate_num=None, temperature=None, top_k=None, top_p=None,
repetition_penalty=None, stop_tokens=None
):
assert (top_k is not None)
assert (temperature is not None)
assert (top_p)
assert (repetition_penalty)
context_tokens = memory_merge(prompt, context, self.tokenizer, self.max_history_tokens)
logger.debug(
"Text passing into model `%r`",
self.tokenizer.decode(
context_tokens,
clean_up_tokenization_spaces=True,
# skip_special_tokens=True,
),
)
generated = 0
text = ""
for _ in range(self.samples // self.batch_size):
out = self.sample_sequence(
context_tokens,
generate_num=generate_num,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
stop_tokens=stop_tokens,
)
text += out.text
generated += 1
# disabled clean up of spaces, see what effect this has TODO
if self.stop_token:
index = text.find(self.stop_token)
if index == -1:
index = None
text = text[:index]
if stop_tokens is not None:
for stop_token in stop_tokens:
index = text.find(self.stop_token)
if index == -1:
index = None
text = text[:index]
return text
def generate(self, context, prompt='', temperature=None, top_p=None, top_k=None, repetition_penalty=None, depth=0):
assert (top_k is not None)
assert (temperature is not None)
assert (top_p)
assert (repetition_penalty)
# logger.debug("BEFORE PROMPT_REPLACE: `%r`", prompt)
# prompt = [self.prompt_replace(p) for p in prompt]
# logger.debug("AFTER PROMPT_REPLACE is: `%r`", repr(prompt))
assert (prompt + context)
text = self.generate_raw(
context, prompt, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty,
stop_tokens=self.tokenizer.encode(["<|endoftext|>", ">"])
)
logger.debug("Generated result is: `%r`", repr(text))
result = self.result_replace(text)
if (depth > 6) and len(result) == 0:
            # Sometimes it keeps generating a story starting with an action (">"); if it's tried a few times and it keeps
# happening, lets let it keep action text which starts in ">"
# We could just blacklist that token and force it to generate something else. TODO
result = self.result_replace(text, allow_action=True)
logger.info(
"Model generated empty text after formatting `%r`. Trying to format less with allow_action=True. `%r`",
text,
result,
)
# same here as above
if len(result) == 0:
if depth < 20:
logger.info("Model generated empty text trying again %r", depth)
return self.generate(
                    context, prompt, temperature=temperature, top_p=top_p, top_k=top_k,
repetition_penalty=repetition_penalty, depth=depth + 1
)
else:
                logger.warning(
"Model generated empty text %r times. Try another action", depth
)
return result
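# Illustrative usage sketch (not part of the original module). The default model path
# and the sampling settings below are assumptions; a real pytorch GPT-2 checkpoint
# directory must exist under models/ for this to run.
if __name__ == "__main__":
    generator = GPT2Generator(generate_num=40, temperature=0.7)
    continuation = generator.generate(
        context="You stand before the ruined gate.",
        prompt="The night is cold.",
        temperature=0.7, top_p=0.9, top_k=40, repetition_penalty=1.1,
    )
    print(continuation)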
| 42.684492
| 239
| 0.616637
|
4a16870aafa6e4cf4d9be5ffe78209d5bbe4ace5
| 74,449
|
py
|
Python
|
pythonwhois/parse.py
|
ShakedShechter/python-whois
|
b0e710f73f5046cbb948351cc00a2c267b84e36a
|
[
"WTFPL"
] | null | null | null |
pythonwhois/parse.py
|
ShakedShechter/python-whois
|
b0e710f73f5046cbb948351cc00a2c267b84e36a
|
[
"WTFPL"
] | null | null | null |
pythonwhois/parse.py
|
ShakedShechter/python-whois
|
b0e710f73f5046cbb948351cc00a2c267b84e36a
|
[
"WTFPL"
] | null | null | null |
from __future__ import print_function
import re, sys, datetime, csv, pkgutil
from . import net, shared
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
def pkgdata(name):
data = pkgutil.get_data("pythonwhois", name)
if sys.version_info < (3, 0):
return data
else:
return data.decode("utf-8")
def read_dataset(filename, destination, abbrev_key, name_key, is_dict=False):
try:
if is_dict:
reader = csv.DictReader(pkgdata(filename).splitlines())
else:
reader = csv.reader(pkgdata(filename).splitlines())
for line in reader:
destination[line[abbrev_key]] = line[name_key]
except IOError as e:
pass
airports = {}
countries = {}
states_au = {}
states_us = {}
states_ca = {}
try:
reader = csv.reader(pkgdata("airports.dat").splitlines())
for line in reader:
airports[line[4]] = line[2]
airports[line[5]] = line[2]
except IOError as e:
# The distributor likely removed airports.dat for licensing reasons. We'll just leave an empty dict.
pass
read_dataset("countries.dat", countries, "iso", "name", is_dict=True)
read_dataset("countries3.dat", countries, "iso3", "name", is_dict=True)
read_dataset("states_au.dat", states_au, 0, 1)
read_dataset("states_us.dat", states_us, "abbreviation", "name", is_dict=True)
read_dataset("states_ca.dat", states_ca, "abbreviation", "name", is_dict=True)
def precompile_regexes(source, flags=0):
return [re.compile(regex, flags) for regex in source]
grammar = {
"_data": {
'id': ['Domain ID:[ ]*(?P<val>.+)'],
'status': ['\[Status\]\s*(?P<val>.+)',
'Status\s*:\s?(?P<val>.+)',
'\[State\]\s*(?P<val>.+)',
'^state:\s*(?P<val>.+)'],
'creation_date': ['\[Created on\]\s*(?P<val>.+)',
'Created on[.]*: [a-zA-Z]+, (?P<val>.+)',
'Creation Date:\s?(?P<val>.+)',
'Creation date\s*:\s?(?P<val>.+)',
'Registration Date:\s?(?P<val>.+)',
'Created Date:\s?(?P<val>.+)',
'Created on:\s?(?P<val>.+)',
'Created on\s?[.]*:\s?(?P<val>.+)\.',
'Date Registered\s?[.]*:\s?(?P<val>.+)',
'Domain Created\s?[.]*:\s?(?P<val>.+)',
'Domain registered\s?[.]*:\s?(?P<val>.+)',
'Domain record activated\s?[.]*:\s*?(?P<val>.+)',
'Record created on\s?[.]*:?\s*?(?P<val>.+)',
'Record created\s?[.]*:?\s*?(?P<val>.+)',
'Created\s?[.]*:?\s*?(?P<val>.+)',
'Registered on\s?[.]*:?\s*?(?P<val>.+)',
'Registered\s?[.]*:?\s*?(?P<val>.+)',
'Domain Create Date\s?[.]*:?\s*?(?P<val>.+)',
'Domain Registration Date\s?[.]*:?\s*?(?P<val>.+)',
'created:\s*(?P<val>.+)',
'\[Registered Date\]\s*(?P<val>.+)',
'created-date:\s*(?P<val>.+)',
'Domain Name Commencement Date: (?P<val>.+)',
'registered:\s*(?P<val>.+)',
'registration:\s*(?P<val>.+)'],
'expiration_date': ['\[Expires on\]\s*(?P<val>.+)',
'Registrar Registration Expiration Date:[ ]*(?P<val>.+)-[0-9]{4}',
'Expires on[.]*: [a-zA-Z]+, (?P<val>.+)',
'Expiration Date:\s?(?P<val>.+)',
'Expiration date\s*:\s?(?P<val>.+)',
'Expires on:\s?(?P<val>.+)',
'Expires on\s?[.]*:\s?(?P<val>.+)\.',
'Exp(?:iry)? Date\s?[.]*:\s?(?P<val>.+)',
'Expiry\s*:\s?(?P<val>.+)',
'Domain Currently Expires\s?[.]*:\s?(?P<val>.+)',
'Record will expire on\s?[.]*:\s?(?P<val>.+)',
'Domain expires\s?[.]*:\s*?(?P<val>.+)',
'Record expires on\s?[.]*:?\s*?(?P<val>.+)',
'Record expires\s?[.]*:?\s*?(?P<val>.+)',
'Expires\s?[.]*:?\s*?(?P<val>.+)',
'Expire Date\s?[.]*:?\s*?(?P<val>.+)',
'Expired\s?[.]*:?\s*?(?P<val>.+)',
'Domain Expiration Date\s?[.]*:?\s*?(?P<val>.+)',
'paid-till:\s*(?P<val>.+)',
'expiration_date:\s*(?P<val>.+)',
'expire-date:\s*(?P<val>.+)',
'renewal:\s*(?P<val>.+)',
'expire:\s*(?P<val>.+)'],
'updated_date': ['\[Last Updated\]\s*(?P<val>.+)',
'Record modified on[.]*: (?P<val>.+) [a-zA-Z]+',
'Record last updated on[.]*: [a-zA-Z]+, (?P<val>.+)',
'Updated Date:\s?(?P<val>.+)',
'Updated date\s*:\s?(?P<val>.+)',
#'Database last updated on\s?[.]*:?\s*?(?P<val>.+)\s[a-z]+\.?',
'Record last updated on\s?[.]*:?\s?(?P<val>.+)\.',
'Domain record last updated\s?[.]*:\s*?(?P<val>.+)',
'Domain Last Updated\s?[.]*:\s*?(?P<val>.+)',
'Last updated on:\s?(?P<val>.+)',
'Date Modified\s?[.]*:\s?(?P<val>.+)',
'Last Modified\s?[.]*:\s?(?P<val>.+)',
'Domain Last Updated Date\s?[.]*:\s?(?P<val>.+)',
'Record last updated\s?[.]*:\s?(?P<val>.+)',
'Modified\s?[.]*:\s?(?P<val>.+)',
'(C|c)hanged:\s*(?P<val>.+)',
'last_update:\s*(?P<val>.+)',
'Last Update\s?[.]*:\s?(?P<val>.+)',
'Last updated on (?P<val>.+) [a-z]{3,4}',
'Last updated:\s*(?P<val>.+)',
'last-updated:\s*(?P<val>.+)',
'\[Last Update\]\s*(?P<val>.+) \([A-Z]+\)',
'Last update of whois database:\s?[a-z]{3}, (?P<val>.+) [a-z]{3,4}'],
'registrar': ['registrar:\s*(?P<val>.+)',
'Registrar:\s*(?P<val>.+)',
'Sponsoring Registrar Organization:\s*(?P<val>.+)',
'Registered through:\s?(?P<val>.+)',
'Registrar Name[.]*:\s?(?P<val>.+)',
'Record maintained by:\s?(?P<val>.+)',
'Registration Service Provided By:\s?(?P<val>.+)',
'Registrar of Record:\s?(?P<val>.+)',
'Domain Registrar :\s?(?P<val>.+)',
'Registration Service Provider: (?P<val>.+)',
'\tName:\t\s(?P<val>.+)'],
'whois_server': ['Whois Server:\s?(?P<val>.+)',
'Registrar Whois:\s?(?P<val>.+)'],
'nameservers': ['Name Server:[ ]*(?P<val>[^ ]+)',
'Nameservers:[ ]*(?P<val>[^ ]+)',
'(?<=[ .]{2})(?P<val>([a-z0-9-]+\.)+[a-z0-9]+)(\s+([0-9]{1,3}\.){3}[0-9]{1,3})',
'nameserver:\s*(?P<val>.+)',
'nserver:\s*(?P<val>[^[\s]+)',
'Name Server[.]+ (?P<val>[^[\s]+)',
'Hostname:\s*(?P<val>[^\s]+)',
'DNS[0-9]+:\s*(?P<val>.+)',
' DNS:\s*(?P<val>.+)',
'ns[0-9]+:\s*(?P<val>.+)',
'NS [0-9]+\s*:\s*(?P<val>.+)',
'\[Name Server\]\s*(?P<val>.+)',
'(?<=[ .]{2})(?P<val>[a-z0-9-]+\.d?ns[0-9]*\.([a-z0-9-]+\.)+[a-z0-9]+)',
'(?<=[ .]{2})(?P<val>([a-z0-9-]+\.)+[a-z0-9]+)(\s+([0-9]{1,3}\.){3}[0-9]{1,3})',
'(?<=[ .]{2})[^a-z0-9.-](?P<val>d?ns\.([a-z0-9-]+\.)+[a-z0-9]+)',
'Nserver:\s*(?P<val>.+)'],
'emails': ['(?P<val>[\w.-]+@[\w.-]+\.[\w]{2,6})', # Really need to fix this, much longer TLDs now exist...
'(?P<val>[\w.-]+\sAT\s[\w.-]+\sDOT\s[\w]{2,6})']
},
"_dateformats": (
'(?P<day>[0-9]{1,2})[./ -](?P<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[./ -](?P<year>[0-9]{4}|[0-9]{2})'
'(\s+(?P<hour>[0-9]{1,2})[:.](?P<minute>[0-9]{1,2})[:.](?P<second>[0-9]{1,2}))?',
'[a-z]{3}\s(?P<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[./ -](?P<day>[0-9]{1,2})(\s+(?P<hour>[0-9]{1,2})[:.](?P<minute>[0-9]{1,2})[:.](?P<second>[0-9]{1,2}))?\s[a-z]{3}\s(?P<year>[0-9]{4}|[0-9]{2})',
'[a-zA-Z]+\s(?P<day>[0-9]{1,2})(?:st|nd|rd|th)\s(?P<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|May|June|July|August|September|October|November|December)\s(?P<year>[0-9]{4})',
'(?P<year>[0-9]{4})[./-]?(?P<month>[0-9]{2})[./-]?(?P<day>[0-9]{2})(\s|T|/)((?P<hour>[0-9]{1,2})[:.-](?P<minute>[0-9]{1,2})[:.-](?P<second>[0-9]{1,2}))',
'(?P<year>[0-9]{4})[./-](?P<month>[0-9]{1,2})[./-](?P<day>[0-9]{1,2})',
'(?P<day>[0-9]{1,2})[./ -](?P<month>[0-9]{1,2})[./ -](?P<year>[0-9]{4}|[0-9]{2})',
'(?P<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) (?P<day>[0-9]{1,2}),? (?P<year>[0-9]{4})',
'(?P<day>[0-9]{1,2})-(?P<month>January|February|March|April|May|June|July|August|September|October|November|December)-(?P<year>[0-9]{4})',
),
"_months": {
'jan': 1,
'january': 1,
'feb': 2,
'february': 2,
'mar': 3,
'march': 3,
'apr': 4,
'april': 4,
'may': 5,
'jun': 6,
'june': 6,
'jul': 7,
'july': 7,
'aug': 8,
'august': 8,
'sep': 9,
'sept': 9,
'september': 9,
'oct': 10,
'october': 10,
'nov': 11,
'november': 11,
'dec': 12,
'december': 12
}
}
def preprocess_regex(regex):
# Fix for #2; prevents a ridiculous amount of varying size permutations.
regex = re.sub(r"\\s\*\(\?P<([^>]+)>\.\+\)", r"\s*(?P<\1>\S.*)", regex)
# Experimental fix for #18; removes unnecessary variable-size whitespace
# matching, since we're stripping results anyway.
regex = re.sub(r"\[ \]\*\(\?P<([^>]+)>\.\*\)", r"(?P<\1>.*)", regex)
return regex
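# Illustrative sketch (not part of the original module) of what the rewrite above does
# to one of the grammar patterns (the variable-width whitespace match is narrowed to
# avoid pathological backtracking, per the referenced issues):
#   preprocess_regex(r"Registrar:\s*(?P<val>.+)")  ->  r"Registrar:\s*(?P<val>\S.*)"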
registrant_regexes = [
" Registrant:[ ]*\n (?P<organization>.*)\n (?P<name>.*)\n (?P<street>.*)\n (?P<city>.*), (?P<state>.*) (?P<postalcode>.*)\n (?P<country>.*)\n(?: Phone: (?P<phone>.*)\n)? Email: (?P<email>.*)\n", # Corporate Domains, Inc.
"Registrant:\n (?P<name>.+)\n (?P<street1>.+)\n(?: (?P<street2>.*)\n)?(?: (?P<street3>.*)\n)? (?P<postalcode>.+), (?P<city>.+)\n (?P<country>.+)\n (?P<phone>.+)\n (?P<email>.+)\n\n", # OVH
"(?:Registrant ID:(?P<handle>.+)\n)?Registrant Name:(?P<name>.*)\n(?:Registrant Organization:(?P<organization>.*)\n)?Registrant Street1?:(?P<street1>.*)\n(?:Registrant Street2:(?P<street2>.*)\n)?(?:Registrant Street3:(?P<street3>.*)\n)?Registrant City:(?P<city>.*)\nRegistrant State/Province:(?P<state>.*)\nRegistrant Postal Code:(?P<postalcode>.*)\nRegistrant Country:(?P<country>.*)\nRegistrant Phone:(?P<phone>.*)\n(?:Registrant Phone Ext.:(?P<phone_ext>.*)\n)?(?:Registrant FAX:(?P<fax>.*)\n)?(?:Registrant FAX Ext.:(?P<fax_ext>.*)\n)?Registrant Email:(?P<email>.*)", # Public Interest Registry (.org), nic.pw, No-IP.com
"Registrant ID:(?P<handle>.+)\nRegistrant Name:(?P<name>.*)\n(?:Registrant Organization:(?P<organization>.*)\n)?Registrant Address1?:(?P<street1>.*)\n(?:Registrant Address2:(?P<street2>.*)\n)?(?:Registrant Address3:(?P<street3>.*)\n)?Registrant City:(?P<city>.*)\nRegistrant State/Province:(?P<state>.*)\nRegistrant Country/Economy:(?P<country>.*)\nRegistrant Postal Code:(?P<postalcode>.*)\nRegistrant Phone:(?P<phone>.*)\n(?:Registrant Phone Ext.:(?P<phone_ext>.*)\n)?(?:Registrant FAX:(?P<fax>.*)\n)?(?:Registrant FAX Ext.:(?P<fax_ext>.*)\n)?Registrant E-mail:(?P<email>.*)", # .ME, DotAsia
"Registrant ID:\s*(?P<handle>.+)\nRegistrant Name:\s*(?P<name>.+)\nRegistrant Organization:\s*(?P<organization>.*)\nRegistrant Address1:\s*(?P<street1>.+)\nRegistrant Address2:\s*(?P<street2>.*)\nRegistrant City:\s*(?P<city>.+)\nRegistrant State/Province:\s*(?P<state>.+)\nRegistrant Postal Code:\s*(?P<postalcode>.+)\nRegistrant Country:\s*(?P<country>.+)\nRegistrant Country Code:\s*(?P<country_code>.+)\nRegistrant Phone Number:\s*(?P<phone>.+)\nRegistrant Email:\s*(?P<email>.+)\n", # .CO Internet
"Registrant Contact: (?P<handle>.+)\nRegistrant Organization: (?P<organization>.+)\nRegistrant Name: (?P<name>.+)\nRegistrant Street: (?P<street>.+)\nRegistrant City: (?P<city>.+)\nRegistrant Postal Code: (?P<postalcode>.+)\nRegistrant State: (?P<state>.+)\nRegistrant Country: (?P<country>.+)\nRegistrant Phone: (?P<phone>.*)\nRegistrant Phone Ext: (?P<phone_ext>.*)\nRegistrant Fax: (?P<fax>.*)\nRegistrant Fax Ext: (?P<fax_ext>.*)\nRegistrant Email: (?P<email>.*)\n", # Key-Systems GmbH
"(?:Registrant ID:[ ]*(?P<handle>.*)\n)?Registrant Name:[ ]*(?P<name>.*)\n(?:Registrant Organization:[ ]*(?P<organization>.*)\n)?Registrant Street:[ ]*(?P<street1>.+)\n(?:Registrant Street:[ ]*(?P<street2>.+)\n)?(?:Registrant Street:[ ]*(?P<street3>.+)\n)?Registrant City:[ ]*(?P<city>.+)\nRegistrant State(?:\/Province)?:[ ]*(?P<state>.*)\nRegistrant Postal Code:[ ]*(?P<postalcode>.+)\nRegistrant Country:[ ]*(?P<country>.+)\n(?:Registrant Phone:[ ]*(?P<phone>.*)\n)?(?:Registrant Phone Ext:[ ]*(?P<phone_ext>.*)\n)?(?:Registrant Fax:[ ]*(?P<fax>.*)\n)?(?:Registrant Fax Ext:[ ]*(?P<fax_ext>.*)\n)?(?:Registrant Email:[ ]*(?P<email>.+)\n)?", # WildWestDomains, GoDaddy, Namecheap/eNom, Ascio, Musedoma (.museum), EuroDNS, nic.ps
"Registrant\n(?: (?P<organization>.+)\n)? (?P<name>.+)\n Email:(?P<email>.+)\n (?P<street1>.+)\n(?: (?P<street2>.+)\n)? (?P<postalcode>.+) (?P<city>.+)\n (?P<country>.+)\n Tel: (?P<phone>.+)\n\n", # internet.bs
" Registrant Contact Details:[ ]*\n (?P<organization>.*)\n (?P<name>.*)[ ]{2,}\((?P<email>.*)\)\n (?P<street1>.*)\n(?: (?P<street2>.*)\n)?(?: (?P<street3>.*)\n)? (?P<city>.*)\n (?P<state>.*),(?P<postalcode>.*)\n (?P<country>.*)\n Tel. (?P<phone>.*)", # Whois.com
"owner-id:[ ]*(?P<handle>.*)\n(?:owner-organization:[ ]*(?P<organization>.*)\n)?owner-name:[ ]*(?P<name>.*)\nowner-street:[ ]*(?P<street>.*)\nowner-city:[ ]*(?P<city>.*)\nowner-zip:[ ]*(?P<postalcode>.*)\nowner-country:[ ]*(?P<country>.*)\n(?:owner-phone:[ ]*(?P<phone>.*)\n)?(?:owner-fax:[ ]*(?P<fax>.*)\n)?owner-email:[ ]*(?P<email>.*)", # InterNetworX
"Registrant:\n registrant_org: (?P<organization>.*)\n registrant_name: (?P<name>.*)\n registrant_email: (?P<email>.*)\n registrant_address: (?P<address>.*)\n registrant_city: (?P<city>.*)\n registrant_state: (?P<state>.*)\n registrant_zip: (?P<postalcode>.*)\n registrant_country: (?P<country>.*)\n registrant_phone: (?P<phone>.*)", # Bellnames
"Holder of domain name:\n(?P<name>[\S\s]+)\n(?P<street>.+)\n(?P<postalcode>[A-Z0-9-]+)\s+(?P<city>.+)\n(?P<country>.+)\nContractual Language", # nic.ch
"\n\n(?:Owner)?\s+: (?P<name>.*)\n(?:\s+: (?P<organization>.*)\n)?\s+: (?P<street>.*)\n\s+: (?P<city>.*)\n\s+: (?P<state>.*)\n\s+: (?P<country>.*)\n", # nic.io
"Contact Information:\n\[Name\]\s*(?P<name>.*)\n\[Email\]\s*(?P<email>.*)\n\[Web Page\]\s*(?P<url>.*)\n\[Postal code\]\s*(?P<postalcode>.*)\n\[Postal Address\]\s*(?P<street1>.*)\n(?:\s+(?P<street2>.*)\n)?(?:\s+(?P<street3>.*)\n)?\[Phone\]\s*(?P<phone>.*)\n\[Fax\]\s*(?P<fax>.*)\n", # jprs.jp
"g\. \[Organization\] (?P<organization>.+)\n", # .co.jp registrations at jprs.jp
"Registrant ID:(?P<handle>.*)\nRegistrant Name:(?P<name>.*)\n(?:Registrant Organization:(?P<organization>.*)\n)?Registrant Address1:(?P<street1>.*)\n(?:Registrant Address2:(?P<street2>.*)\n)?(?:Registrant Address3:(?P<street3>.*)\n)?Registrant City:(?P<city>.*)\n(?:Registrant State/Province:(?P<state>.*)\n)?(?:Registrant Postal Code:(?P<postalcode>.*)\n)?Registrant Country:(?P<country>.*)\nRegistrant Country Code:.*\nRegistrant Phone Number:(?P<phone>.*)\n(?:Registrant Facsimile Number:(?P<facsimile>.*)\n)?Registrant Email:(?P<email>.*)", # .US, .biz (NeuStar), .buzz, .moe (Interlink Co. Ltd.)
"Registrant\n Name: (?P<name>.+)\n(?: Organization: (?P<organization>.+)\n)? ContactID: (?P<handle>.+)\n(?: Address: (?P<street1>.+)\n(?: (?P<street2>.+)\n(?: (?P<street3>.+)\n)?)? (?P<city>.+)\n (?P<postalcode>.+)\n (?P<state>.+)\n (?P<country>.+)\n)?(?: Created: (?P<creationdate>.+)\n)?(?: Last Update: (?P<changedate>.+)\n)?", # nic.it
" Organisation Name[.]* (?P<name>.*)\n Organisation Address[.]* (?P<street1>.*)\n Organisation Address[.]* (?P<street2>.*)\n(?: Organisation Address[.]* (?P<street3>.*)\n)? Organisation Address[.]* (?P<city>.*)\n Organisation Address[.]* (?P<postalcode>.*)\n Organisation Address[.]* (?P<state>.*)\n Organisation Address[.]* (?P<country>.*)", # Melbourne IT (what a horrid format...)
"Registrant:[ ]*(?P<name>.+)\n[\s\S]*Eligibility Name:[ ]*(?P<organization>.+)\n[\s\S]*Registrant Contact ID:[ ]*(?P<handle>.+)\n", # .au business
"Eligibility Type:[ ]*Citizen\/Resident\n[\s\S]*Registrant Contact ID:[ ]*(?P<handle>.+)\n[\s\S]*Registrant Contact Name:[ ]*(?P<name>.+)\n", # .au individual
"Registrant:[ ]*(?P<organization>.+)\n[\s\S]*Eligibility Type:[ ]*(Higher Education Institution|Company|Incorporated Association|Other)\n[\s\S]*Registrant Contact ID:[ ]*(?P<handle>.+)\n[\s\S]*Registrant Contact Name:[ ]*(?P<name>.+)\n", # .au educational, company, 'incorporated association' (non-profit?), other (spotted for linux.conf.au, unsure if also for others)
" Registrant:\n (?P<name>.+)\n\n Registrant type:\n .*\n\n Registrant's address:\n The registrant .* opted to have", # Nominet (.uk) with hidden address
" Registrant:\n (?P<name>.+)\n\n[\s\S]* Registrant type:\n .*\n\n Registrant's address:\n (?P<street1>.+)\n(?: (?P<street2>.+)\n(?: (?P<street3>.+)\n)??)?? (?P<city>[^0-9\n]+)\n(?: (?P<state>.+)\n)? (?P<postalcode>.+)\n (?P<country>.+)\n\n", # Nominet (.uk) with visible address
"Domain Owner:\n\t(?P<organization>.+)\n\n[\s\S]*?(?:Registrant Contact:\n\t(?P<name>.+))?\n\nRegistrant(?:'s)? (?:a|A)ddress:(?:\n\t(?P<street1>.+)\n(?:\t(?P<street2>.+)\n)?(?:\t(?P<street3>.+)\n)?\t(?P<city>.+)\n\t(?P<postalcode>.+))?\n\t(?P<country>.+)(?:\n\t(?P<phone>.+) \(Phone\)\n\t(?P<fax>.+) \(FAX\)\n\t(?P<email>.+))?\n\n", # .ac.uk - what a mess...
"Registrant ID: (?P<handle>.+)\nRegistrant: (?P<name>.+)\nRegistrant Contact Email: (?P<email>.+)", # .cn (CNNIC)
"Registrant contact:\n (?P<name>.+)\n (?P<street>.*)\n (?P<city>.+), (?P<state>.+) (?P<postalcode>.+) (?P<country>.+)\n\n", # Fabulous.com
"registrant-name:\s*(?P<name>.+)\nregistrant-type:\s*(?P<type>.+)\nregistrant-address:\s*(?P<street>.+)\nregistrant-postcode:\s*(?P<postalcode>.+)\nregistrant-city:\s*(?P<city>.+)\nregistrant-country:\s*(?P<country>.+)\n(?:registrant-phone:\s*(?P<phone>.+)\n)?(?:registrant-email:\s*(?P<email>.+)\n)?", # Hetzner
"Registrant Contact Information :[ ]*\n[ ]+(?P<firstname>.*)\n[ ]+(?P<lastname>.*)\n[ ]+(?P<organization>.*)\n[ ]+(?P<email>.*)\n[ ]+(?P<street>.*)\n[ ]+(?P<city>.*)\n[ ]+(?P<postalcode>.*)\n[ ]+(?P<phone>.*)\n[ ]+(?P<fax>.*)\n\n", # GAL Communication
"Contact Information : For Customer # [0-9]+[ ]*\n[ ]+(?P<firstname>.*)\n[ ]+(?P<lastname>.*)\n[ ]+(?P<organization>.*)\n[ ]+(?P<email>.*)\n[ ]+(?P<street>.*)\n[ ]+(?P<city>.*)\n[ ]+(?P<postalcode>.*)\n[ ]+(?P<phone>.*)\n[ ]+(?P<fax>.*)\n\n", # GAL Communication alternative (private WHOIS) format?
"Registrant:\n Name: (?P<name>.+)\n City: (?P<city>.+)\n State: (?P<state>.+)\n Country: (?P<country>.+)\n", # Akky (.com.mx)
" Registrant:\n (?P<name>.+)\n (?P<street>.+)\n (?P<city>.+) (?P<state>\S+),[ ]+(?P<postalcode>.+)\n (?P<country>.+)", # .am
"Domain Holder: (?P<organization>.+)\n(?P<street1>.+?)(?:,+ (?P<street2>.+?)(?:,+ (?P<street3>.+?)(?:,+ (?P<street4>.+?)(?:,+ (?P<street5>.+?)(?:,+ (?P<street6>.+?)(?:,+ (?P<street7>.+?))?)?)?)?)?)?, (?P<city>[^.,]+), (?P<district>.+), (?P<state>.+)\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 1
"Domain Holder: (?P<organization>.+)\n(?P<street1>.+?)(?:,+ (?P<street2>.+?)(?:,+ (?P<street3>.+?)(?:,+ (?P<street4>.+?)(?:,+ (?P<street5>.+?)(?:,+ (?P<street6>.+?)(?:,+ (?P<street7>.+?))?)?)?)?)?)?, (?P<city>.+)\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 2
"Domain Holder: (?P<organization>.+)\n(?P<street1>.+)\n(?:(?P<street2>.+)\n)?(?:(?P<street3>.+)\n)?.+?, (?P<district>.+)\n(?P<city>.+)\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 3
"Domain Holder: (?P<organization>.+)\n(?P<street1>.+?)(?:,+ (?P<street2>.+?)(?:,+ (?P<street3>.+?)(?:,+ (?P<street4>.+?)(?:,+ (?P<street5>.+?)(?:,+ (?P<street6>.+?)(?:,+ (?P<street7>.+?))?)?)?)?)?)?\n(?P<city>.+),? (?P<state>[A-Z]{2,3})(?: [A-Z0-9]+)?\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 4
" Registrant:\n (?P<organization>.+)\n (?P<name>.+) (?P<email>.+)\n (?P<phone>.*)\n (?P<fax>.*)\n (?P<street>.*)\n (?P<city>.+), (?P<state>[^,\n]*)\n (?P<country>.+)\n", # .com.tw (Western registrars)
"Registrant:\n(?P<organization1>.+)\n(?P<organization2>.+)\n(?P<street1>.+?)(?:,+(?P<street2>.+?)(?:,+(?P<street3>.+?)(?:,+(?P<street4>.+?)(?:,+(?P<street5>.+?)(?:,+(?P<street6>.+?)(?:,+(?P<street7>.+?))?)?)?)?)?)?,(?P<city>.+),(?P<country>.+)\n\n Contact:\n (?P<name>.+) (?P<email>.+)\n TEL: (?P<phone>.+?)(?:(?:#|ext.?)(?P<phone_ext>.+))?\n FAX: (?P<fax>.+)(?:(?:#|ext.?)(?P<fax_ext>.+))?\n", # .com.tw (TWNIC/SEEDNET, Taiwanese companies only?)
"Registrant Contact Information:\n\nCompany English Name \(It should be the same as the registered/corporation name on your Business Register Certificate or relevant documents\):(?P<organization1>.+)\nCompany Chinese name:(?P<organization2>.+)\nAddress: (?P<street>.+)\nCountry: (?P<country>.+)\nEmail: (?P<email>.+)\n", # HKDNR (.hk)
"Registrant ID:(?P<handle>.+)\nRegistrant Name:(?P<name>.*)\n(?:Registrant Organization:(?P<organization>.*)\n)?Registrant Street1:(?P<street1>.+?)\n(?:Registrant Street2:(?P<street2>.+?)\n(?:Registrant Street3:(?P<street3>.+?)\n)?)?Registrant City:(?P<city>.+)\nRegistrant State:(?P<state>.*)\nRegistrant Postal Code:(?P<postalcode>.+)\nRegistrant Country:(?P<country>[A-Z]+)\nRegistrant Phone:(?P<phone>.*?)\nRegistrant Fax:(?P<fax>.*)\nRegistrant Email:(?P<email>.+)\n", # Realtime Register
"owner:\s+(?P<name>.+)", # .br
"person:\s+(?P<name>.+)", # nic.ru (person)
"org:\s+(?P<organization>.+)", # nic.ru (organization)
]
tech_contact_regexes = [
" Technical Contact:[ ]*\n (?P<organization>.*)\n (?P<name>.*)\n (?P<street>.*)\n (?P<city>.*), (?P<state>.*) (?P<postalcode>.*)\n (?P<country>.*)\n(?: Phone: (?P<phone>.*)\n)? Email: (?P<email>.*)\n", # Corporate Domains, Inc.
"Technical Contact:\n (?P<name>.+)\n (?P<street1>.+)\n(?: (?P<street2>.*)\n)?(?: (?P<street3>.*)\n)? (?P<postalcode>.+), (?P<city>.+)\n (?P<country>.+)\n (?P<phone>.+)\n (?P<email>.+)\n\n", # OVH
"(?:Tech ID:(?P<handle>.+)\n)?Tech Name:(?P<name>.*)\n(:?Tech Organization:(?P<organization>.*)\n)?Tech Street1?:(?P<street1>.*)\n(?:Tech Street2:(?P<street2>.*)\n)?(?:Tech Street3:(?P<street3>.*)\n)?Tech City:(?P<city>.*)\nTech State/Province:(?P<state>.*)\nTech Postal Code:(?P<postalcode>.*)\nTech Country:(?P<country>.*)\nTech Phone:(?P<phone>.*)\n(?:Tech Phone Ext.:(?P<phone_ext>.*)\n)?(?:Tech FAX:(?P<fax>.*)\n)?(?:Tech FAX Ext.:(?P<fax_ext>.*)\n)?Tech Email:(?P<email>.*)", # Public Interest Registry (.org), nic.pw, No-IP.com
"Tech(?:nical)? ID:(?P<handle>.+)\nTech(?:nical)? Name:(?P<name>.*)\n(?:Tech(?:nical)? Organization:(?P<organization>.*)\n)?Tech(?:nical)? Address1?:(?P<street1>.*)\n(?:Tech(?:nical)? Address2:(?P<street2>.*)\n)?(?:Tech(?:nical)? Address3:(?P<street3>.*)\n)?Tech(?:nical)? City:(?P<city>.*)\nTech(?:nical)? State/Province:(?P<state>.*)\nTech(?:nical)? Country/Economy:(?P<country>.*)\nTech(?:nical)? Postal Code:(?P<postalcode>.*)\nTech(?:nical)? Phone:(?P<phone>.*)\n(?:Tech(?:nical)? Phone Ext.:(?P<phone_ext>.*)\n)?(?:Tech(?:nical)? FAX:(?P<fax>.*)\n)?(?:Tech(?:nical)? FAX Ext.:(?P<fax_ext>.*)\n)?Tech(?:nical)? E-mail:(?P<email>.*)", # .ME, DotAsia
"Technical Contact ID:\s*(?P<handle>.+)\nTechnical Contact Name:\s*(?P<name>.+)\nTechnical Contact Organization:\s*(?P<organization>.*)\nTechnical Contact Address1:\s*(?P<street1>.+)\nTechnical Contact Address2:\s*(?P<street2>.*)\nTechnical Contact City:\s*(?P<city>.+)\nTechnical Contact State/Province:\s*(?P<state>.+)\nTechnical Contact Postal Code:\s*(?P<postalcode>.+)\nTechnical Contact Country:\s*(?P<country>.+)\nTechnical Contact Country Code:\s*(?P<country_code>.+)\nTechnical Contact Phone Number:\s*(?P<phone>.+)\nTechnical Contact Email:\s*(?P<email>.+)\n", # .CO Internet
"Tech Contact: (?P<handle>.+)\nTech Organization: (?P<organization>.+)\nTech Name: (?P<name>.+)\nTech Street: (?P<street>.+)\nTech City: (?P<city>.+)\nTech Postal Code: (?P<postalcode>.+)\nTech State: (?P<state>.+)\nTech Country: (?P<country>.+)\nTech Phone: (?P<phone>.*)\nTech Phone Ext: (?P<phone_ext>.*)\nTech Fax: (?P<fax>.*)\nTech Fax Ext: (?P<fax_ext>.*)\nTech Email: (?P<email>.*)\n", # Key-Systems GmbH
"(?:Tech ID:[ ]*(?P<handle>.*)\n)?Tech[ ]*Name:[ ]*(?P<name>.*)\n(?:Tech[ ]*Organization:[ ]*(?P<organization>.*)\n)?Tech[ ]*Street:[ ]*(?P<street1>.+)\n(?:Tech[ ]*Street:[ ]*(?P<street2>.+)\n)?(?:Tech[ ]*Street:[ ]*(?P<street3>.+)\n)?Tech[ ]*City:[ ]*(?P<city>.+)\nTech[ ]*State(?:\/Province)?:[ ]*(?P<state>.*)\nTech[ ]*Postal[ ]*Code:[ ]*(?P<postalcode>.+)\nTech[ ]*Country:[ ]*(?P<country>.+)\n(?:Tech[ ]*Phone:[ ]*(?P<phone>.*)\n)?(?:Tech[ ]*Phone[ ]*Ext:[ ]*(?P<phone_ext>.*)\n)?(?:Tech[ ]*Fax:[ ]*(?P<fax>.*)\n)?(?:Tech[ ]*Fax[ ]*Ext:\s*?(?P<fax_ext>.*)\n)?(?:Tech[ ]*Email:[ ]*(?P<email>.+)\n)?", # WildWestDomains, GoDaddy, Namecheap/eNom, Ascio, Musedoma (.museum), EuroDNS, nic.ps
"Technical Contact\n(?: (?P<organization>.+)\n)? (?P<name>.+)\n Email:(?P<email>.+)\n (?P<street1>.+)\n(?: (?P<street2>.+)\n)? (?P<postalcode>.+) (?P<city>.+)\n (?P<country>.+)\n Tel: (?P<phone>.+)\n\n", # internet.bs
" Technical Contact Details:[ ]*\n (?P<organization>.*)\n (?P<name>.*)[ ]{2,}\((?P<email>.*)\)\n (?P<street1>.*)\n(?: (?P<street2>.*)\n)?(?: (?P<street3>.*)\n)? (?P<city>.*)\n (?P<state>.*),(?P<postalcode>.*)\n (?P<country>.*)\n Tel. (?P<phone>.*)", # Whois.com
"tech-id:[ ]*(?P<handle>.*)\n(?:tech-organization:[ ]*(?P<organization>.*)\n)?tech-name:[ ]*(?P<name>.*)\ntech-street:[ ]*(?P<street>.*)\ntech-city:[ ]*(?P<city>.*)\ntech-zip:[ ]*(?P<postalcode>.*)\ntech-country:[ ]*(?P<country>.*)\n(?:tech-phone:[ ]*(?P<phone>.*)\n)?(?:tech-fax:[ ]*(?P<fax>.*)\n)?tech-email:[ ]*(?P<email>.*)", # InterNetworX
"Technical Contact:\n tech_org: (?P<organization>.*)\n tech_name: (?P<name>.*)\n tech_email: (?P<email>.*)\n tech_address: (?P<address>.*)\n tech_city: (?P<city>.*)\n tech_state: (?P<state>.*)\n tech_zip: (?P<postalcode>.*)\n tech_country: (?P<country>.*)\n tech_phone: (?P<phone>.*)", # Bellnames
"Technical contact:\n(?P<name>[\S\s]+)\n(?P<street>.+)\n(?P<postalcode>[A-Z0-9-]+)\s+(?P<city>.+)\n(?P<country>.+)\n\n", # nic.ch
"Tech Contact ID:[ ]*(?P<handle>.+)\nTech Contact Name:[ ]*(?P<name>.+)", # .au
"Technical Contact ID:(?P<handle>.*)\nTechnical Contact Name:(?P<name>.*)\n(?:Technical Contact Organization:(?P<organization>.*)\n)?Technical Contact Address1:(?P<street1>.*)\n(?:Technical Contact Address2:(?P<street2>.*)\n)?(?:Technical Contact Address3:(?P<street3>.*)\n)?Technical Contact City:(?P<city>.*)\n(?:Technical Contact State/Province:(?P<state>.*)\n)?(?:Technical Contact Postal Code:(?P<postalcode>.*)\n)?Technical Contact Country:(?P<country>.*)\nTechnical Contact Country Code:.*\nTechnical Contact Phone Number:(?P<phone>.*)\n(?:Technical Contact Facsimile Number:(?P<facsimile>.*)\n)?Technical Contact Email:(?P<email>.*)", # .US, .biz (NeuStar), .buzz, .moe (Interlink Co. Ltd.)
"Technical Contacts\n Name: (?P<name>.+)\n(?: Organization: (?P<organization>.+)\n)? ContactID: (?P<handle>.+)\n(?: Address: (?P<street1>.+)\n(?: (?P<street2>.+)\n(?: (?P<street3>.+)\n)?)? (?P<city>.+)\n (?P<postalcode>.+)\n (?P<state>.+)\n (?P<country>.+)\n)?(?: Created: (?P<creationdate>.+)\n)?(?: Last Update: (?P<changedate>.+)\n)?", # nic.it // NOTE: Why does this say 'Contacts'? Can it have multiple?
"Tech Name[.]* (?P<name>.*)\n Tech Address[.]* (?P<street1>.*)\n Tech Address[.]* (?P<street2>.*)\n(?: Tech Address[.]* (?P<street3>.*)\n)? Tech Address[.]* (?P<city>.*)\n Tech Address[.]* (?P<postalcode>.*)\n Tech Address[.]* (?P<state>.*)\n Tech Address[.]* (?P<country>.*)\n Tech Email[.]* (?P<email>.*)\n Tech Phone[.]* (?P<phone>.*)\n Tech Fax[.]* (?P<fax>.*)", # Melbourne IT
"Technical contact:\n(?: (?P<organization>.+)\n)? (?P<name>.+)\n (?P<email>.+)\n (?P<street>.+)\n (?P<city>.+), (?P<state>.+) (?P<postalcode>.+) (?P<country>.+)\n Phone: (?P<phone>.*)\n Fax: (?P<fax>.*)\n", # Fabulous.com
"tech-c-name:\s*(?P<name>.+)\ntech-c-type:\s*(?P<type>.+)\ntech-c-address:\s*(?P<street>.+)\ntech-c-postcode:\s*(?P<postalcode>.+)\ntech-c-city:\s*(?P<city>.+)\ntech-c-country:\s*(?P<country>.+)\n(?:tech-c-phone:\s*(?P<phone>.+)\n)?(?:tech-c-email:\s*(?P<email>.+)\n)?", # Hetzner
"Admin Contact Information :[ ]*\n[ ]+(?P<firstname>.*)\n[ ]+(?P<lastname>.*)\n[ ]+(?P<organization>.*)\n[ ]+(?P<email>.*)\n[ ]+(?P<street>.*)\n[ ]+(?P<city>.*)\n[ ]+(?P<postalcode>.*)\n[ ]+(?P<phone>.*)\n[ ]+(?P<fax>.*)\n\n", # GAL Communication
" Technical contact:\n (?P<name>.+)\n (?P<organization>.*)\n (?P<street>.+)\n (?P<city>.+) (?P<state>\S+),[ ]+(?P<postalcode>.+)\n (?P<country>.+)\n (?P<email>.+)\n (?P<phone>.*)\n (?P<fax>.*)", # .am
"Technical:\n\s*Name:\s*(?P<name>.*)\n\s*Organisation:\s*(?P<organization>.*)\n\s*Language:.*\n\s*Phone:\s*(?P<phone>.*)\n\s*Fax:\s*(?P<fax>.*)\n\s*Email:\s*(?P<email>.*)\n", # EURid
"\[Zone-C\]\nType: (?P<type>.+)\nName: (?P<name>.+)\n(Organisation: (?P<organization>.+)\n){0,1}(Address: (?P<street1>.+)\n){1}(Address: (?P<street2>.+)\n){0,1}(Address: (?P<street3>.+)\n){0,1}(Address: (?P<street4>.+)\n){0,1}PostalCode: (?P<postalcode>.+)\nCity: (?P<city>.+)\nCountryCode: (?P<country>[A-Za-z]{2})\nPhone: (?P<phone>.+)\nFax: (?P<fax>.+)\nEmail: (?P<email>.+)\n(Remarks: (?P<remark>.+)\n){0,1}Changed: (?P<changed>.+)", # DeNIC
"Technical Contact:\n Name: (?P<name>.+)\n City: (?P<city>.+)\n State: (?P<state>.+)\n Country: (?P<country>.+)\n", # Akky (.com.mx)
"Tech Contact: (?P<handle>.+)\n(?P<organization>.+)\n(?P<street1>.+?)(?:,+ (?P<street2>.+?)(?:,+ (?P<street3>.+?)(?:,+ (?P<street4>.+?)(?:,+ (?P<street5>.+?)(?:,+ (?P<street6>.+?)(?:,+ (?P<street7>.+?))?)?)?)?)?)?\n(?P<city>.+),? (?P<state>[A-Z]{2,3})(?: [A-Z0-9]+)?\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 1
"Tech Contact: (?P<handle>.+)\n(?P<organization>.+)\n(?P<street1>.+?)(?:,+ (?P<street2>.+?)(?:,+ (?P<street3>.+?)(?:,+ (?P<street4>.+?)(?:,+ (?P<street5>.+?)(?:,+ (?P<street6>.+?)(?:,+ (?P<street7>.+?))?)?)?)?)?)?\n(?P<city>.+), (?P<state>.+)\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 2
"Tech Contact: (?P<handle>.+)\n(?P<organization>.+)\n(?P<street1>.+?)(?:,+ (?P<street2>.+?)(?:,+ (?P<street3>.+?)(?:,+ (?P<street4>.+?)(?:,+ (?P<street5>.+?)(?:,+ (?P<street6>.+?)(?:,+ (?P<street7>.+?))?)?)?)?)?)?, (?P<city>.+)\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 3
"Tech Contact: (?P<handle>.+)\n(?P<street1>.+) (?P<city>[^\s]+)\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 4
"Tech Contact: (?P<handle>.+)\n(?P<organization>.+)\n(?P<street1>.+)\n(?P<district>.+) (?P<city>[^\s]+)\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 5
"Tech Contact: (?P<handle>.+)\n(?P<organization>.+)\n(?P<street1>.+)\n(?P<street2>.+)\n(?:(?P<street3>.+)\n)?(?P<city>.+)\n(?P<postalcode>.+)\n(?P<country>[A-Z]+)\n", # .co.th, format 6
" Technical Contact:\n (?P<name>.+) (?P<email>.+)\n (?P<phone>.*)\n (?P<fax>.*)\n", # .com.tw (Western registrars)
"Technical Contact Information:\n\n(?:Given name: (?P<firstname>.+)\n)?(?:Family name: (?P<lastname>.+)\n)?(?:Company name: (?P<organization>.+)\n)?Address: (?P<street>.+)\nCountry: (?P<country>.+)\nPhone: (?P<phone>.*)\nFax: (?P<fax>.*)\nEmail: (?P<email>.+)\n(?:Account Name: (?P<handle>.+)\n)?", # HKDNR (.hk)
"TECH ID:(?P<handle>.+)\nTECH Name:(?P<name>.*)\n(?:TECH Organization:(?P<organization>.*)\n)?TECH Street1:(?P<street1>.+?)\n(?:TECH Street2:(?P<street2>.+?)\n(?:TECH Street3:(?P<street3>.+?)\n)?)?TECH City:(?P<city>.+)\nTECH State:(?P<state>.*)\nTECH Postal Code:(?P<postalcode>.+)\nTECH Country:(?P<country>[A-Z]+)\nTECH Phone:(?P<phone>.*?)\nTECH Fax:(?P<fax>.*)\nTECH Email:(?P<email>.+)\n", # Realtime Register
]
admin_contact_regexes = [
" Administrative Contact:[ ]*\n (?P<organization>.*)\n (?P<name>.*)\n (?P<street>.*)\n (?P<city>.*), (?P<state>.*) (?P<postalcode>.*)\n (?P<country>.*)\n(?: Phone: (?P<phone>.*)\n)? Email: (?P<email>.*)\n", # Corporate Domains, Inc.
"Administrative Contact:\n (?P<name>.+)\n (?P<street1>.+)\n(?: (?P<street2>.*)\n)?(?: (?P<street3>.*)\n)? (?P<postalcode>.+), (?P<city>.+)\n (?P<country>.+)\n (?P<phone>.+)\n (?P<email>.+)\n\n", # OVH
"(?:Admin ID:(?P<handle>.+)\n)?Admin Name:(?P<name>.*)\n(?:Admin Organization:(?P<organization>.*)\n)?Admin Street1?:(?P<street1>.*)\n(?:Admin Street2:(?P<street2>.*)\n)?(?:Admin Street3:(?P<street3>.*)\n)?Admin City:(?P<city>.*)\nAdmin State/Province:(?P<state>.*)\nAdmin Postal Code:(?P<postalcode>.*)\nAdmin Country:(?P<country>.*)\nAdmin Phone:(?P<phone>.*)\n(?:Admin Phone Ext.:(?P<phone_ext>.*)\n)?(?:Admin FAX:(?P<fax>.*)\n)?(?:Admin FAX Ext.:(?P<fax_ext>.*)\n)?Admin Email:(?P<email>.*)", # Public Interest Registry (.org), nic.pw, No-IP.com
"Admin(?:istrative)? ID:(?P<handle>.+)\nAdmin(?:istrative)? Name:(?P<name>.*)\n(?:Admin(?:istrative)? Organization:(?P<organization>.*)\n)?Admin(?:istrative)? Address1?:(?P<street1>.*)\n(?:Admin(?:istrative)? Address2:(?P<street2>.*)\n)?(?:Admin(?:istrative)? Address3:(?P<street3>.*)\n)?Admin(?:istrative)? City:(?P<city>.*)\nAdmin(?:istrative)? State/Province:(?P<state>.*)\nAdmin(?:istrative)? Country/Economy:(?P<country>.*)\nAdmin(?:istrative)? Postal Code:(?P<postalcode>.*)\nAdmin(?:istrative)? Phone:(?P<phone>.*)\n(?:Admin(?:istrative)? Phone Ext.:(?P<phone_ext>.*)\n)?(?:Admin(?:istrative)? FAX:(?P<fax>.*)\n)?(?:Admin(?:istrative)? FAX Ext.:(?P<fax_ext>.*)\n)?Admin(?:istrative)? E-mail:(?P<email>.*)", # .ME, DotAsia
"Administrative Contact ID:\s*(?P<handle>.+)\nAdministrative Contact Name:\s*(?P<name>.+)\nAdministrative Contact Organization:\s*(?P<organization>.*)\nAdministrative Contact Address1:\s*(?P<street1>.+)\nAdministrative Contact Address2:\s*(?P<street2>.*)\nAdministrative Contact City:\s*(?P<city>.+)\nAdministrative Contact State/Province:\s*(?P<state>.+)\nAdministrative Contact Postal Code:\s*(?P<postalcode>.+)\nAdministrative Contact Country:\s*(?P<country>.+)\nAdministrative Contact Country Code:\s*(?P<country_code>.+)\nAdministrative Contact Phone Number:\s*(?P<phone>.+)\nAdministrative Contact Email:\s*(?P<email>.+)\n", # .CO Internet
"Admin Contact: (?P<handle>.+)\nAdmin Organization: (?P<organization>.+)\nAdmin Name: (?P<name>.+)\nAdmin Street: (?P<street>.+)\nAdmin City: (?P<city>.+)\nAdmin State: (?P<state>.+)\nAdmin Postal Code: (?P<postalcode>.+)\nAdmin Country: (?P<country>.+)\nAdmin Phone: (?P<phone>.*)\nAdmin Phone Ext: (?P<phone_ext>.*)\nAdmin Fax: (?P<fax>.*)\nAdmin Fax Ext: (?P<fax_ext>.*)\nAdmin Email: (?P<email>.*)\n", # Key-Systems GmbH
"(?:Admin ID:[ ]*(?P<handle>.*)\n)?Admin[ ]*Name:[ ]*(?P<name>.*)\n(?:Admin[ ]*Organization:[ ]*(?P<organization>.*)\n)?Admin[ ]*Street:[ ]*(?P<street1>.+)\n(?:Admin[ ]*Street:[ ]*(?P<street2>.+)\n)?(?:Admin[ ]*Street:[ ]*(?P<street3>.+)\n)?Admin[ ]*City:[ ]*(?P<city>.+)\nAdmin[ ]*State(?:\/Province)?:[ ]*(?P<state>.*)\nAdmin[ ]*Postal[ ]*Code:[ ]*(?P<postalcode>.+)\nAdmin[ ]*Country:[ ]*(?P<country>.+)\n(?:Admin[ ]*Phone:[ ]*(?P<phone>.*)\n)?(?:Admin[ ]*Phone[ ]*Ext:[ ]*(?P<phone_ext>.*)\n)?(?:Admin[ ]*Fax:[ ]*(?P<fax>.*)\n)?(?:Admin[ ]*Fax[ ]*Ext:\s*?(?P<fax_ext>.*)\n)?(?:Admin[ ]*Email:[ ]*(?P<email>.+)\n)?", # WildWestDomains, GoDaddy, Namecheap/eNom, Ascio, Musedoma (.museum), EuroDNS, nic.ps
"Administrative Contact\n(?: (?P<organization>.+)\n)? (?P<name>.+)\n Email:(?P<email>.+)\n (?P<street1>.+)\n(?: (?P<street2>.+)\n)? (?P<postalcode>.+) (?P<city>.+)\n (?P<country>.+)\n Tel: (?P<phone>.+)\n\n", # internet.bs
" Administrative Contact Details:[ ]*\n (?P<organization>.*)\n (?P<name>.*)[ ]{2,}\((?P<email>.*)\)\n (?P<street1>.*)\n(?: (?P<street2>.*)\n)?(?: (?P<street3>.*)\n)? (?P<city>.*)\n (?P<state>.*),(?P<postalcode>.*)\n (?P<country>.*)\n Tel. (?P<phone>.*)", # Whois.com
"admin-id:[ ]*(?P<handle>.*)\n(?:admin-organization:[ ]*(?P<organization>.*)\n)?admin-name:[ ]*(?P<name>.*)\nadmin-street:[ ]*(?P<street>.*)\nadmin-city:[ ]*(?P<city>.*)\nadmin-zip:[ ]*(?P<postalcode>.*)\nadmin-country:[ ]*(?P<country>.*)\n(?:admin-phone:[ ]*(?P<phone>.*)\n)?(?:admin-fax:[ ]*(?P<fax>.*)\n)?admin-email:[ ]*(?P<email>.*)", # InterNetworX
"Administrative Contact:\n admin_org: (?P<organization>.*)\n admin_name: (?P<name>.*)\n admin_email: (?P<email>.*)\n admin_address: (?P<address>.*)\n admin_city: (?P<city>.*)\n admin_state: (?P<state>.*)\n admin_zip: (?P<postalcode>.*)\n admin_country: (?P<country>.*)\n admin_phone: (?P<phone>.*)", # Bellnames
"Administrative Contact ID:(?P<handle>.*)\nAdministrative Contact Name:(?P<name>.*)\n(?:Administrative Contact Organization:(?P<organization>.*)\n)?Administrative Contact Address1:(?P<street1>.*)\n(?:Administrative Contact Address2:(?P<street2>.*)\n)?(?:Administrative Contact Address3:(?P<street3>.*)\n)?Administrative Contact City:(?P<city>.*)\n(?:Administrative Contact State/Province:(?P<state>.*)\n)?(?:Administrative Contact Postal Code:(?P<postalcode>.*)\n)?Administrative Contact Country:(?P<country>.*)\nAdministrative Contact Country Code:.*\nAdministrative Contact Phone Number:(?P<phone>.*)\n(?:Administrative Contact Facsimile Number:(?P<facsimile>.*)\n)?Administrative Contact Email:(?P<email>.*)", # .US, .biz (NeuStar), .buzz, .moe (Interlink Co. Ltd.)
"Admin Contact\n Name: (?P<name>.+)\n(?: Organization: (?P<organization>.+)\n)? ContactID: (?P<handle>.+)\n(?: Address: (?P<street1>.+)\n(?: (?P<street2>.+)\n(?: (?P<street3>.+)\n)?)? (?P<city>.+)\n (?P<postalcode>.+)\n (?P<state>.+)\n (?P<country>.+)\n)?(?: Created: (?P<creationdate>.+)\n)?(?: Last Update: (?P<changedate>.+)\n)?", # nic.it
"Admin Name[.]* (?P<name>.*)\n Admin Address[.]* (?P<street1>.*)\n Admin Address[.]* (?P<street2>.*)\n(?: Admin Address[.]* (?P<street3>.*)\n)? Admin Address[.]* (?P<city>.*)\n Admin Address[.]* (?P<postalcode>.*)\n Admin Address[.]* (?P<state>.*)\n Admin Address[.]* (?P<country>.*)\n Admin Email[.]* (?P<email>.*)\n Admin Phone[.]* (?P<phone>.*)\n Admin Fax[.]* (?P<fax>.*)", # Melbourne IT
"Administrative contact:\n(?: (?P<organization>.+)\n)? (?P<name>.+)\n (?P<email>.+)\n (?P<street>.+)\n (?P<city>.+), (?P<state>.+) (?P<postalcode>.+) (?P<country>.+)\n Phone: (?P<phone>.*)\n Fax: (?P<fax>.*)\n", # Fabulous.com
"admin-c-name:\s*(?P<name>.+)\nadmin-c-type:\s*(?P<type>.+)\nadmin-c-address:\s*(?P<street>.+)\nadmin-c-postcode:\s*(?P<postalcode>.+)\nadmin-c-city:\s*(?P<city>.+)\nadmin-c-country:\s*(?P<country>.+)\n(?:admin-c-phone:\s*(?P<phone>.+)\n)?(?:admin-c-email:\s*(?P<email>.+)\n)?", # Hetzner
"Tech Contact Information :[ ]*\n[ ]+(?P<firstname>.*)\n[ ]+(?P<lastname>.*)\n[ ]+(?P<organization>.*)\n[ ]+(?P<email>.*)\n[ ]+(?P<street>.*)\n[ ]+(?P<city>.*)\n[ ]+(?P<postalcode>.*)\n[ ]+(?P<phone>.*)\n[ ]+(?P<fax>.*)\n\n", # GAL Communication
" Administrative contact:\n (?P<name>.+)\n (?P<organization>.*)\n (?P<street>.+)\n (?P<city>.+) (?P<state>\S+),[ ]+(?P<postalcode>.+)\n (?P<country>.+)\n (?P<email>.+)\n (?P<phone>.*)\n (?P<fax>.*)", # .am
"Administrative Contact:\n Name: (?P<name>.+)\n City: (?P<city>.+)\n State: (?P<state>.+)\n Country: (?P<country>.+)\n", # Akky (.com.mx)
"\[Tech-C\]\nType: (?P<type>.+)\nName: (?P<name>.+)\n(Organisation: (?P<organization>.+)\n){0,1}(Address: (?P<street1>.+)\n){1}(Address: (?P<street2>.+)\n){0,1}(Address: (?P<street3>.+)\n){0,1}(Address: (?P<street4>.+)\n){0,1}PostalCode: (?P<postalcode>.+)\nCity: (?P<city>.+)\nCountryCode: (?P<country>[A-Za-z]{2})\nPhone: (?P<phone>.+)\nFax: (?P<fax>.+)\nEmail: (?P<email>.+)\n(Remarks: (?P<remark>.+)\n){0,1}Changed: (?P<changed>.+)", # DeNIC
" Administrative Contact:\n (?P<name>.+) (?P<email>.+)\n (?P<phone>.*)\n (?P<fax>.*)\n", # .com.tw (Western registrars)
"Administrative Contact Information:\n\n(?:Given name: (?P<firstname>.+)\n)?(?:Family name: (?P<lastname>.+)\n)?(?:Company name: (?P<organization>.+)\n)?Address: (?P<street>.+)\nCountry: (?P<country>.+)\nPhone: (?P<phone>.*)\nFax: (?P<fax>.*)\nEmail: (?P<email>.+)\n(?:Account Name: (?P<handle>.+)\n)?", # HKDNR (.hk)
"ADMIN ID:(?P<handle>.+)\nADMIN Name:(?P<name>.*)\n(?:ADMIN Organization:(?P<organization>.*)\n)?ADMIN Street1:(?P<street1>.+?)\n(?:ADMIN Street2:(?P<street2>.+?)\n(?:ADMIN Street3:(?P<street3>.+?)\n)?)?ADMIN City:(?P<city>.+)\nADMIN State:(?P<state>.*)\nADMIN Postal Code:(?P<postalcode>.+)\nADMIN Country:(?P<country>[A-Z]+)\nADMIN Phone:(?P<phone>.*?)\nADMIN Fax:(?P<fax>.*)\nADMIN Email:(?P<email>.+)\n", # Realtime Register
]
billing_contact_regexes = [
"(?:Billing ID:(?P<handle>.+)\n)?Billing Name:(?P<name>.*)\nBilling Organization:(?P<organization>.*)\nBilling Street1:(?P<street1>.*)\n(?:Billing Street2:(?P<street2>.*)\n)?(?:Billing Street3:(?P<street3>.*)\n)?Billing City:(?P<city>.*)\nBilling State/Province:(?P<state>.*)\nBilling Postal Code:(?P<postalcode>.*)\nBilling Country:(?P<country>.*)\nBilling Phone:(?P<phone>.*)\n(?:Billing Phone Ext.:(?P<phone_ext>.*)\n)?(?:Billing FAX:(?P<fax>.*)\n)?(?:Billing FAX Ext.:(?P<fax_ext>.*)\n)?Billing Email:(?P<email>.*)", # nic.pw, No-IP.com
"Billing ID:(?P<handle>.+)\nBilling Name:(?P<name>.*)\n(?:Billing Organization:(?P<organization>.*)\n)?Billing Address1?:(?P<street1>.*)\n(?:Billing Address2:(?P<street2>.*)\n)?(?:Billing Address3:(?P<street3>.*)\n)?Billing City:(?P<city>.*)\nBilling State/Province:(?P<state>.*)\nBilling Country/Economy:(?P<country>.*)\nBilling Postal Code:(?P<postalcode>.*)\nBilling Phone:(?P<phone>.*)\n(?:Billing Phone Ext.:(?P<phone_ext>.*)\n)?(?:Billing FAX:(?P<fax>.*)\n)?(?:Billing FAX Ext.:(?P<fax_ext>.*)\n)?Billing E-mail:(?P<email>.*)", # DotAsia
"Billing Contact ID:\s*(?P<handle>.+)\nBilling Contact Name:\s*(?P<name>.+)\nBilling Contact Organization:\s*(?P<organization>.*)\nBilling Contact Address1:\s*(?P<street1>.+)\nBilling Contact Address2:\s*(?P<street2>.*)\nBilling Contact City:\s*(?P<city>.+)\nBilling Contact State/Province:\s*(?P<state>.+)\nBilling Contact Postal Code:\s*(?P<postalcode>.+)\nBilling Contact Country:\s*(?P<country>.+)\nBilling Contact Country Code:\s*(?P<country_code>.+)\nBilling Contact Phone Number:\s*(?P<phone>.+)\nBilling Contact Email:\s*(?P<email>.+)\n", # .CO Internet
"Billing Contact: (?P<handle>.+)\nBilling Organization: (?P<organization>.+)\nBilling Name: (?P<name>.+)\nBilling Street: (?P<street>.+)\nBilling City: (?P<city>.+)\nBilling Postal Code: (?P<postalcode>.+)\nBilling State: (?P<state>.+)\nBilling Country: (?P<country>.+)\nBilling Phone: (?P<phone>.*)\nBilling Phone Ext: (?P<phone_ext>.*)\nBilling Fax: (?P<fax>.*)\nBilling Fax Ext: (?P<fax_ext>.*)\nBilling Email: (?P<email>.*)\n", # Key-Systems GmbH
"(?:Billing ID:[ ]*(?P<handle>.*)\n)?Billing[ ]*Name:[ ]*(?P<name>.*)\n(?:Billing[ ]*Organization:[ ]*(?P<organization>.*)\n)?Billing[ ]*Street:[ ]*(?P<street1>.+)\n(?:Billing[ ]*Street:[ ]*(?P<street2>.+)\n)?Billing[ ]*City:[ ]*(?P<city>.+)\nBilling[ ]*State\/Province:[ ]*(?P<state>.+)\nBilling[ ]*Postal[ ]*Code:[ ]*(?P<postalcode>.+)\nBilling[ ]*Country:[ ]*(?P<country>.+)\n(?:Billing[ ]*Phone:[ ]*(?P<phone>.*)\n)?(?:Billing[ ]*Phone[ ]*Ext:[ ]*(?P<phone_ext>.*)\n)?(?:Billing[ ]*Fax:[ ]*(?P<fax>.*)\n)?(?:Billing[ ]*Fax[ ]*Ext:\s*?(?P<fax_ext>.*)\n)?(?:Billing[ ]*Email:[ ]*(?P<email>.+)\n)?", # Musedoma (.museum)
"Billing Contact:\n (?P<name>.+)\n (?P<street1>.+)\n(?: (?P<street2>.*)\n)?(?: (?P<street3>.*)\n)? (?P<postalcode>.+), (?P<city>.+)\n (?P<country>.+)\n (?P<phone>.+)\n (?P<email>.+)\n\n", # OVH
" Billing Contact Details:[ ]*\n (?P<organization>.*)\n (?P<name>.*)[ ]{2,}\((?P<email>.*)\)\n (?P<street1>.*)\n(?: (?P<street2>.*)\n)?(?: (?P<street3>.*)\n)? (?P<city>.*)\n (?P<state>.*),(?P<postalcode>.*)\n (?P<country>.*)\n Tel. (?P<phone>.*)", # Whois.com
"billing-id:[ ]*(?P<handle>.*)\n(?:billing-organization:[ ]*(?P<organization>.*)\n)?billing-name:[ ]*(?P<name>.*)\nbilling-street:[ ]*(?P<street>.*)\nbilling-city:[ ]*(?P<city>.*)\nbilling-zip:[ ]*(?P<postalcode>.*)\nbilling-country:[ ]*(?P<country>.*)\n(?:billing-phone:[ ]*(?P<phone>.*)\n)?(?:billing-fax:[ ]*(?P<fax>.*)\n)?billing-email:[ ]*(?P<email>.*)", # InterNetworX
"Billing Contact:\n bill_org: (?P<organization>.*)\n bill_name: (?P<name>.*)\n bill_email: (?P<email>.*)\n bill_address: (?P<address>.*)\n bill_city: (?P<city>.*)\n bill_state: (?P<state>.*)\n bill_zip: (?P<postalcode>.*)\n bill_country: (?P<country>.*)\n bill_phone: (?P<phone>.*)", # Bellnames
"Billing Contact ID:(?P<handle>.*)\nBilling Contact Name:(?P<name>.*)\n(?:Billing Contact Organization:(?P<organization>.*)\n)?Billing Contact Address1:(?P<street1>.*)\n(?:Billing Contact Address2:(?P<street2>.*)\n)?(?:Billing Contact Address3:(?P<street3>.*)\n)?Billing Contact City:(?P<city>.*)\n(?:Billing Contact State/Province:(?P<state>.*)\n)?(?:Billing Contact Postal Code:(?P<postalcode>.*)\n)?Billing Contact Country:(?P<country>.*)\nBilling Contact Country Code:.*\nBilling Contact Phone Number:(?P<phone>.*)\n(?:Billing Contact Facsimile Number:(?P<facsimile>.*)\n)?Billing Contact Email:(?P<email>.*)", # .US, .biz (NeuStar), .buzz, .moe (Interlink Co. Ltd.)
"Billing contact:\n(?: (?P<organization>.+)\n)? (?P<name>.+)\n (?P<email>.+)\n (?P<street>.+)\n (?P<city>.+), (?P<state>.+) (?P<postalcode>.+) (?P<country>.+)\n Phone: (?P<phone>.*)\n Fax: (?P<fax>.*)\n", # Fabulous.com
"Billing Contact Information :[ ]*\n[ ]+(?P<firstname>.*)\n[ ]+(?P<lastname>.*)\n[ ]+(?P<organization>.*)\n[ ]+(?P<email>.*)\n[ ]+(?P<street>.*)\n[ ]+(?P<city>.*)\n[ ]+(?P<postalcode>.*)\n[ ]+(?P<phone>.*)\n[ ]+(?P<fax>.*)\n\n", # GAL Communication
"Billing Contact:\n Name: (?P<name>.+)\n City: (?P<city>.+)\n State: (?P<state>.+)\n Country: (?P<country>.+)\n", # Akky (.com.mx)
"BILLING ID:(?P<handle>.+)\nBILLING Name:(?P<name>.*)\n(?:BILLING Organization:(?P<organization>.*)\n)?BILLING Street1:(?P<street1>.+?)\n(?:BILLING Street2:(?P<street2>.+?)\n(?:BILLING Street3:(?P<street3>.+?)\n)?)?BILLING City:(?P<city>.+)\nBILLING State:(?P<state>.*)\nBILLING Postal Code:(?P<postalcode>.+)\nBILLING Country:(?P<country>[A-Z]+)\nBILLING Phone:(?P<phone>.*?)\nBILLING Fax:(?P<fax>.*)\nBILLING Email:(?P<email>.+)\n", # Realtime Register
]
# Some registries use NIC handle references instead of directly listing contacts...
nic_contact_references = {
"registrant": [
"registrant:\s*(?P<handle>.+)", # nic.at
"owner-contact:\s*(?P<handle>.+)", # LCN.com
"holder-c:\s*(?P<handle>.+)", # AFNIC
"holder:\s*(?P<handle>.+)", # iis.se (they apparently want to be difficult, and won't give you contact info for the handle over their WHOIS service)
],
"tech": [
"tech-c:\s*(?P<handle>.+)", # nic.at, AFNIC, iis.se
"technical-contact:\s*(?P<handle>.+)", # LCN.com
"n\. \[Technical Contact\] (?P<handle>.+)\n", #.co.jp
],
"admin": [
"admin-c:\s*(?P<handle>.+)", # nic.at, AFNIC, iis.se
"admin-contact:\s*(?P<handle>.+)", # LCN.com
"m\. \[Administrative Contact\] (?P<handle>.+)\n", # .co.jp
],
"billing": [
"billing-c:\s*(?P<handle>.+)", # iis.se
"billing-contact:\s*(?P<handle>.+)", # LCN.com
]
}
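# Illustrative note (not from the original source): a response that uses handle references typically
# carries a line like "admin-c: EXAMPLE123-NIC" instead of a full contact block; the handle is resolved
# later against the contact definitions found elsewhere in the response (see parse_nic_contact below).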
# Why do the below? It deals with an edge case (issue #2) where a partial match followed by a failure, for a
# regex containing the \s*.+ pattern, would send the regex module on a wild goose chase for matching
# positions. The workaround is to use \S.* instead of .+, but in the interest of keeping the regexes
# consistent and compact, it's more practical to do this (predictable) conversion at runtime.
# FIXME: This breaks on NIC contact regex for nic.at. Why?
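# Illustrative example of the intended conversion (assumed from the description above, not taken from the
# actual preprocess_regex implementation):
#   "registrant:\s*(?P<handle>.+)"  ->  "registrant:\s*(?P<handle>\S.*)"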
registrant_regexes = [preprocess_regex(regex) for regex in registrant_regexes]
tech_contact_regexes = [preprocess_regex(regex) for regex in tech_contact_regexes]
admin_contact_regexes = [preprocess_regex(regex) for regex in admin_contact_regexes]
billing_contact_regexes = [preprocess_regex(regex) for regex in billing_contact_regexes]
nic_contact_regexes = [
"personname:\s*(?P<name>.+)\norganization:\s*(?P<organization>.+)\nstreet address:\s*(?P<street>.+)\npostal code:\s*(?P<postalcode>.+)\ncity:\s*(?P<city>.+)\ncountry:\s*(?P<country>.+)\n(?:phone:\s*(?P<phone>.+)\n)?(?:fax-no:\s*(?P<fax>.+)\n)?(?:e-mail:\s*(?P<email>.+)\n)?nic-hdl:\s*(?P<handle>.+)\nchanged:\s*(?P<changedate>.+)", # nic.at
"contact-handle:[ ]*(?P<handle>.+)\ncontact:[ ]*(?P<name>.+)\n(?:organisation:[ ]*(?P<organization>.+)\n)?address:[ ]*(?P<street1>.+)\n(?:address:[ ]*(?P<street2>.+)\n)?(?:address:[ ]*(?P<street3>.+)\n)?(?:address:[ ]*(?P<street4>.+)\n)?address:[ ]*(?P<city>.+)\naddress:[ ]*(?P<state>.+)\naddress:[ ]*(?P<postalcode>.+)\naddress:[ ]*(?P<country>.+)\n(?:phone:[ ]*(?P<phone>.+)\n)?(?:fax:[ ]*(?P<fax>.+)\n)?(?:email:[ ]*(?P<email>.+)\n)?", # LCN.com
"Contact Information:\na\. \[JPNIC Handle\] (?P<handle>.+)\nc\. \[Last, First\] (?P<lastname>.+), (?P<firstname>.+)\nd\. \[E-Mail\] (?P<email>.+)\ng\. \[Organization\] (?P<organization>.+)\nl\. \[Division\] (?P<division>.+)\nn\. \[Title\] (?P<title>.+)\no\. \[TEL\] (?P<phone>.+)\np\. \[FAX\] (?P<fax>.+)\ny\. \[Reply Mail\] .*\n\[Last Update\] (?P<changedate>.+) \(JST\)\n", # JPRS .co.jp contact handle lookup
"person:\s*(?P<name>.+)\nnic-hdl:\s*(?P<handle>.+)\n", # .ie
"nic-hdl:\s+(?P<handle>.+)\nperson:\s+(?P<name>.+)\n(?:e-mail:\s+(?P<email>.+)\n)?(?:address:\s+(?P<street1>.+?)(?:,+ (?P<street2>.+?)(?:,+ (?P<street3>.+?)(?:,+ (?P<street4>.+?)(?:,+ (?P<street5>.+?)(?:,+ (?P<street6>.+?)(?:,+ (?P<street7>.+?))?)?)?)?)?)?, (?P<city>.+), (?P<state>.+), (?P<country>.+)\n)?(?:phone:\s+(?P<phone>.+)\n)?(?:fax-no:\s+(?P<fax>.+)\n)?", # nic.ir, individual - this is a nasty one.
"nic-hdl:\s+(?P<handle>.+)\norg:\s+(?P<organization>.+)\n(?:e-mail:\s+(?P<email>.+)\n)?(?:address:\s+(?P<street1>.+?)(?:,+ (?P<street2>.+?)(?:,+ (?P<street3>.+?)(?:,+ (?P<street4>.+?)(?:,+ (?P<street5>.+?)(?:,+ (?P<street6>.+?)(?:,+ (?P<street7>.+?))?)?)?)?)?)?, (?P<city>.+), (?P<state>.+), (?P<country>.+)\n)?(?:phone:\s+(?P<phone>.+)\n)?(?:fax-no:\s+(?P<fax>.+)\n)?", # nic.ir, organization
"nic-hdl:\s*(?P<handle>.+)\ntype:\s*(?P<type>.+)\ncontact:\s*(?P<name>.+)\n(?:.+\n)*?(?:address:\s*(?P<street1>.+)\naddress:\s*(?P<street2>.+)\naddress:\s*(?P<street3>.+)\naddress:\s*(?P<country>.+)\n)?(?:phone:\s*(?P<phone>.+)\n)?(?:fax-no:\s*(?P<fax>.+)\n)?(?:.+\n)*?(?:e-mail:\s*(?P<email>.+)\n)?(?:.+\n)*?changed:\s*(?P<changedate>[0-9]{2}\/[0-9]{2}\/[0-9]{4}).*\n", # AFNIC madness without country field
"nic-hdl:\s*(?P<handle>.+)\ntype:\s*(?P<type>.+)\ncontact:\s*(?P<name>.+)\n(?:.+\n)*?(?:address:\s*(?P<street1>.+)\n)?(?:address:\s*(?P<street2>.+)\n)?(?:address:\s*(?P<street3>.+)\n)?(?:phone:\s*(?P<phone>.+)\n)?(?:fax-no:\s*(?P<fax>.+)\n)?(?:.+\n)*?(?:e-mail:\s*(?P<email>.+)\n)?(?:.+\n)*?changed:\s*(?P<changedate>[0-9]{2}\/[0-9]{2}\/[0-9]{4}).*\n", # AFNIC madness any country -at all-
"nic-hdl:\s*(?P<handle>.+)\ntype:\s*(?P<type>.+)\ncontact:\s*(?P<name>.+)\n(?:.+\n)*?(?:address:\s*(?P<street1>.+)\n)?(?:address:\s*(?P<street2>.+)\n)?(?:address:\s*(?P<street3>.+)\n)?(?:address:\s*(?P<street4>.+)\n)?country:\s*(?P<country>.+)\n(?:phone:\s*(?P<phone>.+)\n)?(?:fax-no:\s*(?P<fax>.+)\n)?(?:.+\n)*?(?:e-mail:\s*(?P<email>.+)\n)?(?:.+\n)*?changed:\s*(?P<changedate>[0-9]{2}\/[0-9]{2}\/[0-9]{4}).*\n", # AFNIC madness with country field
]
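# Company-name suffixes (Ltd., GmbH, S.a.r.l., ...); used below in normalize_data() to split organization
# names out of the name/street fields of a contact.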
organization_regexes = (
r"\sltd\.?($|\s)",
r"\sco\.?($|\s)",
r"\scorp\.?($|\s)",
r"\sinc\.?($|\s)",
r"\ss\.?p\.?a\.?($|\s)",
r"\ss\.?(c\.?)?r\.?l\.?($|\s)",
r"\ss\.?a\.?s\.?($|\s)",
r"\sa\.?g\.?($|\s)",
r"\sn\.?v\.?($|\s)",
r"\sb\.?v\.?($|\s)",
r"\sp\.?t\.?y\.?($|\s)",
r"\sp\.?l\.?c\.?($|\s)",
r"\sv\.?o\.?f\.?($|\s)",
r"\sb\.?v\.?b\.?a\.?($|\s)",
r"\sg\.?m\.?b\.?h\.?($|\s)",
r"\ss\.?a\.?r\.?l\.?($|\s)",
)
grammar["_data"]["id"] = precompile_regexes(grammar["_data"]["id"], re.IGNORECASE)
grammar["_data"]["status"] = precompile_regexes(grammar["_data"]["status"], re.IGNORECASE)
grammar["_data"]["creation_date"] = precompile_regexes(grammar["_data"]["creation_date"], re.IGNORECASE)
grammar["_data"]["expiration_date"] = precompile_regexes(grammar["_data"]["expiration_date"], re.IGNORECASE)
grammar["_data"]["updated_date"] = precompile_regexes(grammar["_data"]["updated_date"], re.IGNORECASE)
grammar["_data"]["registrar"] = precompile_regexes(grammar["_data"]["registrar"], re.IGNORECASE)
grammar["_data"]["whois_server"] = precompile_regexes(grammar["_data"]["whois_server"], re.IGNORECASE)
grammar["_data"]["nameservers"] = precompile_regexes(grammar["_data"]["nameservers"], re.IGNORECASE)
grammar["_data"]["emails"] = precompile_regexes(grammar["_data"]["emails"], re.IGNORECASE)
grammar["_dateformats"] = precompile_regexes(grammar["_dateformats"], re.IGNORECASE)
registrant_regexes = precompile_regexes(registrant_regexes)
tech_contact_regexes = precompile_regexes(tech_contact_regexes)
billing_contact_regexes = precompile_regexes(billing_contact_regexes)
admin_contact_regexes = precompile_regexes(admin_contact_regexes)
nic_contact_regexes = precompile_regexes(nic_contact_regexes)
organization_regexes = precompile_regexes(organization_regexes, re.IGNORECASE)
nic_contact_references["registrant"] = precompile_regexes(nic_contact_references["registrant"])
nic_contact_references["tech"] = precompile_regexes(nic_contact_references["tech"])
nic_contact_references["admin"] = precompile_regexes(nic_contact_references["admin"])
nic_contact_references["billing"] = precompile_regexes(nic_contact_references["billing"])
if sys.version_info < (3, 0):
def is_string(data):
"""Test for string with support for python 2."""
return isinstance(data, basestring)
else:
def is_string(data):
"""Test for string with support for python 3."""
return isinstance(data, str)
def parse_raw_whois(raw_data, normalized=None, never_query_handles=True, handle_server=""):
normalized = normalized or []
data = {}
raw_data = [segment.replace("\r", "") for segment in raw_data] # Carriage returns are the devil
for segment in raw_data:
for rule_key, rule_regexes in grammar['_data'].items():
if rule_key not in data:
for line in segment.splitlines():
for regex in rule_regexes:
result = re.search(regex, line)
if result is not None:
val = result.group("val").strip()
if val != "":
try:
data[rule_key].append(val)
except KeyError as e:
data[rule_key] = [val]
# Whois.com is a bit special... Fabulous.com also seems to use this format. As do some others.
match = re.search("^\s?Name\s?[Ss]ervers:?\s*\n((?:\s*.+\n)+?\s?)\n", segment, re.MULTILINE)
if match is not None:
chunk = match.group(1)
for match in re.findall("[ ]*(.+)\n", chunk):
if match.strip() != "":
if not re.match("^[a-zA-Z]+:", match):
try:
data["nameservers"].append(match.strip())
except KeyError as e:
data["nameservers"] = [match.strip()]
# Nominet also needs some special attention
match = re.search(" Registrar:\n (.+)\n", segment)
if match is not None:
data["registrar"] = [match.group(1).strip()]
match = re.search(" Registration status:\n (.+)\n", segment)
if match is not None:
data["status"] = [match.group(1).strip()]
match = re.search(" Name servers:\n([\s\S]*?\n)\n", segment)
if match is not None:
chunk = match.group(1)
for match in re.findall(" (.+)\n", chunk):
match = match.split()[0]
try:
data["nameservers"].append(match.strip())
except KeyError as e:
data["nameservers"] = [match.strip()]
# janet (.ac.uk) is kinda like Nominet, but also kinda not
match = re.search("Registered By:\n\t(.+)\n", segment)
if match is not None:
data["registrar"] = [match.group(1).strip()]
match = re.search("Entry created:\n\t(.+)\n", segment)
if match is not None:
data["creation_date"] = [match.group(1).strip()]
match = re.search("Renewal date:\n\t(.+)\n", segment)
if match is not None:
data["expiration_date"] = [match.group(1).strip()]
match = re.search("Entry updated:\n\t(.+)\n", segment)
if match is not None:
data["updated_date"] = [match.group(1).strip()]
match = re.search("Servers:([\s\S]*?\n)\n", segment)
if match is not None:
chunk = match.group(1)
for match in re.findall("\t(.+)\n", chunk):
match = match.split()[0]
try:
data["nameservers"].append(match.strip())
except KeyError as e:
data["nameservers"] = [match.strip()]
# .am plays the same game
match = re.search(" DNS servers:([\s\S]*?\n)\n", segment)
if match is not None:
chunk = match.group(1)
for match in re.findall(" (.+)\n", chunk):
match = match.split()[0]
try:
data["nameservers"].append(match.strip())
except KeyError as e:
data["nameservers"] = [match.strip()]
# SIDN isn't very standard either. And EURid uses a similar format.
match = re.search("Registrar:\n\s+(?:Name:\s*)?(\S.*)", segment)
if match is not None:
data["registrar"].insert(0, match.group(1).strip())
match = re.search("(?:Domain nameservers|Name servers):([\s\S]*?\n)\n", segment)
if match is not None:
chunk = match.group(1)
for match in re.findall("\s+?(.+)\n", chunk):
match = match.split()[0]
# Prevent nameserver aliases from being picked up.
if not match.startswith("[") and not match.endswith("]"):
try:
data["nameservers"].append(match.strip())
except KeyError as e:
data["nameservers"] = [match.strip()]
# The .ie WHOIS server puts ambiguous status information in an unhelpful order
match = re.search('ren-status:\s*(.+)', segment)
if match is not None:
data["status"].insert(0, match.group(1).strip())
# nic.it gives us the registrar in a multi-line format...
match = re.search('Registrar\n Organization: (.+)\n', segment)
if match is not None:
data["registrar"] = [match.group(1).strip()]
# HKDNR (.hk) provides a weird nameserver format with too much whitespace
match = re.search("Name Servers Information:\n\n([\s\S]*?\n)\n", segment)
if match is not None:
chunk = match.group(1)
for match in re.findall("(.+)\n", chunk):
match = match.split()[0]
try:
data["nameservers"].append(match.strip())
except KeyError as e:
data["nameservers"] = [match.strip()]
# ... and again for TWNIC.
match = re.search(" Domain servers in listed order:\n([\s\S]*?\n)\n", segment)
if match is not None:
chunk = match.group(1)
for match in re.findall(" (.+)\n", chunk):
match = match.split()[0]
try:
data["nameservers"].append(match.strip())
except KeyError as e:
data["nameservers"] = [match.strip()]
data["contacts"] = parse_registrants(raw_data, never_query_handles, handle_server)
# Parse dates
try:
data['expiration_date'] = remove_duplicates(data['expiration_date'])
data['expiration_date'] = parse_dates(data['expiration_date'])
except KeyError as e:
pass # Not present
try:
data['creation_date'] = remove_duplicates(data['creation_date'])
data['creation_date'] = parse_dates(data['creation_date'])
except KeyError as e:
pass # Not present
try:
data['updated_date'] = remove_duplicates(data['updated_date'])
data['updated_date'] = parse_dates(data['updated_date'])
except KeyError as e:
pass # Not present
try:
data['nameservers'] = remove_suffixes(data['nameservers'])
data['nameservers'] = remove_duplicates([ns.rstrip(".") for ns in data['nameservers']])
except KeyError as e:
pass # Not present
try:
data['emails'] = remove_duplicates(data['emails'])
except KeyError as e:
pass # Not present
try:
data['registrar'] = remove_duplicates(data['registrar'])
except KeyError as e:
pass # Not present
# Remove e-mail addresses if they are already listed for any of the contacts
known_emails = []
for contact in ("registrant", "tech", "admin", "billing"):
if data["contacts"][contact] is not None:
try:
known_emails.append(data["contacts"][contact]["email"])
except KeyError as e:
pass # No e-mail recorded for this contact...
try:
data['emails'] = [email for email in data["emails"] if email not in known_emails]
except KeyError as e:
pass # Not present
for key in list(data.keys()):
if data[key] is None or len(data[key]) == 0:
del data[key]
data["raw"] = raw_data
if normalized != []:
data = normalize_data(data, normalized)
return data
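# Illustrative usage (not part of the original module; the WHOIS text below is made up):
#
#   raw = ["Domain Name: EXAMPLE.COM\nRegistrar: Example Registrar, Inc.\nName Server: NS1.EXAMPLE.COM\n"]
#   parsed = parse_raw_whois(raw, normalized=True)
#
# Depending on which grammar rules match, 'parsed' should then contain keys such as "registrar",
# "nameservers" and "contacts", with recognized dates converted to datetime objects.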
def normalize_data(data, normalized):
for key in ("nameservers", "emails", "whois_server"):
if key in data and data[key] is not None and (normalized == True or key in normalized):
if is_string(data[key]):
data[key] = data[key].lower()
else:
data[key] = [item.lower() for item in data[key]]
for key, threshold in (("registrar", 4), ("status", 3)):
if key == "registrar":
ignore_nic = True
else:
ignore_nic = False
if key in data and data[key] is not None and (normalized == True or key in normalized):
if is_string(data[key]):
data[key] = normalize_name(data[key], abbreviation_threshold=threshold, length_threshold=1, ignore_nic=ignore_nic)
else:
data[key] = [normalize_name(item, abbreviation_threshold=threshold, length_threshold=1, ignore_nic=ignore_nic) for item in data[key]]
for contact_type, contact in data['contacts'].items():
if contact is not None:
if 'country' in contact and contact['country'] in countries:
contact['country'] = countries[contact['country']]
if 'city' in contact and contact['city'] in airports:
contact['city'] = airports[contact['city']]
if 'country' in contact and 'state' in contact:
for country, source in (("united states", states_us), ("australia", states_au), ("canada", states_ca)):
if country in contact["country"].lower() and contact["state"] in source:
contact["state"] = source[contact["state"]]
for key in ("email",):
if key in contact and contact[key] is not None and (normalized == True or key in normalized):
if is_string(contact[key]):
contact[key] = contact[key].lower()
else:
contact[key] = [item.lower() for item in contact[key]]
for key in ("name", "street"):
if key in contact and contact[key] is not None and (normalized == True or key in normalized):
contact[key] = normalize_name(contact[key], abbreviation_threshold=3)
for key in ("city", "organization", "state", "country"):
if key in contact and contact[key] is not None and (normalized == True or key in normalized):
contact[key] = normalize_name(contact[key], abbreviation_threshold=3, length_threshold=3)
if "name" in contact and "organization" not in contact:
lines = [x.strip() for x in contact["name"].splitlines()]
new_lines = []
# Iterate over a copy so that removing a matched line does not skip the element that follows it.
for line in list(lines):
for regex in organization_regexes:
if re.search(regex, line):
new_lines.append(line)
lines.remove(line)
break
if len(lines) > 0:
contact["name"] = "\n".join(lines)
else:
del contact["name"]
if len(new_lines) > 0:
contact["organization"] = "\n".join(new_lines)
if "street" in contact and "organization" not in contact:
lines = [x.strip() for x in contact["street"].splitlines()]
if len(lines) > 1:
for regex in organization_regexes:
if re.search(regex, lines[0]):
contact["organization"] = lines[0]
contact["street"] = "\n".join(lines[1:])
break
for key in list(contact.keys()):
try:
contact[key] = contact[key].strip(", ")
if contact[key] == "-" or contact[key].lower() == "n/a":
del contact[key]
except AttributeError as e:
pass # Not a string
return data
def normalize_name(value, abbreviation_threshold=4, length_threshold=8, lowercase_domains=True, ignore_nic=False):
normalized_lines = []
for line in value.split("\n"):
line = line.strip(",") # Get rid of useless comma's
if (line.isupper() or line.islower()) and len(line) >= length_threshold:
# This line is likely not capitalized properly
if ignore_nic == True and "nic" in line.lower():
# This is a registrar name containing 'NIC' - it should probably be all-uppercase.
line = line.upper()
else:
words = line.split()
normalized_words = []
if len(words) >= 1:
# First word
if len(words[0]) >= abbreviation_threshold and "." not in words[0]:
normalized_words.append(words[0].capitalize())
elif lowercase_domains and "." in words[0] and not words[0].endswith(".") and not words[0].startswith("."):
normalized_words.append(words[0].lower())
else:
# Probably an abbreviation or domain, leave it alone
normalized_words.append(words[0])
if len(words) >= 3:
# Words between the first and last
for word in words[1:-1]:
if len(word) >= abbreviation_threshold and "." not in word:
normalized_words.append(word.capitalize())
elif lowercase_domains and "." in word and not word.endswith(".") and not word.startswith("."):
normalized_words.append(word.lower())
else:
# Probably an abbreviation or domain, leave it alone
normalized_words.append(word)
if len(words) >= 2:
# Last word
if len(words[-1]) >= abbreviation_threshold and "." not in words[-1]:
normalized_words.append(words[-1].capitalize())
elif lowercase_domains and "." in words[-1] and not words[-1].endswith(".") and not words[-1].startswith("."):
normalized_words.append(words[-1].lower())
else:
# Probably an abbreviation or domain, leave it alone
normalized_words.append(words[-1])
line = " ".join(normalized_words)
normalized_lines.append(line)
return "\n".join(normalized_lines)
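# For illustration (based on the logic above): an all-caps value such as "EXAMPLE HOSTING B.V." should come
# out as "Example Hosting B.V."; words shorter than the abbreviation threshold are left alone, and dotted
# tokens that look like domain names are lowercased instead of capitalized.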
def parse_dates(dates):
global grammar
parsed_dates = []
for date in dates:
for rule in grammar['_dateformats']:
result = re.match(rule, date)
if result is not None:
try:
# These are always numeric. If they fail, there is no valid date present.
year = int(result.group("year"))
day = int(result.group("day"))
# Detect and correct shorthand year notation
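# (e.g. "95" becomes 1995 and "14" becomes 2014)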
if year < 60:
year += 2000
elif year < 100:
year += 1900
# This will require some more guesswork - some WHOIS servers present the name of the month instead of its number
try:
month = int(result.group("month"))
except ValueError as e:
# Apparently not a number. Look up the corresponding number.
try:
month = grammar['_months'][result.group("month").lower()]
except KeyError as e:
# Unknown month name, default to 0
month = 0
try:
hour = int(result.group("hour"))
except IndexError as e:
hour = 0
except TypeError as e:
hour = 0
try:
minute = int(result.group("minute"))
except IndexError as e:
minute = 0
except TypeError as e:
minute = 0
try:
second = int(result.group("second"))
except IndexError as e:
second = 0
except TypeError as e:
second = 0
break
except ValueError as e:
# Something went horribly wrong, maybe there is no valid date present?
year = 0
month = 0
day = 0
hour = 0
minute = 0
second = 0
print(e) # FIXME: This should have proper logging of some sort...?
try:
if year > 0:
try:
parsed_dates.append(datetime.datetime(year, month, day, hour, minute, second))
except ValueError as e:
# We might have gotten the day and month the wrong way around, let's try it the other way around
# If you're not using an ISO-standard date format, you're an evil registrar!
parsed_dates.append(datetime.datetime(year, day, month, hour, minute, second))
except UnboundLocalError as e:
pass
if len(parsed_dates) > 0:
return parsed_dates
else:
return None
def remove_duplicates(data):
cleaned_list = []
for entry in data:
if entry not in cleaned_list:
cleaned_list.append(entry)
return cleaned_list
def remove_suffixes(data):
# Keeps only the first contiguous run of non-whitespace characters in each entry.
# Used to get rid of IP address suffixes behind nameservers.
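# e.g. "ns1.example.com 192.0.2.1" -> "ns1.example.com"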
cleaned_list = []
for entry in data:
cleaned_list.append(re.search("([^\s]+)\s*[\s]*", entry).group(1).lstrip())
return cleaned_list
def parse_registrants(data, never_query_handles=True, handle_server=""):
registrant = None
tech_contact = None
billing_contact = None
admin_contact = None
for segment in data:
for regex in registrant_regexes:
match = re.search(regex, segment)
if match is not None:
registrant = match.groupdict()
break
for segment in data:
for regex in tech_contact_regexes:
match = re.search(regex, segment)
if match is not None:
tech_contact = match.groupdict()
break
for segment in data:
for regex in admin_contact_regexes:
match = re.search(regex, segment)
if match is not None:
admin_contact = match.groupdict()
break
for segment in data:
for regex in billing_contact_regexes:
match = re.search(regex, segment)
if match is not None:
billing_contact = match.groupdict()
break
# Find NIC handle contact definitions
handle_contacts = parse_nic_contact(data)
# Find NIC handle references and process them
missing_handle_contacts = []
for category in nic_contact_references:
for regex in nic_contact_references[category]:
for segment in data:
match = re.search(regex, segment)
if match is not None:
data_reference = match.groupdict()
if data_reference["handle"] == "-" or re.match("https?:\/\/", data_reference["handle"]) is not None:
pass # Reference was either blank or a URL; the latter is to deal with false positives for nic.ru
else:
found = False
for contact in handle_contacts:
if contact["handle"] == data_reference["handle"]:
found = True
data_reference.update(contact)
if found == False:
# The contact definition was not found in the supplied raw WHOIS data. If the
# method has been called with never_query_handles=False, we can use the supplied
# WHOIS server for looking up the handle information separately.
if never_query_handles == False:
try:
contact = fetch_nic_contact(data_reference["handle"], handle_server)
data_reference.update(contact)
except shared.WhoisException as e:
pass # No data found. TODO: Log error?
else:
pass # TODO: Log warning?
if category == "registrant":
registrant = data_reference
elif category == "tech":
tech_contact = data_reference
elif category == "billing":
billing_contact = data_reference
elif category == "admin":
admin_contact = data_reference
break
# Post-processing
for obj in (registrant, tech_contact, billing_contact, admin_contact):
if obj is not None:
for key in list(obj.keys()):
if obj[key] is None or obj[key].strip() == "": # Just chomp all surrounding whitespace
del obj[key]
else:
obj[key] = obj[key].strip()
if "phone_ext" in obj:
if "phone" in obj:
obj["phone"] += " ext. %s" % obj["phone_ext"]
del obj["phone_ext"]
if "street1" in obj:
street_items = []
i = 1
while True:
try:
street_items.append(obj["street%d" % i])
del obj["street%d" % i]
except KeyError as e:
break
i += 1
obj["street"] = "\n".join(street_items)
if "organization1" in obj: # This is to deal with eg. HKDNR, who allow organization names in multiple languages.
organization_items = []
i = 1
while True:
try:
if obj["organization%d" % i].strip() != "":
organization_items.append(obj["organization%d" % i])
del obj["organization%d" % i]
except KeyError as e:
break
i += 1
obj["organization"] = "\n".join(organization_items)
if 'changedate' in obj:
obj['changedate'] = parse_dates([obj['changedate']])[0]
if 'creationdate' in obj:
obj['creationdate'] = parse_dates([obj['creationdate']])[0]
if 'street' in obj and "\n" in obj["street"] and 'postalcode' not in obj:
# Deal with certain mad WHOIS servers that don't properly delimit address data... (yes, AFNIC, looking at you)
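# (e.g. a street value of "12 Example Street\n75008 Paris" should yield postalcode "75008" and city "Paris")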
lines = [x.strip() for x in obj["street"].splitlines()]
if " " in lines[-1]:
postal_code, city = lines[-1].split(" ", 1)
if "." not in lines[-1] and re.match("[0-9]", postal_code) and len(postal_code) >= 3:
obj["postalcode"] = postal_code
obj["city"] = city
obj["street"] = "\n".join(lines[:-1])
if 'firstname' in obj or 'lastname' in obj:
elements = []
if 'firstname' in obj:
elements.append(obj["firstname"])
if 'lastname' in obj:
elements.append(obj["lastname"])
obj["name"] = " ".join(elements)
if 'country' in obj and 'city' in obj and (re.match("^R\.?O\.?C\.?$", obj["country"], re.IGNORECASE) or obj["country"].lower() == "republic of china") and obj["city"].lower() == "taiwan":
# There's an edge case where some registrants append ", Republic of China" after "Taiwan", and this is mis-parsed
# as Taiwan being the city. This is meant to correct that.
obj["country"] = "%s, %s" % (obj["city"], obj["country"])
lines = [x.strip() for x in obj["street"].splitlines()]
obj["city"] = lines[-1]
obj["street"] = "\n".join(lines[:-1])
return {
"registrant": registrant,
"tech": tech_contact,
"admin": admin_contact,
"billing": billing_contact,
}
def fetch_nic_contact(handle, lookup_server):
response = net.get_whois_raw(handle, lookup_server)
response = [segment.replace("\r", "") for segment in response] # Carriage returns are the devil
results = parse_nic_contact(response)
if len(results) > 0:
return results[0]
else:
raise shared.WhoisException("No contact data found in the response.")
def parse_nic_contact(data):
handle_contacts = []
for regex in nic_contact_regexes:
for segment in data:
matches = re.finditer(regex, segment)
if "{}".format(type(matches)) == "<type 'callable-iterator'>":
return handle_contacts
for match in matches:
handle_contacts.append(match.groupdict())
return handle_contacts
| 74.00497
| 769
| 0.581029
|
4a168771885f338dcf56e6ebbc1cd64eb0047c1d
| 16,030
|
py
|
Python
|
models/heading_markup.py
|
asvbkr/ttb_client_py
|
84bf655c797d301c597d00cf7947495874d936b2
|
[
"Apache-2.0"
] | 9
|
2019-08-17T15:04:09.000Z
|
2020-07-13T09:56:36.000Z
|
models/heading_markup.py
|
asvbkr/ttb_client_py
|
84bf655c797d301c597d00cf7947495874d936b2
|
[
"Apache-2.0"
] | null | null | null |
models/heading_markup.py
|
asvbkr/ttb_client_py
|
84bf655c797d301c597d00cf7947495874d936b2
|
[
"Apache-2.0"
] | 2
|
2019-07-13T05:41:12.000Z
|
2020-04-20T21:45:46.000Z
|
# coding: utf-8
# noinspection SpellCheckingInspection
"""
TamTam Bot API
# About Bot API allows bots to interact with TamTam. Methods are called by sending HTTPS requests to [botapi.tamtam.chat](https://botapi.tamtam.chat) domain. Bots are third-party applications that use TamTam features. A bot can legitimately take part in a conversation. It can be achieved through HTTP requests to the TamTam Bot API. ## Features TamTam bots of the current version are able to: - Communicate with users and respond to requests - Recommend users complete actions via programmed buttons - Request personal data from users (name, short reference, phone number) We'll keep working on expanding bot capabilities in the future. ## Examples Bots can be used for the following purposes: - Providing support, answering frequently asked questions - Sending typical information - Voting - Likes/dislikes - Following external links - Forwarding a user to a chat/channel ## @PrimeBot [PrimeBot](https://tt.me/primebot) is the main bot in TamTam, all bots creator. Use PrimeBot to create and edit your bots. Feel free to contact us for any questions, [@support](https://tt.me/support) or [team@tamtam.chat](mailto:team@tamtam.chat). ## HTTP verbs `GET` — getting resources, parameters are transmitted via URL `POST` — creation of resources (for example, sending new messages) `PUT` — editing resources `DELETE` — deleting resources `PATCH` — patching resources ## HTTP response codes `200` — successful operation `400` — invalid request `401` — authentication error `404` — resource not found `405` — method is not allowed `429` — the number of requests is exceeded `503` — service unavailable ## Resources format For content requests (PUT and POST) and responses, the API uses the JSON format. All strings are UTF-8 encoded. Date/time fields are represented as the number of milliseconds that have elapsed since 00:00 January 1, 1970 in the long format. To get it, you can simply multiply the UNIX timestamp by 1000. All date/time fields have a UTC timezone. ## Error responses In case of an error, the API returns a response with the corresponding HTTP code and JSON with the following fields: `code` - the string with the error key `message` - a string describing the error </br> For example: ```bash > http https://botapi.tamtam.chat/chats?access_token={EXAMPLE_TOKEN} HTTP / 1.1 403 Forbidden Cache-Control: no-cache Connection: Keep-Alive Content-Length: 57 Content-Type: application / json; charset = utf-8 Set-Cookie: web_ui_lang = ru; Path = /; Domain = .tamtam.chat; Expires = 2019-03-24T11: 45: 36.500Z { \"code\": \"verify.token\", \"message\": \"Invalid access_token\" } ``` ## Receiving notifications TamTam Bot API supports 2 options of receiving notifications on new events for bots: - Push notifications via WebHook. To receive data via WebHook, you'll have to [add subscription](https://dev.tamtam.chat/#operation/subscribe); - Notifications upon request via [long polling](#operation/getUpdates) API. All data can be received via long polling **by default** after creating the bot. Both methods **cannot** be used simultaneously. Refer to the response schema of [/updates](https://dev.tamtam.chat/#operation/getUpdates) method to check all available types of updates. ### Webhook There is some notes about how we handle webhook subscription: 1. Sometimes webhook notification cannot be delivered in case when bot server or network is down. 
In such case we well retry delivery in a short period of time (from 30 to 60 seconds) and will do this until get `200 OK` status code from your server, but not longer than **8 hours** (*may change over time*) since update happened. We also consider any non `200`-response from server as failed delivery. 2. To protect your bot from unexpected high load we send **no more than 100** notifications per second by default. If you want increase this limit, contact us at [@support](https://tt.me/support). It should be from one of the following subnets: ``` 185.16.150.0/30 185.16.150.84/30 185.16.150.152/30 185.16.150.192/30 ``` ## Message buttons You can program buttons for users answering a bot. TamTam supports the following types of buttons: `callback` — sends a notification with payload to a bot (via WebHook or long polling) `link` — makes a user to follow a link `request_contact` — requests the user permission to access contact information (phone number, short link, email) `request_geo_location` — asks user to provide current geo location `chat` — creates chat associated with message To start create buttons [send message](#operation/sendMessage) with `InlineKeyboardAttachment`: ```json { \"text\": \"It is message with inline keyboard\", \"attachments\": [ { \"type\": \"inline_keyboard\", \"payload\": { \"buttons\": [ [ { \"type\": \"callback\", \"text\": \"Press me!\", \"payload\": \"button1 pressed\" } ], [ { \"type\": \"chat\", \"text\": \"Discuss\", \"chat_title\": \"Message discussion\" } ] ] } } ] } ``` ### Chat button Chat button is a button that starts chat assosiated with the current message. It will be **private** chat with a link, bot will be added as administrator by default. Chat will be created as soon as the first user taps on button. Bot will receive `message_chat_created` update. Bot can set title and description of new chat by setting `chat_title` and `chat_description` properties. Whereas keyboard can contain several `chat`-buttons there is `uuid` property to distinct them between each other. In case you do not pass `uuid` we will generate it. If you edit message, pass `uuid` so we know that this button starts the same chat as before. Chat button also can contain `start_payload` that will be sent to bot as part of `message_chat_created` update. ## Deep linking TamTam supports deep linking mechanism for bots. It allows passing additional payload to the bot on startup. Deep link can contain any data encoded into string up to **128** characters long. Longer strings will be omitted and **not** passed to the bot. Each bot has start link that looks like: ``` https://tt.me/%BOT_USERNAME%/start/%PAYLOAD% ``` As soon as user clicks on such link we open dialog with bot and send this payload to bot as part of `bot_started` update: ```json { \"update_type\": \"bot_started\", \"timestamp\": 1573226679188, \"chat_id\": 1234567890, \"user\": { \"user_id\": 1234567890, \"name\": \"Boris\", \"username\": \"borisd84\" }, \"payload\": \"any data meaningful to bot\" } ``` Deep linking mechanism is supported for iOS version 2.7.0 and Android 2.9.0 and higher. ## Constructors Constructor is a bot that can create a message for user: add buttons, attach some media, insert text. You can enable constructor mode for your bot via [@PrimeBot](https://tt.me/primebot) sending [/constructor_mode](https://tt.me/primebot/start/constructor_mode) command. For bot developers, it looks like request-response interaction where TamTam application sends `message_construction_request` on behalf of user. 
Bot [responds](#operation/construct) to it with `messages` ready to go or `keyboard` in case it requires further action from user. Bot also can set up UI parts such as `hint` or `placeholder`, allow or not user's input:  As soon as user finishes composing a message, they can post it. Bot will receive `message_constructed_update` with posted message. Constructors are supported for iOS version 2.7.0 and Android 2.9.0 and higher. ## Text formatting Message text can be improved with basic formatting such as: **strong**, *emphasis*, ~strikethough~, <ins>underline</ins>, `code` or link. You can use either markdown-like or HTML formatting. To enable text formatting set the `format` property of [NewMessageBody](#tag/new_message_model). ### TamTam flavored Markdown To enable [Markdown](https://spec.commonmark.org/0.29/) parsing, set the `format` property of [NewMessageBody](#tag/new_message_model) to `markdown`. We currently support only the following syntax: `*empasized*` or `_empasized_` for *italic* text `**strong**` or `__strong__` for __bold__ text `~~strikethough~~` for ~strikethough~ text `++underline++` for <ins>underlined</ins> text ``` `code` ``` or ` ```code``` ` for `monospaced` text `^^important^^` for highlighted text (colored in red, by default) `[Inline URL](https://dev.tamtam.chat/)` for inline URLs `[User mention](tamtam://user/%user_id%)` for user mentions without username ### HTML support To enable HTML parsing, set the `format` property of [NewMessageBody](#tag/new_message_model) to `html`. Only the following HTML tags are supported. All others will be stripped: Emphasized: `<i>` or `<em>` Strong: `<b>` or `<strong>` Strikethrough: `<del>` or `<s>` Underlined: `<ins>` or `<u>` Link: `<a href=\"https://dev.tamtam.chat\">Docs</a>` Monospaced text: `<pre>` or `<code>` Highlighted text: `<mark>` Text formatting is supported for iOS since version 3.1 and Android since 2.20.0. # Versioning API models and interface may change over time. To make sure your bot will get the right info, we strongly recommend adding API version number to each request. You can add it as `v` parameter to each HTTP-request. For instance, `v=0.1.2`. To specify the data model version you are getting through WebHook subscription, use the `version` property in the request body of the [subscribe](https://dev.tamtam.chat/#operation/subscribe) request. # Libraries We have developed the official [Java client](https://github.com/tamtam-chat/tamtam-bot-api) and [SDK](https://github.com/tamtam-chat/tamtam-bot-sdk). Also check out unofficial libraries, created by our enthusiasts: - [Kotlin DSL client](https://github.com/Namazed/TamTamBotApiClientDsl) - [GO client](https://github.com/neonxp/tamtam) - [Node.js module](https://github.com/vershininivan/node-tamtam-botapi) #### Python: - [Python client](https://github.com/asvbkr/openapi_client) - [tamtam.py](https://github.com/uwinx/tamtam.py) - [registriren/botapitamtam](https://github.com/registriren/botapitamtam) # Changelog ##### Version 0.3.0 - Added methods to [pin](#operation/pinMessage)/[unpin](#operation/unpinMessage) messages in chats/channels - Added `is_bot` flag to [`User`](#tag/user_model) model Check out the complete [diff](https://github.com/tamtam-chat/tamtam-bot-api-schema/compare/v0.2.1..v0.3.0) for this release. 
##### Version 0.2.1 - [Added](#operation/getChatByLink) method to get chat by its `@link` - [Added](https://github.com/tamtam-chat/tamtam-bot-api-schema/compare/v0.2.0..HEAD#diff-7e9de78f42fb0d2ae80878b90c87300aR1240) `description` for users in some cases - [Added](https://github.com/tamtam-chat/tamtam-bot-api-schema/compare/v0.2.0..HEAD#diff-7e9de78f42fb0d2ae80878b90c87300aR2555) `user_locale` to `message_created` update in dialogs Check out the complete [diff](https://github.com/tamtam-chat/tamtam-bot-api-schema/compare/v0.2.0..v0.2.1) for this release. ##### Version 0.2.0 - [Added](https://github.com/tamtam-chat/tamtam-bot-api-schema/commit/09c95259d6c8c424f82b50eab93872e7db2ca208) new type of button to start new chat - [Added](https://github.com/tamtam-chat/tamtam-bot-api-schema/commit/ea4581d83d7132663d6cc5c2c61c058a2bd46aac) Constructors API that allows bots to create message on behalf of a user - [Added](https://github.com/tamtam-chat/tamtam-bot-api-schema/commit/c5ff03175407819aceebd9c25de49eed566a0ce1) support for deep-links - [Added](https://github.com/tamtam-chat/tamtam-bot-api-schema/commit/ff4cc4f93662d6c25db11fac72d9fcbf1f66cad8) ability to block users in chats - [Added](https://github.com/tamtam-chat/tamtam-bot-api-schema/commit/b965bfb0d02933e819435312e6ab184a3dfc0250) `chat_id` and `user_id` to `message_removed` update - Added meta information for video attachments - Other minor improvements and fixes. Check out complete [diff](https://github.com/tamtam-chat/tamtam-bot-api-schema/compare/v0.1.11...v0.1.10) for this version ##### Version 0.1.10 - [Added](https://github.com/tamtam-chat/tamtam-bot-api-schema/commit/a9ef3a1b8f4e1a75b55a9b80877eddc2c6f07ec4) `disable_link_preview` parameter to POST:/messages method to disable links parsing in text - [Added](https://github.com/tamtam-chat/tamtam-bot-api-schema/commit/eb99e8ab97b55fa196d9957fca34d2316a4ca8aa) `sending_file` action - [Removed](https://github.com/tamtam-chat/tamtam-bot-api-schema/commit/7a5ab5f0ea1336b3460d1827a6a7b3b141e19776) several deprecated properties - `photo` upload type [renamed](https://github.com/tamtam-chat/tamtam-bot-api-schema/commit/74505883e6acb306686a6d141414aeaf5131ef49) to `image`. *C* is for consistency To see changelog for older versions visit our [GitHub](https://github.com/tamtam-chat/tamtam-bot-api-schema/releases). # noqa: E501
OpenAPI spec version: 0.5.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from .markup_element import MarkupElement
from .text_format import TextFormat
# noinspection PyShadowingBuiltins
class HeadingMarkup(MarkupElement):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'_from': 'int',
'length': 'int',
}
attribute_map = {
'type': 'type',
'_from': 'from',
'length': 'length',
}
def __init__(self, type="heading", _from=None, length=None): # noqa: E501
"""HeadingMarkup - a model defined in OpenAPI""" # noqa: E501
super(HeadingMarkup, self).__init__(type, _from, length)
self.discriminator = None
def markup_apply(self, text, format):
# type: (str, TextFormat) -> str
if format == TextFormat.HTML:
return f'<h1>{text}</h1>'
else:
return f'# {text}'
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HeadingMarkup):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
def __hash__(self):
return super().__hash__()
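# Illustrative usage sketch (not part of the generated model). TextFormat.HTML
# is the member referenced by markup_apply above; any other format value falls
# through to the Markdown branch. The sample `_from` and `length` values are
# made up for the example.
#
#   heading = HeadingMarkup(_from=0, length=5)
#   heading.markup_apply("Title", TextFormat.HTML)  # -> '<h1>Title</h1>'
#   heading.markup_apply("Title", None)             # -> '# Title'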
| 149.813084
| 13,145
| 0.706987
|
4a16879a6f61289396bea23c9f7b7df403dff233
| 23,869
|
py
|
Python
|
staff_manage_sdk/model/monitor/alert_rule_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
staff_manage_sdk/model/monitor/alert_rule_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
staff_manage_sdk/model/monitor/alert_rule_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: alert_rule.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from staff_manage_sdk.model.monitor import alert_dims_pb2 as staff__manage__sdk_dot_model_dot_monitor_dot_alert__dims__pb2
from staff_manage_sdk.model.monitor import alert_conditions_pb2 as staff__manage__sdk_dot_model_dot_monitor_dot_alert__conditions__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='alert_rule.proto',
package='monitor',
syntax='proto3',
serialized_options=_b('ZAgo.easyops.local/contracts/protorepo-models/easyops/model/monitor'),
serialized_pb=_b('\n\x10\x61lert_rule.proto\x12\x07monitor\x1a/staff_manage_sdk/model/monitor/alert_dims.proto\x1a\x35staff_manage_sdk/model/monitor/alert_conditions.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xc2\x08\n\tAlertRule\x12\x0b\n\x03org\x18\x01 \x01(\x05\x12\n\n\x02id\x18\x02 \x01(\t\x12\x11\n\trule_name\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\x05\x12\x12\n\nversion_id\x18\x05 \x01(\t\x12&\n\nalert_dims\x18\x06 \x03(\x0b\x32\x12.monitor.AlertDims\x12\x15\n\rrule_priority\x18\x07 \x01(\x05\x12\x32\n\x10\x61lert_conditions\x18\x08 \x01(\x0b\x32\x18.monitor.AlertConditions\x12\x15\n\rdetect_window\x18\t \x01(\x05\x12\x13\n\x0b\x61lert_count\x18\n \x01(\x05\x12\x16\n\x0e\x61lert_interval\x18\x0b \x01(\x05\x12\x15\n\rrecover_count\x18\x0c \x01(\x05\x12+\n\x07\x61\x63tions\x18\r \x03(\x0b\x32\x1a.monitor.AlertRule.Actions\x12/\n\ttemplates\x18\x0e \x01(\x0b\x32\x1c.monitor.AlertRule.Templates\x12\x0f\n\x07\x63reator\x18\x0f \x01(\t\x12\r\n\x05\x63time\x18\x10 \x01(\x05\x12\r\n\x05mtime\x18\x11 \x01(\x05\x12/\n\tinstances\x18\x12 \x01(\x0b\x32\x1c.monitor.AlertRule.Instances\x12\x10\n\x08objectId\x18\x13 \x01(\t\x12\x10\n\x08\x64isabled\x18\x14 \x01(\x08\x12\x0e\n\x06source\x18\x15 \x01(\t\x1a\xe8\x02\n\x07\x41\x63tions\x12\x37\n\tcondition\x18\x01 \x01(\x0b\x32$.monitor.AlertRule.Actions.Condition\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0f\n\x07upgrade\x18\x03 \x01(\x08\x12\x0f\n\x07methods\x18\x04 \x03(\t\x12\x11\n\treceivers\x18\x05 \x03(\t\x12\x1c\n\x14receiver_user_groups\x18\x06 \x03(\t\x12\x42\n\x0freceiver_owners\x18\x07 \x03(\x0b\x32).monitor.AlertRule.Actions.ReceiverOwners\x1a/\n\tCondition\x12\x13\n\x0blasting_for\x18\x01 \x01(\x05\x12\r\n\x05level\x18\x02 \x01(\x05\x1aN\n\x0eReceiverOwners\x12\x11\n\tobject_id\x18\x01 \x01(\t\x12\x16\n\x0eobject_attr_id\x18\x02 \x01(\t\x12\x11\n\ttranslate\x18\x03 \x01(\t\x1a\x61\n\tTemplates\x12\x18\n\x10\x63ontent_template\x18\x01 \x01(\t\x12\x17\n\x0ftarget_template\x18\x02 \x01(\t\x12!\n\x19recovery_content_template\x18\x03 \x01(\t\x1aV\n\tInstances\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x13\n\x0binstanceIds\x18\x02 \x03(\t\x12&\n\x05query\x18\x03 \x01(\x0b\x32\x17.google.protobuf.StructBCZAgo.easyops.local/contracts/protorepo-models/easyops/model/monitorb\x06proto3')
,
dependencies=[staff__manage__sdk_dot_model_dot_monitor_dot_alert__dims__pb2.DESCRIPTOR,staff__manage__sdk_dot_model_dot_monitor_dot_alert__conditions__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_ALERTRULE_ACTIONS_CONDITION = _descriptor.Descriptor(
name='Condition',
full_name='monitor.AlertRule.Actions.Condition',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lasting_for', full_name='monitor.AlertRule.Actions.Condition.lasting_for', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='monitor.AlertRule.Actions.Condition.level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=940,
serialized_end=987,
)
_ALERTRULE_ACTIONS_RECEIVEROWNERS = _descriptor.Descriptor(
name='ReceiverOwners',
full_name='monitor.AlertRule.Actions.ReceiverOwners',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='object_id', full_name='monitor.AlertRule.Actions.ReceiverOwners.object_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_attr_id', full_name='monitor.AlertRule.Actions.ReceiverOwners.object_attr_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='translate', full_name='monitor.AlertRule.Actions.ReceiverOwners.translate', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=989,
serialized_end=1067,
)
_ALERTRULE_ACTIONS = _descriptor.Descriptor(
name='Actions',
full_name='monitor.AlertRule.Actions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='condition', full_name='monitor.AlertRule.Actions.condition', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='monitor.AlertRule.Actions.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='upgrade', full_name='monitor.AlertRule.Actions.upgrade', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='methods', full_name='monitor.AlertRule.Actions.methods', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receivers', full_name='monitor.AlertRule.Actions.receivers', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receiver_user_groups', full_name='monitor.AlertRule.Actions.receiver_user_groups', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receiver_owners', full_name='monitor.AlertRule.Actions.receiver_owners', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ALERTRULE_ACTIONS_CONDITION, _ALERTRULE_ACTIONS_RECEIVEROWNERS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=707,
serialized_end=1067,
)
_ALERTRULE_TEMPLATES = _descriptor.Descriptor(
name='Templates',
full_name='monitor.AlertRule.Templates',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='content_template', full_name='monitor.AlertRule.Templates.content_template', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_template', full_name='monitor.AlertRule.Templates.target_template', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='recovery_content_template', full_name='monitor.AlertRule.Templates.recovery_content_template', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1069,
serialized_end=1166,
)
_ALERTRULE_INSTANCES = _descriptor.Descriptor(
name='Instances',
full_name='monitor.AlertRule.Instances',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='monitor.AlertRule.Instances.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceIds', full_name='monitor.AlertRule.Instances.instanceIds', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='query', full_name='monitor.AlertRule.Instances.query', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1168,
serialized_end=1254,
)
_ALERTRULE = _descriptor.Descriptor(
name='AlertRule',
full_name='monitor.AlertRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='org', full_name='monitor.AlertRule.org', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='monitor.AlertRule.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rule_name', full_name='monitor.AlertRule.rule_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='monitor.AlertRule.version', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version_id', full_name='monitor.AlertRule.version_id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_dims', full_name='monitor.AlertRule.alert_dims', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rule_priority', full_name='monitor.AlertRule.rule_priority', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_conditions', full_name='monitor.AlertRule.alert_conditions', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detect_window', full_name='monitor.AlertRule.detect_window', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_count', full_name='monitor.AlertRule.alert_count', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_interval', full_name='monitor.AlertRule.alert_interval', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='recover_count', full_name='monitor.AlertRule.recover_count', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actions', full_name='monitor.AlertRule.actions', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='templates', full_name='monitor.AlertRule.templates', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='monitor.AlertRule.creator', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='monitor.AlertRule.ctime', index=15,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mtime', full_name='monitor.AlertRule.mtime', index=16,
number=17, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instances', full_name='monitor.AlertRule.instances', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='monitor.AlertRule.objectId', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disabled', full_name='monitor.AlertRule.disabled', index=19,
number=20, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source', full_name='monitor.AlertRule.source', index=20,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ALERTRULE_ACTIONS, _ALERTRULE_TEMPLATES, _ALERTRULE_INSTANCES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=164,
serialized_end=1254,
)
_ALERTRULE_ACTIONS_CONDITION.containing_type = _ALERTRULE_ACTIONS
_ALERTRULE_ACTIONS_RECEIVEROWNERS.containing_type = _ALERTRULE_ACTIONS
_ALERTRULE_ACTIONS.fields_by_name['condition'].message_type = _ALERTRULE_ACTIONS_CONDITION
_ALERTRULE_ACTIONS.fields_by_name['receiver_owners'].message_type = _ALERTRULE_ACTIONS_RECEIVEROWNERS
_ALERTRULE_ACTIONS.containing_type = _ALERTRULE
_ALERTRULE_TEMPLATES.containing_type = _ALERTRULE
_ALERTRULE_INSTANCES.fields_by_name['query'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_ALERTRULE_INSTANCES.containing_type = _ALERTRULE
_ALERTRULE.fields_by_name['alert_dims'].message_type = staff__manage__sdk_dot_model_dot_monitor_dot_alert__dims__pb2._ALERTDIMS
_ALERTRULE.fields_by_name['alert_conditions'].message_type = staff__manage__sdk_dot_model_dot_monitor_dot_alert__conditions__pb2._ALERTCONDITIONS
_ALERTRULE.fields_by_name['actions'].message_type = _ALERTRULE_ACTIONS
_ALERTRULE.fields_by_name['templates'].message_type = _ALERTRULE_TEMPLATES
_ALERTRULE.fields_by_name['instances'].message_type = _ALERTRULE_INSTANCES
DESCRIPTOR.message_types_by_name['AlertRule'] = _ALERTRULE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AlertRule = _reflection.GeneratedProtocolMessageType('AlertRule', (_message.Message,), {
'Actions' : _reflection.GeneratedProtocolMessageType('Actions', (_message.Message,), {
'Condition' : _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), {
'DESCRIPTOR' : _ALERTRULE_ACTIONS_CONDITION,
'__module__' : 'alert_rule_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertRule.Actions.Condition)
})
,
'ReceiverOwners' : _reflection.GeneratedProtocolMessageType('ReceiverOwners', (_message.Message,), {
'DESCRIPTOR' : _ALERTRULE_ACTIONS_RECEIVEROWNERS,
'__module__' : 'alert_rule_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertRule.Actions.ReceiverOwners)
})
,
'DESCRIPTOR' : _ALERTRULE_ACTIONS,
'__module__' : 'alert_rule_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertRule.Actions)
})
,
'Templates' : _reflection.GeneratedProtocolMessageType('Templates', (_message.Message,), {
'DESCRIPTOR' : _ALERTRULE_TEMPLATES,
'__module__' : 'alert_rule_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertRule.Templates)
})
,
'Instances' : _reflection.GeneratedProtocolMessageType('Instances', (_message.Message,), {
'DESCRIPTOR' : _ALERTRULE_INSTANCES,
'__module__' : 'alert_rule_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertRule.Instances)
})
,
'DESCRIPTOR' : _ALERTRULE,
'__module__' : 'alert_rule_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertRule)
})
_sym_db.RegisterMessage(AlertRule)
_sym_db.RegisterMessage(AlertRule.Actions)
_sym_db.RegisterMessage(AlertRule.Actions.Condition)
_sym_db.RegisterMessage(AlertRule.Actions.ReceiverOwners)
_sym_db.RegisterMessage(AlertRule.Templates)
_sym_db.RegisterMessage(AlertRule.Instances)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
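# Illustrative sketch (not emitted by protoc): building and round-tripping an
# AlertRule message defined above. The field values are hypothetical examples.
#
#   rule = AlertRule(org=1, id="rule-1", rule_name="cpu-high", rule_priority=2)
#   action = rule.actions.add(type="alert", methods=["mail"], receivers=["admin"])
#   action.condition.lasting_for = 60
#   data = rule.SerializeToString()
#   restored = AlertRule()
#   restored.ParseFromString(data)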
| 46.801961
| 2,272
| 0.747245
|
4a1687e9af702bc0b8b21d589266629b67053f1c
| 827
|
py
|
Python
|
scripts/report.py
|
aw32/sched
|
b6ef35c5b517875a5954c70e2dc366fab3721a60
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/report.py
|
aw32/sched
|
b6ef35c5b517875a5954c70e2dc366fab3721a60
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/report.py
|
aw32/sched
|
b6ef35c5b517875a5954c70e2dc366fab3721a60
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019, Alex Wiens <awiens@mail.upb.de>, Achim Lösch <achim.loesch@upb.de>
# SPDX-License-Identifier: BSD-2-Clause
import os
import os.path
import mako.template
script_path = os.path.dirname(os.path.realpath(__file__))
mako_report = mako.template.Template(filename=os.path.join(script_path,"report.md"), input_encoding="utf-8")
def create_report(test, name, log, wraplogs, testtype, output_prefix):
mdfile = output_prefix + "_report.md"
with open(mdfile, "w") as freport:
freport.write(mako_report.render(testname=name, log=log, wraplogs=wraplogs, testtype=testtype, test=test))
htmlfile = output_prefix + "_report.html"
#pandoc -s -f markdown -t html -o sim_report.html sim_report.md
os.system("pandoc -s -f markdown -t html -o "+htmlfile+" "+mdfile)
| 35.956522
| 108
| 0.748489
|
4a1688976e9e74026b05708729bc151474ae8eda
| 472
|
py
|
Python
|
plotly/validators/layout/scene/zaxis/_gridwidth.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/layout/scene/zaxis/_gridwidth.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/layout/scene/zaxis/_gridwidth.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class GridwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='gridwidth',
parent_name='layout.scene.zaxis',
**kwargs
):
super(GridwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
min=0,
role='style',
**kwargs
)
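# Minimal sketch (not part of the generated validator): plotly's figure
# machinery normally instantiates this class, but it can also be exercised
# directly through the base validator's validate_coerce method.
#
#   v = GridwidthValidator()
#   v.validate_coerce(2.5)   # accepted: numbers >= 0 per min=0 above
#   v.validate_coerce(-1)    # rejected: below the minimum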
| 23.6
| 71
| 0.586864
|
4a1688afea12786c1e6c458b1f989e91b946db05
| 392
|
py
|
Python
|
android/image/models.py
|
gwhong917/A_C_C
|
1cd75a2e439f86bcbf1e6d9e1ac454f7dd276540
|
[
"MIT"
] | null | null | null |
android/image/models.py
|
gwhong917/A_C_C
|
1cd75a2e439f86bcbf1e6d9e1ac454f7dd276540
|
[
"MIT"
] | null | null | null |
android/image/models.py
|
gwhong917/A_C_C
|
1cd75a2e439f86bcbf1e6d9e1ac454f7dd276540
|
[
"MIT"
] | null | null | null |
from django.db import models
# plt_num = plate_detection(
class PicPost(models.Model):
model_pic = models.ImageField(upload_to='image/%Y/%m/%d', max_length=1000)
time = models.DateTimeField(auto_now_add=True)
userid = models.IntegerField()
lat = models.FloatField(null=True)
lng = models.FloatField(null=True)
plate_num = models.CharField(null=True, max_length=200)
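# Example usage (hypothetical, e.g. in a view or a Django shell): the image is
# stored under image/%Y/%m/%d and `time` is filled in automatically.
#
#   post = PicPost.objects.create(
#       model_pic="image/2020/01/01/car.jpg",
#       userid=1,
#       lat=37.5665,
#       lng=126.9780,
#       plate_num="12A3456",
#   )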
| 35.636364
| 78
| 0.732143
|
4a16894b9f589d39b1f69b4b565ebced08c34575
| 168
|
py
|
Python
|
docs/ETC/UnusedModes/S - Grid Squares/info.py
|
kbsezginel/etcviz
|
3a10e1fbfe0e2b032e87b20a58386c412f59ff28
|
[
"BSD-3-Clause"
] | 1
|
2021-11-17T15:00:14.000Z
|
2021-11-17T15:00:14.000Z
|
docs/ETC/UnusedModes/S - Grid Squares/info.py
|
kbsezginel/etcviz
|
3a10e1fbfe0e2b032e87b20a58386c412f59ff28
|
[
"BSD-3-Clause"
] | null | null | null |
docs/ETC/UnusedModes/S - Grid Squares/info.py
|
kbsezginel/etcviz
|
3a10e1fbfe0e2b032e87b20a58386c412f59ff28
|
[
"BSD-3-Clause"
] | null | null | null |
name = "S - Grid Squares"
description = "Grid of oscillating squares"
knob1 = "X Offset"
knob2 = "Y Offset"
knob3 = "Size"
knob4 = "Color"
released = "March 21 2017"
| 21
| 43
| 0.672619
|
4a168a45e998dee023459944c11764027f516d78
| 19,013
|
py
|
Python
|
ucscsdk/mometa/ether/EtherServerIntFIo.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | null | null | null |
ucscsdk/mometa/ether/EtherServerIntFIo.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | null | null | null |
ucscsdk/mometa/ether/EtherServerIntFIo.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for EtherServerIntFIo ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class EtherServerIntFIoConsts():
ADMIN_SPEED_10GBPS = "10gbps"
ADMIN_SPEED_1GBPS = "1gbps"
ADMIN_SPEED_20GBPS = "20gbps"
ADMIN_SPEED_40GBPS = "40gbps"
ADMIN_SPEED_INDETERMINATE = "indeterminate"
ADMIN_STATE_DISABLED = "disabled"
ADMIN_STATE_ENABLED = "enabled"
CHASSIS_ID_N_A = "N/A"
ENCAP_DOT1Q = "dot1q"
ENCAP_ISL = "isl"
ENCAP_NEGOTIATE = "negotiate"
ENCAP_PROPRIETARY = "proprietary"
ENCAP_UNKNOWN = "unknown"
IF_ROLE_DIAG = "diag"
IF_ROLE_FCOE_NAS_STORAGE = "fcoe-nas-storage"
IF_ROLE_FCOE_STORAGE = "fcoe-storage"
IF_ROLE_FCOE_UPLINK = "fcoe-uplink"
IF_ROLE_MGMT = "mgmt"
IF_ROLE_MONITOR = "monitor"
IF_ROLE_NAS_STORAGE = "nas-storage"
IF_ROLE_NETWORK = "network"
IF_ROLE_NETWORK_FCOE_UPLINK = "network-fcoe-uplink"
IF_ROLE_SERVER = "server"
IF_ROLE_SERVICE = "service"
IF_ROLE_STORAGE = "storage"
IF_ROLE_UNKNOWN = "unknown"
IF_TYPE_AGGREGATION = "aggregation"
IF_TYPE_PHYSICAL = "physical"
IF_TYPE_UNKNOWN = "unknown"
IF_TYPE_VIRTUAL = "virtual"
MODE_E = "E"
MODE_F = "F"
MODE_SD = "SD"
MODE_ACCESS = "access"
MODE_FABRIC = "fabric"
MODE_N_PROXY = "n_proxy"
MODE_PROMISCUOUS_ACCESS = "promiscuousAccess"
MODE_PROMISCUOUS_TRUNK = "promiscuousTrunk"
MODE_TRUNK = "trunk"
MODE_UNKNOWN = "unknown"
MODE_VNTAG = "vntag"
OPER_STATE_ADMIN_DOWN = "admin-down"
OPER_STATE_DOWN = "down"
OPER_STATE_ERROR_DISABLED = "error-disabled"
OPER_STATE_FAILED = "failed"
OPER_STATE_HARDWARE_FAILURE = "hardware-failure"
OPER_STATE_INDETERMINATE = "indeterminate"
OPER_STATE_LINK_DOWN = "link-down"
OPER_STATE_LINK_UP = "link-up"
OPER_STATE_NO_LICENSE = "no-license"
OPER_STATE_SFP_NOT_PRESENT = "sfp-not-present"
OPER_STATE_SOFTWARE_FAILURE = "software-failure"
OPER_STATE_UDLD_AGGR_DOWN = "udld-aggr-down"
OPER_STATE_UP = "up"
PEER_CHASSIS_ID_N_A = "N/A"
PEER_ENCAP_CONSOLIDATED = "consolidated"
PEER_ENCAP_VIRTUAL = "virtual"
PEER_ENCAP_VIRTUAL_CE = "virtual-ce"
SWITCH_ID_A = "A"
SWITCH_ID_B = "B"
SWITCH_ID_NONE = "NONE"
SWITCH_ID_MGMT = "mgmt"
USER_RECOVERY_OPERATION_NONE = "none"
USER_RECOVERY_OPERATION_RESET = "reset"
XCVR_TYPE_1000BASECX = "1000basecx"
XCVR_TYPE_1000BASELH = "1000baselh"
XCVR_TYPE_1000BASELX = "1000baselx"
XCVR_TYPE_1000BASESX = "1000basesx"
XCVR_TYPE_1000BASET = "1000baset"
XCVR_TYPE_1000BASEUNKNOWN = "1000baseunknown"
XCVR_TYPE_1000BASEVX = "1000basevx"
XCVR_TYPE_1000BASEX = "1000basex"
XCVR_TYPE_1000BASEZX = "1000basezx"
XCVR_TYPE_10GBASEER = "10gbaseer"
XCVR_TYPE_10GBASELR = "10gbaselr"
XCVR_TYPE_10GBASELRM = "10gbaselrm"
XCVR_TYPE_10GBASESR = "10gbasesr"
XCVR_TYPE_10GBASEZR = "10gbasezr"
XCVR_TYPE_CWDM1471 = "cwdm1471"
XCVR_TYPE_CWDM1531 = "cwdm1531"
XCVR_TYPE_CWDM1551 = "cwdm1551"
XCVR_TYPE_DWDMSFP = "dwdmsfp"
XCVR_TYPE_FET = "fet"
XCVR_TYPE_H10GACU10M = "h10gacu10m"
XCVR_TYPE_H10GACU15M = "h10gacu15m"
XCVR_TYPE_H10GACU1M = "h10gacu1m"
XCVR_TYPE_H10GACU3M = "h10gacu3m"
XCVR_TYPE_H10GACU5M = "h10gacu5m"
XCVR_TYPE_H10GACU7M = "h10gacu7m"
XCVR_TYPE_H10GACUAOC10M = "h10gacuaoc10m"
XCVR_TYPE_H10GACUAOC15M = "h10gacuaoc15m"
XCVR_TYPE_H10GACUAOC1M = "h10gacuaoc1m"
XCVR_TYPE_H10GACUAOC2M = "h10gacuaoc2m"
XCVR_TYPE_H10GACUAOC3M = "h10gacuaoc3m"
XCVR_TYPE_H10GACUAOC5M = "h10gacuaoc5m"
XCVR_TYPE_H10GACUAOC7M = "h10gacuaoc7m"
XCVR_TYPE_H10GAOC10M = "h10gaoc10m"
XCVR_TYPE_H10GAOC1M = "h10gaoc1m"
XCVR_TYPE_H10GAOC2M = "h10gaoc2m"
XCVR_TYPE_H10GAOC3M = "h10gaoc3m"
XCVR_TYPE_H10GAOC5M = "h10gaoc5m"
XCVR_TYPE_H10GAOC7M = "h10gaoc7m"
XCVR_TYPE_H10GCU10M = "h10gcu10m"
XCVR_TYPE_H10GCU1M = "h10gcu1m"
XCVR_TYPE_H10GCU2M = "h10gcu2m"
XCVR_TYPE_H10GCU3M = "h10gcu3m"
XCVR_TYPE_H10GCU5M = "h10gcu5m"
XCVR_TYPE_H10GCU7M = "h10gcu7m"
XCVR_TYPE_H10GLRMSM = "h10glrmsm"
XCVR_TYPE_H10GUSR = "h10gusr"
XCVR_TYPE_QSFP40GCR4 = "qsfp40gcr4"
XCVR_TYPE_QSFP40GCSR4 = "qsfp40gcsr4"
XCVR_TYPE_QSFP40GFET = "qsfp40gfet"
XCVR_TYPE_QSFP40GLR4 = "qsfp40glr4"
XCVR_TYPE_QSFP40GSR4 = "qsfp40gsr4"
XCVR_TYPE_QSFP40GSRBD = "qsfp40gsrbd"
XCVR_TYPE_QSFP4SFP10GCU1M = "qsfp4sfp10gcu1m"
XCVR_TYPE_QSFP4SFP10GCU2M = "qsfp4sfp10gcu2m"
XCVR_TYPE_QSFP4SFP10GCU3M = "qsfp4sfp10gcu3m"
XCVR_TYPE_QSFP4SFP10GCU5M = "qsfp4sfp10gcu5m"
XCVR_TYPE_QSFP4X10GA0C10M = "qsfp4x10ga0c10m"
XCVR_TYPE_QSFP4X10GA0C1M = "qsfp4x10ga0c1m"
XCVR_TYPE_QSFP4X10GA0C2M = "qsfp4x10ga0c2m"
XCVR_TYPE_QSFP4X10GA0C3M = "qsfp4x10ga0c3m"
XCVR_TYPE_QSFP4X10GA0C5M = "qsfp4x10ga0c5m"
XCVR_TYPE_QSFP4X10GA0C7M = "qsfp4x10ga0c7m"
XCVR_TYPE_QSFP4X10GA0CUNKNOWN = "qsfp4x10ga0cunknown"
XCVR_TYPE_QSFP4X10GAC10M = "qsfp4x10gac10m"
XCVR_TYPE_QSFP4X10GAC1M = "qsfp4x10gac1m"
XCVR_TYPE_QSFP4X10GAC3M = "qsfp4x10gac3m"
XCVR_TYPE_QSFP4X10GAC5M = "qsfp4x10gac5m"
XCVR_TYPE_QSFP4X10GAC7M = "qsfp4x10gac7m"
XCVR_TYPE_QSFP4X10GLR = "qsfp4x10glr"
XCVR_TYPE_QSFPH40GACU10M = "qsfph40gacu10m"
XCVR_TYPE_QSFPH40GACU1M = "qsfph40gacu1m"
XCVR_TYPE_QSFPH40GACU3M = "qsfph40gacu3m"
XCVR_TYPE_QSFPH40GACU5M = "qsfph40gacu5m"
XCVR_TYPE_QSFPH40GACU7M = "qsfph40gacu7m"
XCVR_TYPE_QSFPH40GAOC10M = "qsfph40gaoc10m"
XCVR_TYPE_QSFPH40GAOC15M = "qsfph40gaoc15m"
XCVR_TYPE_QSFPH40GAOC1M = "qsfph40gaoc1m"
XCVR_TYPE_QSFPH40GAOC2M = "qsfph40gaoc2m"
XCVR_TYPE_QSFPH40GAOC3M = "qsfph40gaoc3m"
XCVR_TYPE_QSFPH40GAOC5M = "qsfph40gaoc5m"
XCVR_TYPE_QSFPH40GAOC7M = "qsfph40gaoc7m"
XCVR_TYPE_QSFPH40GAOCUNKNOWN = "qsfph40gaocunknown"
XCVR_TYPE_QSFPH40GCU1M = "qsfph40gcu1m"
XCVR_TYPE_QSFPH40GCU2M = "qsfph40gcu2m"
XCVR_TYPE_QSFPH40GCU3M = "qsfph40gcu3m"
XCVR_TYPE_QSFPH40GCU5M = "qsfph40gcu5m"
XCVR_TYPE_QSFPLOOP = "qsfploop"
XCVR_TYPE_QSFPQSA = "qsfpqsa"
XCVR_TYPE_QSFPUNKNOWN = "qsfpunknown"
XCVR_TYPE_SFP = "sfp"
XCVR_TYPE_UNKNOWN = "unknown"
XCVR_TYPE_X2 = "x2"
class EtherServerIntFIo(ManagedObject):
"""This is EtherServerIntFIo class."""
consts = EtherServerIntFIoConsts()
naming_props = set([u'portId'])
mo_meta = MoMeta("EtherServerIntFIo", "etherServerIntFIo", "port-[port_id]", VersionMeta.Version111a, "InputOutput", 0x7f, [], ["admin"], [u'diagSrvCtrl', u'portGroup'], [u'equipmentXcvr', u'etherErrStats', u'etherLossStats', u'etherPauseStats', u'etherRxStats', u'etherTxStats', u'faultInst'], ["Get"])
prop_meta = {
"admin_speed": MoPropertyMeta("admin_speed", "adminSpeed", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["10gbps", "1gbps", "20gbps", "40gbps", "indeterminate"], []),
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["disabled", "enabled"], []),
"aggr_port_id": MoPropertyMeta("aggr_port_id", "aggrPortId", "uint", VersionMeta.Version121a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"chassis_id": MoPropertyMeta("chassis_id", "chassisId", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["N/A"], ["0-255"]),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"encap": MoPropertyMeta("encap", "encap", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["dot1q", "isl", "negotiate", "proprietary", "unknown"], []),
"ep_dn": MoPropertyMeta("ep_dn", "epDn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"if_role": MoPropertyMeta("if_role", "ifRole", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["diag", "fcoe-nas-storage", "fcoe-storage", "fcoe-uplink", "mgmt", "monitor", "nas-storage", "network", "network-fcoe-uplink", "server", "service", "storage", "unknown"], []),
"if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["aggregation", "physical", "unknown", "virtual"], []),
"locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
"mac": MoPropertyMeta("mac", "mac", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""(([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]))|0""", [], []),
"mac_addr": MoPropertyMeta("mac_addr", "macAddr", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, r"""(([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]))|0""", [], []),
"mode": MoPropertyMeta("mode", "mode", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["E", "F", "SD", "access", "fabric", "n_proxy", "promiscuousAccess", "promiscuousTrunk", "trunk", "unknown", "vntag"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"ns_size": MoPropertyMeta("ns_size", "nsSize", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"oper_border_aggr_port_id": MoPropertyMeta("oper_border_aggr_port_id", "operBorderAggrPortId", "uint", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"oper_border_port_id": MoPropertyMeta("oper_border_port_id", "operBorderPortId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"oper_border_slot_id": MoPropertyMeta("oper_border_slot_id", "operBorderSlotId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["admin-down", "down", "error-disabled", "failed", "hardware-failure", "indeterminate", "link-down", "link-up", "no-license", "sfp-not-present", "software-failure", "udld-aggr-down", "up"], []),
"peer_aggr_port_id": MoPropertyMeta("peer_aggr_port_id", "peerAggrPortId", "uint", VersionMeta.Version121a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"peer_chassis_id": MoPropertyMeta("peer_chassis_id", "peerChassisId", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["N/A"], ["0-255"]),
"peer_dn": MoPropertyMeta("peer_dn", "peerDn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"peer_encap": MoPropertyMeta("peer_encap", "peerEncap", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["consolidated", "virtual", "virtual-ce"], ["0-4294967295"]),
"peer_port_id": MoPropertyMeta("peer_port_id", "peerPortId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"peer_slot_id": MoPropertyMeta("peer_slot_id", "peerSlotId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"port_id": MoPropertyMeta("port_id", "portId", "uint", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"slot_id": MoPropertyMeta("slot_id", "slotId", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"state_qual": MoPropertyMeta("state_qual", "stateQual", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE", "mgmt"], []),
"transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
"ts": MoPropertyMeta("ts", "ts", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
"user_recovery_operation": MoPropertyMeta("user_recovery_operation", "userRecoveryOperation", "string", VersionMeta.Version201b, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["none", "reset"], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"xcvr_type": MoPropertyMeta("xcvr_type", "xcvrType", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["1000basecx", "1000baselh", "1000baselx", "1000basesx", "1000baset", "1000baseunknown", "1000basevx", "1000basex", "1000basezx", "10gbaseer", "10gbaselr", "10gbaselrm", "10gbasesr", "10gbasezr", "cwdm1471", "cwdm1531", "cwdm1551", "dwdmsfp", "fet", "h10gacu10m", "h10gacu15m", "h10gacu1m", "h10gacu3m", "h10gacu5m", "h10gacu7m", "h10gacuaoc10m", "h10gacuaoc15m", "h10gacuaoc1m", "h10gacuaoc2m", "h10gacuaoc3m", "h10gacuaoc5m", "h10gacuaoc7m", "h10gaoc10m", "h10gaoc1m", "h10gaoc2m", "h10gaoc3m", "h10gaoc5m", "h10gaoc7m", "h10gcu10m", "h10gcu1m", "h10gcu2m", "h10gcu3m", "h10gcu5m", "h10gcu7m", "h10glrmsm", "h10gusr", "qsfp40gcr4", "qsfp40gcsr4", "qsfp40gfet", "qsfp40glr4", "qsfp40gsr4", "qsfp40gsrbd", "qsfp4sfp10gcu1m", "qsfp4sfp10gcu2m", "qsfp4sfp10gcu3m", "qsfp4sfp10gcu5m", "qsfp4x10ga0c10m", "qsfp4x10ga0c1m", "qsfp4x10ga0c2m", "qsfp4x10ga0c3m", "qsfp4x10ga0c5m", "qsfp4x10ga0c7m", "qsfp4x10ga0cunknown", "qsfp4x10gac10m", "qsfp4x10gac1m", "qsfp4x10gac3m", "qsfp4x10gac5m", "qsfp4x10gac7m", "qsfp4x10glr", "qsfph40gacu10m", "qsfph40gacu1m", "qsfph40gacu3m", "qsfph40gacu5m", "qsfph40gacu7m", "qsfph40gaoc10m", "qsfph40gaoc15m", "qsfph40gaoc1m", "qsfph40gaoc2m", "qsfph40gaoc3m", "qsfph40gaoc5m", "qsfph40gaoc7m", "qsfph40gaocunknown", "qsfph40gcu1m", "qsfph40gcu2m", "qsfph40gcu3m", "qsfph40gcu5m", "qsfploop", "qsfpqsa", "qsfpunknown", "sfp", "unknown", "x2"], []),
}
prop_map = {
"adminSpeed": "admin_speed",
"adminState": "admin_state",
"aggrPortId": "aggr_port_id",
"chassisId": "chassis_id",
"childAction": "child_action",
"dn": "dn",
"encap": "encap",
"epDn": "ep_dn",
"fltAggr": "flt_aggr",
"ifRole": "if_role",
"ifType": "if_type",
"locale": "locale",
"mac": "mac",
"macAddr": "mac_addr",
"mode": "mode",
"model": "model",
"name": "name",
"nsSize": "ns_size",
"operBorderAggrPortId": "oper_border_aggr_port_id",
"operBorderPortId": "oper_border_port_id",
"operBorderSlotId": "oper_border_slot_id",
"operState": "oper_state",
"peerAggrPortId": "peer_aggr_port_id",
"peerChassisId": "peer_chassis_id",
"peerDn": "peer_dn",
"peerEncap": "peer_encap",
"peerPortId": "peer_port_id",
"peerSlotId": "peer_slot_id",
"portId": "port_id",
"revision": "revision",
"rn": "rn",
"serial": "serial",
"slotId": "slot_id",
"stateQual": "state_qual",
"status": "status",
"switchId": "switch_id",
"transport": "transport",
"ts": "ts",
"type": "type",
"userRecoveryOperation": "user_recovery_operation",
"vendor": "vendor",
"xcvrType": "xcvr_type",
}
def __init__(self, parent_mo_or_dn, port_id, **kwargs):
self._dirty_mask = 0
self.port_id = port_id
self.admin_speed = None
self.admin_state = None
self.aggr_port_id = None
self.chassis_id = None
self.child_action = None
self.encap = None
self.ep_dn = None
self.flt_aggr = None
self.if_role = None
self.if_type = None
self.locale = None
self.mac = None
self.mac_addr = None
self.mode = None
self.model = None
self.name = None
self.ns_size = None
self.oper_border_aggr_port_id = None
self.oper_border_port_id = None
self.oper_border_slot_id = None
self.oper_state = None
self.peer_aggr_port_id = None
self.peer_chassis_id = None
self.peer_dn = None
self.peer_encap = None
self.peer_port_id = None
self.peer_slot_id = None
self.revision = None
self.serial = None
self.slot_id = None
self.state_qual = None
self.status = None
self.switch_id = None
self.transport = None
self.ts = None
self.type = None
self.user_recovery_operation = None
self.vendor = None
self.xcvr_type = None
ManagedObject.__init__(self, "EtherServerIntFIo", parent_mo_or_dn, **kwargs)
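# Illustrative sketch (not part of the generated SDK module): in practice this
# managed object is fetched through a Ucsc handle query; constructing it
# locally only needs a parent DN (the one below is hypothetical) and the
# naming property port_id.
#
#   intf = EtherServerIntFIo(parent_mo_or_dn="sys/switch-A/slot-1", port_id="1")
#   # rn is derived as "port-1" from the "port-[port_id]" rule in mo_meta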
| 61.332258
| 1,522
| 0.681113
|
4a168a6deaa94e3ec426e38381e73b6c993bbc39
| 3,876
|
py
|
Python
|
datumaro/tests/test_datumaro_format.py
|
SUNx2YCH/cvat
|
7e8fc2366afe2b41f534633e4b9d42fbd580546a
|
[
"MIT"
] | null | null | null |
datumaro/tests/test_datumaro_format.py
|
SUNx2YCH/cvat
|
7e8fc2366afe2b41f534633e4b9d42fbd580546a
|
[
"MIT"
] | null | null | null |
datumaro/tests/test_datumaro_format.py
|
SUNx2YCH/cvat
|
7e8fc2366afe2b41f534633e4b9d42fbd580546a
|
[
"MIT"
] | null | null | null |
import numpy as np
from unittest import TestCase
from datumaro.components.project import Project
from datumaro.components.extractor import (Extractor, DatasetItem,
AnnotationType, Label, Mask, Points, Polygon,
PolyLine, Bbox, Caption,
LabelCategories, MaskCategories, PointsCategories
)
from datumaro.plugins.datumaro_format.converter import DatumaroConverter
from datumaro.util.test_utils import TestDir, item_to_str
from datumaro.util.mask_tools import generate_colormap
class DatumaroConverterTest(TestCase):
class TestExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=100, subset='train', image=np.ones((10, 6, 3)),
annotations=[
Caption('hello', id=1),
Caption('world', id=2, group=5),
Label(2, id=3, attributes={
'x': 1,
'y': '2',
}),
Bbox(1, 2, 3, 4, label=4, id=4, attributes={
'score': 1.0,
}),
Bbox(5, 6, 7, 8, id=5, group=5),
Points([1, 2, 2, 0, 1, 1], label=0, id=5),
Mask(label=3, id=5, image=np.ones((2, 3))),
]),
DatasetItem(id=21, subset='train',
annotations=[
Caption('test'),
Label(2),
Bbox(1, 2, 3, 4, 5, id=42, group=42)
]),
DatasetItem(id=2, subset='val',
annotations=[
PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11),
Polygon([1, 2, 3, 4, 5, 6, 7, 8], id=12),
]),
DatasetItem(id=42, subset='test'),
DatasetItem(id=42),
DatasetItem(id=43),
])
def categories(self):
label_categories = LabelCategories()
for i in range(5):
label_categories.add('cat' + str(i))
mask_categories = MaskCategories(
generate_colormap(len(label_categories.items)))
points_categories = PointsCategories()
for index, _ in enumerate(label_categories.items):
points_categories.add(index, ['cat1', 'cat2'], adjacent=[0, 1])
return {
AnnotationType.label: label_categories,
AnnotationType.mask: mask_categories,
AnnotationType.points: points_categories,
}
def test_can_save_and_load(self):
with TestDir() as test_dir:
source_dataset = self.TestExtractor()
converter = DatumaroConverter(save_images=True)
converter(source_dataset, test_dir)
project = Project.import_from(test_dir, 'datumaro')
parsed_dataset = project.make_dataset()
self.assertListEqual(
sorted(source_dataset.subsets()),
sorted(parsed_dataset.subsets()),
)
self.assertEqual(len(source_dataset), len(parsed_dataset))
for subset_name in source_dataset.subsets():
source_subset = source_dataset.get_subset(subset_name)
parsed_subset = parsed_dataset.get_subset(subset_name)
self.assertEqual(len(source_subset), len(parsed_subset))
for idx, (item_a, item_b) in enumerate(
zip(source_subset, parsed_subset)):
self.assertEqual(item_a, item_b, '%s:\n%s\nvs.\n%s\n' % \
(idx, item_to_str(item_a), item_to_str(item_b)))
self.assertEqual(
source_dataset.categories(),
parsed_dataset.categories())
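# The round trip above saves the synthetic dataset with DatumaroConverter and
# re-imports it through a Project, asserting that subsets, items and
# categories survive unchanged. A typical local run (assuming the repository
# layout is importable) would be:
#
#   python -m unittest datumaro.tests.test_datumaro_format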
| 38.76
| 79
| 0.518576
|
4a168d0d90d32611c48f1ebe2f90a2e03edd664c
| 1,393
|
py
|
Python
|
disk/migrations/0001_initial.py
|
L316645200/bkapp
|
a593126fe00827543fe0e31f37aed7ac2e4bbe14
|
[
"Apache-2.0"
] | null | null | null |
disk/migrations/0001_initial.py
|
L316645200/bkapp
|
a593126fe00827543fe0e31f37aed7ac2e4bbe14
|
[
"Apache-2.0"
] | 2
|
2020-06-05T23:50:18.000Z
|
2021-06-10T22:07:04.000Z
|
disk/migrations/0001_initial.py
|
SalomeL/bkapp
|
a593126fe00827543fe0e31f37aed7ac2e4bbe14
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hosts', '0008_auto_20191028_1810'),
]
operations = [
migrations.CreateModel(
name='DiskUsage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.IntegerField(verbose_name=b'\xe7\xa3\x81\xe7\x9b\x98\xe4\xbd\xbf\xe7\x94\xa8\xe7\x8e\x87')),
('add_time', models.DateTimeField(auto_now=True, verbose_name=b'\xe5\xbd\x95\xe5\x85\xa5\xe6\x97\xb6\xe9\x97\xb4')),
('host', models.ForeignKey(related_name='DiskUsage', to='hosts.Host')),
],
),
migrations.CreateModel(
name='MemoryUsage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.IntegerField(verbose_name=b'\xe5\x86\x85\xe5\xad\x98\xe4\xbd\xbf\xe7\x94\xa8\xe7\x8e\x87')),
('add_time', models.DateTimeField(auto_now=True, verbose_name=b'\xe5\xbd\x95\xe5\x85\xa5\xe6\x97\xb6\xe9\x97\xb4')),
('host', models.ForeignKey(related_name='MemoryUsage', to='hosts.Host')),
],
),
]
| 42.212121
| 132
| 0.603733
|
4a168d67fd68e3ae67904ee37faaa8cb93938d8a
| 1,875
|
py
|
Python
|
Problem18.py
|
Cleancode404/ProjectEuler
|
2f93b256b107bfb6a395b8aa197cfeacc599b00b
|
[
"MIT"
] | null | null | null |
Problem18.py
|
Cleancode404/ProjectEuler
|
2f93b256b107bfb6a395b8aa197cfeacc599b00b
|
[
"MIT"
] | null | null | null |
Problem18.py
|
Cleancode404/ProjectEuler
|
2f93b256b107bfb6a395b8aa197cfeacc599b00b
|
[
"MIT"
] | null | null | null |
# Maximum path sum I
"""
By starting at the top of the triangle below and moving to
adjacent numbers on the row below,
the maximum total from top to bottom is 23.
3
7 4
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom of the triangle below:
75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23
NOTE: As there are only 16384 routes,
it is possible to solve this problem by trying every route.
However, Problem 67, is the same challenge with a triangle containing one-hundred rows;
it cannot be solved by brute force, and requires a clever method! ;o)
"""""
if __name__ == '__main__':
tri = [
[75],
[95, 64],
[17, 47, 82],
[18, 35, 87, 10],
[20, 4, 82, 47, 65],
[19, 1, 23, 75, 3, 34],
[88, 2, 77, 73, 7, 63, 67],
[99, 65, 4, 28, 6, 16, 70, 92],
[41, 41, 26, 56, 83, 40, 80, 70, 33],
[41, 48, 72, 33, 47, 32, 37, 16, 94, 29],
[53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14],
[70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57],
[91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48],
[63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31],
[4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23]
]
for i in range(len(tri) - 2, -1, -1):
for j in range(0, len(tri[i])):
tri[i][j] += max(tri[i + 1][j], tri[i + 1][j + 1])
print(tri[0][0])
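    # Quick sanity check of the same bottom-up approach on the 4-row example
    # from the problem statement above (added for illustration); the expected
    # maximum path sum there is 3 + 7 + 4 + 9 = 23.
    small = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(len(small) - 2, -1, -1):
        for j in range(len(small[i])):
            small[i][j] += max(small[i + 1][j], small[i + 1][j + 1])
    assert small[0][0] == 23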
| 30.241935
| 88
| 0.511467
|
4a168dbd44c687fee2b8efb496dc0cc170b2c4e5
| 5,105
|
py
|
Python
|
insights/core/remote_resource.py
|
Siddhantrao/insights-core
|
03807b46b3678b7c1376cd4afb7413ec36b054b4
|
[
"Apache-2.0"
] | null | null | null |
insights/core/remote_resource.py
|
Siddhantrao/insights-core
|
03807b46b3678b7c1376cd4afb7413ec36b054b4
|
[
"Apache-2.0"
] | null | null | null |
insights/core/remote_resource.py
|
Siddhantrao/insights-core
|
03807b46b3678b7c1376cd4afb7413ec36b054b4
|
[
"Apache-2.0"
] | null | null | null |
import requests
import redis
import calendar
from cachecontrol.heuristics import BaseHeuristic
from cachecontrol.wrapper import CacheControl
from cachecontrol.caches.file_cache import FileCache
from datetime import datetime, timedelta
from email.utils import parsedate, formatdate
from cachecontrol.caches.redis_cache import RedisCache
class RemoteResource(object):
"""
RemoteResource class for accessing external Web resources.
Examples:
>>> from insights.core.remote_resource import RemoteResource
>>> rr = RemoteResource()
>>> rtn = rr.get("http://google.com")
>>> print (rtn.content)
"""
timeout = 10
""" float: Time in seconds for the requests.get api call to wait before returning a timeout exception """
def __init__(self, session=None):
self.session = session or requests.Session()
def get(self, url, params={}, headers={}, auth=(), certificate_path=None):
"""
Returns the response payload from the request to the given URL.
Args:
url (str): The URL for the WEB API that the request is being made too.
params (dict): Dictionary containing the query string parameters.
headers (dict): HTTP Headers that may be needed for the request.
auth (tuple): User ID and password for Basic Auth
certificate_path (str): Path to the ssl certificate.
Returns:
response: (HttpResponse): Response object from requests.get api request
"""
certificate_path = certificate_path if certificate_path else False
return self.session.get(url, params=params, headers=headers, verify=certificate_path, auth=auth,
timeout=self.timeout)
class CachedRemoteResource(RemoteResource):
"""
RemoteResource subclass that sets up caching for subsequent Web resource requests.
Examples:
>>> from insights.core.remote_resource import CachedRemoteResource
>>> crr = CachedRemoteResource()
>>> rtn = crr.get("http://google.com")
>>> print (rtn.content)
"""
expire_after = 180
""" float: Amount of time in seconds that the cache will expire """
backend = "DictCache"
""" str: Type of storage for cache `DictCache1`, `FileCache` or `RedisCache` """
redis_port = 6379
""" int: Port used to contact the redis instance if `RedisCache` backend is specified """
redis_host = 'localhost'
""" str: Hostname of redis instance if `RedisCache` backend is specified """
__heuristic = 'DefaultHeuristic'
__cache = None
file_cache_path = '.web_cache'
""" str: Path to where file cache will be stored if `FileCache` backend is specified """
def __init__(self):
session = requests.Session()
hclass = globals()[self.__heuristic]
        # build the shared cache backend only once per class
        if not self.__class__.__cache:
            if self.backend == "RedisCache":
                pool = redis.ConnectionPool(host=self.redis_host, port=self.redis_port, db=0)
                r = redis.Redis(connection_pool=pool)
                self.__class__.__cache = RedisCache(r)
            elif self.backend == "FileCache":
                self.__class__.__cache = FileCache(self.file_cache_path)
            else:
                self.__class__.__cache = None
        session = CacheControl(session, heuristic=hclass(self.expire_after), cache=self.__class__.__cache)
super(CachedRemoteResource, self).__init__(session)
class DefaultHeuristic(BaseHeuristic):
"""
BaseHeuristic subclass that sets the default caching headers if not supplied by the remote service.
"""
default_cache_vars = "Remote service caching headers not set correctly, using default caching"
"""
str: Message content warning that the response from the remote server did not
return proper HTTP cache headers so we will use default cache settings
"""
server_cache_headers = "Caching being done based on caching headers returned by remote service"
""" str: Message content warning that we are using cache settings returned by the remote server. """
def __init__(self, expire_after):
self.expire_after = expire_after
def update_headers(self, response):
"""
Returns the updated caching headers.
Args:
response (HttpResponse): The response from the remote service
Returns:
response:(HttpResponse.Headers): Http caching headers
"""
if 'expires' in response.headers and 'cache-control' in response.headers:
self.msg = self.server_cache_headers
return response.headers
else:
self.msg = self.default_cache_vars
date = parsedate(response.headers['date'])
expires = datetime(*date[:6]) + timedelta(0, self.expire_after)
response.headers.update({'expires': formatdate(calendar.timegm(expires.timetuple())),
'cache-control': 'public'})
return response.headers
def warning(self, response):
return '110 - "%s"' % self.msg
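
if __name__ == '__main__':
    # A minimal usage sketch (added for illustration; the URL, cache path and
    # expiry below are arbitrary example values, and network access is assumed).
    class FileCachedResource(CachedRemoteResource):
        backend = "FileCache"
        file_cache_path = "/tmp/example_web_cache"
        expire_after = 300

    crr = FileCachedResource()
    first = crr.get("http://example.com")   # fetched from the remote service
    second = crr.get("http://example.com")  # served from the file cache until expiry
    print(first.status_code, second.status_code)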
| 37.262774
| 109
| 0.659158
|
4a168e780a35166d73625058adea95a45028f90d
| 12,785
|
py
|
Python
|
torchtools/trainer.py
|
Time1ess/torchtools
|
1c48591188827f8a7403162728f86229203354c5
|
[
"BSD-3-Clause"
] | 16
|
2017-08-15T14:01:13.000Z
|
2020-12-21T11:23:31.000Z
|
torchtools/trainer.py
|
Time1ess/torchtools
|
1c48591188827f8a7403162728f86229203354c5
|
[
"BSD-3-Clause"
] | null | null | null |
torchtools/trainer.py
|
Time1ess/torchtools
|
1c48591188827f8a7403162728f86229203354c5
|
[
"BSD-3-Clause"
] | 2
|
2017-12-28T14:09:09.000Z
|
2020-07-14T14:29:30.000Z
|
# coding: UTF-8
import torch
from tqdm import tqdm, trange
from torchtools import TRAIN_MODE, VALIDATE_MODE, TEST_MODE
from torchtools.callbacks import Hook, Callback
from torchtools.exceptions import (
HookTypeError, TrainerTerminatedException)
from torchtools.meters import Meter
class Trainer(object):
"""A class to handle the whole training, validating and testing process"""
hook_entries = [
'on_train_start', 'on_train_end',
'on_epoch_start', 'on_epoch_end',
'on_forward_end', 'on_backward_end',
'on_batch_start', 'on_batch_end',
'on_validate_start', 'on_validate_end',
'on_test_start', 'on_test_end',
'on_terminated']
trainer_ended = False
def __init__(self, model, train_data_loader, criterion, optimizer,
val_data_loader=None, test_data_loader=None,
device='cpu'):
"""
Instantiate a trainer object.
Parameters
----------
model: torch.nn.Module
A model to train.
train_data_loader: torch.utils.data.DataLoader
An instance of DataLoader to load train data.
criterion: torch.nn.Module
A loss function.
optimizer: torch.optim.Optimizer
Optimizer responsible for optimizing model
val_data_loader(optional): torch.utils.data.DataLoader
An instance of DataLoader to load validate data.
test_data_loader(optional): torch.utils.data.DataLoader
An instance of DataLoader to load test data.
device: str or torch.device
            Which device to run on, formatted like 'cuda:0' or 'cpu'.
            Default: 'cpu'.
"""
self.model = model
self.train_data_loader = train_data_loader
self.criterion = criterion
self.optimizer = optimizer
self.val_data_loader = val_data_loader
self.test_data_loader = test_data_loader
if isinstance(device, torch.device):
self.device = device
else:
self.device = torch.device(device)
self.callback_hooks = {k: [] for k in self.hook_entries}
self.meter_hooks = {k: [] for k in self.hook_entries}
self.meters = {}
self.callbacks = []
def register_hooks(self, hooks):
"""
Register multiple hooks at the same time.
Parameters
----------
hooks: Iterable of Hook
            An iterable of hooks to be registered.
"""
for hook in hooks:
self.register_hook(hook)
def register_hook(self, hook):
"""
Register a hook.
Parameters
----------
hook: Hook
            A Hook object to be registered.
"""
if not isinstance(hook, Hook):
raise HookTypeError('{} is not a valid hook'.format(hook))
if isinstance(hook, Callback):
hook._callback_check(self)
if isinstance(hook, Meter):
container = self.meter_hooks
self.meters[hook.name] = hook
else:
container = self.callback_hooks
self.callbacks.append(hook)
for name in self.hook_entries:
entry = getattr(hook, name)
container[name].append(entry)
def unregister_hooks(self, hooks):
"""
Unregister multiple hooks at the same time.
Parameters
----------
hooks: Iterable of Hook
            An iterable of hooks to be unregistered.
"""
for hook in hooks:
self.unregister_hook(hook)
def unregister_hook(self, hook):
"""
Unregister a hook.
Parameters
----------
hook: Hook
            A Hook object to be unregistered.
"""
if not isinstance(hook, Hook):
raise HookTypeError('{} is not a valid hook'.format(hook))
if isinstance(hook, Meter):
container = self.meter_hooks
self.meters.pop(hook.name, None)
else:
container = self.callback_hooks
self.callbacks.remove(hook)
for name in self.hook_entries:
entry = getattr(hook, name, None)
if entry is not None:
container[name].remove(entry)
def terminate(self, raise_exception=True):
"""
Terminate training process, trigger `on_terminated` event.
Parameters
----------
raise_exception: bool
            whether to raise a TrainerTerminatedException after the event,
default `True`.
"""
self.notify_registered_hooks('on_terminated', None)
if raise_exception:
raise TrainerTerminatedException()
def exit(self):
"""
Set trainer_ended flag to True.
"""
self.trainer_ended = True
return 0
def notify_registered_hooks(self, name, state):
"""
Event dispatcher for all registered hooks.
Parameters
----------
name: str
Event name.
state: dict
Current state dictionary.
"""
for hook in self.meter_hooks[name]:
hook(self, state)
if name == 'on_epoch_end':
self.validate(epochs=state['epochs'])
for hook in self.callback_hooks[name]:
hook(self, state)
def restore_state(self, state, checkpoint):
"""
Restore from checkpoint.
Parameters
----------
state: dict
Current state dictionary.
checkpoint: str
Path to checkpoint.
"""
checkpoint = torch.load(checkpoint)
state['model'].load_state_dict(checkpoint['model_state_dict'])
state['optimizer'].load_state_dict(checkpoint['optimizer_state_dict'])
state['epochs'] = checkpoint['epochs']
def train(self, max_epoch, checkpoint=None):
"""
Train model with `max_epoch` epochs.
Parameters
----------
max_epoch: int
Num of epochs the model need to be trained.
checkpoint: str
Path to checkpoint, default `None`.
"""
model = self.model.train(True)
data_loader = self.train_data_loader
criterion = self.criterion
optimizer = self.optimizer
meters = self.meters
device = self.device
self.trainer_ended = False
state = {
'model': model,
'arch': type(model).__name__,
'max_epoch': max_epoch,
'epochs': 0,
'iters': 0, # Total iterations
'optimizer': optimizer,
'mode': TRAIN_MODE,
'meters': meters,
}
if checkpoint is not None:
self.restore_state(state, checkpoint)
self.notify_registered_hooks('on_train_start', state)
iter_epoch = trange(max_epoch, initial=state['epochs'], unit='epoch')
iter_epoch.set_description('Train')
with torch.set_grad_enabled(True):
for epoch in iter_epoch:
model = self.model.train(True)
state['epochs'] = epoch + 1
self.notify_registered_hooks('on_epoch_start', state)
iter_data = tqdm(data_loader, unit=' batches')
iter_data.set_description('Epoch ' + str(epoch))
for batch in iter_data:
input, target = batch[0], batch[1]
state['iters'] += 1
state['input'] = input
state['target'] = target
self.notify_registered_hooks('on_batch_start', state)
input, target = input.to(device), target.to(device)
def closure():
state['optimizer'].zero_grad()
output = state['model'](input)
loss = criterion(output, target)
loss_val = loss.item()
iter_data.set_postfix(iters=state['iters'],
loss=loss_val)
state['loss'] = loss_val
state['output'] = output
self.notify_registered_hooks('on_forward_end', state)
loss.backward()
self.notify_registered_hooks('on_backward_end', state)
return loss
state['optimizer'].step(closure)
self.notify_registered_hooks('on_batch_end', state)
if self.trainer_ended:
break
self.notify_registered_hooks('on_epoch_end', state)
if self.trainer_ended:
break
self.trainer_ended = True
self.notify_registered_hooks('on_train_end', state)
return state
def validate(self, epochs=-1):
"""
Validate model(val_date_loader needed).
Parameters
----------
epochs: int
Which epoch the validation process is in, default `-1`.
"""
if self.val_data_loader is None:
return {}
model = self.model.train(False)
data_loader = self.val_data_loader
criterion = self.criterion
meters = self.meters
device = self.device
state = {
'model': model,
'arch': type(model).__name__,
'mode': VALIDATE_MODE,
'epochs': epochs,
'iters': 0,
'meters': meters,
}
self.notify_registered_hooks('on_validate_start', state)
iter_data = tqdm(data_loader, unit=' batches')
iter_data.set_description('Validate')
with torch.set_grad_enabled(False):
for batch in iter_data:
input, target = batch[0], batch[1]
state['iters'] += 1
state['input'] = input
state['target'] = target
self.notify_registered_hooks('on_batch_start', state)
input, target = input.to(device), target.to(device)
def closure():
output = state['model'](input)
loss = criterion(output, target)
loss_val = loss.item()
iter_data.set_postfix(loss=loss_val)
state['output'] = output
state['val_loss'] = loss_val
self.notify_registered_hooks('on_forward_end', state)
return loss
closure()
self.notify_registered_hooks('on_batch_end', state)
self.notify_registered_hooks('on_validate_end', state)
return state
def test(self, test_data_loader=None):
"""
Test model(test_data_loader needed).
Parameters
----------
test_data_loader: torch.utils.data.DataLoader
An instance of DataLoader to load test data, default `None`.
"""
if test_data_loader is None and self.test_data_loader is None:
return {}
if test_data_loader:
data_loader = test_data_loader
else:
data_loader = self.test_data_loader
model = self.model.train(False)
criterion = self.criterion
meters = self.meters
device = self.device
state = {
'model': model,
'arch': type(model).__name__,
'mode': TEST_MODE,
'iters': 0,
'meters': meters,
}
self.notify_registered_hooks('on_test_start', state)
iter_data = tqdm(data_loader, unit=' batches')
iter_data.set_description('Test')
with torch.set_grad_enabled(False):
for batch in iter_data:
input, target = batch[0], batch[1]
state['iters'] += 1
state['input'] = input
state['target'] = target
self.notify_registered_hooks('on_batch_start', state)
input, target = input.to(device), target.to(device)
def closure():
output = state['model'](input)
loss = criterion(output, target)
loss_val = loss.item()
iter_data.set_postfix(loss=loss_val)
state['output'] = output
state['test_loss'] = loss_val
self.notify_registered_hooks('on_forward_end', state)
return loss
closure()
self.notify_registered_hooks('on_batch_end', state)
self.notify_registered_hooks('on_test_end', state)
return state
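
if __name__ == '__main__':
    # A minimal usage sketch (added for illustration, not from the library docs).
    # The toy tensors, layer sizes and hyper-parameters below are arbitrary
    # example values; torch is assumed to be installed.
    from torch import nn, optim
    from torch.utils.data import DataLoader, TensorDataset

    inputs = torch.randn(64, 10)
    targets = torch.randint(0, 2, (64,))
    loader = DataLoader(TensorDataset(inputs, targets), batch_size=16)

    net = nn.Linear(10, 2)
    trainer = Trainer(
        model=net,
        train_data_loader=loader,
        criterion=nn.CrossEntropyLoss(),
        optimizer=optim.SGD(net.parameters(), lr=0.1),
        val_data_loader=loader,
        device='cpu')
    trainer.train(max_epoch=2)  # runs two epochs, validating after each one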
| 34.00266
| 78
| 0.546734
|
4a168efd50746a1be1babb3d8c81ec9d4f0259ec
| 52,271
|
py
|
Python
|
puzzler/puzzles/pentacubes.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
puzzler/puzzles/pentacubes.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
puzzler/puzzles/pentacubes.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1
|
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: (C) 1998-2015 by David J. Goodger
# License: GPL 2 (see __init__.py)
"""
Concrete pentacube puzzles.
"""
from puzzler.puzzles import Puzzle3D, Puzzle2D
from puzzler.puzzles.polycubes import (
SolidPentominoes, Pentacubes, PentacubesPlus, NonConvexPentacubes,
Pentacubes3x3x3)
from puzzler.coordsys import Cartesian3DCoordSet
class Pentacubes5x7x7OpenBox(Pentacubes):
"""many solutions"""
width = 7
height = 7
depth = 5
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if ( z == 0 or x == 0 or x == self.width - 1
or y == 0 or y == self.height - 1):
yield (x, y, z)
transform_solution_matrix = Puzzle3D.swap_yz_transform
class Pentacubes3x9x9OpenBox(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 3
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if ( z == 0 or x == 0 or x == self.width - 1
or y == 0 or y == self.height - 1):
yield (x, y, z)
transform_solution_matrix = Puzzle3D.swap_yz_transform
class Pentacubes18x3x3OpenBox(Pentacubes):
"""
many solutions
design from Kadon's Super Quintillions booklet
"""
width = 3
height = 18
depth = 3
def coordinates(self):
coords = (
set(self.coordinates_cuboid(3, 18, 3))
- set(self.coordinates_cuboid(1, 17, 1, offset=(1,1,1))))
return sorted(coords)
class Pentacubes2x7x15OpenBox(Pentacubes):
"""many solutions"""
width = 7
height = 15
depth = 2
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
return self.coordinates_open_box(self.width, self.height, self.depth)
class Pentacubes2x11x11Frame(Pentacubes):
"""many solutions"""
width = 11
height = 11
depth = 2
def coordinates(self):
for y in range(self.height):
for x in range(self.width):
yield (x, y, 0)
for y in range(2, self.height - 2):
for x in range(2, self.width - 2):
if ( x == 2 or x == self.width - 3
or y == 2 or y == self.height - 3):
yield (x, y, 1)
transform_solution_matrix = Puzzle3D.swap_yz_transform
class Pentacubes5x5x6Tower1(Pentacubes):
"""many solutions"""
width = 5
height = 6
depth = 5
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if y == 5 and x == 2:
continue
yield (x, y, z)
class Pentacubes5x5x6Tower2(Pentacubes):
"""many solutions"""
width = 5
height = 6
depth = 5
def coordinates(self):
hole = set(((2,5,2), (2,5,1), (1,5,2), (3,5,2), (2,5,3)))
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if (x,y,z) not in hole:
yield (x, y, z)
class Pentacubes5x5x6Tower3(Pentacubes):
"""many solutions"""
width = 5
height = 6
depth = 5
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if y > 0 and z == 2 == x:
continue
yield (x, y, z)
class PentacubesCornerCrystal(Pentacubes):
"""many solutions"""
width = 10
height = 10
depth = 10
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
total = x + y + z
if ( total < 6
or total < 10 and (x == 0 or y == 0 or z == 0)):
yield (x, y, z)
def customize_piece_data(self):
"""
Add a monocube to fill in the extra space, and restrict the X piece to
one orientation to account for symmetry.
"""
Pentacubes.customize_piece_data(self)
self.piece_data['o'] = ((), {})
self.piece_data['X5'][-1]['axes'] = None
self.piece_colors['o'] = 'white'
def build_matrix(self):
"""Restrict the monocube to the 4 interior, hidden spaces."""
keys = sorted(self.pieces.keys())
o_coords, o_aspect = self.pieces['o'][0]
for coords in ((1,1,1), (2,1,1), (1,2,1), (1,1,2)):
translated = o_aspect.translate(coords)
self.build_matrix_row('o', translated)
keys.remove('o')
self.build_regular_matrix(keys)
class PentacubesNineSlices(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if ( 3 < x + y < 13 and -5 < y - x < 5
and (z + abs(x - 4)) < 5):
yield (x, y, z)
transform_solution_matrix = Puzzle3D.swap_yz_transform
class PentacubesGreatWall(Pentacubes):
"""many solutions"""
width = 15
height = 15
depth = 5
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if x % 2:
if x + y == 13:
yield (x, y, z)
elif 11 < x + y < 15:
yield (x, y, z)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class Pentacubes3x3x20Tower1(Pentacubes):
"""many solutions"""
width = 3
height = 20
depth = 3
def coordinates(self):
holes = set()
for y in range(1, 19, 2):
for z in range(3):
holes.add((1,y,z))
for z in range(self.depth):
for y in range(self.height - 1):
for x in range(self.width):
if (x,y,z) not in holes:
yield (x, y, z)
yield (1, 19, 1)
class Pentacubes3x3x20Tower2(Pentacubes):
"""many solutions"""
width = 3
height = 20
depth = 3
def coordinates(self):
holes = set()
for y in range(1, 19, 2):
for i in range(3):
if (y // 2) % 2:
holes.add((i,y,1))
else:
holes.add((1,y,i))
for z in range(self.depth):
for y in range(self.height - 1):
for x in range(self.width):
if (x,y,z) not in holes:
yield (x, y, z)
yield (1, 19, 1)
class Pentacubes3x3x17Tower(Pentacubes):
"""many solutions"""
width = 3
height = 17
depth = 3
def coordinates(self):
for z in range(self.depth):
for y in range(self.height - 1):
for x in range(self.width):
yield (x, y, z)
yield (1, 16, 1)
class Pentacubes3x3x19CrystalTower(Pentacubes):
"""many solutions"""
width = 3
height = 19
depth = 3
def coordinates(self):
for z in range(self.depth):
for y in range(self.height - 1):
for x in range(self.width):
if x + y + z < 18:
yield (x, y, z)
yield (0, 18, 0)
class Pentacubes5x9x9Fortress(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
def coordinates(self):
for y in range(self.height):
for x in range(self.width):
yield (x, y, 0)
for z in range(1, self.depth):
for i in range(self.height):
if z <= abs(i - 4):
yield (0, i, z)
yield (8, i, z)
if 0 < i < self.width - 1:
yield (i, 0, z)
yield (i, 8, z)
transform_solution_matrix = Puzzle3D.swap_yz_transform
class Pentacubes3x9x9Mound(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 3
def coordinates(self):
coords = set()
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if ( z <= x < (self.width - z)
and z <= y < (self.height - z)
and not (4 - z < x < 4 + z and 4 - z < y < 4 + z)):
yield (x, y, z)
class Pentacubes11x11x6Pyramid(Pentacubes):
"""
One empty cube in the center of the bottom layer.
0 solutions
Proof of impossibility: Color the cubes of the 29 pentacubes with a 3-D
black & white checkerboard pattern, such that no like-colored faces touch.
Each pentacube piece has a parity imbalance (difference between the number
of black & white cubes) of one, except for X and T1, which both have
parity imbalances of 3. Therefore the maximum possible parity imbalance
of any puzzle is 33. Now color the 11x11x6 pyramid with the same
checkerboard pattern. The parity imbalance is 37 (91 cubes of one color
vs. 54 of the other), more than the maximum possible imbalance. Even if
the empty cube is moved, the imbalance could only be reduced to 35, which
is still too large. No solution is possible.
Instead of black & white, the coordinate total (X + Y + Z) of each cube
could be used, divided into even & odd totals.
"""
width = 11
height = 11
depth = 6
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if (x,y,z) == (5,5,0):
continue
elif z + abs(x - 5) + abs(y - 5) < self.depth:
yield (x, y, z)
transform_solution_matrix = Puzzle3D.swap_yz_transform
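
# A small illustrative helper (added for clarity; not part of the original puzzle
# definitions). It computes the checkerboard parity imbalance used in the
# impossibility argument above: for the coordinates of Pentacubes11x11x6Pyramid
# it returns 37 (91 cells of one color vs. 54 of the other), which exceeds the
# maximum imbalance of 33 achievable by the 29 pentacubes.
def _parity_imbalance(coords):
    cells = list(coords)
    even = sum(1 for (x, y, z) in cells if (x + y + z) % 2 == 0)
    odd = len(cells) - even
    return abs(even - odd)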
class Pentacubes11x11x5Pyramid(Pentacubes):
"""many solutions"""
width = 11
height = 11
depth = 5
def coordinates(self):
corners = set(((0,2),(0,1),(0,0),(1,0),(2,0),
(8,0),(9,0),(10,0),(10,1),(10,2),
(10,8),(10,9),(10,10),(9,10),(8,10),
(2,10),(1,10),(0,10),(0,9),(0,8)))
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if ( z == 0 and (x,y) not in corners
or z + abs(x - 5) + abs(y - 5) < self.depth):
yield (x, y, z)
class PentacubesPyramidSpire(Pentacubes):
"""
many solutions
design from `Torsten Sillke's pages [1992]`_
"""
width = 9
height = 9
depth = 9
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set()
for i in range(5):
layer = Puzzle2D.coordinates_diamond(6 - i, offset=(i-1,i-1))
for (x,y) in layer:
coords.add(self.coordinate_offset(x, y, i, None))
coords.update(set(self.coordinates_cuboid(1, 1, 9, offset=(4,4,0))))
coords.intersection_update(set(self.coordinates_cuboid(9, 9, 9)))
return sorted(coords)
class Pentacubes9x9x9OctahedralPlanes(Pentacubes):
"""
0 solutions?
Even/odd imbalance: 23.
"""
width = 9
height = 9
depth = 9
def coordinates(self):
coords = set()
for i in range(self.depth):
for j in range(self.height):
if abs(i - 4) + abs(j - 4) < 6:
coords.add((i, j, 4))
coords.add((i, 4, j))
coords.add((4, i, j))
return sorted(coords)
class Pentacubes2x13x13DiamondFrame(Pentacubes):
"""many solutions"""
width = 13
height = 13
depth = 2
def customize_piece_data(self):
Pentacubes.customize_piece_data(self)
self.piece_data['F5'][-1]['rotations'] = None
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if z * 4 <= abs(x - 6) + abs(y - 6) < 7:
yield (x, y, z)
class PentacubesDiamondPanel(Pentacubes):
"""many solutions"""
width = 13
height = 13
depth = 2
def coordinates(self):
coords = set()
for i in range(2):
layer = Puzzle2D.coordinates_diamond(7 - i, offset=(i,i))
for (x,y) in layer:
coords.add(self.coordinate_offset(x, y, i, None))
coords.remove((6,6,1))
return sorted(coords)
class Pentacubes2x3x2Chair(Pentacubes):
"""
A structure made of only two pieces.
17 solutions
"""
width = 2
height = 3
depth = 2
check_for_duplicates = True
duplicate_conditions = ({'x_reversed': True},)
custom_class_name = 'Pentacubes2x3x2Chair_%(p1)s_%(p2)s'
custom_class_template = """\
class %s(Pentacubes2x3x2Chair):
custom_pieces = [%%(p1)r, %%(p2)r]
""" % custom_class_name
@classmethod
def components(cls):
"""
Generate subpuzzle classes dynamically.
One class for each pair of pieces.
"""
piece_names = sorted(cls.piece_data.keys())
classes = []
for i, p1 in enumerate(piece_names):
for p2 in piece_names[i+1:]: # avoid duplicate combinations
exec cls.custom_class_template % locals()
classes.append(locals()[cls.custom_class_name % locals()])
return classes
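    # For example, for the piece pair ('F5', 'X5') the template above expands to
    # (an illustrative expansion; the real pairs come from piece_data):
    #
    #     class Pentacubes2x3x2Chair_F5_X5(Pentacubes2x3x2Chair):
    #         custom_pieces = ['F5', 'X5']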
def coordinates(self):
for coord in ((0,0,0), (1,0,0), (0,1,0), (1,1,0), (0,2,0), (1,2,0),
(0,0,1), (1,0,1), (0,1,1), (1,1,1)):
yield coord
def customize_piece_data(self):
"""Restrict pieces to those listed in `self.custom_pieces`."""
Pentacubes.customize_piece_data(self)
for name in self.piece_data.keys():
if name not in self.custom_pieces:
del self.piece_data[name]
class Pentacubes5x7x5Cubbyholes(Pentacubes):
"""many solutions"""
width = 5
height = 7
depth = 5
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if not (x % 2 and y % 2):
yield (x, y, z)
class Pentacubes9x9x5Cubbyholes(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if 5 < (x + y) < 11 and not (x % 2 and y % 2):
yield (x, y, z)
class Pentacubes7x7x5Block(Pentacubes):
"""many solutions"""
width = 7
height = 7
depth = 5
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if 1 <= x <= 5 and 1 <= y <= 5 or x == 3 or y == 3:
yield (x, y, z)
transform_solution_matrix = Puzzle3D.swap_yz_transform
class PentacubesX1(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
svg_rotation = 41.5
def coordinates(self):
coords = set(self.coordinates_cuboid(9, 3, 3, offset=(0,3,0)))
coords.update(self.coordinates_cuboid(3, 9, 3, offset=(3,0,0)))
coords.update(self.coordinates_cuboid(5, 1, 1, offset=(2,4,3)))
coords.update(self.coordinates_cuboid(1, 5, 1, offset=(4,2,3)))
coords.add(self.coordinate_offset(4, 4, 4, None))
return sorted(coords)
class PentacubesAstroidBlock(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
def coordinates(self):
coords = set(
list(self.coordinates_cuboid(9, 1, 5, offset=(0,4,0)))
+ list(self.coordinates_cuboid(1, 9, 5, offset=(4,0,0))))
for x, y in Puzzle2D.coordinates_diamond(4, offset=(1,1)):
for z in range(5):
coords.add(self.coordinate_offset(x, y, z, None))
return sorted(coords)
class PentacubesDiamondWall(Pentacubes):
"""
many solutions
design by Nick Maeder
"""
width = 9
height = 9
depth = 5
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_diamond(5))
- (set(Puzzle2D.coordinates_diamond(3, offset=(2,2)))
- set(((4,4),))))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
class PentacubesRibbedWall(Pentacubes):
"""
0 solutions?
design by Nick Maeder
"""
width = 7
height = 7
depth = 5
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_rectangle(5, 5, offset=(1,1)))
- (set(Puzzle2D.coordinates_rectangle(3, 3, offset=(2,2)))
- set(((3,3),))))
for i in (1, 3, 5):
layer.update(((0,i), (i,0), (6,i), (i,6)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(5))
return sorted(coords)
class PentacubesGrandPlatform(Pentacubes):
"""
many solutions
design from Kadon's Super Quintillions booklet
"""
width = 9
height = 9
depth = 2
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set(
list(self.coordinates_cuboid(9, 9, 1))
+ list(self.coordinates_cuboid(8, 8, 1, offset=(0,0,1))))
return sorted(coords)
class PentacubesSteppedPyramid1(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
corner_offsets = ((0,0,0), (0,7,0), (7,0,0), (7,7,0))
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set()
for i in range(5):
coords.update(set(
self.coordinates_cuboid(9 - 2 * i, 9 - 2 * i, 1,
offset=(i,i,i))))
for offset in self.corner_offsets:
coords.difference_update(set(
self.coordinates_cuboid(2, 2, 5, offset=offset)))
return sorted(coords)
class PentacubesSteppedPyramid2(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
corner_offsets = ((2,2,0), (6,2,0), (6,6,0), (2,6,0))
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set()
for i in range(5):
coords.update(set(
self.coordinates_cuboid(9 - 2 * i, 9 - 2 * i, 1,
offset=(i,i,i))))
hole = Cartesian3DCoordSet(
list(self.coordinates_cuboid(2, 2, 1))
+ [self.coordinate_offset(1, 1, 1, None)])
for i, offset in enumerate(self.corner_offsets):
coords.difference_update(
hole.rotate0(i, 2).translate(offset))
return sorted(coords)
class PentacubesSteppedPyramid3(PentacubesSteppedPyramid2):
"""many solutions"""
corner_offsets = ((2,2,2), (6,2,2), (6,6,2), (2,6,2))
class PentacubesSteppedPyramid4(PentacubesSteppedPyramid2):
"""many solutions"""
corner_offsets = ((1,1,1), (7,1,1), (7,7,1), (1,7,1))
class PentacubesSteppedPyramid5(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set()
for i in range(5):
coords.update(set(
self.coordinates_cuboid(9 - 2 * i, 9 - 2 * i, 1,
offset=(i,i,i))))
coords.difference_update(set(
self.coordinates_cuboid(9, 1, 1, offset=(0,4,0))))
coords.difference_update(set(
self.coordinates_cuboid(1, 9, 1, offset=(4,0,0))))
coords.difference_update(set(
self.coordinates_cuboid(3, 3, 1, offset=(3,3,0))))
coords.add(self.coordinate_offset(4, 4, 0, None))
return sorted(coords)
class PentacubesSteppedPyramid6(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set()
for i in range(5):
coords.update(set(
self.coordinates_cuboid(9 - 2 * i, 9 - 2 * i, 1,
offset=(i,i,i))))
for d in (0, 1, 2, 6, 7, 8):
for e in (0, 8):
coords.discard(
self.coordinate_offset(e, d, 0, None))
coords.discard(
self.coordinate_offset(d, e, 0, None))
return sorted(coords)
class PentacubesSteppedPyramid_x1(Pentacubes):
"""0 solutions (inpossible due to corners)"""
width = 9
height = 9
depth = 5
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set()
for i in range(5):
coords.update(set(
self.coordinates_cuboid(9 - 2 * i, 9 - 2 * i, 1,
offset=(i,i,i))))
for d in (0, 8):
coords.difference_update(set(
self.coordinates_cuboid(5, 1, 1, offset=(2,d,0))))
coords.difference_update(set(
self.coordinates_cuboid(1, 5, 1, offset=(d,2,0))))
return sorted(coords)
class PentacubesCastle(Pentacubes):
"""
many solutions
design from Andrew Clarke's Poly Pages:
http://www.recmath.com/PolyPages/PolyPages/index.htm?Polycubes.html#pentacubes
"""
width = 7
height = 7
depth = 6
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
coords = set(
list(self.coordinates_cuboid(7, 7, 2))
+ list(self.coordinates_cuboid(3, 3, 4, offset=(2,2,2))))
for i in (0, 2, 4, 6):
for j in (0, 6):
coords.add(self.coordinate_offset(i, j, 2, None))
coords.add(self.coordinate_offset(j, i, 2, None))
coords.remove(self.coordinate_offset(3, 3, 5, None))
return sorted(coords)
class PentacubesSteppedPyramid11x7_1(Pentacubes):
"""
many solutions
design from `Torsten Sillke's pages [Problems for pentacubes 1992]
<http://www.mathematik.uni-bielefeld.de/~sillke/CONTEST/pentaPRB>`_
"""
width = 11
height = 7
depth = 4
holes = set(((4,3,0), (5,3,0), (6,3,0)))
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
coords = set()
for i in range(self.depth):
coords.update(set(self.coordinates_cuboid(
self.width - 2 * i, self.height - 2 * i, 1, offset=(i,i,i))))
coords -= self.holes
return sorted(coords)
class PentacubesSteppedPyramid11x7_2(PentacubesSteppedPyramid11x7_1):
"""many solutions"""
holes = set(((4,3,3), (5,3,3), (6,3,3)))
class PentacubesPanorama(Pentacubes):
"""
many solutions
design from `Torsten Sillke's pages [CFF Contest 36]
<http://www.mathematik.uni-bielefeld.de/~sillke/CONTEST/cff-contest36>`_
"""
width = 5
height = 13
depth = 5
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
part = set()
for i in range(self.depth):
part.update(set((x,y,i) for (x, y) in
Puzzle2D.coordinates_diamond(5 - i, offset=(i-2,i-2))))
part = Cartesian3DCoordSet(part)
part.intersection_update(set(self.coordinates_cuboid(5, 5, 5)))
coords = part.copy()
coords.update(part.translate((0,8,0)))
part.intersection_update(
set(self.coordinates_cuboid(3, 3, 3, offset=(1,1,2))))
coords.update(part.translate((0,4,-2)))
return sorted(coords)
class PentacubesCoolingFins(Pentacubes):
"""
many solutions
design from `Torsten Sillke's pages [10th Pentacube Contest, 1999]
<http://www.mathematik.uni-bielefeld.de/~sillke/CONTEST/penta-contest>`_
"""
width = 6
height = 15
depth = 2
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set(
self.coordinates_cuboid(self.width, self.height, self.depth))
for y in range(1, 14, 2):
coords -= set(self.coordinates_cuboid(5, 1, 1, offset=(1,y,1)))
return sorted(coords)
class PentacubesDiamondTower(Pentacubes):
"""many solutions"""
width = 7
height = 7
depth = 9
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in Puzzle2D.coordinates_diamond(4) for z in range(5))
for i in range(3):
coords.update(set(
self.coordinate_offset(x, y, i+5, None)
for (x, y) in
Puzzle2D.coordinates_diamond(3 - i, offset=(i+1,i+1))))
coords.add(self.coordinate_offset(3, 3, 8, None))
return sorted(coords)
class PentacubesCompoundCross1(Pentacubes):
"""
0 solutions?
design by Nick Maeder
"""
width = 11
height = 11
depth = 5
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
coords = set(
list(self.coordinates_cuboid(11, 1, 5, offset=(0,5,0)))
+ list(self.coordinates_cuboid(1, 11, 5, offset=(5,0,0)))
+ list(self.coordinates_cuboid(3, 1, 5, offset=(4,2,0)))
+ list(self.coordinates_cuboid(3, 1, 5, offset=(4,8,0)))
+ list(self.coordinates_cuboid(1, 3, 5, offset=(2,4,0)))
+ list(self.coordinates_cuboid(1, 3, 5, offset=(8,4,0))))
return sorted(coords)
class PentacubesCompoundCross2(Pentacubes):
"""0 solutions?"""
width = 11
height = 11
depth = 5
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
coords = set(
list(self.coordinates_cuboid(11, 1, 5, offset=(0,5,0)))
+ list(self.coordinates_cuboid(1, 11, 5, offset=(5,0,0)))
+ list(self.coordinates_cuboid(3, 1, 5, offset=(4,1,0)))
+ list(self.coordinates_cuboid(3, 1, 5, offset=(4,9,0)))
+ list(self.coordinates_cuboid(1, 3, 5, offset=(1,4,0)))
+ list(self.coordinates_cuboid(1, 3, 5, offset=(9,4,0))))
return sorted(coords)
class PentacubesOctagonalFrame1(Pentacubes):
"""
many solutions
design by Nick Maeder
"""
width = 11
height = 11
depth = 2
hole = (5,5,0)
def coordinates(self):
coords = set(self.coordinates_cuboid(11, 11, 2))
cutout = Cartesian3DCoordSet(
self.coordinate_offset(x, y, 0, None)
for (x,y) in Puzzle2D.coordinates_diamond(3))
for (x, y) in ((-2,-2), (-2,8), (8,-2), (8,8)):
for z in (0,1):
coords -= cutout.translate((x,y,z))
for (x, y) in ((2,1), (2,5), (4,1), (4,5)):
coords -= cutout.translate((x,y,1))
coords -= set(self.coordinates_cuboid(9, 3, 1, offset=(1,4,1)))
coords -= set(self.coordinates_cuboid(3, 9, 1, offset=(4,1,1)))
coords.update(set(self.coordinates_cuboid(3, 3, 1, offset=(4,4,1))))
coords.remove(self.hole)
return sorted(coords)
class PentacubesOctagonalFrame2(PentacubesOctagonalFrame1):
"""many solutions"""
hole = (5,5,1)
class PentacubesCornerSlant(Pentacubes):
"""
0 solutions?
design by Nick Maeder
"""
width = 9
height = 9
depth = 9
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set(self.coordinates_cuboid(9, 9, 1))
for (tx, ty) in Puzzle2D.coordinates_triangle(8):
coords.add(self.coordinate_offset(tx, 0, ty + 1, None))
coords.add(self.coordinate_offset(0, tx, ty + 1, None))
return sorted(coords)
class PentacubesTruncatedTetrahedron(Pentacubes):
"""
many solutions
design by Michael Reid via Torsten Sillke
(http://www.mathematik.uni-bielefeld.de/~sillke/PENTA/s3sym-5c)
This puzzle has a parity imbalance of 31, approaching the maximum of 33.
"""
width = 9
height = 9
depth = 9
def coordinates(self):
coords = set()
for coord in self.coordinates_cuboid(self.width, self.height,
self.depth):
(x, y, z) = coord
if 16 <= (x + y + z) < 21:
coords.add(coord)
return sorted(coords)
def customize_piece_data(self):
"""
Restrict the P piece to one plane, no flips, to account for symmetry.
"""
Pentacubes.customize_piece_data(self)
self.piece_data['P5'][-1]['axes'] = None
self.piece_data['P5'][-1]['flips'] = None
class PentacubesHollowTetrahedron(Pentacubes):
"""
many solutions
design by Michael Reid via Torsten Sillke
(http://www.mathematik.uni-bielefeld.de/~sillke/PENTA/s3sym-5c)
"""
width = 9
height = 9
depth = 9
def coordinates(self):
coords = set()
for coord in self.coordinates_cuboid(self.width, self.height,
self.depth):
(x, y, z) = coord
total = x + y + z
if total >= 16 and (total < 18 or x == 8 or y == 8 or z == 8):
coords.add(coord)
return sorted(coords)
def customize_piece_data(self):
"""
Restrict the P piece to one plane, no flips, to account for symmetry.
"""
Pentacubes.customize_piece_data(self)
self.piece_data['P5'][-1]['axes'] = None
self.piece_data['P5'][-1]['flips'] = None
class PentacubesStackedTriangles1(Pentacubes):
"""many solutions"""
width = 9
height = 9
depth = 5
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set()
for d in range(self.depth):
coords.update(set(self.coordinates_triangular_prism(
self.width - d, 1, offset=(0,0,d))))
return sorted(coords)
class PentacubesStackedTriangles2(PentacubesStackedTriangles1):
transform_solution_matrix = Pentacubes.transform_solution_matrix
def coordinates(self):
coords = set()
for d in range(self.depth):
coords.update(set(self.coordinates_triangular_prism(
self.width - d, 1, offset=(0,d,d))))
return sorted(coords)
class PentacubesStackedTriangles3(PentacubesStackedTriangles1):
transform_solution_matrix = Pentacubes.transform_solution_matrix
def coordinates(self):
coords = set()
for d in range(self.depth):
coords.update(set(self.coordinates_triangular_prism(
self.width - d, 1, offset=((d + 1) / 2,(d / 2),d))))
return sorted(coords)
class Pentacubes4Cubes1(Pentacubes):
"""
many solutions
Suggested by Donald Knuth (2016-10-29, private correspondence):
I'm still working on two more exercises about pentacubes. [One of them
is an interesting shape that you don't seem to have yet: It consists
of a 4x4x4 cube with three 3x3x3 cubes attached --- taking advantage
of the remarkable fact that 29 times 5 equals 4^3 + 3^4! I found it in
the pentacube book that Sivy Farhi published in the 70s. I still
haven't seen the book by Kuenzell; it might well be in there too.]
"""
width = 7
height = 7
depth = 7
_offsets = ((4, 0, 0), (0, 4, 0), (0, 0, 4))
def coordinates(self):
coords = set(self.coordinates_cuboid(4, 4, 4))
for offset in self._offsets:
coords.update(set(self.coordinates_cuboid(3, 3, 3, offset=offset)))
return sorted(coords)
def customize_piece_data(self):
"""
Restrict the X piece to one orientation to account for symmetry.
"""
Pentacubes.customize_piece_data(self)
self.piece_data['X5'][-1]['axes'] = None
class Pentacubes4Cubes2(Pentacubes4Cubes1):
"""many solutions"""
_offsets = ((4, 1, 1), (1, 4, 1), (1, 1, 4))
class Pentacubes4Cubes3(Pentacubes4Cubes1):
"""many solutions"""
_offsets = ((4, 0, 1), (1, 4, 0), (0, 1, 4))
class PentacubesPlus2x5x15(PentacubesPlus):
"""many solutions"""
width = 15
height = 5
depth = 2
transform_solution_matrix = Puzzle3D.swap_yz_transform
class PentacubesPlus2x3x25(PentacubesPlus):
"""many solutions"""
width = 25
height = 3
depth = 2
transform_solution_matrix = Puzzle3D.swap_yz_transform
class PentacubesPlus3x5x10(PentacubesPlus):
"""many solutions"""
width = 10
height = 5
depth = 3
transform_solution_matrix = Puzzle3D.swap_yz_transform
class PentacubesPlus5x5x6(PentacubesPlus):
"""many solutions"""
width = 5
height = 6
depth = 5
class PentacubesPlus11x11x11OctahedralPlanes(PentacubesPlus):
"""
0 solutions?
Even/odd imbalance: 30.
"""
width = 11
height = 11
depth = 11
def coordinates(self):
coords = set()
for i in range(self.depth):
for j in range(self.height):
if i == j == 5:
continue
if abs(i - 5) + abs(j - 5) < 6:
coords.add((i, j, 5))
coords.add((i, 5, j))
coords.add((5, i, j))
return sorted(coords)
class PentacubesPlusDiamondPrism(PentacubesPlus):
"""many solutions"""
width = 7
height = 7
depth = 6
def coordinates(self):
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in Puzzle2D.coordinates_diamond(4) for z in range(6))
return sorted(coords)
class PentacubesPlusDiagonalWall1(PentacubesPlus):
"""many solutions"""
width = 15
height = 15
depth = 2
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(15))
- set(Puzzle2D.coordinates_triangle(9)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(2))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class PentacubesPlusDiagonalWall2(PentacubesPlus):
"""many solutions"""
width = 8
height = 8
depth = 5
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(8))
- set(Puzzle2D.coordinates_triangle(3)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(5))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class PentacubesPlusDiagonalWall3(PentacubesPlus):
"""many solutions"""
width = 12
height = 12
depth = 2
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(12))
- set(Puzzle2D.coordinates_triangle(2)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(2))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class PentacubesPlusDiagonalWall4(PentacubesPlus):
"""many solutions"""
width = 12
height = 12
depth = 3
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(12))
- set(Puzzle2D.coordinates_triangle(7)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class PentacubesPlusDiagonalWall5(PentacubesPlus):
"""many solutions"""
width = 9
height = 9
depth = 5
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(9))
- set(Puzzle2D.coordinates_triangle(5)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class PentacubesPlusDiagonalWall6(PentacubesPlus):
"""many solutions"""
width = 11
height = 11
depth = 5
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(11))
- set(Puzzle2D.coordinates_triangle(8)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class PentacubesPlusDiagonalWall7(PentacubesPlus):
"""many solutions"""
width = 7
height = 7
depth = 6
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(7))
- set(Puzzle2D.coordinates_triangle(2)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class PentacubesPlusDiagonalWall8(PentacubesPlus):
"""many solutions"""
width = 6
height = 6
depth = 10
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(6))
- set(Puzzle2D.coordinates_triangle(3)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class PentacubesPlus5x5x10Steps(PentacubesPlus):
"""many solutions"""
width = 10
height = 5
depth = 5
def coordinates(self):
coords = set(
self.coordinate_offset(x, y, z, None)
for y, z in Puzzle2D.coordinates_triangle(5) for x in range(10))
return sorted(coords)
class PentacubesPlus9x5x6Steps(PentacubesPlus):
"""many solutions"""
width = 6
height = 9
depth = 5
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
coords = set(
self.coordinate_offset(x, y, z, None)
for y, z in Puzzle2D.coordinates_double_triangle(5)
for x in range(6))
return sorted(coords)
class PentacubesPlusDiagonalBlock1(PentacubesPlus):
"""many solutions"""
width = 9
height = 9
depth = 2
def coordinates(self):
layer = set(
(x, y) for (x, y) in Puzzle2D.coordinates_triangle(14)
if x < self.width and y < self.height)
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class PentacubesPlusSteppedPyramid1(PentacubesPlus):
"""many solutions"""
width = 9
height = 9
depth = 3
holes = set(((4,4,2), (3,4,2), (4,3,2), (5,4,2), (4,5,2)))
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
coords = set()
for i in range(3):
coords.update(set(
self.coordinates_cuboid(9 - 2 * i, 9 - 2 * i, 1,
offset=(i,i,i))))
coords -= self.holes
return sorted(coords)
class PentacubesPlusSteppedPyramid2(PentacubesPlusSteppedPyramid1):
"""many solutions"""
holes = set(((4,4,2), (2,2,2), (6,2,2), (2,6,2), (6,6,2)))
class NonConvexPentacubes2x5x14(NonConvexPentacubes):
"""many solutions"""
width = 14
height = 5
depth = 2
transform_solution_matrix = Puzzle3D.swap_yz_transform
class NonConvexPentacubes2x7x10(NonConvexPentacubes):
"""many solutions"""
width = 10
height = 7
depth = 2
transform_solution_matrix = Puzzle3D.swap_yz_transform
class NonConvexPentacubes4x5x7(NonConvexPentacubes):
"""many solutions"""
width = 7
height = 5
depth = 4
transform_solution_matrix = Puzzle3D.swap_yz_transform
class NonConvexPentacubesZigZag1(NonConvexPentacubes):
"""many solutions"""
width = 18
height = 19
depth = 2
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if 8 <= (int(x/2) + int(y/2)) <= 9:
yield (x, y, z)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class NonConvexPentacubesZigZag2(NonConvexPentacubes):
"""many solutions"""
width = 20
height = 18
depth = 2
check_for_duplicates = True
duplicate_conditions = ({'x_reversed': True, 'y_reversed': True},)
def coordinates(self):
ends = set([(0,16), (19,1)])
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if (x,y) in ends:
continue
if 8 <= (int(x/2) + int(y/2)) <= 9:
yield (x, y, z)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class NonConvexPentacubesDiagonalWall(NonConvexPentacubes):
"""many solutions"""
width = 19
height = 19
depth = 2
def coordinates(self):
for z in range(self.depth):
for y in range(self.height):
for x in range(self.width):
if 18 <= (x + y) <= 21:
yield (x, y, z)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class NonConvexPentacubesDiagonalWall2(NonConvexPentacubes):
"""many solutions"""
width = 9
height = 9
depth = 4
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(9))
- set(Puzzle2D.coordinates_triangle(4)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class NonConvexPentacubesDiagonalWall3(NonConvexPentacubes):
"""many solutions"""
width = 13
height = 13
depth = 2
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(13))
- set(Puzzle2D.coordinates_triangle(6)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class NonConvexPentacubesDiagonalWall4(NonConvexPentacubes):
"""many solutions"""
width = 8
height = 8
depth = 4
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(8))
- set(((0,0),)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class NonConvexPentacubesDiagonalWall5(NonConvexPentacubes):
"""many solutions"""
width = 6
height = 6
depth = 7
def coordinates(self):
layer = (
set(Puzzle2D.coordinates_triangle(6))
- set(((0,0),)))
coords = set(
self.coordinate_offset(x, y, z, None)
for x, y in layer for z in range(self.depth))
return sorted(coords)
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
class NonConvexPentacubesAztecPyramid(NonConvexPentacubes):
"""many solutions"""
width = 10
height = 10
depth = 5
def coordinates(self):
return self.coordinates_aztec_pyramid(5)
transform_solution_matrix = Puzzle3D.swap_yz_transform
class NonConvexPentacubesStackedSquares(NonConvexPentacubes):
"""many solutions"""
width = 7
height = 7
depth = 7
def coordinates(self):
coords = set()
for i in range(7):
coords.update(set(
self.coordinates_cuboid(7 - i, 7 - i, 1, offset=(0,i,i))))
return sorted(coords)
transform_solution_matrix = Puzzle3D.swap_yz_transform
class NonConvexPentacubes4x4x14Steps(PentacubesPlus):
"""0? solutions"""
width = 14
height = 4
depth = 4
def coordinates(self):
coords = set(
self.coordinate_offset(x, y, z, None)
for y, z in Puzzle2D.coordinates_triangle(4) for x in range(14))
return sorted(coords)
class NonConvexPentacubes7x7x5Steps(PentacubesPlus):
"""0? solutions"""
width = 5
height = 7
depth = 7
def coordinates(self):
coords = set(
self.coordinate_offset(x, y, z, None)
for y, z in Puzzle2D.coordinates_triangle(7) for x in range(5))
return sorted(coords)
class NonConvexPentacubesSteppedPyramid9x8(NonConvexPentacubes):
"""many solutions"""
width = 9
height = 8
depth = 5
holes = set()
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
coords = set()
for i in range(self.depth):
coords.update(set(self.coordinates_cuboid(
self.width - 2 * i, self.height - 2 * i, 1, offset=(i,i,i))))
coords -= self.holes
return sorted(coords)
class NonConvexPentacubesSteppedPyramid13x6(
NonConvexPentacubesSteppedPyramid9x8):
"""many solutions"""
width = 13
height = 6
depth = 3
class NonConvexPentacubesDiamondPyramid1(NonConvexPentacubes):
"""
many solutions
design from `Torsten Sillke's pages [1992]
<http://www.mathematik.uni-bielefeld.de/~sillke/CONTEST/pentaPRB>`_
"""
width = 13
height = 13
depth = 4
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
coords = set()
for i in range(self.depth):
coords.update(set(
self.coordinate_offset(x, y, i, None)
for (x, y) in
Puzzle2D.coordinates_diamond(7 - 2 * i, offset=(2*i,2*i))))
return sorted(coords)
class NonConvexPentacubes5x5x8CrystalTower(NonConvexPentacubes):
"""many solutions"""
width = 5
height = 5
depth = 8
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
coords = set(self.coordinates_cuboid(5, 5, 4))
for i in range(4):
coords.update(set(
self.coordinate_offset(x, y, i+4, None)
for (x, y) in
Puzzle2D.coordinates_diamond(4 - i, offset=(i-1,i-1))))
coords.intersection_update(set(self.coordinates_cuboid(5, 5, 8)))
return sorted(coords)
class NonConvexPentacubesOpenBox12x3x5(NonConvexPentacubes):
"""? solutions"""
width = 3
height = 12
depth = 5
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
return self.coordinates_open_box(self.width, self.height, self.depth)
class NonConvexPentacubesOpenBox10x3x6(NonConvexPentacubesOpenBox12x3x5):
"""? solutions"""
width = 3
height = 10
depth = 6
class NonConvexPentacubesRingWall(NonConvexPentacubes):
"""0 solutions"""
width = 6
height = 6
depth = 7
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
return self.coordinates_ring_wall(self.width, self.height, self.depth)
class NonConvexPentacubesRingWall1(NonConvexPentacubes):
"""0 solutions"""
width = 7
height = 5
depth = 7
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
return self.coordinates_ring_wall(self.width, self.height, self.depth)
class NonConvexPentacubesRingWall8x4x7(NonConvexPentacubes):
"""1+ solutions"""
width = 4
height = 8
depth = 7
transform_solution_matrix = Puzzle3D.cycle_xyz_transform
def coordinates(self):
return self.coordinates_ring_wall(self.width, self.height, self.depth)
class NonConvexPentacubesRingWall3(NonConvexPentacubes):
"""0 solutions"""
width = 9
height = 3
depth = 7
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
return self.coordinates_ring_wall(self.width, self.height, self.depth)
class DorianCube(Pentacubes3x3x3):
"""
Many solutions.
This is a 5x5x5 cube constructed from the 25 pentacubes that each fit
within a 3x3x3 box (omits the I, L, N, and Y pentacubes).
Designed by Joseph Dorrie. Referenced on p. 41 of `Knotted Doughnuts and
    Other Mathematical Entertainments`, by Martin Gardner, 1986.
"""
width = 5
height = 5
depth = 5
def customize_piece_data(self):
"""Restrict the J25 piece to a single aspect."""
Pentacubes3x3x3.customize_piece_data(self)
self.piece_data['J25'][-1]['rotations'] = None
self.piece_data['J25'][-1]['flips'] = None
self.piece_data['J25'][-1]['axes'] = None
class DorianCube5Towers(Pentacubes3x3x3):
"""
The Dorian Cube subdivided into 5 towers: 4 P-pentomino shaped towers
around a central X-pentomino tower.
Designed by Torsten Sillke.
"""
width = 5
height = 5
depth = 5
tower_bases = (
((0,0), (0,1), (0,2), (1,0), (1,1)), # lower-left P
((0,3), (0,4), (1,3), (1,4), (2,4)), # upper-left P
((2,0), (3,0), (3,1), (4,0), (4,1)), # lower-right P
((3,3), (3,4), (4,2), (4,3), (4,4)), # upper-right P
((1,2), (2,1), (2,2), (2,3), (3,2))) # central X
def build_regular_matrix(self, keys, solution_coords=None):
tower_coords = [
set((x,y,z) for z in range(self.width) for (x,y) in base_coords)
for base_coords in self.tower_bases]
for key in keys:
for coords, aspect in self.pieces[key]:
for z in range(self.depth - aspect.bounds[2]):
for y in range(self.height - aspect.bounds[1]):
for x in range(self.width - aspect.bounds[0]):
translated = aspect.translate((x, y, z))
for solution_coords in tower_coords:
if translated.issubset(solution_coords):
self.build_matrix_row(key, translated)
break
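# Illustrative sketch (not part of the original module): a quick sanity check
# that the five ``tower_bases`` above tile the 5x5 footprint exactly, i.e. the
# four P-pentomino bases surround the central X-pentomino base with no gaps or
# overlaps.  The helper name ``_check_tower_bases`` is hypothetical.
def _check_tower_bases(bases=DorianCube5Towers.tower_bases):
    cells = [cell for base in bases for cell in base]
    assert len(cells) == 25, "expected five 5-cell bases"
    assert set(cells) == set((x, y) for x in range(5) for y in range(5)), \
        "bases must cover the 5x5 footprint exactly once"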
class DorianCube5TowersExploded(DorianCube5Towers):
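    """The five Dorian Cube towers spread apart on a 7x7 footprint."""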
width = 7
height = 7
depth = 5
tower_bases = tuple(
tuple((_x+_dx,_y+_dy) for (_x, _y) in DorianCube5Towers.tower_bases[_i])
for (_i, (_dx, _dy)) in enumerate(((0,0), (0,2), (2,0), (2,2), (1,1))))
transform_solution_matrix = Puzzle3D.swap_yz_transform
def coordinates(self):
return sorted((x,y,z) for z in range(self.depth)
for base_coords in self.tower_bases
for (x,y) in base_coords)
| 25.585414
| 82
| 0.572535
|
4a168f1d0efe38704b80d6a8934625ed67b53d9a
| 332
|
py
|
Python
|
pypy/module/binascii/interp_crc32.py
|
Qointum/pypy
|
c0ed88efbc135a75a535f4534ca1f3baf0bf39d8
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/binascii/interp_crc32.py
|
Qointum/pypy
|
c0ed88efbc135a75a535f4534ca1f3baf0bf39d8
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/binascii/interp_crc32.py
|
Qointum/pypy
|
c0ed88efbc135a75a535f4534ca1f3baf0bf39d8
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
from pypy.interpreter.gateway import unwrap_spec
from rpython.rlib.rarithmetic import r_uint
from rpython.rlib import rzipfile
@unwrap_spec(data='bufferstr', oldcrc='truncatedint_w')
def crc32(space, data, oldcrc=0):
"Compute the CRC-32 incrementally."
crc = rzipfile.crc32(data, r_uint(oldcrc))
return space.wrap(crc)
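# Illustrative note (not part of the PyPy module): at application level this
# behaves like CPython's ``binascii.crc32``, which is likewise chainable, e.g.
#
#     import binascii
#     crc = binascii.crc32(b"hello")
#     crc = binascii.crc32(b" world", crc)  # continue from the previous CRC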
| 33.2
| 55
| 0.771084
|
4a168f41d12c1449570c5f1289a7fa038fd4ae77
| 1,123
|
py
|
Python
|
backend/dvadmin/system/management/commands/init.py
|
hojongss3ch8j/marcglasbergo
|
e03a3924210237d4e07cc116cf1d22ab5a1f2280
|
[
"Apache-2.0"
] | 60
|
2021-07-22T06:39:11.000Z
|
2022-03-30T12:45:46.000Z
|
backend/dvadmin/system/management/commands/init.py
|
hojongss3ch8j/marcglasbergo
|
e03a3924210237d4e07cc116cf1d22ab5a1f2280
|
[
"Apache-2.0"
] | 1
|
2021-09-26T21:36:28.000Z
|
2021-09-26T21:36:28.000Z
|
backend/dvadmin/system/management/commands/init.py
|
hojongss3ch8j/marcglasbergo
|
e03a3924210237d4e07cc116cf1d22ab5a1f2280
|
[
"Apache-2.0"
] | 24
|
2021-07-19T15:07:33.000Z
|
2022-03-23T09:00:15.000Z
|
import logging
from django.core.management.base import BaseCommand
from application import settings
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
    Project initialization command: python manage.py init
"""
def add_arguments(self, parser):
parser.add_argument('init_name', nargs='*', type=str, )
parser.add_argument('-y', nargs='*')
parser.add_argument('-Y', nargs='*')
parser.add_argument('-n', nargs='*')
parser.add_argument('-N', nargs='*')
def handle(self, *args, **options):
reset = False
if isinstance(options.get('y'), list) or isinstance(options.get('Y'), list):
reset = True
if isinstance(options.get('n'), list) or isinstance(options.get('N'), list):
reset = False
print(f"正在准备初始化数据,{'如有初始化数据,将会不做操作跳过' if not reset else '初始数据将会先删除后新增'}...")
for app in settings.INSTALLED_APPS:
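            # Convention: an installed app opts in by providing an
            # ``initialize`` module exposing ``main(reset=...)``; apps without
            # one are skipped via the ModuleNotFoundError handler below.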
try:
exec(f"""
from {app}.initialize import main
main(reset={reset})
""")
except ModuleNotFoundError:
pass
print("初始化数据完成!")
| 28.075
| 84
| 0.590383
|
4a168f8d457b70069ff2aaabe0b6b3e0f5dc582d
| 13,758
|
py
|
Python
|
tests/test_dse.py
|
jrt54/total_variation
|
6611bcddc0e8fe5a49414b004e5b9da9dec4fd6a
|
[
"MIT"
] | 1
|
2018-10-02T00:36:53.000Z
|
2018-10-02T00:36:53.000Z
|
tests/test_dse.py
|
jrt54/total_variation
|
6611bcddc0e8fe5a49414b004e5b9da9dec4fd6a
|
[
"MIT"
] | null | null | null |
tests/test_dse.py
|
jrt54/total_variation
|
6611bcddc0e8fe5a49414b004e5b9da9dec4fd6a
|
[
"MIT"
] | null | null | null |
from conftest import EVAL
from sympy import sin # noqa
import numpy as np
import pytest
from conftest import x, y, z, time, skipif_yask # noqa
from devito import Eq # noqa
from devito.ir import Stencil, Expression, FindNodes, TemporariesGraph, clusterize
from devito.dse import rewrite, common_subexprs_elimination, collect
from devito.symbolics import (xreplace_constrained, iq_timeinvariant, iq_timevarying,
estimate_cost, pow_to_mul)
from devito.types import Scalar
from examples.seismic.acoustic import AcousticWaveSolver
from examples.seismic import demo_model, RickerSource, GaborSource, Receiver
from examples.seismic.tti import AnisotropicWaveSolver
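# The tests below exercise the Devito Symbolic Engine (DSE) rewrite modes
# (None, 'basic', 'advanced', 'speculative', 'aggressive'): rewritten operators
# must stay numerically close to the unrewritten reference, and selected cases
# also check operation counts and the temporaries graph.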
# Acoustic
def run_acoustic_forward(dse=None):
shape = (50, 50, 50)
spacing = (10., 10., 10.)
nbpml = 10
nrec = 101
t0 = 0.0
tn = 250.0
# Create two-layer model from preset
model = demo_model(preset='layers-isotropic', vp_top=3., vp_bottom=4.5,
spacing=spacing, shape=shape, nbpml=nbpml)
# Derive timestepping from model spacing
dt = model.critical_dt
nt = int(1 + (tn-t0) / dt) # Number of timesteps
time_values = np.linspace(t0, tn, nt) # Discretized time axis
# Define source geometry (center of domain, just below surface)
src = RickerSource(name='src', grid=model.grid, f0=0.01, time=time_values)
src.coordinates.data[0, :] = np.array(model.domain_size) * .5
src.coordinates.data[0, -1] = 20.
# Define receiver geometry (same as source, but spread across x)
rec = Receiver(name='nrec', grid=model.grid, ntime=nt, npoint=nrec)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=nrec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
solver = AcousticWaveSolver(model, source=src, receiver=rec, dse=dse, dle='basic')
rec, u, _ = solver.forward(save=False)
return u, rec
@skipif_yask
def test_acoustic_rewrite_basic():
ret1 = run_acoustic_forward(dse=None)
ret2 = run_acoustic_forward(dse='basic')
assert np.allclose(ret1[0].data, ret2[0].data, atol=10e-5)
assert np.allclose(ret1[1].data, ret2[1].data, atol=10e-5)
# TTI
def tti_operator(dse=False, space_order=4):
nrec = 101
t0 = 0.0
tn = 250.
nbpml = 10
shape = (50, 50, 50)
spacing = (20., 20., 20.)
# Two layer model for true velocity
model = demo_model('layers-tti', ratio=3, nbpml=nbpml,
shape=shape, spacing=spacing)
    # Derive timestepping from model spacing
dt = model.critical_dt
nt = int(1 + (tn-t0) / dt) # Number of timesteps
time_values = np.linspace(t0, tn, nt) # Discretized time axis
# Define source geometry (center of domain, just below surface)
src = GaborSource(name='src', grid=model.grid, f0=0.01, time=time_values)
src.coordinates.data[0, :] = np.array(model.domain_size) * .5
src.coordinates.data[0, -1] = model.origin[-1] + 2 * spacing[-1]
    # Define receiver geometry (spread across x, just below surface)
rec = Receiver(name='nrec', grid=model.grid, ntime=nt, npoint=nrec)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=nrec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
return AnisotropicWaveSolver(model, source=src, receiver=rec,
time_order=2, space_order=space_order, dse=dse)
@pytest.fixture(scope="session")
def tti_nodse():
operator = tti_operator(dse=None)
rec, u, v, _ = operator.forward()
return v, rec
@skipif_yask
def test_tti_clusters_to_graph():
solver = tti_operator()
nodes = FindNodes(Expression).visit(solver.op_fwd('centered').elemental_functions +
(solver.op_fwd('centered'),))
expressions = [n.expr for n in nodes]
stencils = solver.op_fwd('centered')._retrieve_stencils(expressions)
clusters = clusterize(expressions, stencils)
assert len(clusters) == 3
main_cluster = clusters[0]
n_output_tensors = len(main_cluster.trace)
clusters = rewrite([main_cluster], mode='basic')
assert len(clusters) == 1
main_cluster = clusters[0]
graph = main_cluster.trace
assert len([v for v in graph.values() if v.is_tensor]) == n_output_tensors # u and v
assert all(v.reads or v.readby for v in graph.values())
@skipif_yask
def test_tti_rewrite_basic(tti_nodse):
operator = tti_operator(dse='basic')
rec, u, v, _ = operator.forward()
assert np.allclose(tti_nodse[0].data, v.data, atol=10e-3)
assert np.allclose(tti_nodse[1].data, rec.data, atol=10e-3)
@skipif_yask
def test_tti_rewrite_advanced(tti_nodse):
operator = tti_operator(dse='advanced')
rec, u, v, _ = operator.forward()
assert np.allclose(tti_nodse[0].data, v.data, atol=10e-1)
assert np.allclose(tti_nodse[1].data, rec.data, atol=10e-1)
@skipif_yask
def test_tti_rewrite_speculative(tti_nodse):
operator = tti_operator(dse='speculative')
rec, u, v, _ = operator.forward()
assert np.allclose(tti_nodse[0].data, v.data, atol=10e-1)
assert np.allclose(tti_nodse[1].data, rec.data, atol=10e-1)
@skipif_yask
def test_tti_rewrite_aggressive(tti_nodse):
operator = tti_operator(dse='aggressive')
rec, u, v, _ = operator.forward()
assert np.allclose(tti_nodse[0].data, v.data, atol=10e-1)
assert np.allclose(tti_nodse[1].data, rec.data, atol=10e-1)
@skipif_yask
@pytest.mark.parametrize('kernel,space_order,expected', [
('shifted', 8, 355), ('shifted', 16, 811),
('centered', 8, 168), ('centered', 16, 300)
])
def test_tti_rewrite_aggressive_opcounts(kernel, space_order, expected):
operator = tti_operator(dse='aggressive', space_order=space_order)
_, _, _, summary = operator.forward(kernel=kernel, save=False)
assert summary['main'].ops == expected
# DSE manipulation
@skipif_yask
@pytest.mark.parametrize('exprs,expected', [
# simple
(['Eq(ti1, 4.)', 'Eq(ti0, 3.)', 'Eq(tu, ti0 + ti1 + 5.)'],
['ti0[x, y, z] + ti1[x, y, z]']),
# more ops
(['Eq(ti1, 4.)', 'Eq(ti0, 3.)', 'Eq(t0, 0.2)', 'Eq(t1, t0 + 2.)',
'Eq(tw, 2. + ti0*t1)', 'Eq(tu, (ti0*ti1*t0) + (ti1*tv) + (t1 + ti1)*tw)'],
['t1*ti0[x, y, z]', 't1 + ti1[x, y, z]', 't0*ti0[x, y, z]*ti1[x, y, z]']),
# wrapped
(['Eq(ti1, 4.)', 'Eq(ti0, 3.)', 'Eq(t0, 0.2)', 'Eq(t1, t0 + 2.)', 'Eq(tv, 2.4)',
'Eq(tu, ((ti0*ti1*t0)*tv + (ti0*ti1*tv)*t1))'],
['t0*ti0[x, y, z]*ti1[x, y, z]', 't1*ti0[x, y, z]*ti1[x, y, z]']),
])
def test_xreplace_constrained_time_invariants(tu, tv, tw, ti0, ti1, t0, t1,
exprs, expected):
exprs = EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1)
make = lambda i: Scalar(name='r%d' % i).indexify()
processed, found = xreplace_constrained(exprs, make,
iq_timeinvariant(TemporariesGraph(exprs)),
lambda i: estimate_cost(i) > 0)
assert len(found) == len(expected)
assert all(str(i.rhs) == j for i, j in zip(found, expected))
@skipif_yask
@pytest.mark.parametrize('exprs,expected', [
# simple
(['Eq(ti0, 3.)', 'Eq(tv, 2.4)', 'Eq(tu, tv + 5. + ti0)'],
['tv[t, x, y, z] + 5.0']),
# more ops
(['Eq(tv, 2.4)', 'Eq(tw, tv*2.3)', 'Eq(ti1, 4.)', 'Eq(ti0, 3. + ti1)',
'Eq(tu, tv*tw*4.*ti0 + ti1*tv)'],
['4.0*tv[t, x, y, z]*tw[t, x, y, z]']),
# wrapped
(['Eq(tv, 2.4)', 'Eq(tw, tv*tw*2.3)', 'Eq(ti1, 4.)', 'Eq(ti0, 3. + ti1)',
'Eq(tu, ((tv + 4.)*ti0*ti1 + (tv + tw)/3.)*ti1*t0)'],
['tv[t, x, y, z] + 4.0',
'0.333333333333333*tv[t, x, y, z] + 0.333333333333333*tw[t, x, y, z]']),
])
def test_xreplace_constrained_time_varying(tu, tv, tw, ti0, ti1, t0, t1,
exprs, expected):
exprs = EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1)
make = lambda i: Scalar(name='r%d' % i).indexify()
processed, found = xreplace_constrained(exprs, make,
iq_timevarying(TemporariesGraph(exprs)),
lambda i: estimate_cost(i) > 0)
assert len(found) == len(expected)
assert all(str(i.rhs) == j for i, j in zip(found, expected))
@skipif_yask
@pytest.mark.parametrize('exprs,expected', [
# simple
(['Eq(tu, (tv + tw + 5.)*(ti0 + ti1) + (t0 + t1)*(ti0 + ti1))'],
['ti0[x, y, z] + ti1[x, y, z]',
'r0*(t0 + t1) + r0*(tv[t, x, y, z] + tw[t, x, y, z] + 5.0)']),
# across expressions
(['Eq(tu, tv*4 + tw*5 + tw*5*t0)', 'Eq(tv, tw*5)'],
['5*tw[t, x, y, z]', 'r0 + 5*t0*tw[t, x, y, z] + 4*tv[t, x, y, z]', 'r0']),
# intersecting
pytest.mark.xfail((['Eq(tu, ti0*ti1 + ti0*ti1*t0 + ti0*ti1*t0*t1)'],
['ti0*ti1', 'r0', 'r0*t0', 'r0*t0*t1'])),
])
def test_common_subexprs_elimination(tu, tv, tw, ti0, ti1, t0, t1, exprs, expected):
make = lambda i: Scalar(name='r%d' % i).indexify()
processed = common_subexprs_elimination(EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1),
make)
assert len(processed) == len(expected)
assert all(str(i.rhs) == j for i, j in zip(processed, expected))
@skipif_yask
@pytest.mark.parametrize('exprs,expected', [
(['Eq(t0, 3.)', 'Eq(t1, 7.)', 'Eq(ti0, t0*3. + 2.)', 'Eq(ti1, t1 + t0 + 1.5)',
'Eq(tv, (ti0 + ti1)*t0)', 'Eq(tw, (ti0 + ti1)*t1)',
'Eq(tu, (tv + tw + 5.)*(ti0 + ti1) + (t0 + t1)*(ti0 + ti1))'],
'{tu: {tu, tv, tw, ti0, ti1, t0, t1}, tv: {ti0, ti1, t0, tv},\
tw: {ti0, ti1, t1, tw}, ti0: {ti0, t0}, ti1: {ti1, t1, t0}, t0: {t0}, t1: {t1}}'),
])
def test_graph_trace(tu, tv, tw, ti0, ti1, t0, t1, exprs, expected):
g = TemporariesGraph(EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1))
mapper = eval(expected)
for i in [tu, tv, tw, ti0, ti1, t0, t1]:
assert set([j.lhs for j in g.trace(i)]) == mapper[i]
@skipif_yask
@pytest.mark.parametrize('exprs,expected', [
# trivial
(['Eq(t0, 1.)', 'Eq(t1, fa[x] + fb[x])'],
'{t0: False, t1: False}'),
# trivial
(['Eq(t0, 1)', 'Eq(t1, fa[t0] + fb[x])'],
'{t0: True, t1: False}'),
# simple
(['Eq(t0, 1)', 'Eq(t1, fa[t0*4 + 1] + fb[x])'],
'{t0: True, t1: False}'),
# two-steps
(['Eq(t0, 1.)', 'Eq(t1, t0 + 4)', 'Eq(t2, fa[t1*4 + 1] + fb[x])'],
'{t0: False, t1: True, t2: False}'),
# indirect
pytest.mark.xfail((['Eq(t0, 1)', 'Eq(t1, fa[fb[t0]] + fb[x])'],
'{t0: True, t1: False}')),
])
def test_graph_isindex(fa, fb, fc, t0, t1, t2, exprs, expected):
g = TemporariesGraph(EVAL(exprs, fa, fb, fc, t0, t1, t2))
mapper = eval(expected)
for k, v in mapper.items():
assert g.is_index(k) == v
@skipif_yask
@pytest.mark.parametrize('expr,expected', [
('2*fa[x] + fb[x]', '2*fa[x] + fb[x]'),
('fa[x]**2', 'fa[x]*fa[x]'),
('fa[x]**2 + fb[x]**3', 'fa[x]*fa[x] + fb[x]*fb[x]*fb[x]'),
('3*fa[x]**4', '3*(fa[x]*fa[x]*fa[x]*fa[x])'),
('fa[x]**2', 'fa[x]*fa[x]'),
('1/(fa[x]**2)', 'fa[x]**(-2)'),
('1/(fa[x] + fb[x])', '1/(fa[x] + fb[x])'),
('3*sin(fa[x])**2', '3*(sin(fa[x])*sin(fa[x]))'),
])
def test_pow_to_mul(fa, fb, expr, expected):
assert str(pow_to_mul(eval(expr))) == expected
@skipif_yask
@pytest.mark.parametrize('exprs,expected', [
# none (different distance)
(['Eq(t0, fa[x] + fb[x])', 'Eq(t1, fa[x+1] + fb[x])'],
{'fa[x] + fb[x]': None, 'fa[x+1] + fb[x]': None}),
# none (different dimension)
(['Eq(t0, fa[x] + fb[x])', 'Eq(t1, fa[x] + fb[y])'],
{'fa[x] + fb[x]': None, 'fa[x] + fb[y]': None}),
# none (different operation)
(['Eq(t0, fa[x] + fb[x])', 'Eq(t1, fa[x] - fb[x])'],
{'fa[x] + fb[x]': None, 'fa[x] - fb[x]': None}),
# simple
(['Eq(t0, fa[x] + fb[x])', 'Eq(t1, fa[x+1] + fb[x+1])', 'Eq(t2, fa[x-1] + fb[x-1])'],
{'fa[x] + fb[x]': Stencil([(x, {-1, 0, 1})])}),
# 2D simple
(['Eq(t0, fc[x,y] + fd[x,y])', 'Eq(t1, fc[x+1,y+1] + fd[x+1,y+1])'],
{'fc[x,y] + fd[x,y]': Stencil([(x, {0, 1}), (y, {0, 1})])}),
# 2D with stride
(['Eq(t0, fc[x,y] + fd[x+1,y+2])', 'Eq(t1, fc[x+1,y+1] + fd[x+2,y+3])'],
{'fc[x,y] + fd[x+1,y+2]': Stencil([(x, {0, 1}), (y, {0, 1})])}),
# complex (two 2D aliases with stride inducing relaxation)
(['Eq(t0, fc[x,y] + fd[x+1,y+2])', 'Eq(t1, fc[x+1,y+1] + fd[x+2,y+3])',
'Eq(t2, fc[x-2,y-2]*3. + fd[x+2,y+2])', 'Eq(t3, fc[x-4,y-4]*3. + fd[x,y])'],
{'fc[x,y] + fd[x+1,y+2]': Stencil([(x, {-1, 0, 1}), (y, {-1, 0, 1})]),
'3.*fc[x-3,y-3] + fd[x+1,y+1]': Stencil([(x, {-1, 0, 1}), (y, {-1, 0, 1})])}),
])
def test_collect_aliases(fa, fb, fc, fd, t0, t1, t2, t3, exprs, expected):
scope = [fa, fb, fc, fd, t0, t1, t2, t3]
mapper = dict([(EVAL(k, *scope), v) for k, v in expected.items()])
_, aliases = collect(EVAL(exprs, *scope))
for k, v in aliases.items():
assert k in mapper
assert (len(v.aliased) == 1 and mapper[k] is None) or v.anti_stencil == mapper[k]
@skipif_yask
@pytest.mark.parametrize('expr,expected', [
('Eq(t0, t1)', 0),
('Eq(t0, fa[x] + fb[x])', 1),
('Eq(t0, fa[x + 1] + fb[x - 1])', 1),
('Eq(t0, fa[fb[x+1]] + fa[x])', 1),
('Eq(t0, fa[fb[x+1]] + fc[x+2, y+1])', 1),
('Eq(t0, t1*t2)', 1),
('Eq(t0, 2.*t0*t1*t2)', 3),
('Eq(t0, cos(t1*t2))', 2),
('Eq(t0, 2.*t0*t1*t2 + t0*fa[x+1])', 5),
('Eq(t0, (2.*t0*t1*t2 + t0*fa[x+1])*3. - t0)', 7),
('[Eq(t0, (2.*t0*t1*t2 + t0*fa[x+1])*3. - t0), Eq(t0, cos(t1*t2))]', 9),
])
def test_estimate_cost(fa, fb, fc, t0, t1, t2, expr, expected):
# Note: integer arithmetic isn't counted
assert estimate_cost(EVAL(expr, fa, fb, fc, t0, t1, t2)) == expected
| 38.75493
| 89
| 0.566071
|
4a168fb77dc07945c99df32b9a259bbc17575dc8
| 608
|
py
|
Python
|
ENCODN/FRAMES/CODONS.py
|
akshitadixit/ENCODN
|
7b4ecaba10314f9f59f53e9b479016b21f8b632b
|
[
"RSA-MD"
] | 6
|
2020-10-07T13:09:38.000Z
|
2021-01-16T17:16:51.000Z
|
ENCODN/FRAMES/CODONS.py
|
akshitadixit/ENCODN
|
7b4ecaba10314f9f59f53e9b479016b21f8b632b
|
[
"RSA-MD"
] | 27
|
2020-10-09T09:14:23.000Z
|
2021-01-22T07:16:43.000Z
|
ENCODN/FRAMES/CODONS.py
|
DSC-IIIT-Kalyani/ENCODN
|
62752ac7e368b294ec9613a1f73cb3f1f7c878f5
|
[
"RSA-MD"
] | 14
|
2020-10-07T14:25:59.000Z
|
2021-02-21T16:54:37.000Z
|
from tkinter import *
from tkinter import ttk
t_names = ["CODONS"]
frames = []
fr_names = []
def CODONS(master=None):
s = ttk.Style(master)
    s.configure('lefttab.TNotebook', padding=[20, 20], tabposition='wn')
nb = ttk.Notebook(master, s='lefttab.TNotebook', width=800, height=570)
nb.grid(row=0, column=0, sticky="e", padx=20, pady=15)
nb.grid_propagate(0)
for i in range(len(t_names)):
frames.append(Frame(nb,bg="#7ad159", width = 750, height=500))
nb.add(frames[i], text=t_names[i])
#calling frame setups here
for i in range(len(fr_names)):
fr_names[i](frames[i])
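# Illustrative usage (assumed, not part of the original module): build a Tk
# root window and embed the notebook, e.g.
#
#     root = Tk()
#     CODONS(root)
#     root.mainloop()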
| 24.32
| 73
| 0.662829
|
4a16914556135e3e400a94736e1cc043c193defc
| 1,072
|
py
|
Python
|
swift/common/middleware/tempurl.py
|
IPVL/swift_test
|
41d1ee3d575036a0788425b20da87aeee00bb605
|
[
"MIT"
] | null | null | null |
swift/common/middleware/tempurl.py
|
IPVL/swift_test
|
41d1ee3d575036a0788425b20da87aeee00bb605
|
[
"MIT"
] | null | null | null |
swift/common/middleware/tempurl.py
|
IPVL/swift_test
|
41d1ee3d575036a0788425b20da87aeee00bb605
|
[
"MIT"
] | null | null | null |
from swift.ipvl.inspect_custom import whoami, whosdaddy
pass # (WIS) print __name__
class TempURL(object):
"""docstring for TempURL"""
def __init__(self, app, conf, methods):
pass # (WIS) print "%s %s (%s -> %s)" % (__name__, self.__class__.__name__, whosdaddy(), whoami())
self.app = app
self.conf = conf
self.methods = methods
def __call__(self, env, start_response):
pass # (WIS) print "%s %s\n" % (self.__class__.__name__, env)
start_response('200 OK', [('Content-Type', 'text/plain')])
return self.__class__.__name__ + " -> " + self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
pass # (WIS) print "%s (%s -> %s)" % (__name__, whosdaddy(), whoami())
conf = global_conf.copy()
conf.update(local_conf)
methods = conf.get('methods', 'GET HEAD PUT POST DELETE').split()
# register_swift_info('tempurl', methods=methods)
return lambda app: TempURL(app, conf, methods=methods)
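# Illustrative paste.deploy wiring (assumed, not taken from this repo):
#
#     [filter:tempurl]
#     use = egg:swift#tempurl
#     methods = GET HEAD PUT POST DELETE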
| 34.580645
| 107
| 0.635261
|
4a1691e0314caa229d15b5bd9f9f1714472ec996
| 1,716
|
py
|
Python
|
homeassistant/components/iaqualink/sensor.py
|
joshwapohlmann/home-assistant
|
f3fa073045a39845e638aacf48e658d1f04e4801
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/iaqualink/sensor.py
|
joshwapohlmann/home-assistant
|
f3fa073045a39845e638aacf48e658d1f04e4801
|
[
"Apache-2.0"
] | 39
|
2016-12-16T12:40:34.000Z
|
2017-02-13T17:53:42.000Z
|
homeassistant/components/iaqualink/sensor.py
|
joshwapohlmann/home-assistant
|
f3fa073045a39845e638aacf48e658d1f04e4801
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Aqualink temperature sensors."""
import logging
from typing import Optional
from iaqualink import AqualinkSensor
from homeassistant.components.sensor import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.helpers.typing import HomeAssistantType
from . import AqualinkEntity
from .const import DOMAIN as AQUALINK_DOMAIN
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up discovered sensors."""
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkSensor(dev))
async_add_entities(devs, True)
class HassAqualinkSensor(AqualinkEntity):
"""Representation of a sensor."""
def __init__(self, dev: AqualinkSensor):
"""Initialize the sensor."""
self.dev = dev
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self.dev.label
@property
def unit_of_measurement(self) -> str:
"""Return the measurement unit for the sensor."""
if self.dev.system.temp_unit == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
    def state(self) -> Optional[int]:
"""Return the state of the sensor."""
return int(self.dev.state) if self.dev.state != "" else None
@property
def device_class(self) -> Optional[str]:
"""Return the class of the sensor."""
if self.dev.name.endswith("_temp"):
return DEVICE_CLASS_TEMPERATURE
return None
| 28.6
| 87
| 0.695804
|
4a1692dbd53bff4c76fa30e905b9f838eff69448
| 91,624
|
py
|
Python
|
sympy/assumptions/tests/test_query.py
|
ovolve/sympy
|
0a15782f20505673466b940454b33b8014a25c13
|
[
"BSD-3-Clause"
] | 1
|
2016-02-22T22:46:50.000Z
|
2016-02-22T22:46:50.000Z
|
sympy/assumptions/tests/test_query.py
|
ovolve/sympy
|
0a15782f20505673466b940454b33b8014a25c13
|
[
"BSD-3-Clause"
] | 7
|
2017-05-01T14:15:32.000Z
|
2017-09-06T20:44:24.000Z
|
sympy/assumptions/tests/test_query.py
|
ovolve/sympy
|
0a15782f20505673466b940454b33b8014a25c13
|
[
"BSD-3-Clause"
] | 1
|
2016-04-06T15:05:37.000Z
|
2016-04-06T15:05:37.000Z
|
from sympy.abc import t, w, x, y, z, n, k, m, p, i
from sympy.assumptions import (ask, AssumptionsContext, Q, register_handler,
remove_handler)
from sympy.assumptions.assume import global_assumptions
from sympy.assumptions.ask import compute_known_facts, single_fact_lookup
from sympy.assumptions.handlers import AskHandler
from sympy.core.add import Add
from sympy.core.numbers import (I, Integer, Rational, oo, pi)
from sympy.core.singleton import S
from sympy.core.power import Pow
from sympy.core.symbol import symbols
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.complexes import (Abs, im, re, sign)
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (
acos, acot, asin, atan, cos, cot, sin, tan)
from sympy.logic.boolalg import Equivalent, Implies, Xor, And, to_cnf
from sympy.utilities.pytest import raises, XFAIL, slow
from sympy.assumptions.assume import assuming
from sympy.utilities.exceptions import SymPyDeprecationWarning
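# The tests below probe ask() with the Q.* predicates on concrete numbers,
# special constants, and symbolic expressions; expected results are True,
# False, or None (undecidable from the given assumptions).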
def test_int_1():
z = 1
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is True
assert ask(Q.rational(z)) is True
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is False
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is True
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_int_11():
z = 11
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is True
assert ask(Q.rational(z)) is True
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is False
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is True
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is True
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_int_12():
z = 12
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is True
assert ask(Q.rational(z)) is True
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is False
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is True
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is True
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_float_1():
z = 1.0
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is True
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is False
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
z = 7.2123
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is True
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is False
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_zero_0():
z = Integer(0)
assert ask(Q.nonzero(z)) is False
assert ask(Q.zero(z)) is True
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is True
assert ask(Q.rational(z)) is True
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is False
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is True
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_negativeone():
z = Integer(-1)
assert ask(Q.nonzero(z)) is True
assert ask(Q.zero(z)) is False
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is True
assert ask(Q.rational(z)) is True
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is False
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is False
assert ask(Q.negative(z)) is True
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is True
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_infinity():
assert ask(Q.commutative(oo)) is True
assert ask(Q.integer(oo)) is False
assert ask(Q.rational(oo)) is False
assert ask(Q.algebraic(oo)) is False
assert ask(Q.real(oo)) is False
assert ask(Q.extended_real(oo)) is True
assert ask(Q.complex(oo)) is False
assert ask(Q.irrational(oo)) is False
assert ask(Q.imaginary(oo)) is False
assert ask(Q.positive(oo)) is True
assert ask(Q.negative(oo)) is False
assert ask(Q.even(oo)) is False
assert ask(Q.odd(oo)) is False
assert ask(Q.finite(oo)) is False
assert ask(Q.prime(oo)) is False
assert ask(Q.composite(oo)) is False
assert ask(Q.hermitian(oo)) is False
assert ask(Q.antihermitian(oo)) is False
def test_neg_infinity():
mm = S.NegativeInfinity
assert ask(Q.commutative(mm)) is True
assert ask(Q.integer(mm)) is False
assert ask(Q.rational(mm)) is False
assert ask(Q.algebraic(mm)) is False
assert ask(Q.real(mm)) is False
assert ask(Q.extended_real(mm)) is True
assert ask(Q.complex(mm)) is False
assert ask(Q.irrational(mm)) is False
assert ask(Q.imaginary(mm)) is False
assert ask(Q.positive(mm)) is False
assert ask(Q.negative(mm)) is True
assert ask(Q.even(mm)) is False
assert ask(Q.odd(mm)) is False
assert ask(Q.finite(mm)) is False
assert ask(Q.prime(mm)) is False
assert ask(Q.composite(mm)) is False
assert ask(Q.hermitian(mm)) is False
assert ask(Q.antihermitian(mm)) is False
def test_nan():
nan = S.NaN
assert ask(Q.commutative(nan)) is True
assert ask(Q.integer(nan)) is False
assert ask(Q.rational(nan)) is False
assert ask(Q.algebraic(nan)) is False
assert ask(Q.real(nan)) is False
assert ask(Q.extended_real(nan)) is False
assert ask(Q.complex(nan)) is False
assert ask(Q.irrational(nan)) is False
assert ask(Q.imaginary(nan)) is False
assert ask(Q.positive(nan)) is False
assert ask(Q.nonzero(nan)) is True
assert ask(Q.zero(nan)) is False
assert ask(Q.even(nan)) is False
assert ask(Q.odd(nan)) is False
assert ask(Q.finite(nan)) is False
assert ask(Q.prime(nan)) is False
assert ask(Q.composite(nan)) is False
assert ask(Q.hermitian(nan)) is False
assert ask(Q.antihermitian(nan)) is False
def test_Rational_number():
r = Rational(3, 4)
assert ask(Q.commutative(r)) is True
assert ask(Q.integer(r)) is False
assert ask(Q.rational(r)) is True
assert ask(Q.real(r)) is True
assert ask(Q.complex(r)) is True
assert ask(Q.irrational(r)) is False
assert ask(Q.imaginary(r)) is False
assert ask(Q.positive(r)) is True
assert ask(Q.negative(r)) is False
assert ask(Q.even(r)) is False
assert ask(Q.odd(r)) is False
assert ask(Q.finite(r)) is True
assert ask(Q.prime(r)) is False
assert ask(Q.composite(r)) is False
assert ask(Q.hermitian(r)) is True
assert ask(Q.antihermitian(r)) is False
r = Rational(1, 4)
assert ask(Q.positive(r)) is True
assert ask(Q.negative(r)) is False
r = Rational(5, 4)
assert ask(Q.negative(r)) is False
assert ask(Q.positive(r)) is True
r = Rational(5, 3)
assert ask(Q.positive(r)) is True
assert ask(Q.negative(r)) is False
r = Rational(-3, 4)
assert ask(Q.positive(r)) is False
assert ask(Q.negative(r)) is True
r = Rational(-1, 4)
assert ask(Q.positive(r)) is False
assert ask(Q.negative(r)) is True
r = Rational(-5, 4)
assert ask(Q.negative(r)) is True
assert ask(Q.positive(r)) is False
r = Rational(-5, 3)
assert ask(Q.positive(r)) is False
assert ask(Q.negative(r)) is True
def test_sqrt_2():
z = sqrt(2)
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is True
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_pi():
z = S.Pi
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is False
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is True
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
z = S.Pi + 1
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is False
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is True
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
z = 2*S.Pi
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is False
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is True
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
z = S.Pi ** 2
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is False
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is True
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
z = (1 + S.Pi) ** 2
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is False
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is True
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_E():
z = S.Exp1
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is False
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is True
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_GoldenRatio():
z = S.GoldenRatio
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is True
assert ask(Q.real(z)) is True
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is True
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is True
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is True
assert ask(Q.antihermitian(z)) is False
def test_I():
z = I
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is True
assert ask(Q.real(z)) is False
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is False
assert ask(Q.imaginary(z)) is True
assert ask(Q.positive(z)) is False
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is False
assert ask(Q.antihermitian(z)) is True
z = 1 + I
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is True
assert ask(Q.real(z)) is False
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is False
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is False
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is False
assert ask(Q.antihermitian(z)) is False
z = I*(1 + I)
assert ask(Q.commutative(z)) is True
assert ask(Q.integer(z)) is False
assert ask(Q.rational(z)) is False
assert ask(Q.algebraic(z)) is True
assert ask(Q.real(z)) is False
assert ask(Q.complex(z)) is True
assert ask(Q.irrational(z)) is False
assert ask(Q.imaginary(z)) is False
assert ask(Q.positive(z)) is False
assert ask(Q.negative(z)) is False
assert ask(Q.even(z)) is False
assert ask(Q.odd(z)) is False
assert ask(Q.finite(z)) is True
assert ask(Q.prime(z)) is False
assert ask(Q.composite(z)) is False
assert ask(Q.hermitian(z)) is False
assert ask(Q.antihermitian(z)) is False
z = I**(I)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is True
z = (-I)**(I)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is True
z = (3*I)**(I)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is False
z = (1)**(I)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is True
z = (-1)**(I)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is True
z = (1+I)**(I)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is False
z = (I)**(I+3)
assert ask(Q.imaginary(z)) is True
assert ask(Q.real(z)) is False
z = (I)**(I+2)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is True
z = (I)**(2)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is True
z = (I)**(3)
assert ask(Q.imaginary(z)) is True
assert ask(Q.real(z)) is False
z = (3)**(I)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is False
z = (I)**(0)
assert ask(Q.imaginary(z)) is False
assert ask(Q.real(z)) is True
@slow
def test_bounded1():
x, y, z = symbols('x,y,z')
assert ask(Q.finite(x)) is None
assert ask(Q.finite(x), Q.finite(x)) is True
assert ask(Q.finite(x), Q.finite(y)) is None
assert ask(Q.finite(x), Q.complex(x)) is None
assert ask(Q.finite(x + 1)) is None
assert ask(Q.finite(x + 1), Q.finite(x)) is True
a = x + y
x, y = a.args
# B + B
assert ask(Q.finite(a), Q.finite(x) & Q.finite(y)) is True
assert ask(
Q.finite(a), Q.finite(x) & Q.finite(y) & Q.positive(x)) is True
assert ask(
Q.finite(a), Q.finite(x) & Q.finite(y) & Q.positive(y)) is True
assert ask(Q.finite(a),
Q.finite(x) & Q.finite(y) & Q.positive(x) & Q.positive(y)) is True
assert ask(Q.finite(a),
Q.finite(x) & Q.finite(y) & Q.positive(x) & ~Q.positive(y)) is True
assert ask(Q.finite(a),
Q.finite(x) & Q.finite(y) & ~Q.positive(x) & Q.positive(y)) is True
assert ask(Q.finite(a),
Q.finite(x) & Q.finite(y) & ~Q.positive(x) & ~Q.positive(y)) is True
# B + U
assert ask(Q.finite(a), Q.finite(x) & ~Q.finite(y)) is False
assert ask(
Q.finite(a), Q.finite(x) & ~Q.finite(y) & Q.positive(x)) is False
assert ask(
Q.finite(a), Q.finite(x) & ~Q.finite(y) & Q.positive(y)) is False
assert ask(Q.finite(a), Q.finite(x) & ~Q.finite(y) & Q.positive(x) &
Q.positive(y)) is False
assert ask(Q.finite(a), Q.finite(x) & ~Q.finite(y) & Q.positive(x) &
~Q.positive(y)) is False
assert ask(Q.finite(a), Q.finite(x) & ~Q.finite(y) & ~Q.positive(x) &
Q.positive(y)) is False
assert ask(Q.finite(a), Q.finite(x) & ~Q.finite(y) & ~Q.positive(x) &
~Q.positive(y)) is False
# B + ?
assert ask(Q.finite(a), Q.finite(x)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.positive(x)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.positive(y)) is None
assert ask(
Q.finite(a), Q.finite(x) & Q.positive(x) & Q.positive(y)) is None
assert ask(
Q.finite(a), Q.finite(x) & Q.positive(x) & ~Q.positive(y)) is None
assert ask(
Q.finite(a), Q.finite(x) & ~Q.positive(x) & Q.positive(y)) is None
assert ask(
Q.finite(a), Q.finite(x) & ~Q.positive(x) & ~Q.positive(y)) is None
# U + U
assert ask(Q.finite(a), ~Q.finite(x) & ~Q.finite(y)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & Q.positive(x)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & Q.positive(y)) is None
assert ask(Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & Q.positive(x) &
Q.positive(y)) is False
assert ask(Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & Q.positive(x) &
~Q.positive(y)) is None
assert ask(Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & ~Q.positive(x) &
Q.positive(y)) is None
assert ask(Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & ~Q.positive(x) &
~Q.positive(y)) is False
# U + ?
assert ask(Q.finite(a), ~Q.finite(y)) is None
assert ask(Q.finite(a), ~Q.finite(y) & Q.positive(x)) is None
assert ask(Q.finite(a), ~Q.finite(y) & Q.positive(y)) is None
assert ask(
Q.finite(a), ~Q.finite(y) & Q.positive(x) & Q.positive(y)) is False
assert ask(
Q.finite(a), ~Q.finite(y) & Q.positive(x) & ~Q.positive(y)) is None
assert ask(
Q.finite(a), ~Q.finite(y) & ~Q.positive(x) & Q.positive(y)) is None
assert ask(
Q.finite(a), ~Q.finite(y) & ~Q.positive(x) & ~Q.positive(y)) is False
# ? + ?
assert ask(Q.finite(a),) is None
assert ask(Q.finite(a), Q.positive(x)) is None
assert ask(Q.finite(a), Q.positive(y)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.positive(y)) is None
assert ask(Q.finite(a), Q.positive(x) & ~Q.positive(y)) is None
assert ask(Q.finite(a), ~Q.positive(x) & Q.positive(y)) is None
assert ask(Q.finite(a), ~Q.positive(x) & ~Q.positive(y)) is None
@slow
def test_bounded2a():
x, y, z = symbols('x,y,z')
a = x + y + z
x, y, z = a.args
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.negative(y) &
Q.finite(y) & Q.negative(z) & Q.finite(z)) is True
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.negative(y) & Q.finite(y) & Q.finite(z)) is True
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.negative(y) &
Q.finite(y) & Q.positive(z) & Q.finite(z)) is True
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.negative(y) &
Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.negative(y) & Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.negative(y) &
Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.negative(y) & Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.negative(y) & Q.finite(y)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.negative(y) & Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.finite(y) & Q.finite(z)) is True
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.finite(y) & Q.positive(z) & Q.finite(z)) is True
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.finite(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), Q.negative(x) & Q.finite(x) & Q.finite(y)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.positive(y) &
Q.finite(y) & Q.positive(z) & Q.finite(z)) is True
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.positive(y) &
Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.positive(y) & Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.positive(y) &
Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.positive(y) & Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.positive(y) & Q.finite(y)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.positive(y) & Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.negative(y) &
~Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.negative(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & Q.negative(z)) is False
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.negative(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), Q.negative(x) & Q.finite(x) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) & Q.positive(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.positive(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.positive(z)) is False
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.negative(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), Q.negative(x) & Q.finite(x) & Q.negative(y)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.negative(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.finite(x)) is None
assert ask(
Q.finite(a), Q.negative(x) & Q.finite(x) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
Q.finite(x) & Q.positive(y) & Q.positive(z)) is None
assert ask(
Q.finite(a), Q.finite(x) & Q.finite(y) & Q.finite(z)) is True
assert ask(Q.finite(a),
Q.finite(x) & Q.finite(y) & Q.positive(z) & Q.finite(z)) is True
assert ask(Q.finite(a), Q.finite(x) &
Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(
Q.finite(a), Q.finite(x) & Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.finite(x) &
Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
assert ask(
Q.finite(a), Q.finite(x) & Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.finite(y)) is None
assert ask(
Q.finite(a), Q.finite(x) & Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.positive(y) &
Q.finite(y) & Q.positive(z) & Q.finite(z)) is True
assert ask(Q.finite(a), Q.finite(x) & Q.positive(y) &
Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.finite(x) &
Q.positive(y) & Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.finite(x) & Q.positive(y) &
Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.finite(x) &
Q.positive(y) & Q.finite(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), Q.finite(x) & Q.positive(y) & Q.finite(y)) is None
assert ask(Q.finite(a), Q.finite(x) &
Q.positive(y) & Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.negative(y) &
~Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.negative(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & Q.negative(z)) is False
assert ask(
Q.finite(a), Q.finite(x) & Q.negative(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(
Q.finite(a), Q.finite(x) & ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.finite(x) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(
Q.finite(a), Q.finite(x) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.finite(x) & ~Q.finite(y)) is None
assert ask(
Q.finite(a), Q.finite(x) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.positive(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
@slow
def test_bounded2b():
x, y, z = symbols('x,y,z')
a = x + y + z
x, y, z = a.args
assert ask(Q.finite(a), Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), Q.finite(x) & Q.positive(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.positive(z)) is False
assert ask(
Q.finite(a), Q.finite(x) & Q.negative(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.negative(y)) is None
assert ask(
Q.finite(a), Q.finite(x) & Q.negative(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.finite(x)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.positive(z)) is None
assert ask(
Q.finite(a), Q.finite(x) & Q.positive(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) & Q.positive(y) &
Q.finite(y) & Q.positive(z) & Q.finite(z)) is True
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) & Q.positive(y) &
Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) &
Q.positive(y) & Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) & Q.positive(y) &
Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) &
Q.positive(y) & Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.positive(x) &
Q.finite(x) & Q.positive(y) & Q.finite(y)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) &
Q.positive(y) & Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) & Q.negative(y) &
~Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) & Q.negative(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & Q.negative(z)) is False
assert ask(Q.finite(a), Q.positive(x) &
Q.finite(x) & Q.negative(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(x) &
Q.finite(x) & ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.positive(x) &
Q.finite(x) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), Q.positive(x) & Q.finite(x) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.positive(x) &
Q.finite(x) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) & Q.positive(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.positive(x) &
Q.finite(x) & Q.positive(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.positive(z)) is False
assert ask(Q.finite(a), Q.positive(x) &
Q.finite(x) & Q.negative(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), Q.positive(x) & Q.finite(x) & Q.negative(y)) is None
assert ask(Q.finite(a), Q.positive(x) &
Q.finite(x) & Q.negative(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(x) & Q.finite(x)) is None
assert ask(
Q.finite(a), Q.positive(x) & Q.finite(x) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(x) &
Q.finite(x) & Q.positive(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x) & Q.negative(y) &
~Q.finite(y) & Q.negative(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x) & Q.negative(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & Q.negative(z)) is False
assert ask(Q.finite(a), Q.negative(x) &
~Q.finite(x) & Q.negative(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x) &
Q.negative(y) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
~Q.finite(x) & ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
~Q.finite(x) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), Q.negative(x) & ~Q.finite(x) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.negative(x) &
~Q.finite(x) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x) & Q.positive(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
~Q.finite(x) & Q.positive(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
~Q.finite(x) & Q.negative(y) & Q.negative(z)) is False
assert ask(
Q.finite(a), Q.negative(x) & ~Q.finite(x) & Q.negative(y)) is None
assert ask(Q.finite(a), Q.negative(x) &
~Q.finite(x) & Q.negative(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) & ~Q.finite(x)) is None
assert ask(
Q.finite(a), Q.negative(x) & ~Q.finite(x) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x) &
~Q.finite(x) & Q.positive(y) & Q.positive(z)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), ~Q.finite(x) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), ~Q.finite(x) & ~Q.finite(y)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(Q.finite(a), ~Q.finite(x) & Q.positive(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is None
assert ask(Q.finite(a), ~Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & Q.positive(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), ~Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.positive(z)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & Q.negative(y) & Q.negative(z)) is None
assert ask(Q.finite(a), ~Q.finite(x) & Q.negative(y)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & Q.negative(y) & Q.positive(z)) is None
assert ask(Q.finite(a), ~Q.finite(x)) is None
assert ask(Q.finite(a), ~Q.finite(x) & Q.positive(z)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & Q.positive(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(x) & ~Q.finite(x) & Q.positive(y) &
~Q.finite(y) & Q.positive(z) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.positive(x) & ~Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.positive(x) &
~Q.finite(x) & Q.positive(y) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.positive(x) & ~Q.finite(x) &
Q.positive(y) & ~Q.finite(y) & Q.positive(z)) is False
assert ask(Q.finite(a), Q.positive(x) &
~Q.finite(x) & Q.negative(y) & Q.negative(z)) is None
assert ask(
Q.finite(a), Q.positive(x) & ~Q.finite(x) & Q.negative(y)) is None
assert ask(Q.finite(a), Q.positive(x) &
~Q.finite(x) & Q.negative(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(x) & ~Q.finite(x)) is None
assert ask(
Q.finite(a), Q.positive(x) & ~Q.finite(x) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(x) &
~Q.finite(x) & Q.positive(y) & Q.positive(z)) is False
assert ask(
Q.finite(a), Q.negative(x) & Q.negative(y) & Q.negative(z)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.negative(y)) is None
assert ask(
Q.finite(a), Q.negative(x) & Q.negative(y) & Q.positive(z)) is None
assert ask(Q.finite(a), Q.negative(x)) is None
assert ask(Q.finite(a), Q.negative(x) & Q.positive(z)) is None
assert ask(
Q.finite(a), Q.negative(x) & Q.positive(y) & Q.positive(z)) is None
assert ask(Q.finite(a)) is None
assert ask(Q.finite(a), Q.positive(z)) is None
assert ask(Q.finite(a), Q.positive(y) & Q.positive(z)) is None
assert ask(
Q.finite(a), Q.positive(x) & Q.positive(y) & Q.positive(z)) is None
assert ask(Q.finite(2*x)) is None
assert ask(Q.finite(2*x), Q.finite(x)) is True
@slow
def test_bounded3():
x, y, z = symbols('x,y,z')
a = x*y
x, y = a.args
assert ask(Q.finite(a), Q.finite(x) & Q.finite(y)) is True
assert ask(Q.finite(a), Q.finite(x) & ~Q.finite(y)) is False
assert ask(Q.finite(a), Q.finite(x)) is None
assert ask(Q.finite(a), ~Q.finite(x) & Q.finite(y)) is False
assert ask(Q.finite(a), ~Q.finite(x) & ~Q.finite(y)) is False
assert ask(Q.finite(a), ~Q.finite(x)) is None
assert ask(Q.finite(a), Q.finite(y)) is None
assert ask(Q.finite(a), ~Q.finite(y)) is None
assert ask(Q.finite(a)) is None
a = x*y*z
x, y, z = a.args
assert ask(
Q.finite(a), Q.finite(x) & Q.finite(y) & Q.finite(z)) is True
assert ask(
Q.finite(a), Q.finite(x) & Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.finite(x) & Q.finite(y)) is None
assert ask(
Q.finite(a), Q.finite(x) & ~Q.finite(y) & Q.finite(z)) is False
assert ask(
Q.finite(a), Q.finite(x) & ~Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), Q.finite(x) & ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.finite(x) & Q.finite(z)) is None
assert ask(Q.finite(a), Q.finite(x) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.finite(x)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & Q.finite(y) & Q.finite(z)) is False
assert ask(
Q.finite(a), ~Q.finite(x) & Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), ~Q.finite(x) & Q.finite(y)) is None
assert ask(
Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & Q.finite(z)) is False
assert ask(
Q.finite(a), ~Q.finite(x) & ~Q.finite(y) & ~Q.finite(z)) is False
assert ask(Q.finite(a), ~Q.finite(x) & ~Q.finite(y)) is None
assert ask(Q.finite(a), ~Q.finite(x) & Q.finite(z)) is None
assert ask(Q.finite(a), ~Q.finite(x) & ~Q.finite(z)) is None
assert ask(Q.finite(a), ~Q.finite(x)) is None
assert ask(Q.finite(a), Q.finite(y) & Q.finite(z)) is None
assert ask(Q.finite(a), Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), Q.finite(y)) is None
assert ask(Q.finite(a), ~Q.finite(y) & Q.finite(z)) is None
assert ask(Q.finite(a), ~Q.finite(y) & ~Q.finite(z)) is None
assert ask(Q.finite(a), ~Q.finite(y)) is None
assert ask(Q.finite(a), Q.finite(z)) is None
assert ask(Q.finite(a), ~Q.finite(z)) is None
assert ask(Q.finite(a), ~Q.finite(z) &
Q.nonzero(x) & Q.nonzero(y) & Q.nonzero(z)) is None
assert ask(Q.finite(a), ~Q.finite(y) & ~Q.finite(z) &
Q.nonzero(x) & Q.nonzero(y) & Q.nonzero(z)) is False
x, y, z = symbols('x,y,z')
assert ask(Q.finite(x**2)) is None
assert ask(Q.finite(2**x)) is None
assert ask(Q.finite(2**x), Q.finite(x)) is True
assert ask(Q.finite(x**x)) is None
assert ask(Q.finite(Rational(1, 2) ** x)) is None
assert ask(Q.finite(Rational(1, 2) ** x), Q.positive(x)) is True
assert ask(Q.finite(Rational(1, 2) ** x), Q.negative(x)) is None
assert ask(Q.finite(2**x), Q.negative(x)) is True
assert ask(Q.finite(sqrt(x))) is None
assert ask(Q.finite(2**x), ~Q.finite(x)) is False
assert ask(Q.finite(x**2), ~Q.finite(x)) is False
# sign function
assert ask(Q.finite(sign(x))) is True
assert ask(Q.finite(sign(x)), ~Q.finite(x)) is True
# exponential functions
assert ask(Q.finite(log(x))) is None
assert ask(Q.finite(log(x)), Q.finite(x)) is True
assert ask(Q.finite(exp(x))) is None
assert ask(Q.finite(exp(x)), Q.finite(x)) is True
assert ask(Q.finite(exp(2))) is True
# trigonometric functions
assert ask(Q.finite(sin(x))) is True
assert ask(Q.finite(sin(x)), ~Q.finite(x)) is True
assert ask(Q.finite(cos(x))) is True
assert ask(Q.finite(cos(x)), ~Q.finite(x)) is True
assert ask(Q.finite(2*sin(x))) is True
assert ask(Q.finite(sin(x)**2)) is True
assert ask(Q.finite(cos(x)**2)) is True
assert ask(Q.finite(cos(x) + sin(x))) is True
@XFAIL
def test_bounded_xfail():
"""We need to support relations in ask for this to work"""
assert ask(Q.finite(sin(x)**x)) is True
assert ask(Q.finite(cos(x)**x)) is True
def test_commutative():
"""By default objects are Q.commutative that is why it returns True
for both key=True and key=False"""
assert ask(Q.commutative(x)) is True
assert ask(Q.commutative(x), ~Q.commutative(x)) is False
assert ask(Q.commutative(x), Q.complex(x)) is True
assert ask(Q.commutative(x), Q.imaginary(x)) is True
assert ask(Q.commutative(x), Q.real(x)) is True
assert ask(Q.commutative(x), Q.positive(x)) is True
assert ask(Q.commutative(x), ~Q.commutative(y)) is True
assert ask(Q.commutative(2*x)) is True
assert ask(Q.commutative(2*x), ~Q.commutative(x)) is False
assert ask(Q.commutative(x + 1)) is True
assert ask(Q.commutative(x + 1), ~Q.commutative(x)) is False
assert ask(Q.commutative(x**2)) is True
assert ask(Q.commutative(x**2), ~Q.commutative(x)) is False
assert ask(Q.commutative(log(x))) is True
def test_complex():
assert ask(Q.complex(x)) is None
assert ask(Q.complex(x), Q.complex(x)) is True
assert ask(Q.complex(x), Q.complex(y)) is None
assert ask(Q.complex(x), ~Q.complex(x)) is False
assert ask(Q.complex(x), Q.real(x)) is True
assert ask(Q.complex(x), ~Q.real(x)) is None
assert ask(Q.complex(x), Q.rational(x)) is True
assert ask(Q.complex(x), Q.irrational(x)) is True
assert ask(Q.complex(x), Q.positive(x)) is True
assert ask(Q.complex(x), Q.imaginary(x)) is True
assert ask(Q.complex(x), Q.algebraic(x)) is True
# a+b
assert ask(Q.complex(x + 1), Q.complex(x)) is True
assert ask(Q.complex(x + 1), Q.real(x)) is True
assert ask(Q.complex(x + 1), Q.rational(x)) is True
assert ask(Q.complex(x + 1), Q.irrational(x)) is True
assert ask(Q.complex(x + 1), Q.imaginary(x)) is True
assert ask(Q.complex(x + 1), Q.integer(x)) is True
assert ask(Q.complex(x + 1), Q.even(x)) is True
assert ask(Q.complex(x + 1), Q.odd(x)) is True
assert ask(Q.complex(x + y), Q.complex(x) & Q.complex(y)) is True
assert ask(Q.complex(x + y), Q.real(x) & Q.imaginary(y)) is True
# a*x +b
assert ask(Q.complex(2*x + 1), Q.complex(x)) is True
assert ask(Q.complex(2*x + 1), Q.real(x)) is True
assert ask(Q.complex(2*x + 1), Q.positive(x)) is True
assert ask(Q.complex(2*x + 1), Q.rational(x)) is True
assert ask(Q.complex(2*x + 1), Q.irrational(x)) is True
assert ask(Q.complex(2*x + 1), Q.imaginary(x)) is True
assert ask(Q.complex(2*x + 1), Q.integer(x)) is True
assert ask(Q.complex(2*x + 1), Q.even(x)) is True
assert ask(Q.complex(2*x + 1), Q.odd(x)) is True
# x**2
assert ask(Q.complex(x**2), Q.complex(x)) is True
assert ask(Q.complex(x**2), Q.real(x)) is True
assert ask(Q.complex(x**2), Q.positive(x)) is True
assert ask(Q.complex(x**2), Q.rational(x)) is True
assert ask(Q.complex(x**2), Q.irrational(x)) is True
assert ask(Q.complex(x**2), Q.imaginary(x)) is True
assert ask(Q.complex(x**2), Q.integer(x)) is True
assert ask(Q.complex(x**2), Q.even(x)) is True
assert ask(Q.complex(x**2), Q.odd(x)) is True
# 2**x
assert ask(Q.complex(2**x), Q.complex(x)) is True
assert ask(Q.complex(2**x), Q.real(x)) is True
assert ask(Q.complex(2**x), Q.positive(x)) is True
assert ask(Q.complex(2**x), Q.rational(x)) is True
assert ask(Q.complex(2**x), Q.irrational(x)) is True
assert ask(Q.complex(2**x), Q.imaginary(x)) is True
assert ask(Q.complex(2**x), Q.integer(x)) is True
assert ask(Q.complex(2**x), Q.even(x)) is True
assert ask(Q.complex(2**x), Q.odd(x)) is True
assert ask(Q.complex(x**y), Q.complex(x) & Q.complex(y)) is True
# trigonometric expressions
assert ask(Q.complex(sin(x))) is True
assert ask(Q.complex(sin(2*x + 1))) is True
assert ask(Q.complex(cos(x))) is True
assert ask(Q.complex(cos(2*x + 1))) is True
# exponential
assert ask(Q.complex(exp(x))) is True
assert ask(Q.complex(exp(x))) is True
# Q.complexes
assert ask(Q.complex(Abs(x))) is True
assert ask(Q.complex(re(x))) is True
assert ask(Q.complex(im(x))) is True
def test_even():
assert ask(Q.even(x)) is None
assert ask(Q.even(x), Q.integer(x)) is None
assert ask(Q.even(x), ~Q.integer(x)) is False
assert ask(Q.even(x), Q.rational(x)) is None
assert ask(Q.even(x), Q.positive(x)) is None
assert ask(Q.even(2*x)) is None
assert ask(Q.even(2*x), Q.integer(x)) is True
assert ask(Q.even(2*x), Q.even(x)) is True
assert ask(Q.even(2*x), Q.irrational(x)) is False
assert ask(Q.even(2*x), Q.odd(x)) is True
assert ask(Q.even(2*x), ~Q.integer(x)) is None
assert ask(Q.even(3*x), Q.integer(x)) is None
assert ask(Q.even(3*x), Q.even(x)) is True
assert ask(Q.even(3*x), Q.odd(x)) is False
assert ask(Q.even(x + 1), Q.odd(x)) is True
assert ask(Q.even(x + 1), Q.even(x)) is False
assert ask(Q.even(x + 2), Q.odd(x)) is False
assert ask(Q.even(x + 2), Q.even(x)) is True
assert ask(Q.even(7 - x), Q.odd(x)) is True
assert ask(Q.even(7 + x), Q.odd(x)) is True
assert ask(Q.even(x + y), Q.odd(x) & Q.odd(y)) is True
assert ask(Q.even(x + y), Q.odd(x) & Q.even(y)) is False
assert ask(Q.even(x + y), Q.even(x) & Q.even(y)) is True
assert ask(Q.even(2*x + 1), Q.integer(x)) is False
assert ask(Q.even(2*x*y), Q.rational(x) & Q.rational(x)) is None
assert ask(Q.even(2*x*y), Q.irrational(x) & Q.irrational(x)) is None
assert ask(Q.even(x + y + z), Q.odd(x) & Q.odd(y) & Q.even(z)) is True
assert ask(Q.even(x + y + z + t),
Q.odd(x) & Q.odd(y) & Q.even(z) & Q.integer(t)) is None
assert ask(Q.even(Abs(x)), Q.even(x)) is True
assert ask(Q.even(Abs(x)), ~Q.even(x)) is None
assert ask(Q.even(re(x)), Q.even(x)) is True
assert ask(Q.even(re(x)), ~Q.even(x)) is None
assert ask(Q.even(im(x)), Q.even(x)) is True
assert ask(Q.even(im(x)), Q.real(x)) is True
assert ask(Q.even((-1)**n), Q.integer(n)) is False
assert ask(Q.even(k**2), Q.even(k)) is True
assert ask(Q.even(n**2), Q.odd(n)) is False
assert ask(Q.even(2**k), Q.even(k)) is None
assert ask(Q.even(x**2)) is None
assert ask(Q.even(k**m), Q.even(k) & Q.integer(m) & ~Q.negative(m)) is None
assert ask(Q.even(n**m), Q.odd(n) & Q.integer(m) & ~Q.negative(m)) is False
assert ask(Q.even(k**p), Q.even(k) & Q.integer(p) & Q.positive(p)) is True
assert ask(Q.even(n**p), Q.odd(n) & Q.integer(p) & Q.positive(p)) is False
assert ask(Q.even(m**k), Q.even(k) & Q.integer(m) & ~Q.negative(m)) is None
assert ask(Q.even(p**k), Q.even(k) & Q.integer(p) & Q.positive(p)) is None
assert ask(Q.even(m**n), Q.odd(n) & Q.integer(m) & ~Q.negative(m)) is None
assert ask(Q.even(p**n), Q.odd(n) & Q.integer(p) & Q.positive(p)) is None
assert ask(Q.even(k**x), Q.even(k)) is None
assert ask(Q.even(n**x), Q.odd(n)) is None
assert ask(Q.even(x*y), Q.integer(x) & Q.integer(y)) is None
assert ask(Q.even(x*x), Q.integer(x)) is None
assert ask(Q.even(x*(x + y)), Q.integer(x) & Q.odd(y)) is True
assert ask(Q.even(x*(x + y)), Q.integer(x) & Q.even(y)) is None
@XFAIL
def test_evenness_in_ternary_integer_product_with_odd():
# Tests that oddness inference is independent of term ordering.
# Term ordering at the point of testing depends on SymPy's symbol order, so
# we try to force a different order by modifying symbol names.
assert ask(Q.even(x*y*(y + z)), Q.integer(x) & Q.integer(y) & Q.odd(z)) is True
assert ask(Q.even(y*x*(x + z)), Q.integer(x) & Q.integer(y) & Q.odd(z)) is True
def test_evenness_in_ternary_integer_product_with_even():
assert ask(Q.even(x*y*(y + z)), Q.integer(x) & Q.integer(y) & Q.even(z)) is None
def test_extended_real():
assert ask(Q.extended_real(x), Q.positive(x)) is True
assert ask(Q.extended_real(-x), Q.positive(x)) is True
assert ask(Q.extended_real(-x), Q.negative(x)) is True
assert ask(Q.extended_real(x + S.Infinity), Q.real(x)) is True
def test_rational():
assert ask(Q.rational(x), Q.integer(x)) is True
assert ask(Q.rational(x), Q.irrational(x)) is False
assert ask(Q.rational(x), Q.real(x)) is None
assert ask(Q.rational(x), Q.positive(x)) is None
assert ask(Q.rational(x), Q.negative(x)) is None
assert ask(Q.rational(x), Q.nonzero(x)) is None
assert ask(Q.rational(x), ~Q.algebraic(x)) is False
assert ask(Q.rational(2*x), Q.rational(x)) is True
assert ask(Q.rational(2*x), Q.integer(x)) is True
assert ask(Q.rational(2*x), Q.even(x)) is True
assert ask(Q.rational(2*x), Q.odd(x)) is True
assert ask(Q.rational(2*x), Q.irrational(x)) is False
assert ask(Q.rational(x/2), Q.rational(x)) is True
assert ask(Q.rational(x/2), Q.integer(x)) is True
assert ask(Q.rational(x/2), Q.even(x)) is True
assert ask(Q.rational(x/2), Q.odd(x)) is True
assert ask(Q.rational(x/2), Q.irrational(x)) is False
assert ask(Q.rational(1/x), Q.rational(x)) is True
assert ask(Q.rational(1/x), Q.integer(x)) is True
assert ask(Q.rational(1/x), Q.even(x)) is True
assert ask(Q.rational(1/x), Q.odd(x)) is True
assert ask(Q.rational(1/x), Q.irrational(x)) is False
assert ask(Q.rational(2/x), Q.rational(x)) is True
assert ask(Q.rational(2/x), Q.integer(x)) is True
assert ask(Q.rational(2/x), Q.even(x)) is True
assert ask(Q.rational(2/x), Q.odd(x)) is True
assert ask(Q.rational(2/x), Q.irrational(x)) is False
assert ask(Q.rational(x), ~Q.algebraic(x)) is False
# with multiple symbols
assert ask(Q.rational(x*y), Q.irrational(x) & Q.irrational(y)) is None
assert ask(Q.rational(y/x), Q.rational(x) & Q.rational(y)) is True
assert ask(Q.rational(y/x), Q.integer(x) & Q.rational(y)) is True
assert ask(Q.rational(y/x), Q.even(x) & Q.rational(y)) is True
assert ask(Q.rational(y/x), Q.odd(x) & Q.rational(y)) is True
assert ask(Q.rational(y/x), Q.irrational(x) & Q.rational(y)) is False
for f in [exp, sin, tan, asin, atan, cos]:
assert ask(Q.rational(f(7))) is False
assert ask(Q.rational(f(7, evaluate=False))) is False
assert ask(Q.rational(f(0, evaluate=False))) is True
assert ask(Q.rational(f(x)), Q.rational(x)) is None
assert ask(Q.rational(f(x)), Q.rational(x) & Q.nonzero(x)) is False
for g in [log, acos]:
assert ask(Q.rational(g(7))) is False
assert ask(Q.rational(g(7, evaluate=False))) is False
assert ask(Q.rational(g(1, evaluate=False))) is True
assert ask(Q.rational(g(x)), Q.rational(x)) is None
assert ask(Q.rational(g(x)), Q.rational(x) & Q.nonzero(x - 1)) is False
for h in [cot, acot]:
assert ask(Q.rational(h(7))) is False
assert ask(Q.rational(h(7, evaluate=False))) is False
assert ask(Q.rational(h(x)), Q.rational(x)) is False
def test_hermitian():
assert ask(Q.hermitian(x)) is None
assert ask(Q.hermitian(x), Q.antihermitian(x)) is False
assert ask(Q.hermitian(x), Q.imaginary(x)) is False
assert ask(Q.hermitian(x), Q.prime(x)) is True
assert ask(Q.hermitian(x), Q.real(x)) is True
assert ask(Q.hermitian(x + 1), Q.antihermitian(x)) is False
assert ask(Q.hermitian(x + 1), Q.complex(x)) is None
assert ask(Q.hermitian(x + 1), Q.hermitian(x)) is True
assert ask(Q.hermitian(x + 1), Q.imaginary(x)) is False
assert ask(Q.hermitian(x + 1), Q.real(x)) is True
assert ask(Q.hermitian(x + I), Q.antihermitian(x)) is None
assert ask(Q.hermitian(x + I), Q.complex(x)) is None
assert ask(Q.hermitian(x + I), Q.hermitian(x)) is False
assert ask(Q.hermitian(x + I), Q.imaginary(x)) is None
assert ask(Q.hermitian(x + I), Q.real(x)) is False
assert ask(
Q.hermitian(x + y), Q.antihermitian(x) & Q.antihermitian(y)) is None
assert ask(Q.hermitian(x + y), Q.antihermitian(x) & Q.complex(y)) is None
assert ask(
Q.hermitian(x + y), Q.antihermitian(x) & Q.hermitian(y)) is False
assert ask(Q.hermitian(x + y), Q.antihermitian(x) & Q.imaginary(y)) is None
assert ask(Q.hermitian(x + y), Q.antihermitian(x) & Q.real(y)) is False
assert ask(Q.hermitian(x + y), Q.hermitian(x) & Q.complex(y)) is None
assert ask(Q.hermitian(x + y), Q.hermitian(x) & Q.hermitian(y)) is True
assert ask(Q.hermitian(x + y), Q.hermitian(x) & Q.imaginary(y)) is False
assert ask(Q.hermitian(x + y), Q.hermitian(x) & Q.real(y)) is True
assert ask(Q.hermitian(x + y), Q.imaginary(x) & Q.complex(y)) is None
assert ask(Q.hermitian(x + y), Q.imaginary(x) & Q.imaginary(y)) is None
assert ask(Q.hermitian(x + y), Q.imaginary(x) & Q.real(y)) is False
assert ask(Q.hermitian(x + y), Q.real(x) & Q.complex(y)) is None
assert ask(Q.hermitian(x + y), Q.real(x) & Q.real(y)) is True
assert ask(Q.hermitian(I*x), Q.antihermitian(x)) is True
assert ask(Q.hermitian(I*x), Q.complex(x)) is None
assert ask(Q.hermitian(I*x), Q.hermitian(x)) is False
assert ask(Q.hermitian(I*x), Q.imaginary(x)) is True
assert ask(Q.hermitian(I*x), Q.real(x)) is False
assert ask(Q.hermitian(x*y), Q.hermitian(x) & Q.real(y)) is True
assert ask(
Q.hermitian(x + y + z), Q.real(x) & Q.real(y) & Q.real(z)) is True
assert ask(Q.hermitian(x + y + z),
Q.real(x) & Q.real(y) & Q.imaginary(z)) is False
assert ask(Q.hermitian(x + y + z),
Q.real(x) & Q.imaginary(y) & Q.imaginary(z)) is None
assert ask(Q.hermitian(x + y + z),
Q.imaginary(x) & Q.imaginary(y) & Q.imaginary(z)) is None
assert ask(Q.antihermitian(x)) is None
assert ask(Q.antihermitian(x), Q.real(x)) is False
assert ask(Q.antihermitian(x), Q.prime(x)) is False
assert ask(Q.antihermitian(x + 1), Q.antihermitian(x)) is False
assert ask(Q.antihermitian(x + 1), Q.complex(x)) is None
assert ask(Q.antihermitian(x + 1), Q.hermitian(x)) is None
assert ask(Q.antihermitian(x + 1), Q.imaginary(x)) is False
assert ask(Q.antihermitian(x + 1), Q.real(x)) is False
assert ask(Q.antihermitian(x + I), Q.antihermitian(x)) is True
assert ask(Q.antihermitian(x + I), Q.complex(x)) is None
assert ask(Q.antihermitian(x + I), Q.hermitian(x)) is False
assert ask(Q.antihermitian(x + I), Q.imaginary(x)) is True
assert ask(Q.antihermitian(x + I), Q.real(x)) is False
assert ask(
Q.antihermitian(x + y), Q.antihermitian(x) & Q.antihermitian(y)
) is True
assert ask(
Q.antihermitian(x + y), Q.antihermitian(x) & Q.complex(y)) is None
assert ask(
Q.antihermitian(x + y), Q.antihermitian(x) & Q.hermitian(y)) is False
assert ask(
Q.antihermitian(x + y), Q.antihermitian(x) & Q.imaginary(y)) is True
assert ask(Q.antihermitian(x + y), Q.antihermitian(x) & Q.real(y)
) is False
assert ask(Q.antihermitian(x + y), Q.hermitian(x) & Q.complex(y)) is None
assert ask(Q.antihermitian(x + y), Q.hermitian(x) & Q.hermitian(y)
) is None
assert ask(
Q.antihermitian(x + y), Q.hermitian(x) & Q.imaginary(y)) is False
assert ask(Q.antihermitian(x + y), Q.hermitian(x) & Q.real(y)) is None
assert ask(Q.antihermitian(x + y), Q.imaginary(x) & Q.complex(y)) is None
assert ask(Q.antihermitian(x + y), Q.imaginary(x) & Q.imaginary(y)) is True
assert ask(Q.antihermitian(x + y), Q.imaginary(x) & Q.real(y)) is False
assert ask(Q.antihermitian(x + y), Q.real(x) & Q.complex(y)) is None
assert ask(Q.antihermitian(x + y), Q.real(x) & Q.real(y)) is False
assert ask(Q.antihermitian(I*x), Q.real(x)) is True
assert ask(Q.antihermitian(I*x), Q.antihermitian(x)) is False
assert ask(Q.antihermitian(I*x), Q.complex(x)) is None
assert ask(Q.antihermitian(x*y), Q.antihermitian(x) & Q.real(y)) is True
assert ask(Q.antihermitian(x + y + z),
Q.real(x) & Q.real(y) & Q.real(z)) is False
assert ask(Q.antihermitian(x + y + z),
Q.real(x) & Q.real(y) & Q.imaginary(z)) is None
assert ask(Q.antihermitian(x + y + z),
Q.real(x) & Q.imaginary(y) & Q.imaginary(z)) is False
assert ask(Q.antihermitian(x + y + z),
Q.imaginary(x) & Q.imaginary(y) & Q.imaginary(z)) is True
def test_imaginary():
assert ask(Q.imaginary(x)) is None
assert ask(Q.imaginary(x), Q.real(x)) is False
assert ask(Q.imaginary(x), Q.prime(x)) is False
assert ask(Q.imaginary(x + 1), Q.real(x)) is False
assert ask(Q.imaginary(x + 1), Q.imaginary(x)) is False
assert ask(Q.imaginary(x + I), Q.real(x)) is False
assert ask(Q.imaginary(x + I), Q.imaginary(x)) is True
assert ask(Q.imaginary(x + y), Q.imaginary(x) & Q.imaginary(y)) is True
assert ask(Q.imaginary(x + y), Q.real(x) & Q.real(y)) is False
assert ask(Q.imaginary(x + y), Q.imaginary(x) & Q.real(y)) is False
assert ask(Q.imaginary(x + y), Q.complex(x) & Q.real(y)) is None
assert ask(
Q.imaginary(x + y + z), Q.real(x) & Q.real(y) & Q.real(z)) is False
assert ask(Q.imaginary(x + y + z),
Q.real(x) & Q.real(y) & Q.imaginary(z)) is None
assert ask(Q.imaginary(x + y + z),
Q.real(x) & Q.imaginary(y) & Q.imaginary(z)) is False
assert ask(Q.imaginary(I*x), Q.real(x)) is True
assert ask(Q.imaginary(I*x), Q.imaginary(x)) is False
assert ask(Q.imaginary(I*x), Q.complex(x)) is None
assert ask(Q.imaginary(x*y), Q.imaginary(x) & Q.real(y)) is True
assert ask(Q.imaginary(x*y), Q.real(x) & Q.real(y)) is False
assert ask(Q.imaginary(I**x), Q.negative(x)) is None
assert ask(Q.imaginary(I**x), Q.positive(x)) is None
assert ask(Q.imaginary(I**x), Q.even(x)) is False
assert ask(Q.imaginary(I**x), Q.odd(x)) is True
assert ask(Q.imaginary(I**x), Q.imaginary(x)) is False
assert ask(Q.imaginary((2*I)**x), Q.imaginary(x)) is False
assert ask(Q.imaginary(x**0), Q.imaginary(x)) is False
assert ask(Q.imaginary(x**y), Q.imaginary(x) & Q.imaginary(y)) is None
assert ask(Q.imaginary(x**y), Q.imaginary(x) & Q.real(y)) is None
assert ask(Q.imaginary(x**y), Q.real(x) & Q.imaginary(y)) is None
assert ask(Q.imaginary(x**y), Q.real(x) & Q.real(y)) is None
assert ask(Q.imaginary(x**y), Q.imaginary(x) & Q.integer(y)) is None
assert ask(Q.imaginary(x**y), Q.imaginary(y) & Q.integer(x)) is None
assert ask(Q.imaginary(x**y), Q.imaginary(x) & Q.odd(y)) is True
assert ask(Q.imaginary(x**y), Q.imaginary(x) & Q.rational(y)) is None
assert ask(Q.imaginary(x**y), Q.imaginary(x) & Q.even(y)) is False
assert ask(Q.imaginary(x**y), Q.real(x) & Q.integer(y)) is False
assert ask(Q.imaginary(x**y), Q.positive(x) & Q.real(y)) is False
assert ask(Q.imaginary(x**y), Q.negative(x) & Q.real(y)) is None
assert ask(Q.imaginary(x**y), Q.negative(x) & Q.real(y) & ~Q.rational(y)) is False
assert ask(Q.imaginary(x**y), Q.integer(x) & Q.imaginary(y)) is None
assert ask(Q.imaginary(x**y), Q.negative(x) & Q.rational(y) & Q.integer(2*y)) is True
assert ask(Q.imaginary(x**y), Q.negative(x) & Q.rational(y) & ~Q.integer(2*y)) is False
assert ask(Q.imaginary(x**y), Q.negative(x) & Q.rational(y)) is None
assert ask(Q.imaginary(x**y), Q.real(x) & Q.rational(y) & ~Q.integer(2*y)) is False
assert ask(Q.imaginary(x**y), Q.real(x) & Q.rational(y) & Q.integer(2*y)) is None
# logarithm
assert ask(Q.imaginary(log(I))) is True
assert ask(Q.imaginary(log(2*I))) is False
assert ask(Q.imaginary(log(I + 1))) is False
assert ask(Q.imaginary(log(x)), Q.complex(x)) is None
assert ask(Q.imaginary(log(x)), Q.imaginary(x)) is None
assert ask(Q.imaginary(log(x)), Q.positive(x)) is False
assert ask(Q.imaginary(log(exp(x))), Q.complex(x)) is None
assert ask(Q.imaginary(log(exp(x))), Q.imaginary(x)) is None # zoo/I/a+I*b
assert ask(Q.imaginary(log(exp(I)))) is True
# exponential
assert ask(Q.imaginary(exp(x)**x), Q.imaginary(x)) is False
eq = Pow(exp(pi*I*x, evaluate=False), x, evaluate=False)
assert ask(Q.imaginary(eq), Q.even(x)) is False
eq = Pow(exp(pi*I*x/2, evaluate=False), x, evaluate=False)
assert ask(Q.imaginary(eq), Q.odd(x)) is True
assert ask(Q.imaginary(exp(3*I*pi*x)**x), Q.integer(x)) is False
assert ask(Q.imaginary(exp(2*pi*I, evaluate=False))) is False
assert ask(Q.imaginary(exp(pi*I/2, evaluate=False))) is True
# issue 7886
assert ask(Q.imaginary(Pow(x, S.One/4)), Q.real(x) & Q.negative(x)) is False
def test_integer():
assert ask(Q.integer(x)) is None
assert ask(Q.integer(x), Q.integer(x)) is True
assert ask(Q.integer(x), ~Q.integer(x)) is False
assert ask(Q.integer(x), ~Q.real(x)) is False
assert ask(Q.integer(x), ~Q.positive(x)) is None
assert ask(Q.integer(x), Q.even(x) | Q.odd(x)) is True
assert ask(Q.integer(2*x), Q.integer(x)) is True
assert ask(Q.integer(2*x), Q.even(x)) is True
assert ask(Q.integer(2*x), Q.prime(x)) is True
assert ask(Q.integer(2*x), Q.rational(x)) is None
assert ask(Q.integer(2*x), Q.real(x)) is None
assert ask(Q.integer(sqrt(2)*x), Q.integer(x)) is False
assert ask(Q.integer(sqrt(2)*x), Q.irrational(x)) is None
assert ask(Q.integer(x/2), Q.odd(x)) is False
assert ask(Q.integer(x/2), Q.even(x)) is True
assert ask(Q.integer(x/3), Q.odd(x)) is None
assert ask(Q.integer(x/3), Q.even(x)) is None
def test_negative():
assert ask(Q.negative(x), Q.negative(x)) is True
assert ask(Q.negative(x), Q.positive(x)) is False
assert ask(Q.negative(x), ~Q.real(x)) is False
assert ask(Q.negative(x), Q.prime(x)) is False
assert ask(Q.negative(x), ~Q.prime(x)) is None
assert ask(Q.negative(-x), Q.positive(x)) is True
assert ask(Q.negative(-x), ~Q.positive(x)) is None
assert ask(Q.negative(-x), Q.negative(x)) is False
assert ask(Q.negative(-x), Q.positive(x)) is True
assert ask(Q.negative(x - 1), Q.negative(x)) is True
assert ask(Q.negative(x + y)) is None
assert ask(Q.negative(x + y), Q.negative(x)) is None
assert ask(Q.negative(x + y), Q.negative(x) & Q.negative(y)) is True
assert ask(Q.negative(x + y), Q.negative(x) & Q.nonpositive(y)) is True
assert ask(Q.negative(2 + I)) is False
# although this could be False, it is representative of expressions
# that don't evaluate to a zero with precision
assert ask(Q.negative(cos(I)**2 + sin(I)**2 - 1)) is None
assert ask(Q.negative(-I + I*(cos(2)**2 + sin(2)**2))) is None
assert ask(Q.negative(x**2)) is None
assert ask(Q.negative(x**2), Q.real(x)) is False
assert ask(Q.negative(x**1.4), Q.real(x)) is None
assert ask(Q.negative(x**I), Q.positive(x)) is None
assert ask(Q.negative(x*y)) is None
assert ask(Q.negative(x*y), Q.positive(x) & Q.positive(y)) is False
assert ask(Q.negative(x*y), Q.positive(x) & Q.negative(y)) is True
assert ask(Q.negative(x*y), Q.complex(x) & Q.complex(y)) is None
assert ask(Q.negative(x**y)) is None
assert ask(Q.negative(x**y), Q.negative(x) & Q.even(y)) is False
assert ask(Q.negative(x**y), Q.negative(x) & Q.odd(y)) is True
assert ask(Q.negative(x**y), Q.positive(x) & Q.integer(y)) is False
assert ask(Q.negative(Abs(x))) is False
def test_nonzero():
assert ask(Q.nonzero(x)) is None
assert ask(Q.nonzero(x), Q.real(x)) is None
assert ask(Q.nonzero(x), Q.positive(x)) is True
assert ask(Q.nonzero(x), Q.negative(x)) is True
assert ask(Q.nonzero(x), Q.negative(x) | Q.positive(x)) is True
assert ask(Q.nonzero(x + y)) is None
assert ask(Q.nonzero(x + y), Q.positive(x) & Q.positive(y)) is True
assert ask(Q.nonzero(x + y), Q.positive(x) & Q.negative(y)) is None
assert ask(Q.nonzero(x + y), Q.negative(x) & Q.negative(y)) is True
assert ask(Q.nonzero(2*x)) is None
assert ask(Q.nonzero(2*x), Q.positive(x)) is True
assert ask(Q.nonzero(2*x), Q.negative(x)) is True
assert ask(Q.nonzero(x*y), Q.nonzero(x)) is None
assert ask(Q.nonzero(x*y), Q.nonzero(x) & Q.nonzero(y)) is True
assert ask(Q.nonzero(x**y), Q.nonzero(x)) is True
assert ask(Q.nonzero(Abs(x))) is None
assert ask(Q.nonzero(Abs(x)), Q.nonzero(x)) is True
assert ask(Q.nonzero(log(exp(2*I)))) is False
# although this could be False, it is representative of expressions
# that don't evaluate to a zero with precision
assert ask(Q.nonzero(cos(1)**2 + sin(1)**2 - 1)) is None
def test_zero():
assert ask(Q.zero(x)) is None
assert ask(Q.zero(x), Q.real(x)) is None
assert ask(Q.zero(x), Q.positive(x)) is False
assert ask(Q.zero(x), Q.negative(x)) is False
assert ask(Q.zero(x), Q.negative(x) | Q.positive(x)) is False
assert ask(Q.zero(x), Q.nonnegative(x) & Q.nonpositive(x)) is True
assert ask(Q.zero(x + y)) is None
assert ask(Q.zero(x + y), Q.positive(x) & Q.positive(y)) is False
assert ask(Q.zero(x + y), Q.positive(x) & Q.negative(y)) is None
assert ask(Q.zero(x + y), Q.negative(x) & Q.negative(y)) is False
assert ask(Q.zero(2*x)) is None
assert ask(Q.zero(2*x), Q.positive(x)) is False
assert ask(Q.zero(2*x), Q.negative(x)) is False
assert ask(Q.zero(x*y), Q.nonzero(x)) is None
assert ask(Q.zero(Abs(x))) is None
assert ask(Q.zero(Abs(x)), Q.zero(x)) is True
assert ask(Q.integer(x), Q.zero(x)) is True
assert ask(Q.even(x), Q.zero(x)) is True
assert ask(Q.odd(x), Q.zero(x)) is False
assert ask(Q.zero(x), Q.even(x)) is None
assert ask(Q.zero(x), Q.odd(x)) is False
assert ask(Q.zero(x) | Q.zero(y), Q.zero(x*y)) is True
def test_odd():
assert ask(Q.odd(x)) is None
assert ask(Q.odd(x), Q.odd(x)) is True
assert ask(Q.odd(x), Q.integer(x)) is None
assert ask(Q.odd(x), ~Q.integer(x)) is False
assert ask(Q.odd(x), Q.rational(x)) is None
assert ask(Q.odd(x), Q.positive(x)) is None
assert ask(Q.odd(-x), Q.odd(x)) is True
assert ask(Q.odd(2*x)) is None
assert ask(Q.odd(2*x), Q.integer(x)) is False
assert ask(Q.odd(2*x), Q.odd(x)) is False
assert ask(Q.odd(2*x), Q.irrational(x)) is False
assert ask(Q.odd(2*x), ~Q.integer(x)) is None
assert ask(Q.odd(3*x), Q.integer(x)) is None
assert ask(Q.odd(x/3), Q.odd(x)) is None
assert ask(Q.odd(x/3), Q.even(x)) is None
assert ask(Q.odd(x + 1), Q.even(x)) is True
assert ask(Q.odd(x + 2), Q.even(x)) is False
assert ask(Q.odd(x + 2), Q.odd(x)) is True
assert ask(Q.odd(3 - x), Q.odd(x)) is False
assert ask(Q.odd(3 - x), Q.even(x)) is True
assert ask(Q.odd(3 + x), Q.odd(x)) is False
assert ask(Q.odd(3 + x), Q.even(x)) is True
assert ask(Q.odd(x + y), Q.odd(x) & Q.odd(y)) is False
assert ask(Q.odd(x + y), Q.odd(x) & Q.even(y)) is True
assert ask(Q.odd(x - y), Q.even(x) & Q.odd(y)) is True
assert ask(Q.odd(x - y), Q.odd(x) & Q.odd(y)) is False
assert ask(Q.odd(x + y + z), Q.odd(x) & Q.odd(y) & Q.even(z)) is False
assert ask(Q.odd(x + y + z + t),
Q.odd(x) & Q.odd(y) & Q.even(z) & Q.integer(t)) is None
assert ask(Q.odd(2*x + 1), Q.integer(x)) is True
assert ask(Q.odd(2*x + y), Q.integer(x) & Q.odd(y)) is True
assert ask(Q.odd(2*x + y), Q.integer(x) & Q.even(y)) is False
assert ask(Q.odd(2*x + y), Q.integer(x) & Q.integer(y)) is None
assert ask(Q.odd(x*y), Q.odd(x) & Q.even(y)) is False
assert ask(Q.odd(x*y), Q.odd(x) & Q.odd(y)) is True
assert ask(Q.odd(2*x*y), Q.rational(x) & Q.rational(x)) is None
assert ask(Q.odd(2*x*y), Q.irrational(x) & Q.irrational(x)) is None
assert ask(Q.odd(Abs(x)), Q.odd(x)) is True
assert ask(Q.odd((-1)**n), Q.integer(n)) is True
assert ask(Q.odd(k**2), Q.even(k)) is False
assert ask(Q.odd(n**2), Q.odd(n)) is True
assert ask(Q.odd(3**k), Q.even(k)) is None
assert ask(Q.odd(k**m), Q.even(k) & Q.integer(m) & ~Q.negative(m)) is None
assert ask(Q.odd(n**m), Q.odd(n) & Q.integer(m) & ~Q.negative(m)) is True
assert ask(Q.odd(k**p), Q.even(k) & Q.integer(p) & Q.positive(p)) is False
assert ask(Q.odd(n**p), Q.odd(n) & Q.integer(p) & Q.positive(p)) is True
assert ask(Q.odd(m**k), Q.even(k) & Q.integer(m) & ~Q.negative(m)) is None
assert ask(Q.odd(p**k), Q.even(k) & Q.integer(p) & Q.positive(p)) is None
assert ask(Q.odd(m**n), Q.odd(n) & Q.integer(m) & ~Q.negative(m)) is None
assert ask(Q.odd(p**n), Q.odd(n) & Q.integer(p) & Q.positive(p)) is None
assert ask(Q.odd(k**x), Q.even(k)) is None
assert ask(Q.odd(n**x), Q.odd(n)) is None
assert ask(Q.odd(x*y), Q.integer(x) & Q.integer(y)) is None
assert ask(Q.odd(x*x), Q.integer(x)) is None
assert ask(Q.odd(x*(x + y)), Q.integer(x) & Q.odd(y)) is False
assert ask(Q.odd(x*(x + y)), Q.integer(x) & Q.even(y)) is None
@XFAIL
def test_oddness_in_ternary_integer_product_with_odd():
# Tests that oddness inference is independent of term ordering.
# Term ordering at the point of testing depends on SymPy's symbol order, so
# we try to force a different order by modifying symbol names.
assert ask(Q.odd(x*y*(y + z)), Q.integer(x) & Q.integer(y) & Q.odd(z)) is False
assert ask(Q.odd(y*x*(x + z)), Q.integer(x) & Q.integer(y) & Q.odd(z)) is False
def test_oddness_in_ternary_integer_product_with_even():
assert ask(Q.odd(x*y*(y + z)), Q.integer(x) & Q.integer(y) & Q.even(z)) is None
def test_prime():
assert ask(Q.prime(x), Q.prime(x)) is True
assert ask(Q.prime(x), ~Q.prime(x)) is False
assert ask(Q.prime(x), Q.integer(x)) is None
assert ask(Q.prime(x), ~Q.integer(x)) is False
assert ask(Q.prime(2*x), Q.integer(x)) is False
assert ask(Q.prime(x*y)) is None
assert ask(Q.prime(x*y), Q.prime(x)) is None
assert ask(Q.prime(x*y), Q.integer(x) & Q.integer(y)) is False
assert ask(Q.prime(x**2), Q.integer(x)) is False
assert ask(Q.prime(x**2), Q.prime(x)) is False
assert ask(Q.prime(x**y), Q.integer(x) & Q.integer(y)) is False
def test_positive():
assert ask(Q.positive(x), Q.positive(x)) is True
assert ask(Q.positive(x), Q.negative(x)) is False
assert ask(Q.positive(x), Q.nonzero(x)) is None
assert ask(Q.positive(-x), Q.positive(x)) is False
assert ask(Q.positive(-x), Q.negative(x)) is True
assert ask(Q.positive(x + y), Q.positive(x) & Q.positive(y)) is True
assert ask(Q.positive(x + y), Q.positive(x) & Q.nonnegative(y)) is True
assert ask(Q.positive(x + y), Q.positive(x) & Q.negative(y)) is None
assert ask(Q.positive(x + y), Q.positive(x) & Q.imaginary(y)) is False
assert ask(Q.positive(2*x), Q.positive(x)) is True
assumptions = Q.positive(x) & Q.negative(y) & Q.negative(z) & Q.positive(w)
assert ask(Q.positive(x*y*z)) is None
assert ask(Q.positive(x*y*z), assumptions) is True
assert ask(Q.positive(-x*y*z), assumptions) is False
assert ask(Q.positive(x**I), Q.positive(x)) is None
assert ask(Q.positive(x**2), Q.positive(x)) is True
assert ask(Q.positive(x**2), Q.negative(x)) is True
assert ask(Q.positive(x**3), Q.negative(x)) is False
assert ask(Q.positive(1/(1 + x**2)), Q.real(x)) is True
assert ask(Q.positive(2**I)) is False
assert ask(Q.positive(2 + I)) is False
# although this could be False, it is representative of expressions
# that don't evaluate to a zero with precision
assert ask(Q.positive(cos(I)**2 + sin(I)**2 - 1)) is None
assert ask(Q.positive(-I + I*(cos(2)**2 + sin(2)**2))) is None
#exponential
assert ask(Q.positive(exp(x)), Q.real(x)) is True
assert ask(~Q.negative(exp(x)), Q.real(x)) is True
assert ask(Q.positive(x + exp(x)), Q.real(x)) is None
# logarithm
assert ask(Q.positive(log(x)), Q.imaginary(x)) is False
assert ask(Q.positive(log(x)), Q.negative(x)) is False
assert ask(Q.positive(log(x)), Q.positive(x)) is None
assert ask(Q.positive(log(x + 2)), Q.positive(x)) is True
# factorial
assert ask(Q.positive(factorial(x)), Q.integer(x) & Q.positive(x))
assert ask(Q.positive(factorial(x)), Q.integer(x)) is None
#absolute value
assert ask(Q.positive(Abs(x))) is None # Abs(0) = 0
assert ask(Q.positive(Abs(x)), Q.positive(x)) is True
def test_nonpositive():
assert ask(Q.nonpositive(-1))
assert ask(Q.nonpositive(0))
assert ask(Q.nonpositive(1)) is False
assert ask(~Q.positive(x), Q.nonpositive(x))
assert ask(Q.nonpositive(x), Q.positive(x)) is False
assert ask(Q.nonpositive(sqrt(-1))) is False
assert ask(Q.nonpositive(x), Q.imaginary(x)) is False
def test_nonnegative():
assert ask(Q.nonnegative(-1)) is False
assert ask(Q.nonnegative(0))
assert ask(Q.nonnegative(1))
assert ask(~Q.negative(x), Q.nonnegative(x))
assert ask(Q.nonnegative(x), Q.negative(x)) is False
assert ask(Q.nonnegative(sqrt(-1))) is False
assert ask(Q.nonnegative(x), Q.imaginary(x)) is False
def test_real():
assert ask(Q.real(x)) is None
assert ask(Q.real(x), Q.real(x)) is True
assert ask(Q.real(x), Q.nonzero(x)) is True
assert ask(Q.real(x), Q.positive(x)) is True
assert ask(Q.real(x), Q.negative(x)) is True
assert ask(Q.real(x), Q.integer(x)) is True
assert ask(Q.real(x), Q.even(x)) is True
assert ask(Q.real(x), Q.prime(x)) is True
assert ask(Q.real(x/sqrt(2)), Q.real(x)) is True
assert ask(Q.real(x/sqrt(-2)), Q.real(x)) is False
assert ask(Q.real(x + 1), Q.real(x)) is True
assert ask(Q.real(x + I), Q.real(x)) is False
assert ask(Q.real(x + I), Q.complex(x)) is None
assert ask(Q.real(2*x), Q.real(x)) is True
assert ask(Q.real(I*x), Q.real(x)) is False
assert ask(Q.real(I*x), Q.imaginary(x)) is True
assert ask(Q.real(I*x), Q.complex(x)) is None
assert ask(Q.real(x**2), Q.real(x)) is True
assert ask(Q.real(sqrt(x)), Q.negative(x)) is False
assert ask(Q.real(x**y), Q.real(x) & Q.integer(y)) is True
assert ask(Q.real(x**y), Q.real(x) & Q.real(y)) is None
assert ask(Q.real(x**y), Q.positive(x) & Q.real(y)) is True
assert ask(Q.real(x**y), Q.imaginary(x) & Q.imaginary(y)) is None # I**I or (2*I)**I
assert ask(Q.real(x**y), Q.imaginary(x) & Q.real(y)) is None # I**1 or I**0
assert ask(Q.real(x**y), Q.real(x) & Q.imaginary(y)) is None # could be exp(2*pi*I) or 2**I
assert ask(Q.real(x**0), Q.imaginary(x)) is True
assert ask(Q.real(x**y), Q.real(x) & Q.integer(y)) is True
assert ask(Q.real(x**y), Q.positive(x) & Q.real(y)) is True
assert ask(Q.real(x**y), Q.real(x) & Q.rational(y)) is None
assert ask(Q.real(x**y), Q.imaginary(x) & Q.integer(y)) is None
assert ask(Q.real(x**y), Q.imaginary(x) & Q.odd(y)) is False
assert ask(Q.real(x**y), Q.imaginary(x) & Q.even(y)) is True
assert ask(Q.real(x**(y/z)), Q.real(x) & Q.real(y/z) & Q.rational(y/z) & Q.even(z) & Q.positive(x)) is True
assert ask(Q.real(x**(y/z)), Q.real(x) & Q.rational(y/z) & Q.even(z) & Q.negative(x)) is False
assert ask(Q.real(x**(y/z)), Q.real(x) & Q.integer(y/z)) is True
assert ask(Q.real(x**(y/z)), Q.real(x) & Q.real(y/z) & Q.positive(x)) is True
assert ask(Q.real(x**(y/z)), Q.real(x) & Q.real(y/z) & Q.negative(x)) is False
assert ask(Q.real((-I)**i), Q.imaginary(i)) is True
assert ask(Q.real(I**i), Q.imaginary(i)) is True
assert ask(Q.real(i**i), Q.imaginary(i)) is None # i might be 2*I
assert ask(Q.real(x**i), Q.imaginary(i)) is None # x could be 0
assert ask(Q.real(x**(I*pi/log(x))), Q.real(x)) is True
# trigonometric functions
assert ask(Q.real(sin(x))) is None
assert ask(Q.real(cos(x))) is None
assert ask(Q.real(sin(x)), Q.real(x)) is True
assert ask(Q.real(cos(x)), Q.real(x)) is True
# exponential function
assert ask(Q.real(exp(x))) is None
assert ask(Q.real(exp(x)), Q.real(x)) is True
assert ask(Q.real(x + exp(x)), Q.real(x)) is True
assert ask(Q.real(exp(2*pi*I, evaluate=False))) is True
assert ask(Q.real(exp(pi*I/2, evaluate=False))) is False
# logarithm
assert ask(Q.real(log(I))) is False
assert ask(Q.real(log(2*I))) is False
assert ask(Q.real(log(I + 1))) is False
assert ask(Q.real(log(x)), Q.complex(x)) is None
assert ask(Q.real(log(x)), Q.imaginary(x)) is False
assert ask(Q.real(log(exp(x))), Q.imaginary(x)) is False # exp(x) will be 0 or a + I*b
assert ask(Q.real(log(exp(x))), Q.complex(x)) is None
eq = Pow(exp(2*pi*I*x, evaluate=False), x, evaluate=False)
assert ask(Q.real(eq), Q.integer(x)) is True
assert ask(Q.real(exp(x)**x), Q.imaginary(x)) is True
assert ask(Q.real(exp(x)**x), Q.complex(x)) is None
# Q.complexes
assert ask(Q.real(re(x))) is True
assert ask(Q.real(im(x))) is True
def test_algebraic():
assert ask(Q.algebraic(x)) is None
assert ask(Q.algebraic(I)) is True
assert ask(Q.algebraic(2*I)) is True
assert ask(Q.algebraic(I/3)) is True
assert ask(Q.algebraic(sqrt(7))) is True
assert ask(Q.algebraic(2*sqrt(7))) is True
assert ask(Q.algebraic(sqrt(7)/3)) is True
assert ask(Q.algebraic(I*sqrt(3))) is True
assert ask(Q.algebraic(sqrt(1 + I*sqrt(3)))) is True
assert ask(Q.algebraic((1 + I*sqrt(3)**(S(17)/31)))) is True
assert ask(Q.algebraic((1 + I*sqrt(3)**(S(17)/pi)))) is False
for f in [exp, sin, tan, asin, atan, cos]:
assert ask(Q.algebraic(f(7))) is False
assert ask(Q.algebraic(f(7, evaluate=False))) is False
assert ask(Q.algebraic(f(0, evaluate=False))) is True
assert ask(Q.algebraic(f(x)), Q.algebraic(x)) is None
assert ask(Q.algebraic(f(x)), Q.algebraic(x) & Q.nonzero(x)) is False
for g in [log, acos]:
assert ask(Q.algebraic(g(7))) is False
assert ask(Q.algebraic(g(7, evaluate=False))) is False
assert ask(Q.algebraic(g(1, evaluate=False))) is True
assert ask(Q.algebraic(g(x)), Q.algebraic(x)) is None
assert ask(Q.algebraic(g(x)), Q.algebraic(x) & Q.nonzero(x - 1)) is False
for h in [cot, acot]:
assert ask(Q.algebraic(h(7))) is False
assert ask(Q.algebraic(h(7, evaluate=False))) is False
assert ask(Q.algebraic(h(x)), Q.algebraic(x)) is False
assert ask(Q.algebraic(sqrt(sin(7)))) is False
assert ask(Q.algebraic(sqrt(y + I*sqrt(7)))) is None
assert ask(Q.algebraic(2.47)) is True
assert ask(Q.algebraic(x), Q.transcendental(x)) is False
assert ask(Q.transcendental(x), Q.algebraic(x)) is False
def test_global():
"""Test ask with global assumptions"""
assert ask(Q.integer(x)) is None
global_assumptions.add(Q.integer(x))
assert ask(Q.integer(x)) is True
global_assumptions.clear()
assert ask(Q.integer(x)) is None
def test_custom_context():
"""Test ask with custom assumptions context"""
assert ask(Q.integer(x)) is None
local_context = AssumptionsContext()
local_context.add(Q.integer(x))
assert ask(Q.integer(x), context=local_context) is True
assert ask(Q.integer(x)) is None
def test_functions_in_assumptions():
assert ask(Q.negative(x), Q.real(x) >> Q.positive(x)) is False
assert ask(Q.negative(x), Equivalent(Q.real(x), Q.positive(x))) is False
assert ask(Q.negative(x), Xor(Q.real(x), Q.negative(x))) is False
def test_composite_ask():
assert ask(Q.negative(x) & Q.integer(x),
assumptions=Q.real(x) >> Q.positive(x)) is False
def test_composite_proposition():
assert ask(True) is True
assert ask(False) is False
assert ask(~Q.negative(x), Q.positive(x)) is True
assert ask(~Q.real(x), Q.commutative(x)) is None
assert ask(Q.negative(x) & Q.integer(x), Q.positive(x)) is False
assert ask(Q.negative(x) & Q.integer(x)) is None
assert ask(Q.real(x) | Q.integer(x), Q.positive(x)) is True
assert ask(Q.real(x) | Q.integer(x)) is None
assert ask(Q.real(x) >> Q.positive(x), Q.negative(x)) is False
assert ask(Implies(
Q.real(x), Q.positive(x), evaluate=False), Q.negative(x)) is False
assert ask(Implies(Q.real(x), Q.positive(x), evaluate=False)) is None
assert ask(Equivalent(Q.integer(x), Q.even(x)), Q.even(x)) is True
assert ask(Equivalent(Q.integer(x), Q.even(x))) is None
assert ask(Equivalent(Q.positive(x), Q.integer(x)), Q.integer(x)) is None
assert ask(Q.real(x) | Q.integer(x), Q.real(x) | Q.integer(x)) is True
def test_tautology():
assert ask(Q.real(x) | ~Q.real(x)) is True
assert ask(Q.real(x) & ~Q.real(x)) is False
def test_composite_assumptions():
assert ask(Q.real(x), Q.real(x) & Q.real(y)) is True
assert ask(Q.positive(x), Q.positive(x) | Q.positive(y)) is None
assert ask(Q.positive(x), Q.real(x) >> Q.positive(y)) is None
assert ask(Q.real(x), ~(Q.real(x) >> Q.real(y))) is True
def test_incompatible_resolutors():
class Prime2AskHandler(AskHandler):
@staticmethod
def Number(expr, assumptions):
return True
register_handler('prime', Prime2AskHandler)
raises(ValueError, lambda: ask(Q.prime(4)))
remove_handler('prime', Prime2AskHandler)
class InconclusiveHandler(AskHandler):
@staticmethod
def Number(expr, assumptions):
return None
register_handler('prime', InconclusiveHandler)
assert ask(Q.prime(3)) is True
def test_key_extensibility():
"""test that you can add keys to the ask system at runtime"""
# make sure the key is not defined
raises(AttributeError, lambda: ask(Q.my_key(x)))
class MyAskHandler(AskHandler):
@staticmethod
def Symbol(expr, assumptions):
return True
register_handler('my_key', MyAskHandler)
assert ask(Q.my_key(x)) is True
assert ask(Q.my_key(x + 1)) is None
remove_handler('my_key', MyAskHandler)
del Q.my_key
raises(AttributeError, lambda: ask(Q.my_key(x)))
def test_type_extensibility():
"""test that new types can be added to the ask system at runtime
We create a custom type MyType, and override ask Q.prime=True with handler
MyAskHandler for this type
TODO: test incompatible resolutors
"""
from sympy.core import Basic
class MyType(Basic):
pass
class MyAskHandler(AskHandler):
@staticmethod
def MyType(expr, assumptions):
return True
a = MyType()
register_handler(Q.prime, MyAskHandler)
assert ask(Q.prime(a)) is True
def test_single_fact_lookup():
known_facts = And(Implies(Q.integer, Q.rational),
Implies(Q.rational, Q.real),
Implies(Q.real, Q.complex))
known_facts_keys = set([Q.integer, Q.rational, Q.real, Q.complex])
known_facts_cnf = to_cnf(known_facts)
mapping = single_fact_lookup(known_facts_keys, known_facts_cnf)
assert mapping[Q.rational] == set([Q.real, Q.rational, Q.complex])
def test_compute_known_facts():
known_facts = And(Implies(Q.integer, Q.rational),
Implies(Q.rational, Q.real),
Implies(Q.real, Q.complex))
known_facts_keys = set([Q.integer, Q.rational, Q.real, Q.complex])
s = compute_known_facts(known_facts, known_facts_keys)
@slow
def test_known_facts_consistent():
""""Test that ask_generated.py is up-to-date"""
from sympy.assumptions.ask import get_known_facts, get_known_facts_keys
from os.path import abspath, dirname, join
filename = join(dirname(dirname(abspath(__file__))), 'ask_generated.py')
with open(filename, 'r') as f:
assert f.read() == \
compute_known_facts(get_known_facts(), get_known_facts_keys())
def test_Add_queries():
assert ask(Q.prime(12345678901234567890 + (cos(1)**2 + sin(1)**2))) is True
assert ask(Q.even(Add(S(2), S(2), evaluate=0))) is True
assert ask(Q.prime(Add(S(2), S(2), evaluate=0))) is False
assert ask(Q.integer(Add(S(2), S(2), evaluate=0))) is True
def test_positive_assuming():
with assuming(Q.positive(x + 1)):
assert not ask(Q.positive(x))
def test_issue_5421():
raises(TypeError, lambda: ask(pi/log(x), Q.real))
def test_issue_3906():
raises(TypeError, lambda: ask(Q.positive))
def test_issue_5833():
assert ask(Q.positive(log(x)**2), Q.positive(x)) is None
assert ask(~Q.negative(log(x)**2), Q.positive(x)) is True
def test_issue_6732():
raises(ValueError, lambda: ask(Q.positive(x), Q.positive(x) & Q.negative(x)))
raises(ValueError, lambda: ask(Q.negative(x), Q.positive(x) & Q.negative(x)))
def test_issue_7246():
assert ask(Q.positive(atan(p)), Q.positive(p)) is True
assert ask(Q.positive(atan(p)), Q.negative(p)) is False
assert ask(Q.positive(atan(p)), Q.zero(p)) is False
assert ask(Q.positive(atan(x))) is None
assert ask(Q.positive(asin(p)), Q.positive(p)) is None
assert ask(Q.positive(asin(p)), Q.zero(p)) is None
assert ask(Q.positive(asin(Rational(1, 7)))) is True
assert ask(Q.positive(asin(x)), Q.positive(x) & Q.nonpositive(x - 1)) is True
assert ask(Q.positive(asin(x)), Q.negative(x) & Q.nonnegative(x + 1)) is False
assert ask(Q.positive(acos(p)), Q.positive(p)) is None
assert ask(Q.positive(acos(Rational(1, 7)))) is True
assert ask(Q.positive(acos(x)), Q.nonnegative(x + 1) & Q.nonpositive(x - 1)) is True
assert ask(Q.positive(acos(x)), Q.nonnegative(x - 1)) is None
assert ask(Q.positive(acot(x)), Q.positive(x)) is True
assert ask(Q.positive(acot(x)), Q.real(x)) is True
assert ask(Q.positive(acot(x)), Q.imaginary(x)) is False
assert ask(Q.positive(acot(x))) is None
@XFAIL
def test_issue_7246_failing():
#Move this test to test_issue_7246 once
#the new assumptions module is improved.
assert ask(Q.positive(acos(x)), Q.zero(x)) is True
def test_deprecated_Q_bounded():
with raises(SymPyDeprecationWarning):
Q.bounded
def test_deprecated_Q_infinity():
with raises(SymPyDeprecationWarning):
Q.infinity
def test_check_old_assumption():
x = symbols('x', real=True)
assert ask(Q.real(x)) is True
assert ask(Q.imaginary(x)) is False
assert ask(Q.complex(x)) is True
x = symbols('x', imaginary=True)
assert ask(Q.real(x)) is False
assert ask(Q.imaginary(x)) is True
assert ask(Q.complex(x)) is True
x = symbols('x', complex=True)
assert ask(Q.real(x)) is None
assert ask(Q.complex(x)) is True
x = symbols('x', positive=True)
assert ask(Q.positive(x)) is True
assert ask(Q.negative(x)) is False
assert ask(Q.real(x)) is True
x = symbols('x', commutative=False)
assert ask(Q.commutative(x)) is False
x = symbols('x', negative=True)
assert ask(Q.positive(x)) is False
assert ask(Q.negative(x)) is True
x = symbols('x', nonnegative=True)
assert ask(Q.negative(x)) is False
assert ask(Q.positive(x)) is None
assert ask(Q.zero(x)) is None
x = symbols('x', finite=True)
assert ask(Q.finite(x)) is True
x = symbols('x', prime=True)
assert ask(Q.prime(x)) is True
assert ask(Q.composite(x)) is False
x = symbols('x', composite=True)
assert ask(Q.prime(x)) is False
assert ask(Q.composite(x)) is True
x = symbols('x', even=True)
assert ask(Q.even(x)) is True
assert ask(Q.odd(x)) is False
x = symbols('x', odd=True)
assert ask(Q.even(x)) is False
assert ask(Q.odd(x)) is True
x = symbols('x', nonzero=True)
assert ask(Q.nonzero(x)) is True
assert ask(Q.zero(x)) is False
x = symbols('x', zero=True)
assert ask(Q.zero(x)) is True
x = symbols('x', integer=True)
assert ask(Q.integer(x)) is True
x = symbols('x', rational=True)
assert ask(Q.rational(x)) is True
assert ask(Q.irrational(x)) is False
x = symbols('x', irrational=True)
assert ask(Q.irrational(x)) is True
assert ask(Q.rational(x)) is False
def test_issue_9636():
assert ask(Q.integer(1.0)) is False
assert ask(Q.prime(3.0)) is False
assert ask(Q.composite(4.0)) is False
assert ask(Q.even(2.0)) is False
assert ask(Q.odd(3.0)) is False
@XFAIL
def test_autosimp_fails():
# Unxfail after fixing issue #9807
assert ask(Q.imaginary(0**I)) is False
assert ask(Q.imaginary(0**(-I))) is False
assert ask(Q.real(0**I)) is False
assert ask(Q.real(0**(-I))) is False
| 41.421338
| 111
| 0.620056
|
4a16938e8edfed0e79226a6f214abf171b03016c
| 1,995
|
py
|
Python
|
bot/youtube.py
|
zd4y/discordbot
|
57432b4e577241058e02c609ca36eae4b52911dc
|
[
"MIT"
] | null | null | null |
bot/youtube.py
|
zd4y/discordbot
|
57432b4e577241058e02c609ca36eae4b52911dc
|
[
"MIT"
] | null | null | null |
bot/youtube.py
|
zd4y/discordbot
|
57432b4e577241058e02c609ca36eae4b52911dc
|
[
"MIT"
] | null | null | null |
import asyncio
from .utils import fetch
from .config import Settings
from aiohttp import ClientSession
base_url = 'https://www.googleapis.com/youtube/v3/{}'
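# The placeholder is filled with the API resource name below, e.g.
# base_url.format('search') -> 'https://www.googleapis.com/youtube/v3/search'.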
async def fetch_youtube(session: ClientSession, resource: str, part: str, **kwargs) -> dict:
params = {
'part': part,
'key': Settings.YOUTUBE_API_KEY,
**kwargs
}
url = base_url.format(resource)
json = await fetch(session, url, params=params)
return json
async def search_channel(session: ClientSession, query: str) -> str:
json = await fetch_youtube(session, 'search', 'id', type='channel', maxResults=1, q=query)
result = json['items'][0]['id']['channelId']
return result
async def fetch_channel(session: ClientSession, channel_id: str, snippet=False):
part = 'contentDetails'
if snippet:
part += ',snippet'
json = await fetch_youtube(session, 'channels', part, id=channel_id)
channel = json['items'][0]
print(channel)
return channel
async def get_channel_playlists(session: ClientSession, channel_id: str):
channel = await fetch_channel(session, channel_id)
channel_playlist = channel['contentDetails']['relatedPlaylists']['uploads']
return channel_playlist
async def get_playlist_videos(session: ClientSession, playlist_id: str, max_results: int = 5) -> list:
json = await fetch_youtube(session, 'playlistItems', 'snippet,contentDetails', playlistId=playlist_id, maxResults=max_results)
videos = json['items'][::-1]
return videos
async def get_playlist_videos_id(session: ClientSession, playlist_id: str):
videos = await get_playlist_videos(session, playlist_id)
return map(lambda video: video['snippet']['resourceId']['videoId'], videos)
async def main():
async with ClientSession() as session:
channel_id = await search_channel(session, 'test')
channel = await fetch_channel(session, channel_id)
print(channel)
if __name__ == '__main__':
asyncio.run(main())
| 28.5
| 130
| 0.701754
|
4a16939300c91a4bc6c2820b7421c58ecf1d1105
| 2,728
|
py
|
Python
|
queue_/queue_from_stack.py
|
mgaranina/programming-2021-19fpl
|
e3f9b95167cd3fd793dcd891a222127c2f1c9abf
|
[
"MIT"
] | null | null | null |
queue_/queue_from_stack.py
|
mgaranina/programming-2021-19fpl
|
e3f9b95167cd3fd793dcd891a222127c2f1c9abf
|
[
"MIT"
] | null | null | null |
queue_/queue_from_stack.py
|
mgaranina/programming-2021-19fpl
|
e3f9b95167cd3fd793dcd891a222127c2f1c9abf
|
[
"MIT"
] | null | null | null |
"""
Programming for linguists
Implementation of the data structure "Queue" from stack
"""
from queue_.queue_ import FullQueue, InfiniteQueue
from stack.stack import Stack
# pylint: disable=invalid-name
class Queue_:
"""
Queue Data Structure from stack
"""
def __init__(self, data: Stack = None, max_size: int = 0):
# Avoid a shared mutable default argument: create a fresh Stack per instance.
data = data if data is not None else Stack()
if max_size and len(data.data) > max_size:
data.data = data.data[:max_size]
self.in_stack = data
self.out_stack = Stack()
self.maximum_size = max_size
def put(self, element):
"""
Add the element ‘element’ at the end of queue_
:param element: element to add to queue_
"""
if not self.maximum_size or not self.full():
self.in_stack.push(element)
else:
raise FullQueue
def get(self):
"""
Remove and return an item from queue_
"""
if self.in_stack.empty():
raise IndexError
while self.in_stack.size() != 1:
self.out_stack.push(self.in_stack.top())
self.in_stack.pop()
top_element = self.in_stack.top()
self.in_stack.pop()
while not self.out_stack.empty():
self.in_stack.push(self.out_stack.top())
self.out_stack.pop()
return top_element
def empty(self) -> bool:
"""
Return whether queue_ is empty or not
:return: True if queue_ does not contain any elements.
False if the queue_ contains elements
"""
return self.in_stack.empty()
def size(self) -> int:
"""
Return the number of elements in queue_
:return: Number of elements in queue_
"""
return self.in_stack.size()
def top(self):
"""
Return the element on the top of queue_
:return: the element that is on the top of queue_
"""
if self.in_stack.empty():
raise IndexError
while self.in_stack.size() != 1:
self.out_stack.push(self.in_stack.top())
self.in_stack.pop()
top_element = self.in_stack.top()
while not self.out_stack.empty():
self.in_stack.push(self.out_stack.top())
self.out_stack.pop()
return top_element
def full(self):
"""
Return whether queue_ created from stack is full or not
:return: True if queue_ created from stack is full.
False if the queue_ created from stack is not full
"""
if not self.maximum_size:
raise InfiniteQueue
return self.size() == self.maximum_size
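# Minimal usage sketch (illustrative only; assumes this module and the Stack
# class above are importable as shown, and is not part of the original file):
#
#   q = Queue_(max_size=2)
#   q.put('a')
#   q.put('b')     # a third put before a get would raise FullQueue
#   q.get()        # -> 'a', FIFO order despite the stack-based storage
#   q.put('c')     # allowed again after the get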
| 26.745098
| 67
| 0.578446
|
4a169453e35d3d586acca4930dd23d93d6d21daf
| 6,858
|
py
|
Python
|
homeassistant/components/influxdb/sensor.py
|
lkollar/home-assistant
|
f4f7c25f744c0678b12acb2cc905894cca9f46ef
|
[
"Apache-2.0"
] | 5
|
2020-09-17T10:48:51.000Z
|
2021-11-22T00:08:17.000Z
|
homeassistant/components/influxdb/sensor.py
|
lkollar/home-assistant
|
f4f7c25f744c0678b12acb2cc905894cca9f46ef
|
[
"Apache-2.0"
] | 9
|
2022-01-27T06:32:10.000Z
|
2022-03-31T07:07:51.000Z
|
homeassistant/components/influxdb/sensor.py
|
lkollar/home-assistant
|
f4f7c25f744c0678b12acb2cc905894cca9f46ef
|
[
"Apache-2.0"
] | 6
|
2019-12-01T19:06:52.000Z
|
2020-09-17T00:57:06.000Z
|
"""InfluxDB component which allows you to get data from an Influx database."""
from datetime import timedelta
import logging
from influxdb import InfluxDBClient, exceptions
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
STATE_UNKNOWN,
)
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from . import CONF_DB_NAME
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8086
DEFAULT_DATABASE = "home_assistant"
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DEFAULT_GROUP_FUNCTION = "mean"
DEFAULT_FIELD = "value"
CONF_QUERIES = "queries"
CONF_GROUP_FUNCTION = "group_function"
CONF_FIELD = "field"
CONF_MEASUREMENT_NAME = "measurement"
CONF_WHERE = "where"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
_QUERY_SCHEME = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_MEASUREMENT_NAME): cv.string,
vol.Required(CONF_WHERE): cv.template,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
vol.Optional(CONF_GROUP_FUNCTION, default=DEFAULT_GROUP_FUNCTION): cv.string,
vol.Optional(CONF_FIELD, default=DEFAULT_FIELD): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_QUERIES): [_QUERY_SCHEME],
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
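# Illustrative YAML configuration for this platform (keys mirror the schema
# above; host, query name, measurement and where clause are placeholder
# values, not taken from the original documentation):
#
#   sensor:
#     - platform: influxdb
#       host: localhost
#       queries:
#         - name: mean humidity
#           unit_of_measurement: '%'
#           group_function: mean
#           field: value
#           measurement: humidity
#           where: 'time > now() - 1h'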
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the InfluxDB component."""
influx_conf = {
"host": config[CONF_HOST],
"password": config.get(CONF_PASSWORD),
"port": config.get(CONF_PORT),
"ssl": config.get(CONF_SSL),
"username": config.get(CONF_USERNAME),
"verify_ssl": config.get(CONF_VERIFY_SSL),
}
dev = []
for query in config.get(CONF_QUERIES):
sensor = InfluxSensor(hass, influx_conf, query)
if sensor.connected:
dev.append(sensor)
add_entities(dev, True)
class InfluxSensor(Entity):
"""Implementation of a Influxdb sensor."""
def __init__(self, hass, influx_conf, query):
"""Initialize the sensor."""
self._name = query.get(CONF_NAME)
self._unit_of_measurement = query.get(CONF_UNIT_OF_MEASUREMENT)
value_template = query.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
self._value_template = value_template
self._value_template.hass = hass
else:
self._value_template = None
database = query.get(CONF_DB_NAME)
self._state = None
self._hass = hass
where_clause = query.get(CONF_WHERE)
where_clause.hass = hass
influx = InfluxDBClient(
host=influx_conf["host"],
port=influx_conf["port"],
username=influx_conf["username"],
password=influx_conf["password"],
database=database,
ssl=influx_conf["ssl"],
verify_ssl=influx_conf["verify_ssl"],
)
try:
influx.query("SHOW SERIES LIMIT 1;")
self.connected = True
self.data = InfluxSensorData(
influx,
query.get(CONF_GROUP_FUNCTION),
query.get(CONF_FIELD),
query.get(CONF_MEASUREMENT_NAME),
where_clause,
)
except exceptions.InfluxDBClientError as exc:
_LOGGER.error(
"Database host is not accessible due to '%s', please"
" check your entries in the configuration file and"
" that the database exists and is READ/WRITE",
exc,
)
self.connected = False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def should_poll(self):
"""Return the polling state."""
return True
def update(self):
"""Get the latest data from Influxdb and updates the states."""
self.data.update()
value = self.data.value
if value is None:
value = STATE_UNKNOWN
if self._value_template is not None:
value = self._value_template.render_with_possible_json_value(
str(value), STATE_UNKNOWN
)
self._state = value
class InfluxSensorData:
"""Class for handling the data retrieval."""
def __init__(self, influx, group, field, measurement, where):
"""Initialize the data object."""
self.influx = influx
self.group = group
self.field = field
self.measurement = measurement
self.where = where
self.value = None
self.query = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data with a shell command."""
_LOGGER.info("Rendering where: %s", self.where)
try:
where_clause = self.where.render()
except TemplateError as ex:
_LOGGER.error("Could not render where clause template: %s", ex)
return
self.query = "select {}({}) as value from {} where {}".format(
self.group, self.field, self.measurement, where_clause
)
_LOGGER.info("Running query: %s", self.query)
points = list(self.influx.query(self.query).get_points())
if not points:
_LOGGER.warning(
"Query returned no points, sensor state set to UNKNOWN: %s", self.query,
)
self.value = None
else:
if len(points) > 1:
_LOGGER.warning(
"Query returned multiple points, only first one shown: %s",
self.query,
)
self.value = points[0].get("value")
| 31.458716
| 88
| 0.631234
|
4a1694929dc9a5a1d78bce2f99be04de0f1ba8e5
| 5,752
|
py
|
Python
|
fluid/PaddleNLP/text_matching_on_quora/quora_question_pairs.py
|
awesome-archive/models-1
|
747f947eff1511d3291d86dcd27736214dfbf2bc
|
[
"Apache-2.0"
] | 1
|
2019-03-07T12:50:05.000Z
|
2019-03-07T12:50:05.000Z
|
fluid/PaddleNLP/text_matching_on_quora/quora_question_pairs.py
|
ThinkPeace/models
|
5d25e00c94943e50e64780a244136f88f13c0a88
|
[
"Apache-2.0"
] | null | null | null |
fluid/PaddleNLP/text_matching_on_quora/quora_question_pairs.py
|
ThinkPeace/models
|
5d25e00c94943e50e64780a244136f88f13c0a88
|
[
"Apache-2.0"
] | 1
|
2019-04-02T07:57:06.000Z
|
2019-04-02T07:57:06.000Z
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
import paddle.dataset.common
import collections
import tarfile
import re
import string
import random
import os, sys
import nltk
from os.path import expanduser
__all__ = ['word_dict', 'train', 'dev', 'test']
URL = "https://drive.google.com/file/d/0B0PlTAo--BnaQWlsZl9FZ3l1c28/view"
DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')
DATA_DIR = "Quora_question_pair_partition"
QUORA_TRAIN_FILE_NAME = os.path.join(DATA_HOME, DATA_DIR, 'train.tsv')
QUORA_DEV_FILE_NAME = os.path.join(DATA_HOME, DATA_DIR, 'dev.tsv')
QUORA_TEST_FILE_NAME = os.path.join(DATA_HOME, DATA_DIR, 'test.tsv')
# punctuation or nltk or space
TOKENIZE_METHOD='space'
COLUMN_COUNT = 4
def tokenize(s):
if sys.version_info <= (3, 0): # for python2
s = s.decode('utf-8')
if TOKENIZE_METHOD == "nltk":
return nltk.tokenize.word_tokenize(s)
elif TOKENIZE_METHOD == "punctuation":
return s.translate({ord(char): None for char in string.punctuation}).lower().split()
elif TOKENIZE_METHOD == "space":
return s.split()
else:
raise RuntimeError("Invalid tokenize method")
def maybe_open(file_name):
if not os.path.isfile(file_name):
msg = "file not exist: %s\nPlease download the dataset firstly from: %s\n\n" % (file_name, URL) + \
("# The finally dataset dir should be like\n\n"
"$HOME/.cache/paddle/dataset\n"
" |- Quora_question_pair_partition\n"
" |- train.tsv\n"
" |- test.tsv\n"
" |- dev.tsv\n"
" |- readme.txt\n"
" |- wordvec.txt\n")
raise RuntimeError(msg)
if sys.version_info <= (3, 0): # for python2
return open(file_name, 'r')
else:
return open(file_name, 'r', encoding="utf-8")
def tokenized_question_pairs(file_name):
"""
"""
with maybe_open(file_name) as f:
questions = {}
lines = f.readlines()
for line in lines:
info = line.strip().split('\t')
if len(info) != COLUMN_COUNT:
# formatting error
continue
(label, question1, question2, id) = info
question1 = tokenize(question1)
question2 = tokenize(question2)
yield question1, question2, int(label)
def tokenized_questions(file_name):
"""
"""
with maybe_open(file_name) as f:
lines = f.readlines()
for line in lines:
info = line.strip().split('\t')
if len(info) != COLUMN_COUNT:
# formatting error
continue
(label, question1, question2, id) = info
yield tokenize(question1)
yield tokenize(question2)
def build_dict(file_name, cutoff):
"""
Build a word dictionary from the corpus. Keys of the dictionary are words,
and values are zero-based IDs of these words.
"""
word_freq = collections.defaultdict(int)
for doc in tokenized_questions(file_name):
for word in doc:
word_freq[word] += 1
word_freq = filter(lambda x: x[1] > cutoff, word_freq.items())
dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*dictionary))
word_idx = dict(zip(words, range(len(words))))
word_idx['<unk>'] = len(words)
word_idx['<pad>'] = len(words) + 1
return word_idx
def reader_creator(file_name, word_idx):
UNK_ID = word_idx['<unk>']
def reader():
for (q1, q2, label) in tokenized_question_pairs(file_name):
q1_ids = [word_idx.get(w, UNK_ID) for w in q1]
q2_ids = [word_idx.get(w, UNK_ID) for w in q2]
if q1_ids != [] and q2_ids != []: # [] is not allowed in fluid
assert(label in [0, 1])
yield q1_ids, q2_ids, label
return reader
def train(word_idx):
"""
Quora training set creator.
It returns a reader creator, each sample in the reader is two zero-based ID
list and label in [0, 1].
:param word_idx: word dictionary
:type word_idx: dict
:return: Training reader creator
:rtype: callable
"""
return reader_creator(QUORA_TRAIN_FILE_NAME, word_idx)
def dev(word_idx):
"""
Quora develop set creator.
It returns a reader creator, each sample in the reader is two zero-based ID
list and label in [0, 1].
:param word_idx: word dictionary
:type word_idx: dict
:return: develop reader creator
:rtype: callable
"""
return reader_creator(QUORA_DEV_FILE_NAME, word_idx)
def test(word_idx):
"""
Quora test set creator.
It returns a reader creator, each sample in the reader is two zero-based ID
list and label in [0, 1].
:param word_idx: word dictionary
:type word_idx: dict
:return: Test reader creator
:rtype: callable
"""
return reader_creator(QUORA_TEST_FILE_NAME, word_idx)
def word_dict():
"""
Build a word dictionary from the corpus.
:return: Word dictionary
:rtype: dict
"""
return build_dict(file_name=QUORA_TRAIN_FILE_NAME, cutoff=4)
| 29.19797
| 107
| 0.631085
|
4a16966add4aa0356c31409a94ea8d01231f5ec8
| 208
|
py
|
Python
|
w05/e15.py
|
Luccifer/PythonCoruseraHSE
|
653d6a24325789342f0d033717ba548dc6e90483
|
[
"Unlicense"
] | 1
|
2020-01-12T12:55:07.000Z
|
2020-01-12T12:55:07.000Z
|
w05/e15.py
|
Luccifer/PythonCourseraHSE
|
653d6a24325789342f0d033717ba548dc6e90483
|
[
"Unlicense"
] | null | null | null |
w05/e15.py
|
Luccifer/PythonCourseraHSE
|
653d6a24325789342f0d033717ba548dc6e90483
|
[
"Unlicense"
] | null | null | null |
# Количество положительных
def number_of_positive(nums):
i = 0
for num in nums:
if num > 0:
i += 1
print(i)
nums = list(map(int, input().split()))
number_of_positive(nums)
| 14.857143
| 38
| 0.586538
|
4a16975aed7cca7304da6dd733ece2218dd21774
| 5,538
|
py
|
Python
|
opencell/scripts/export_annotations.py
|
czbiohub/opencell-portal-pub
|
2b056924e4f55490b16349ff0dcf3e719ab516c7
|
[
"Unlicense"
] | 2
|
2022-02-17T16:24:49.000Z
|
2022-03-02T22:26:48.000Z
|
opencell/scripts/export_annotations.py
|
czbiohub/opencell-portal-pub
|
2b056924e4f55490b16349ff0dcf3e719ab516c7
|
[
"Unlicense"
] | null | null | null |
opencell/scripts/export_annotations.py
|
czbiohub/opencell-portal-pub
|
2b056924e4f55490b16349ff0dcf3e719ab516c7
|
[
"Unlicense"
] | null | null | null |
import argparse
import datetime
import os
import pandas as pd
import re
import sqlalchemy as sa
from opencell.database import utils
def timestamp():
return datetime.datetime.now().strftime('%Y-%m-%d')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dst-dir', dest='dst_dir')
parser.add_argument('--credentials', dest='credentials', required=False)
for dest in ['raw', 'clean', 'public_only']:
flag = '--%s' % dest.replace('_', '-')
parser.add_argument(flag, dest=dest, action='store_true', required=False)
parser.set_defaults(**{dest: False})
args = parser.parse_args()
return args
def export_raw_annotations(engine, dst_dir):
'''
A CSV of all annotations (categories and comments) for all cell lines
'''
annotations = pd.read_sql(
'''
select * from cell_line_metadata
left join cell_line_annotation
on cell_line_metadata.cell_line_id = cell_line_annotation.cell_line_id
order by (plate_id, well_id);
''',
engine
)
filepath = os.path.join(dst_dir, '%s_all-raw-cell-line-annotations.csv' % timestamp())
annotations.to_csv(filepath)
def export_clean_graded_annotations(engine, dst_dir, public_only=False):
'''
Export the graded annotations as CSV files in two different versions:
one with a row for every annotation and columns for annotation_name and annotation_grade,
and one with a row for every target and columns for each of the three grades,
with all annotations for each target and grade concatenated into semicolon-separated strings.
public_only : whether to filter out both unpublished cell lines and private annotations
'''
# hard-coded list of graded localization categories that are not published/public
non_public_categories = [
'small_aggregates',
'nucleus_cytoplasm_variation',
'textured',
'diffuse',
'cilia',
'nucleolar_ring',
'nuclear',
'peri_golgi',
]
annotations = pd.read_sql(
'''
select line.id as cell_line_id, cd.target_name, cd.ensg_id, ant.categories
from cell_line line
left join crispr_design cd on cd.id = line.crispr_design_id
left join cell_line_annotation ant on ant.cell_line_id = line.id;
''',
engine
)
annotations = (
annotations.dropna(axis=0, how='any')
.explode('categories')
.rename(columns={'categories': 'category'})
)
# parse the grade from the category
annotations['grade'] = annotations.category.apply(
lambda s: s[-1] if not pd.isna(s) and s[-1] in ['1', '2', '3'] else 'none'
)
# remove the grade from the category names
annotations['category'] = annotations.category.apply(
lambda s: re.sub('_[1,2,3]$', '', s) if not pd.isna(s) else None
)
all_pr_cell_line_ids = (
annotations.loc[annotations.category == 'publication_ready'].cell_line_id.values
)
public_lines_mask = annotations.cell_line_id.isin(all_pr_cell_line_ids)
public_ants_mask = ~annotations.category.isin(non_public_categories)
graded_mask = annotations.grade.isin(['1', '2', '3'])
# the graded public annotations for all public targets
annotations = annotations.loc[
(public_lines_mask & public_ants_mask & graded_mask) if public_only else graded_mask
]
annotations = annotations[['ensg_id', 'target_name', 'category', 'grade']]
# move the annotation names into grade-specific columns
# and concatenate them into semicolon-separated strings
grades = {
('annotations_grade_%s' % grade): (
annotations.loc[annotations.grade == grade]
.groupby('ensg_id')
.category
.agg(lambda s: ';'.join(s))
)
for grade in ['3', '2', '1']
}
grades['target_name'] = annotations.groupby('ensg_id').first().target_name
annotations_by_grade = pd.DataFrame(data=grades)
annotations_by_grade.index.name = 'ensg_id'
# reorder the columns and write the CSV
column_order = [
'ensg_id',
'target_name',
'annotations_grade_3',
'annotations_grade_2',
'annotations_grade_1'
]
kind = 'public' if public_only else 'all'
(
annotations_by_grade.reset_index()[column_order]
.sort_values(by='target_name')
.to_csv(
os.path.join(dst_dir, '%s-%s-annotations-by-grade.csv' % (timestamp(), kind)),
index=False
)
)
# finally, save the annotations as they are (one row for each annotation)
(
annotations
.rename(
columns={'category': 'annotation_name', 'grade': 'annotation_grade'}
)
.sort_values(
by=['target_name', 'annotation_grade', 'annotation_name'],
ascending=[True, False, True]
)
.to_csv(
os.path.join(dst_dir, '%s-%s-annotations-flat.csv' % (timestamp(), kind)),
index=False
)
)
if __name__ == '__main__':
args = parse_args()
url = utils.url_from_credentials(args.credentials)
engine = sa.create_engine(url)
if args.raw:
export_raw_annotations(engine, args.dst_dir)
print('Raw annotations exported to %s' % args.dst_dir)
if args.clean:
export_clean_graded_annotations(engine, args.dst_dir, public_only=args.public_only)
print('Clean annotations exported to %s' % args.dst_dir)
| 31.645714
| 97
| 0.644276
|
4a1698ade7b95251f3fa2601f7b754ca4aed5580
| 1,978
|
py
|
Python
|
apps/accounts/tests/test_authentication.py
|
SCiO-systems/qcat
|
8c2b8e07650bc2049420fa6de758fba7e50c2f28
|
[
"Apache-2.0"
] | null | null | null |
apps/accounts/tests/test_authentication.py
|
SCiO-systems/qcat
|
8c2b8e07650bc2049420fa6de758fba7e50c2f28
|
[
"Apache-2.0"
] | null | null | null |
apps/accounts/tests/test_authentication.py
|
SCiO-systems/qcat
|
8c2b8e07650bc2049420fa6de758fba7e50c2f28
|
[
"Apache-2.0"
] | null | null | null |
from apps.accounts.client import WocatWebsiteUserClient
from django.contrib.auth import get_user_model
from apps.qcat.tests import TestCase
User = get_user_model()
def get_mock_validate_session_values():
"""
Returns mock values that correspond to the actual validate_session
function in accounts.authentication.
"""
return 1
def get_mock_user_information_values():
"""
Returns mock values that correspond to the actual
get_user_information function in accounts.authentication.
"""
return {
'uid': 1,
'username': 'foo@bar.com',
'first_name': 'Foo',
'last_name': 'Bar',
}
def get_mock_user_information_values_cms():
return {
'pk': 1,
'email': 'foo@bar.com',
'first_name': 'Foo',
'last_name': 'Bar',
}
class WOCATCMSAuthenticateTest(TestCase):
def setUp(self):
self.remote_user_client = WocatWebsiteUserClient()
def test_existing_user_updates(self):
# Existing users have their information updated
user_info = get_mock_user_information_values_cms()
User.objects.create(id=user_info['pk'], email=user_info['email'])
user = self.remote_user_client.get_and_update_django_user(**user_info)
self.assertEqual(user.id, user_info['pk'])
self.assertEqual(user.email, user_info['email'])
self.assertEqual(user.firstname, user_info['first_name'])
self.assertEqual(user.lastname, user_info['last_name'])
def test_new_user_updates(self):
# New users should also have their information updated
user_info = get_mock_user_information_values_cms()
user = self.remote_user_client.get_and_update_django_user(**user_info)
self.assertEqual(user.id, user_info['pk'])
self.assertEqual(user.email, user_info['email'])
self.assertEqual(user.firstname, user_info['first_name'])
self.assertEqual(user.lastname, user_info['last_name'])
| 29.969697
| 78
| 0.689585
|
4a1698fafc9517037afd6ce00f4b449009b48d79
| 2,499
|
py
|
Python
|
tests/integ/test_tf_cifar.py
|
evanfwelch/sagemaker-python-sdk
|
8b3d113a23c09995c6a6a5d12d4364e27bfd549d
|
[
"Apache-2.0"
] | null | null | null |
tests/integ/test_tf_cifar.py
|
evanfwelch/sagemaker-python-sdk
|
8b3d113a23c09995c6a6a5d12d4364e27bfd549d
|
[
"Apache-2.0"
] | null | null | null |
tests/integ/test_tf_cifar.py
|
evanfwelch/sagemaker-python-sdk
|
8b3d113a23c09995c6a6a5d12d4364e27bfd549d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pickle
import numpy as np
import pytest
from sagemaker.tensorflow import TensorFlow
from tests.integ import DATA_DIR
from tests.integ.timeout import timeout_and_delete_endpoint_by_name, timeout
PICKLE_CONTENT_TYPE = 'application/python-pickle'
class PickleSerializer(object):
def __init__(self):
self.content_type = PICKLE_CONTENT_TYPE
def __call__(self, data):
return pickle.dumps(data, protocol=2)
@pytest.mark.continuous_testing
def test_cifar(sagemaker_session, tf_full_version):
with timeout(minutes=20):
script_path = os.path.join(DATA_DIR, 'cifar_10', 'source')
dataset_path = os.path.join(DATA_DIR, 'cifar_10', 'data')
estimator = TensorFlow(entry_point='resnet_cifar_10.py', source_dir=script_path, role='SageMakerRole',
framework_version=tf_full_version, training_steps=500, evaluation_steps=5,
train_instance_count=2, train_instance_type='ml.p2.xlarge',
sagemaker_session=sagemaker_session, train_max_run=20 * 60,
base_job_name='test-cifar')
inputs = estimator.sagemaker_session.upload_data(path=dataset_path, key_prefix='data/cifar10')
estimator.fit(inputs, logs=False)
print('job succeeded: {}'.format(estimator.latest_training_job.name))
endpoint_name = estimator.latest_training_job.name
with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.p2.xlarge')
predictor.serializer = PickleSerializer()
predictor.content_type = PICKLE_CONTENT_TYPE
data = np.random.randn(32, 32, 3)
predict_response = predictor.predict(data)
assert len(predict_response['outputs']['probabilities']['floatVal']) == 10
| 40.306452
| 110
| 0.720688
|
4a16999cb76dbaf316bdcfbe3135547370167c60
| 103
|
py
|
Python
|
python/random/random_id.py
|
gerritjvv/program-helpers
|
c219531a461ff5a8eb3c1c10a9029e98712e8047
|
[
"MIT"
] | null | null | null |
python/random/random_id.py
|
gerritjvv/program-helpers
|
c219531a461ff5a8eb3c1c10a9029e98712e8047
|
[
"MIT"
] | null | null | null |
python/random/random_id.py
|
gerritjvv/program-helpers
|
c219531a461ff5a8eb3c1c10a9029e98712e8047
|
[
"MIT"
] | null | null | null |
import binascii
import os
def random_id():
return binascii.b2a_hex(os.urandom(8)).decode('utf-8')
| 17.166667
| 58
| 0.728155
|
4a16999f28afa49a2bb470e850cd49a2c0e5147b
| 325
|
py
|
Python
|
binary_tree/migrations/0002_remove_binarytree_status.py
|
vintkor/cryptotrade
|
cd27b5d58e4149cf9ad5e035983fcec566369833
|
[
"MIT"
] | 1
|
2019-07-26T09:54:32.000Z
|
2019-07-26T09:54:32.000Z
|
binary_tree/migrations/0002_remove_binarytree_status.py
|
shamanu4/test_project
|
8ec52b5ab88c7bae4e469dc04fe64630e2f081fa
|
[
"MIT"
] | 6
|
2020-06-05T19:00:20.000Z
|
2022-03-11T23:29:35.000Z
|
binary_tree/migrations/0002_remove_binarytree_status.py
|
vintkor/cryptotrade
|
cd27b5d58e4149cf9ad5e035983fcec566369833
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.7 on 2018-07-12 16:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('binary_tree', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='binarytree',
name='status',
),
]
| 18.055556
| 47
| 0.587692
|
4a169ac071b49f9f2229214c2f852d9e468fa411
| 3,976
|
py
|
Python
|
Chapter 09/Windows/import_world_borders.py
|
PacktPublishing/Python-Geospatial-Development-Third-Edition
|
e1e1f52da8509876e8576e081de9d5f251a21f77
|
[
"MIT"
] | 44
|
2016-06-14T05:36:10.000Z
|
2022-01-30T18:29:44.000Z
|
Chapter 09/Windows/import_world_borders.py
|
KonstantinKlepikov/Python-Geospatial-Development-Third-Edition
|
ca3545dbab75dac63080582538de40d4d1c15dab
|
[
"MIT"
] | null | null | null |
Chapter 09/Windows/import_world_borders.py
|
KonstantinKlepikov/Python-Geospatial-Development-Third-Edition
|
ca3545dbab75dac63080582538de40d4d1c15dab
|
[
"MIT"
] | 48
|
2016-12-11T08:53:46.000Z
|
2022-02-10T12:10:34.000Z
|
# import_world_borders.py
# DELETE FROM countries;
# ALTER TABLE countries DROP COLUMN outline;
# ALTER TABLE countries ADD COLUMN outline GEOMETRY(GEOMETRY, 4326);
import os.path
import psycopg2
import osgeo.ogr
import shapely.wkt
from shapely.geometry import MultiPolygon
from shapely.affinity import translate
#############################################################################
def adjust_for_antimeridian(name, wkt):
""" Adjust the given country if it crosses the anti-meridian line.
We return the country's original or adjusted outline, in wkt format.
"""
outline = shapely.wkt.loads(wkt)
# Ignore the country if it doesn't have multiple parts.
if outline.geom_type != "MultiPolygon":
print("Importing {}".format(name))
return wkt
# Ignore the country if it doesn't sit close to the anti-meridian line on
# both the left and right sides.
minLong,minLat,maxLong,maxLat = outline.bounds
if minLong >= -160 or maxLong <= 160:
print("Importing {}".format(name))
return wkt # No need to adjust.
# Split the country up into individual parts, recording whether each part
# is closer to the anti-meridian line on the left side or the right side.
parts = [] # List of parts. Each entry is a dictionary with 'side' and
# 'geom' entries.
for geom in outline.geoms:
left = geom.bounds[0]
right = geom.bounds[2]
if left == -180 and right == +180:
print("{} spans the entire world, so we can't shift it."
.format(name))
return wkt
distance_to_left_side = -(-180 - left)
distance_to_right_side = 180 - right
if distance_to_left_side < distance_to_right_side:
side = "left"
else:
side = "right"
parts.append({'side' : side,
'geom' : geom})
# Decide whether to shift the country to the left side or the right side of
# the world map. We do this based on the number of parts on each side.
num_on_left = 0
num_on_right = 0
for part in parts:
if part['side'] == "left":
num_on_left = num_on_left + 1
else:
num_on_right = num_on_right + 1
if num_on_left > num_on_right:
print("Shifting {} to left".format(name))
shift_direction = "left"
else:
print("Shifting {} to right".format(name))
shift_direction = "right"
# Shift the parts.
for part in parts:
old_bounds = part['geom'].bounds
if part['side'] == "left" and shift_direction == "right":
part['geom'] = translate(part['geom'], 360)
elif part['side'] == "right" and shift_direction == "left":
part['geom'] = translate(part['geom'], -360)
# Combine the translated parts back into a MultiPolygon.
polygons = []
for part in parts:
polygons.append(part['geom'])
combined = MultiPolygon(polygons)
return combined.wkt
#############################################################################
connection = psycopg2.connect(database="distal",
user="distal_user",
password="...")
cursor = connection.cursor()
cursor.execute("DELETE FROM countries")
srcFile = os.path.join("data", "TM_WORLD_BORDERS-0.3",
"TM_WORLD_BORDERS-0.3.shp")
shapefile = osgeo.ogr.Open(srcFile)
layer = shapefile.GetLayer(0)
num_done = 0
for i in range(layer.GetFeatureCount()):
feature = layer.GetFeature(i)
name = feature.GetField("NAME")
wkt = feature.GetGeometryRef().ExportToWkt()
wkt = adjust_for_antimeridian(name, wkt)
cursor.execute("INSERT INTO countries (name,outline) " +
"VALUES (%s, ST_GeometryFromText(%s, 4326))",
(name, wkt))
num_done = num_done + 1
connection.commit()
print("Imported {} countries".format(num_done))
| 29.894737
| 79
| 0.595825
|
4a169aed019c69300fdec39bda5cd4f7bca45ae5
| 270
|
py
|
Python
|
pickle_io.py
|
steveli/mogp
|
d142e7b9e5b7dbc67cfae4760c837cafd9691a51
|
[
"MIT"
] | 7
|
2019-07-18T19:55:26.000Z
|
2022-02-14T13:55:04.000Z
|
pickle_io.py
|
steveli/mogp
|
d142e7b9e5b7dbc67cfae4760c837cafd9691a51
|
[
"MIT"
] | null | null | null |
pickle_io.py
|
steveli/mogp
|
d142e7b9e5b7dbc67cfae4760c837cafd9691a51
|
[
"MIT"
] | 2
|
2020-01-28T15:46:15.000Z
|
2020-09-16T07:31:13.000Z
|
import cPickle as pickle
def pickle_load(filename):
with open(filename, 'rb') as f:
dat = pickle.load(f)
return dat
def pickle_save(filename, *args):
with open(filename, 'wb') as f:
pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL)
| 19.285714
| 62
| 0.662963
|
4a169c4eed216a3a9025750f87e733f810e723b8
| 5,618
|
py
|
Python
|
otk/asbp/plotting.py
|
draustin/otk
|
c6e91423ec79b85b380ee9385f6d27c91f92503d
|
[
"MIT"
] | 7
|
2020-05-17T14:26:42.000Z
|
2022-02-14T04:52:54.000Z
|
otk/asbp/plotting.py
|
uamhforever/otk
|
c6e91423ec79b85b380ee9385f6d27c91f92503d
|
[
"MIT"
] | 17
|
2020-04-10T22:50:00.000Z
|
2020-06-18T04:54:19.000Z
|
otk/asbp/plotting.py
|
uamhforever/otk
|
c6e91423ec79b85b380ee9385f6d27c91f92503d
|
[
"MIT"
] | 1
|
2022-02-14T04:52:45.000Z
|
2022-02-14T04:52:45.000Z
|
import numpy as np
from PyQt5 import QtCore
import pyqtgraph_extended as pg
from . import sa, math
def make_Er_image_item(r_support, Er, rs_center=(0, 0), quantity='waves'):
x, y, Eru = sa.unroll_r(r_support, Er, rs_center)
if quantity == 'amplitude':
data = abs(Eru)
lut = pg.get_colormap_lut()
levels = 0, data.max()
elif quantity == 'waves':
data = np.angle(Eru)/(2*np.pi)
lut = pg.get_colormap_lut('bipolar')
levels = -0.5, 0.5
item = pg.ImageItem(data, lut=lut)
item.setRect(pg.axes_to_rect(x*1e3, y*1e3))
item.setLevels(levels)
return item
def set_Er_image_item(item, r_support, Er, rs_center=(0, 0), quantity='waves'):
x, y, Eru = sa.unroll_r(r_support, Er, rs_center)
if quantity == 'amplitude':
data = abs(Eru)
levels = 0, data.max()
elif quantity == 'waves':
data = np.angle(Eru)/(2*np.pi)
levels = -0.5, 0.5
else:
        raise ValueError('Unknown quantity %s.' % quantity)
item.setImage(data)
item.setRect(pg.axes_to_rect(x*1e3, y*1e3))
item.setLevels(levels)
return item
def make_Eq_image_item(r_support, Eq, qs_center=(0, 0), quantity='waves'):
kx, ky, Equ = sa.unroll_q(r_support, Eq, qs_center)
if quantity == 'amplitude':
data = abs(Equ)
lut = pg.get_colormap_lut()
levels = 0, data.max()
elif quantity == 'waves':
data = np.angle(Equ)/(2*np.pi)
lut = pg.get_colormap_lut('bipolar')
levels = -0.5, 0.5
item = pg.ImageItem(data, lut=lut)
item.setRect(pg.axes_to_rect(kx/1e3, ky/1e3))
item.setLevels(levels)
return item
def set_Eq_image_item(item, r_support, Eq, qs_center=(0, 0), quantity='waves'):
kx, ky, Equ = sa.unroll_q(r_support, Eq, qs_center)
if quantity == 'amplitude':
data = abs(Equ)
levels = 0, data.max()
elif quantity == 'waves':
data = np.angle(Equ)/(2*np.pi)
levels = -0.5, 0.5
else:
        raise ValueError('Unknown quantity %s.' % quantity)
item.setImage(data)
item.setRect(pg.axes_to_rect(kx/1e3, ky/1e3))
item.setLevels(levels)
return item
def plot_r_q_polar(rs_support, Er, rs_center=(0, 0), qs_center=(0, 0), gl=None, Eq=None):
if gl is None:
glw = pg.GraphicsLayoutWidget()
plots = plot_r_q_polar(rs_support, Er, rs_center, qs_center, glw.ci, Eq)
glw.resize(830, 675)
glw.show()
return glw, plots
else:
if Eq is None:
Eq = math.fft2(Er)
absEr_plot = gl.addAlignedPlot(labels={'left':'y (mm)', 'bottom':'x (mm)'}, title='Real space amplitude')
image = make_Er_image_item(rs_support, Er, rs_center, 'amplitude')
absEr_plot.addItem(image)
gl.addHorizontalSpacer(10)
gl.addColorBar(image=image, rel_row=2, label='Amplitude')
gl.addHorizontalSpacer(10)
wavesr_plot = gl.addAlignedPlot(labels={'left':'y (mm)', 'bottom':'x (mm)'}, title='Real space wavefront')
image = make_Er_image_item(rs_support, Er, rs_center, 'waves')
wavesr_plot.addItem(image)
wavesr_plot.setXYLink(absEr_plot)
gl.addHorizontalSpacer(10)
gl.addColorBar(image=image, rel_row=2, label='Waves')
gl.nextRows()
absEq_plot = gl.addAlignedPlot(labels={'left':'ky (rad/mm)', 'bottom':'kx (rad/mm)'}, title='Angular space amplitude')
image = make_Eq_image_item(rs_support, Eq, qs_center, 'amplitude')
absEq_plot.addItem(image)
gl.addHorizontalSpacer(10)
gl.addColorBar(image=image, rel_row=2, label='Amplitude')
gl.addHorizontalSpacer(10)
wavesq_plot = gl.addAlignedPlot(labels={'left':'ky (rad/mm)', 'bottom':'kx (rad/mm)'},
title='Angular space wavefront')
image = make_Eq_image_item(rs_support, Eq, qs_center, 'waves')
wavesq_plot.addItem(image)
wavesq_plot.setXYLink(absEq_plot)
gl.addHorizontalSpacer(10)
gl.addColorBar(image=image, rel_row=2, label='Waves')
return sa.RQ(sa.AbsPhase(absEr_plot, wavesr_plot), sa.AbsPhase(absEq_plot, wavesq_plot))
def add_r_q_polar_scatter(plots, rs, qs, **kwargs):
for plot in plots.r:
item = pg.ScatterPlotItem(rs[0]*1e3, rs[1]*1e3, **kwargs)
plot.addItem(item)
for plot in plots.q:
item = pg.ScatterPlotItem(qs[0]/1e3, qs[1]/1e3, **kwargs)
plot.addItem(item)
def plot_projection(profile, projected, residual):
glw = pg.GraphicsLayoutWidget()
field_plot = glw.addAlignedPlot(labels={'left':'y (mm)', 'bottom':'x (mm)'}, title='Field')
field_image = make_Er_image_item(profile.rs_support, profile.Er, profile.rs_center, 'amplitude')
field_plot.addItem(field_image)
glw.addHorizontalSpacer()
projected_plot = glw.addAlignedPlot(labels={'left':'y (mm)', 'bottom':'x (mm)'}, title='Projected')
projected_plot.setXYLink(field_plot)
projected_image = make_Er_image_item(profile.rs_support, projected, profile.rs_center, 'amplitude')
projected_plot.addItem(projected_image)
glw.addHorizontalSpacer()
glw.addColorBar(images=(field_image, projected_image), rel_row=2)
glw.addHorizontalSpacer()
residual_plot = glw.addAlignedPlot(labels={'left':'y (mm)', 'bottom':'x (mm)'}, title='Residual')
residual_image = make_Er_image_item(profile.rs_support, residual, profile.rs_center, 'amplitude')
residual_plot.addItem(residual_image)
residual_plot.setXYLink(projected_plot)
glw.addColorBar(image=residual_image, rel_row=2)
glw.resize(1200, 360)
glw.show()
return glw
| 40.417266
| 126
| 0.647739
|
4a169c914a3aed3e96b57c1b28da5bf6533aec52
| 4,263
|
py
|
Python
|
OnshoreBattlebot2018/Entities/Ranger.py
|
roy-love/BattleCode2018-ReferenceBot
|
c017cf804728dd1555ab6a60acdd2a6f7a929281
|
[
"MIT"
] | null | null | null |
OnshoreBattlebot2018/Entities/Ranger.py
|
roy-love/BattleCode2018-ReferenceBot
|
c017cf804728dd1555ab6a60acdd2a6f7a929281
|
[
"MIT"
] | null | null | null |
OnshoreBattlebot2018/Entities/Ranger.py
|
roy-love/BattleCode2018-ReferenceBot
|
c017cf804728dd1555ab6a60acdd2a6f7a929281
|
[
"MIT"
] | null | null | null |
import random
import sys
import traceback
import battlecode as bc
from Controllers.MissionController import *
from .IRobot import IRobot
class Ranger(IRobot):
"""This is the Ranger robot"""
# change init definition to include any controllers needed in the instructor as we need them
# For example: it will eventually need to access the Targeting and Pathfinding controllers
def __init__(self, gameController, unitController, \
pathfindingController, missionController, unit, mapController):
super().__init__(gameController, unitController, \
pathfindingController, missionController, unit, bc.UnitType.Ranger,mapController)
def run(self):
if not self.unit.location.is_in_garrison() and not self.unit.location.is_in_space():
self.update_mission()
#First priority is to kill enemy troops
if not self.mission is None:
if self.mission.action == Missions.Idle:
self.idle()
elif self.mission.action == Missions.RandomMovement:
self.one_random_movement()
elif self.mission.action == Missions.DestroyTarget:
self.destroy_target()
#Attacks nearby units
nearby = self.game_controller.sense_nearby_units(self.unit.location.map_location(), 50)
for other in nearby:
if other.team != self.game_controller.team() \
and self.game_controller.is_attack_ready(self.unit.id) \
and self.game_controller.can_attack(self.unit.id, other.id):
print('Ranger {} attacked a thing!'.format(self.unit.id))
self.game_controller.attack(self.unit.id, other.id)
break
def try_attack(self, target_robot_id):
"""Trys to attack"""
# Checks to see if Ranger has enough heat to attack
if not self.game_controller.is_attack_ready(self.unit.id):
print("Ranger[{}] attack is not ready. Not enough heat".format(self.unit.id))
return False
if not self.game_controller.can_attack(self.unit.id, target_robot_id):
print("Ranger [{}] cannot attack the target [{}]".format(self.unit.id, target_robot_id))
return False
self.game_controller.attack(self.unit.id, target_robot_id)
return True
def try_snipe(self, target_location):
"""Trys to snipe"""
# Checks that the Ranger has fully been researched so that it can now Snipe at enemy units
level = bc.research_info.get_level(bc.UnitType.Ranger)
if level != 3:
return False
if not self.game_controller.is_begin_snipe_ready(self.unit.id):
print("Snipe is not ready for ranger [{}]".format(self.unit.id))
return False
if not self.game_controller.can_begin_snipe(self.unit.id, target_location):
print("Ranger [{}] cannot snipe target location".format(self.unit.id))
return False
self.game_controller.begin_snipe(self.unit.id, target_location)
return True
# Sends Rangers to a defensive waypoint between our starting location and the enemy starting location
def SetDefenderWaypoint(self):
currentLocation = mapController.my_team_start
enemyDirection = currentLocation.direction_to(mapController.enemy_team_start[0])
target_location = currentLocation.clone()
for i in range (0, 9):
if enemyDirection == bc.Direction.North:
target_location.y + 1
elif enemyDirection == bc.Direction.Northeast:
target_location.x + 1, target_location.y + 1
elif enemyDirection == bc.Direction.East:
target_location.x + 1
elif enemyDirection == bc.Direction.Southeast:
target_location.y - 1, target_location.x +1
elif enemyDirection == bc.Direction.South:
target_location.y + 1
elif enemyDirection == bc.Direction.Southwest:
target_location.y - 1, target_location.x - 1
elif enemyDirection == bc.Direction.West:
target_location.x - 1
elif enemyDirection == bc.Direction.Northwest:
target_location.y + 1, target_location.x - 1
print("Ranger adjusting target location to [{}]".format(self.target_location))
# Randomizes an offest for x and y coordinations
offset = random.randint(-5, 5)
target_location.x = target_location.x + offset
offset = random.randint(-5, 5)
target_location.y = target_location.y + offset
# Assisgns target location for our rangers to defend
self.target_location = bc.MapLocation(bc.Planet.Earth, target_location.x, target_location.y)
enemyDirection = direction_to(self.target_location)
self.try_move(enemyDirection)
| 40.216981
| 102
| 0.749472
|
4a169d04be1b41ac02cc58d6064dabfd7a79dbd8
| 38,400
|
py
|
Python
|
pygoslin/parser/FattyAcidParserEventHandler.py
|
lifs-tools/pygoslin
|
e6cdd437db5926aee5dff1b7cfb00ff267fb976d
|
[
"MIT"
] | null | null | null |
pygoslin/parser/FattyAcidParserEventHandler.py
|
lifs-tools/pygoslin
|
e6cdd437db5926aee5dff1b7cfb00ff267fb976d
|
[
"MIT"
] | 2
|
2021-04-06T09:00:21.000Z
|
2022-01-07T16:13:51.000Z
|
pygoslin/parser/FattyAcidParserEventHandler.py
|
lifs-tools/pygoslin
|
e6cdd437db5926aee5dff1b7cfb00ff267fb976d
|
[
"MIT"
] | 2
|
2020-07-13T14:16:24.000Z
|
2021-04-06T08:39:20.000Z
|
"""
MIT License
Copyright (c) 2020 Dominik Kopczynski - dominik.kopczynski {at} isas.de
Nils Hoffmann - nils.hoffmann {at} isas.de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pygoslin.parser.BaseParserEventHandler import BaseParserEventHandler
from pygoslin.domain.LipidAdduct import LipidAdduct
from pygoslin.domain.LipidLevel import LipidLevel
from pygoslin.domain.Adduct import Adduct
from pygoslin.domain.LipidFaBondType import LipidFaBondType
from pygoslin.domain.FattyAcid import FattyAcid
from pygoslin.domain.LipidCompleteStructure import LipidCompleteStructure
from pygoslin.domain.LipidFullStructure import LipidFullStructure
from pygoslin.domain.LipidStructureDefined import LipidStructureDefined
from pygoslin.domain.LipidSnPosition import LipidSnPosition
from pygoslin.domain.LipidMolecularSpecies import LipidMolecularSpecies
from pygoslin.domain.LipidSpecies import LipidSpecies
from pygoslin.domain.LipidSpeciesInfo import LipidSpeciesInfo
from pygoslin.domain.LipidExceptions import *
from pygoslin.domain.HeadGroup import HeadGroup
from pygoslin.domain.FunctionalGroup import *
from pygoslin.domain.LipidClass import *
from pygoslin.domain.Cycle import *
last_numbers = {'un': 1, 'hen': 1, 'do': 2, 'di': 2, 'tri': 3, 'buta': 4, 'but': 4, 'tetra': 4, 'penta': 5, 'pent': 5, 'hexa': 6, 'hex': 6, 'hepta': 7, 'hept': 7, 'octa': 8, 'oct': 8, 'nona': 9, 'non': 9}
second_numbers = {'deca': 10, 'dec': 10, 'eicosa': 20, 'eicos': 20 , 'cosa': 20, 'cos': 20, 'triaconta': 30, 'triacont': 30, 'tetraconta': 40, 'tetracont': 40, 'pentaconta': 50, 'pentacont': 50, 'hexaconta': 60, 'hexacont': 60, 'heptaconta': 70, 'heptacont': 70, 'octaconta': 80, 'octacont': 80, 'nonaconta': 90, 'nonacont': 90}
special_numbers = {'meth': 1, 'etha': 2, 'eth': 2, 'propa': 3, 'isoprop': 3, 'prop': 3, 'propi': 3, 'propio': 3, 'buta': 4, 'but': 4, 'butr': 4, 'furan': 5, 'valer': 5, 'eicosa': 20, 'eicos': 20, 'icosa': 20, 'icos': 20, 'prosta': 20, 'prost': 20, 'prostan': 20}
func_groups = {'keto': 'oxo', 'ethyl': 'Et', 'hydroxy': "OH", 'phospho': 'Ph', 'oxo': 'oxo', 'bromo': 'Br', 'methyl': 'Me', 'hydroperoxy': 'OOH', 'homo': '', 'Epoxy': 'Ep', 'fluro': 'F', 'fluoro': 'F', 'chloro': 'Cl', 'methylene': 'My', 'sulfooxy': 'Su', 'amino': 'NH2', 'sulfanyl': 'SH', 'methoxy': 'OMe', 'iodo': 'I', 'cyano': 'CN', 'nitro': 'NO2', 'OH': 'OH', 'thio': 'SH', 'mercapto': 'SH', 'carboxy': "COOH", 'acetoxy': 'Ac', 'cysteinyl': 'Cys', 'phenyl': 'Phe', 's-glutathionyl': "SGlu", 's-cysteinyl': "SCys", "butylperoxy": "BOO", "dimethylarsinoyl": "MMAs", "methylsulfanyl": "SMe", "imino": "NH", 's-cysteinylglycinyl': "SCG"}
ate = {'formate': 1, 'acetate': 2, 'butyrate': 4, 'propionate': 3, 'valerate': 5, 'isobutyrate': 4}
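# Editor's sketch (the helper below is hypothetical and not used by the handler):
# the grammar events simply add the matched prefix values together, so a systematic
# name such as "octadecanoic acid" resolves its chain length as
# last_numbers['octa'] + second_numbers['deca'] = 8 + 10 = 18 carbons.
def _example_chain_length(last_prefix, second_prefix):
    return last_numbers[last_prefix] + second_numbers[second_prefix]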
class FattyAcidParserEventHandler(BaseParserEventHandler):
def __init__(self):
super().__init__()
self.registered_events["lipid_pre_event"] = self.reset_lipid
self.registered_events["lipid_post_event"] = self.build_lipid
self.registered_events["fatty_acid_post_event"] = self.set_fatty_acid
self.registered_events["fatty_acid_recursion_post_event"] = self.set_fatty_acid
self.registered_events["acid_single_type_pre_event"] = self.set_fatty_acyl_type
self.registered_events["ol_ending_pre_event"] = self.set_fatty_acyl_type
self.registered_events["double_bond_position_pre_event"] = self.set_double_bond_information
self.registered_events["double_bond_position_post_event"] = self.add_double_bond_information
self.registered_events["db_number_post_event"] = self.set_double_bond_position
self.registered_events["cistrans_post_event"] = self.set_cistrans
self.registered_events["acid_type_double_post_event"] = self.check_db
self.registered_events["db_length_pre_event"] = self.open_db_length
self.registered_events["db_length_post_event"] = self.close_db_length
## lengths
self.registered_events["functional_length_pre_event"] = self.reset_length
self.registered_events["fatty_length_pre_event"] = self.reset_length
self.registered_events["functional_length_post_event"] = self.set_functional_length
self.registered_events["fatty_length_post_event"] = self.set_fatty_length
## numbers
self.registered_events["notation_specials_pre_event"] = self.special_number
self.registered_events["notation_last_digit_pre_event"] = self.last_number
self.registered_events["notation_second_digit_pre_event"] = self.second_number
## functional groups
self.registered_events["functional_group_pre_event"] = self.set_functional_group
self.registered_events["functional_group_post_event"] = self.add_functional_group
self.registered_events["functional_pos_pre_event"] = self.set_functional_pos
self.registered_events["functional_position_pre_event"] = self.set_functional_position
self.registered_events["functional_group_type_pre_event"] = self.set_functional_type
## cyclo / epoxy
self.registered_events["cyclo_position_pre_event"] = self.set_functional_group
self.registered_events["cyclo_position_post_event"] = self.rearrange_cycle
self.registered_events["epoxy_pre_event"] = self.set_functional_group
self.registered_events["epoxy_post_event"] = self.add_epoxy
self.registered_events["cycle_pre_event"] = self.set_cycle
self.registered_events["methylene_post_event"] = self.set_methylene
## dioic
self.registered_events["dioic_pre_event"] = self.set_functional_group
self.registered_events["dioic_post_event"] = self.set_dioic
self.registered_events["dioic_acid_pre_event"] = self.set_fatty_acyl_type
self.registered_events["dial_post_event"] = self.set_dial
## prosta
self.registered_events["prosta_pre_event"] = self.set_prosta
self.registered_events["prosta_post_event"] = self.add_cyclo
self.registered_events["reduction_pre_event"] = self.set_functional_group
self.registered_events["reduction_post_event"] = self.reduction
self.registered_events["homo_post_event"] = self.homo
## furan
self.registered_events["tetrahydrofuran_pre_event"] = self.set_tetrahydrofuran
self.registered_events["furan_pre_event"] = self.set_furan
## recursion
self.registered_events["recursion_description_pre_event"] = self.set_recursion
self.registered_events["recursion_description_post_event"] = self.add_recursion
self.registered_events["recursion_pos_pre_event"] = self.set_recursion_pos
self.registered_events["yl_ending_pre_event"] = self.set_yl_ending
self.registered_events["acetic_acid_post_event"] = self.set_acetic_acid
self.registered_events["acetic_recursion_pre_event"] = self.set_recursion
self.registered_events["acetic_recursion_post_event"] = self.add_recursion
self.registered_events["hydroxyl_number_pre_event"] = self.add_hydroxyl
self.registered_events["ol_pre_event"] = self.setup_hydroxyl
self.registered_events["ol_post_event"] = self.add_hydroxyls
self.registered_events["ol_pos_post_event"] = self.set_yl_ending
## wax esters
self.registered_events["wax_ester_pre_event"] = self.set_recursion
self.registered_events["wax_ester_post_event"] = self.add_wax_ester
self.registered_events["ate_post_event"] = self.set_ate
self.registered_events["isoprop_post_event"] = self.set_iso
self.registered_events["isobut_post_event"] = self.set_iso
## CoA
self.registered_events["coa_post_event"] = self.set_coa
self.registered_events["methyl_pre_event"] = self.set_methyl
## CAR
self.registered_events["car_pre_event"] = self.set_car
self.registered_events["car_post_event"] = self.add_car
## amine
self.registered_events["ethanolamine_post_event"] = self.add_ethanolamine
self.registered_events["amine_n_pre_event"] = self.set_recursion
self.registered_events["amine_n_post_event"] = self.add_amine
self.registered_events["amine_post_event"] = self.add_amine_name
## functional group position summary
self.registered_events["fg_pos_summary_pre_event"] = self.set_functional_group
self.registered_events["fg_pos_summary_post_event"] = self.add_summary
self.registered_events["func_stereo_pre_event"] = self.add_func_stereo
self.debug = ""
def reset_lipid(self, node):
self.level = LipidLevel.FULL_STRUCTURE
self.headgroup = ""
self.fatty_acyl_stack = [FattyAcid("FA")]
self.tmp = {"fa1": {}}
def set_car(self, node):
self.tmp["fg_pos"] = []
self.tmp["fg_type"] = ""
def add_ol(self, node):
self.headgroup = "FOH"
def homo(self, node):
self.tmp["post_adding"] = list(p[0] for p in self.tmp["fg_pos"])
def add_summary(self, node):
fa_i = "fa%i" % len(self.fatty_acyl_stack)
self.tmp[fa_i]["fg_pos_summary"] = {k: v.upper() for k, v in self.tmp["fg_pos"]}
def set_acetic_acid(self, node):
self.fatty_acyl_stack[-1].num_carbon += 2
self.headgroup = "FA"
def add_func_stereo(self, node):
self.tmp["fg_pos"][-1][1] = node.get_text()
def set_yl_ending(self, node):
l = int(node.get_text()) - 1
if l == 0: return
curr_fa = self.fatty_acyl_stack[-1]
if "furan" in self.tmp:
curr_fa.num_carbon -= l
return
if l == 1:
fname = "Me"
fg = get_functional_group(fname)
elif l == 2:
fname = "Et"
fg = get_functional_group(fname)
else:
fa = FattyAcid("FA", num_carbon = l)
# shift functional groups
for fg_name, fg_list in curr_fa.functional_groups.items():
remove_item = []
for i, func_group in enumerate(fg_list):
if func_group.position <= l:
remove_item.append(i)
if fg_name not in fa.functional_groups: fa.functional_groups[fg_name] = []
func_group.position = l + 1 - func_group.position
fa.functional_groups[fg_name].append(func_group)
for i in remove_item[::-1]:
del curr_fa.functional_groups[fg_name][i]
curr_fa.functional_groups = {k: v for k, v in curr_fa.functional_groups.items() if len(v) > 0}
#shift double bonds
if type(curr_fa.double_bonds) == dict:
fa.double_bonds = {l + 1 - k: v for k, v in curr_fa.double_bonds.items() if k <= l}
for k in list(k for k in curr_fa.double_bonds if k <= l): del curr_fa.double_bonds[k]
fname = "cc"
fg = CarbonChain(fa)
curr_fa.num_carbon -= l
fg.position = l
curr_fa.shift_positions(-l)
if fname not in curr_fa.functional_groups: curr_fa.functional_groups[fname] = []
curr_fa.functional_groups[fname].append(fg)
def set_methylene(self, node):
self.tmp["fg_type"] = "methylene"
if len(self.tmp["fg_pos"]) > 1:
if self.tmp["fg_pos"][0][0] < self.tmp["fg_pos"][1][0]: self.tmp["fg_pos"][1][0] += 1
elif self.tmp["fg_pos"][0][0] > self.tmp["fg_pos"][1][0]: self.tmp["fg_pos"][0][0] += 1
self.fatty_acyl_stack[-1].num_carbon += 1
self.tmp["add_methylene"] = True
def check_db(self, node):
fa_i = "fa%i" % len(self.fatty_acyl_stack)
curr_fa = self.fatty_acyl_stack[-1]
if "fg_pos_summary" in self.tmp[fa_i]:
if type(curr_fa.double_bonds) != dict: curr_fa.double_bonds = {}
for k, v in self.tmp[fa_i]["fg_pos_summary"].items():
if v in {"E", "Z", ""} and k > 0 and k not in curr_fa.double_bonds: curr_fa.double_bonds[k] = v
def add_car(self, node):
self.headgroup = "CAR"
def add_hydroxyl(self, node):
self.tmp["hydroxyl_pos"].append(int(node.get_text()))
def set_coa(self, node):
self.headgroup = "CoA"
def set_methyl(self, node):
self.fatty_acyl_stack[-1].num_carbon += 1
def setup_hydroxyl(self, node):
self.tmp["hydroxyl_pos"] = []
def set_iso(self, node):
curr_fa = self.fatty_acyl_stack[-1]
curr_fa.num_carbon -= 1
fg = get_functional_group("Me")
fg.position = 2
if "Me" not in curr_fa.functional_groups: curr_fa.functional_groups["Me"] = []
curr_fa.functional_groups["Me"].append(fg)
def set_ate(self, node):
self.fatty_acyl_stack[-1].num_carbon += ate[node.get_text()]
self.headgroup = "WE"
def add_amine(self, node):
fa = self.fatty_acyl_stack.pop()
fa.lipid_FA_bond_type = LipidFaBondType.AMIDE
self.fatty_acyl_stack[-1].lipid_FA_bond_type = LipidFaBondType.AMIDE
self.fatty_acyl_stack.insert(0, fa)
def add_amine_name(self, node):
self.headgroup = "NA"
def add_ethanolamine(self, node):
self.headgroup = "NAE"
def add_hydroxyls(self, node):
if len(self.tmp["hydroxyl_pos"]) > 1:
fg_oh = get_functional_group("OH")
for pos in sorted(self.tmp["hydroxyl_pos"], reverse = True)[:-1]:
fg_insert = fg_oh.copy()
fg_insert.position = pos
if "OH" not in self.fatty_acyl_stack[-1].functional_groups: self.fatty_acyl_stack[-1].functional_groups["OH"] = []
self.fatty_acyl_stack[-1].functional_groups["OH"].append(fg_insert)
def set_double_bond_position(self, node):
fa_i = "fa%i" % len(self.fatty_acyl_stack)
pos = int(node.get_text())
self.tmp[fa_i]["db_position"] = pos - (sum([1 for p in self.tmp["reduction"] if p < pos]) if "reduction" in self.tmp else 0)
def set_recursion_pos(self, node):
fa_i = "fa%i" % len(self.fatty_acyl_stack)
self.tmp[fa_i]["recursion_pos"] = int(node.get_text())
def set_recursion(self, node):
self.tmp["fg_pos"] = []
self.tmp["fg_type"] = ""
self.fatty_acyl_stack.append(FattyAcid("FA"))
fa_i = "fa%i" % len(self.fatty_acyl_stack)
self.tmp[fa_i] = {"recursion_pos": 0}
def add_recursion(self, node):
fa_i = "fa%i" % len(self.fatty_acyl_stack)
pos = self.tmp[fa_i]["recursion_pos"]
fa = self.fatty_acyl_stack.pop()
fa.position = pos
curr_fa = self.fatty_acyl_stack[-1]
if "cyclo_yl" in self.tmp:
fname = "cyclo"
del self.tmp["cyclo_yl"]
else:
fname = self.headgroup
if fname not in curr_fa.functional_groups: curr_fa.functional_groups[fname] = []
curr_fa.functional_groups[fname].append(fa)
self.tmp["added_func_group"] = True
def add_wax_ester(self, node):
fa = self.fatty_acyl_stack.pop()
fa.lipid_FA_bond_type = LipidFaBondType.ETHER
self.fatty_acyl_stack.insert(0, fa)
def set_cycle(self, node):
self.tmp["cyclo"] = True
def set_fatty_acid(self, node):
def switch_position(func_group, switch):
func_group.position = switch - func_group.position
for fg_name, fg_list in func_group.functional_groups.items():
for fg in fg_list:
switch_position(fg, switch)
if "length_pattern" in self.tmp:
l, d, num, length_pattern = 0, 0, self.tmp["length_tokens"], self.tmp["length_pattern"]
if length_pattern in {"L", "S"}:
l += num[0]
elif length_pattern == "LS":
l += num[0] + num[1]
elif length_pattern in {"LL", "SL", "SS"}:
l += num[0]
d += num[1]
elif length_pattern in {"LSL", "LSS"}:
l += num[0] + num[1]
d += num[2]
elif length_pattern == "LSLS":
l += num[0] + num[1]
d += num[2] + num[3]
elif length_pattern == "SLS":
l += num[0]
d += num[1] + num[2]
elif len(length_pattern) > 0 and length_pattern[0] == "X":
l += num[0]
d += sum(num[1:])
elif length_pattern == "LLS": # false
raise RuntimeException("Cannot determine fatty acid and double bond length in '%s'" % node.get_text())
curr_fa = self.fatty_acyl_stack[-1]
curr_fa.num_carbon += l
if type(curr_fa.double_bonds) == int: curr_fa.double_bonds = d
if "noyloxy" in curr_fa.functional_groups:
if self.headgroup == "FA": self.headgroup = "FAHFA"
while len(curr_fa.functional_groups["noyloxy"]) > 0:
fa = curr_fa.functional_groups["noyloxy"].pop()
acyl = AcylAlkylGroup(fa)
acyl.position = fa.position
if "acyl" not in curr_fa.functional_groups: curr_fa.functional_groups["acyl"] = []
curr_fa.functional_groups["acyl"].append(acyl)
del curr_fa.functional_groups["noyloxy"]
elif "nyloxy" in curr_fa.functional_groups or "yloxy" in curr_fa.functional_groups:
yloxy = "nyloxy" if "nyloxy" in curr_fa.functional_groups else "yloxy"
while len(curr_fa.functional_groups[yloxy]) > 0:
fa = curr_fa.functional_groups[yloxy].pop()
alkyl = AcylAlkylGroup(fa, alkyl = True)
alkyl.position = fa.position
if "alkyl" not in curr_fa.functional_groups: curr_fa.functional_groups["alkyl"] = []
curr_fa.functional_groups["alkyl"].append(alkyl)
del curr_fa.functional_groups[yloxy]
elif sum([k[-2:] == "yl" for k in curr_fa.functional_groups]) > 0:
while True:
try:
yl = [k for k in curr_fa.functional_groups if k[-2:] == "yl"][0]
except Exception:
break
while len(curr_fa.functional_groups[yl]) > 0:
fa = curr_fa.functional_groups[yl].pop()
if "cyclo" in self.tmp:
cyclo_len = curr_fa.num_carbon
self.tmp["cyclo_len"] = cyclo_len
if fa.position != cyclo_len and "furan" not in self.tmp: switch_position(curr_fa, 2 + cyclo_len)
fa.shift_positions(cyclo_len)
if "furan" in self.tmp: curr_fa.shift_positions(-1)
for fg, fg_list in fa.functional_groups.items():
if fg not in curr_fa.functional_groups: curr_fa.functional_groups[fg] = fg_list
else: curr_fa.functional_groups[fg] += fg_list
curr_fa.num_carbon = cyclo_len + fa.num_carbon
if type(curr_fa.double_bonds) == int: curr_fa.double_bonds = {}
if type(fa.double_bonds) == dict:
for pos, ez in fa.double_bonds.items():
curr_fa.double_bonds[pos + cyclo_len] = ez
if "furan" in self.tmp and "tetrahydrofuran" not in self.tmp:
if type(curr_fa.double_bonds) == int:
curr_fa.double_bonds += 2
else:
curr_fa.double_bonds[1] = "E"
curr_fa.double_bonds[3] = "E"
self.tmp["cyclo_yl"] = True
else:
## add carbon chains here here
## special chains: i.e. ethyl, methyl
fg_name = ""
if (fa.double_bonds if type(fa.double_bonds) == int else len(fa.double_bonds)) == 0 and len(fa.functional_groups) == 0:
if fa.num_carbon == 1:
fg_name = "Me"
fg = get_functional_group(fg_name)
elif fa.num_carbon == 2:
fg_name = "Et"
fg = get_functional_group(fg_name)
if len(fg_name) > 0:
fg.position = fa.position
if fg_name not in curr_fa.functional_groups: curr_fa.functional_groups[fg_name] = []
curr_fa.functional_groups[fg_name].append(fg)
if len(fg_name) == 0:
cc = CarbonChain(fa, position = fa.position)
if "cc" not in curr_fa.functional_groups: curr_fa.functional_groups["cc"] = []
curr_fa.functional_groups["cc"].append(cc)
if "cyclo" in self.tmp: del self.tmp["cyclo"]
del curr_fa.functional_groups[yl]
if "cyclo" in curr_fa.functional_groups:
fa = curr_fa.functional_groups["cyclo"][0]
del curr_fa.functional_groups["cyclo"]
if "cyclo_len" not in self.tmp: self.tmp["cyclo_len"] = 5
start_pos, end_pos = curr_fa.num_carbon + 1, curr_fa.num_carbon + self.tmp["cyclo_len"]
fa.shift_positions(start_pos - 1)
if "cy" in curr_fa.functional_groups:
for cy in curr_fa.functional_groups["cy"]:
cy.shift_positions(start_pos - 1)
for fg, fg_list in fa.functional_groups.items():
if fg not in curr_fa.functional_groups: curr_fa.functional_groups[fg] = fg_list
else: curr_fa.functional_groups[fg] += fg_list
if type(curr_fa.double_bonds) == int: curr_fa.double_bonds = {}
if type(fa.double_bonds) == dict:
for pos, ez in fa.double_bonds.items():
curr_fa.double_bonds[pos + start_pos - 1] = ez
if "furan" in self.tmp and "tetrahydrofuran" not in self.tmp:
if type(curr_fa.double_bonds) == int:
curr_fa.double_bonds += 2
else:
curr_fa.double_bonds[1 + curr_fa.num_carbon] = "E"
curr_fa.double_bonds[3 + curr_fa.num_carbon] = "E"
curr_fa.num_carbon += fa.num_carbon
self.tmp["fg_pos"] = [[start_pos, ""], [end_pos, ""]]
self.add_cyclo(node)
if "cyclo_len" in self.tmp: del self.tmp["cyclo_len"]
if "cyclo" in self.tmp: del self.tmp["cyclo"]
elif "cyclo" in self.tmp:
self.tmp["cyclo_yl"] = True
self.tmp["cyclo_len"] = curr_fa.num_carbon
self.tmp["fg_pos"] = [[1, ""], [curr_fa.num_carbon, ""]]
del self.tmp["cyclo"]
self.tmp["length_pattern"] = ""
self.tmp["length_tokens"] = []
self.tmp["add_lengths"] = False
def set_double_bond_information(self, node):
fa_i = "fa%i" % len(self.fatty_acyl_stack)
self.tmp[fa_i]["db_position"] = 0
self.tmp[fa_i]["db_cistrans"] = ""
def reduction(self, node):
self.fatty_acyl_stack[-1].num_carbon -= len(self.tmp["fg_pos"])
for fg, fg_list in self.fatty_acyl_stack[-1].functional_groups.items():
for func_group in fg_list:
func_group.shift_positions(-len(self.tmp["fg_pos"]))
self.tmp["reduction"] = [p[0] for p in self.tmp["fg_pos"]]
def add_double_bond_information(self, node):
fa_i = "fa%i" % len(self.fatty_acyl_stack)
pos = self.tmp[fa_i]["db_position"]
cistrans = self.tmp[fa_i]["db_cistrans"]
if cistrans == "" and "fg_pos_summary" in self.tmp[fa_i] and pos in self.tmp[fa_i]["fg_pos_summary"]: cistrans = self.tmp[fa_i]["fg_pos_summary"][pos]
if pos == 0: return
cistrans = cistrans.upper();
del self.tmp[fa_i]["db_position"]
del self.tmp[fa_i]["db_cistrans"]
if type(self.fatty_acyl_stack[-1].double_bonds) == int: self.fatty_acyl_stack[-1].double_bonds = {}
if pos not in self.fatty_acyl_stack[-1].double_bonds or len(self.fatty_acyl_stack[-1].double_bonds[pos]) == 0:
self.fatty_acyl_stack[-1].double_bonds[pos] = cistrans
def set_dioic(self, node):
self.headgroup = "FA"
pos = self.tmp["fg_pos"][1][0] if len(self.tmp["fg_pos"]) == 2 else self.fatty_acyl_stack[-1].num_carbon
if "reduction" in self.tmp: pos -= len(self.tmp["reduction"])
self.fatty_acyl_stack[-1].num_carbon -= 1
func_group = get_functional_group("COOH")
func_group.position = pos - 1
if "COOH" not in self.fatty_acyl_stack[-1].functional_groups: self.fatty_acyl_stack[-1].functional_groups["COOH"] = []
self.fatty_acyl_stack[-1].functional_groups["COOH"].append(func_group)
def set_cistrans(self, node):
self.tmp["fa%i" % len(self.fatty_acyl_stack)]["db_cistrans"] = node.get_text()
def set_fatty_acyl_type(self, node):
t = node.get_text()
if t[-2:] == "ol": self.headgroup = "FOH"
elif t in {"noic acid", "nic acid", "dioic_acid"}: self.headgroup = "FA"
elif t in {"nal", "dial"}: self.headgroup = "FAL"
elif t in {"acetate", "noate", "nate"}: self.headgroup = "WE"
elif t == "ne":
self.headgroup = "HC"
self.fatty_acyl_stack[-1].lipid_FA_bond_type = LipidFaBondType.ETHER
else: self.headgroup = t
def set_lipid_level(self, level):
self.level = self.level if self.level.value < level.value else level
def set_dial(self, node):
curr_fa = self.fatty_acyl_stack[-1]
pos = curr_fa.num_carbon
fg = get_functional_group("oxo")
fg.position = pos
if "oxo" not in curr_fa.functional_groups: curr_fa.functional_groups["oxo"] = []
curr_fa.functional_groups["oxo"].append(fg)
def reset_length(self, node):
self.tmp["length"] = 0
self.tmp["length_pattern"] = ""
self.tmp["length_tokens"] = []
self.tmp["add_lengths"] = True
def set_functional_length(self, node):
if self.tmp["length"] != len(self.tmp["fg_pos"]):
raise LipidException("Length of functional group '%i' does not match with number of its positions '%i'" % (self.tmp["length"], len(self.tmp["fg_pos"])))
def set_fatty_length(self, node):
self.tmp["add_lengths"] = False
def rearrange_cycle(self, node):
if "post_adding" in self.tmp:
self.fatty_acyl_stack[-1].num_carbon += len(self.tmp["post_adding"])
del self.tmp["post_adding"]
curr_fa = self.fatty_acyl_stack[-1]
start = self.tmp["fg_pos"][0][0]
if "cy" in curr_fa.functional_groups:
for cy in curr_fa.functional_groups["cy"]:
shift = start - cy.position
if shift == 0: continue
cy.rearrange_functional_groups(curr_fa, shift)
def add_cyclo(self, node):
start = self.tmp["fg_pos"][0][0]
end = self.tmp["fg_pos"][1][0]
cyclo_db = None
# check double bonds
if type(self.fatty_acyl_stack[-1].double_bonds) == dict and len(self.fatty_acyl_stack[-1].double_bonds) > 0:
cyclo_db = {db_pos: val for db_pos, val in self.fatty_acyl_stack[-1].double_bonds.items() if start <= db_pos <= end}
for pos in cyclo_db:
del self.fatty_acyl_stack[-1].double_bonds[pos]
# check functional_groups
cyclo_fg, remove_list, curr_fa = {}, set(), self.fatty_acyl_stack[-1]
if "noyloxy" in curr_fa.functional_groups:
remove_item = []
for i, func_group in enumerate(curr_fa.functional_groups["noyloxy"]):
if start <= func_group.position <= end:
cc = CarbonChain(func_group, position = func_group.position)
if "cc" not in curr_fa.functional_groups: curr_fa.functional_groups["cc"] = []
curr_fa.functional_groups["cc"].append(cc)
remove_item.append(i)
for i in remove_item[::-1]: del curr_fa.functional_groups["noyloxy"][i]
if len(curr_fa.functional_groups["noyloxy"]) == 0: remove_list.add("noyloxy")
for fg, fg_list in curr_fa.functional_groups.items():
remove_item = []
for i, func_group in enumerate(fg_list):
if start <= func_group.position <= end:
if fg not in cyclo_fg: cyclo_fg[fg] = []
cyclo_fg[fg].append(func_group)
remove_item.append(i)
for i in remove_item[::-1]: del curr_fa.functional_groups[fg][i]
if len(fg_list) == 0: remove_list.add(fg)
for fg in remove_list: del curr_fa.functional_groups[fg]
bridge_chain = []
if "furan" in self.tmp:
del self.tmp["furan"]
bridge_chain = [Element.O]
cycle = Cycle(end - start + 1 + len(bridge_chain), start = start, end = end, double_bonds = cyclo_db, functional_groups = cyclo_fg, bridge_chain = bridge_chain)
if "cy" not in self.fatty_acyl_stack[-1].functional_groups: self.fatty_acyl_stack[-1].functional_groups["cy"] = []
self.fatty_acyl_stack[-1].functional_groups["cy"].append(cycle)
def add_epoxy(self, node):
self.tmp["fg_pos"] = self.tmp["fg_pos"][:1]
self.tmp["fg_type"] = "Epoxy"
def special_number(self, node):
if self.tmp["add_lengths"]:
self.tmp["length"] += special_numbers[node.get_text()]
self.tmp["length_pattern"] += "X"
self.tmp["length_tokens"].append(special_numbers[node.get_text()])
def last_number(self, node):
if self.tmp["add_lengths"]:
self.tmp["length"] += last_numbers[node.get_text()]
self.tmp["length_pattern"] += "L"
self.tmp["length_tokens"].append(last_numbers[node.get_text()])
def second_number(self, node):
if self.tmp["add_lengths"]:
self.tmp["length"] += second_numbers[node.get_text()]
self.tmp["length_pattern"] += "S"
self.tmp["length_tokens"].append(second_numbers[node.get_text()])
def open_db_length(self, node):
self.tmp["add_lengths"] = True
def close_db_length(self, node):
self.tmp["add_lengths"] = False
def set_functional_group(self, node):
self.tmp["fg_pos"] = []
self.tmp["fg_type"] = ""
def set_prosta(self, node):
minus_pos = (sum([1 for p in self.tmp["reduction"] if p < 8]) if "reduction" in self.tmp else 0)
self.tmp["fg_pos"] = [[8 - minus_pos, ""], [12 - minus_pos, ""]]
self.tmp["fg_type"] = "cy"
def set_tetrahydrofuran(self, node):
self.tmp["furan"] = True
self.tmp["tetrahydrofuran"] = True
self.set_cycle(node)
def set_furan(self, node):
self.tmp["furan"] = True
self.set_cycle(node)
def add_functional_group(self, node):
if "added_func_group" in self.tmp:
del self.tmp["added_func_group"]
return
elif "add_methylene" in self.tmp:
del self.tmp["add_methylene"]
self.add_cyclo(node)
return
t = self.tmp["fg_type"]
if t != "acetoxy":
if t not in func_groups: raise LipidException("Unknown functional group: '%s'" % t)
t = func_groups[t]
if len(t) == 0: return
fg = get_functional_group(t)
else:
fg = AcylAlkylGroup(FattyAcid("O", num_carbon = 2))
if t not in self.fatty_acyl_stack[-1].functional_groups: self.fatty_acyl_stack[-1].functional_groups[t] = []
for pos in self.tmp["fg_pos"]:
fg_insert = fg.copy()
fg_insert.position = pos[0] - (sum([1 for p in self.tmp["reduction"] if p < pos[0]]) if "reduction" in self.tmp else 0)
self.fatty_acyl_stack[-1].functional_groups[t].append(fg_insert)
def set_functional_position(self, node):
self.tmp["fg_pos"].append([0, ""])
def set_functional_pos(self, node):
self.tmp["fg_pos"][-1][0] = int(node.get_text())
def set_functional_type(self, node):
self.tmp["fg_type"] = node.get_text()
def build_lipid(self, node):
if "cyclo_yl" in self.tmp:
self.tmp["fg_pos"] = [[1, ""], [self.tmp["cyclo_len"], ""]]
self.add_cyclo(node)
del self.tmp["cyclo_yl"]
del self.tmp["cyclo_len"]
if "post_adding" in self.tmp:
def add_position(func_group, pos):
func_group.position += func_group.position >= pos
if type(func_group) == Cycle:
func_group.start += func_group.start >= pos
func_group.end += func_group.end >= pos
for fg_name, fg_list in func_group.functional_groups.items():
for fg in fg_list:
add_position(fg, pos)
curr_fa = self.fatty_acyl_stack[-1]
curr_fa.num_carbon += len(self.tmp["post_adding"])
for pos in self.tmp["post_adding"]:
add_position(curr_fa, pos)
if type(curr_fa.double_bonds) == dict:
curr_fa.double_bonds = {(k + (k >= pos)): v for k, v in curr_fa.double_bonds.items()}
if type(self.fatty_acyl_stack[-1].double_bonds) == dict and len(self.fatty_acyl_stack[-1].double_bonds) > 0:
if sum(len(ct) > 0 for p, ct in self.fatty_acyl_stack[-1].double_bonds.items()) != len(self.fatty_acyl_stack[-1].double_bonds):
self.set_lipid_level(LipidLevel.STRUCTURE_DEFINED)
lipid_level_class = None
if self.level == LipidLevel.COMPLETE_STRUCTURE: lipid_level_class = LipidCompleteStructure
elif self.level == LipidLevel.FULL_STRUCTURE: lipid_level_class = LipidFullStructure
elif self.level == LipidLevel.STRUCTURE_DEFINED: lipid_level_class = LipidStructureDefined
elif self.level == LipidLevel.SN_POSITION: lipid_level_class = LipidSnPosition
elif self.level == LipidLevel.MOLECULAR_SPECIES: lipid_level_class = LipidMolecularSpecies
elif self.level == LipidLevel.SPECIES: lipid_level_class = LipidSpecies
headgroup = HeadGroup(self.headgroup)
self.content = LipidAdduct()
self.content.lipid = lipid_level_class(headgroup, self.fatty_acyl_stack)
| 42.430939
| 636
| 0.570339
|
4a169d1416909c47b1fa942049ce64b9a9c0d54f
| 1,531
|
py
|
Python
|
apps/modules/permission/apis/url_permission.py
|
yeayee/osroom
|
f7084843ea4b75505283f8b23da60471ba8fc9bb
|
[
"BSD-2-Clause"
] | 1
|
2019-05-12T14:54:40.000Z
|
2019-05-12T14:54:40.000Z
|
apps/modules/permission/apis/url_permission.py
|
yeayee/osroom
|
f7084843ea4b75505283f8b23da60471ba8fc9bb
|
[
"BSD-2-Clause"
] | null | null | null |
apps/modules/permission/apis/url_permission.py
|
yeayee/osroom
|
f7084843ea4b75505283f8b23da60471ba8fc9bb
|
[
"BSD-2-Clause"
] | null | null | null |
# -*-coding:utf-8-*-
from flask import request
from apps.configs.sys_config import METHOD_WARNING
from apps.core.blueprint import api
from apps.core.flask.login_manager import osr_login_required
from apps.core.flask.permission import permission_required
from apps.core.flask.response import response_format
from apps.modules.permission.process.url_permission import get_urls, get_url, update_url, add_url, delete_url
__author__ = "Allen Woo"
@api.route('/admin/url/permission', methods=['GET', 'POST', 'PUT', 'DELETE'])
@osr_login_required
@permission_required()
def api_url_permission():
"""
    GET:
        Fetch the system's web URLs
        type:<array>, type, one of: api, static, page
        pre:<int>, number of records per page, default 10
        page:<int>, page number, default 1
        keyword:<str>, search keyword
    POST:
        Add a page route
        url:<str>, only used for adding a page route
    PUT:
        Update a permission
        id:<str>, id
        method:<str>
        custom_permission:<array>, e.g. [1, 512, 128]
        login_auth:<int>, 0 or 1, whether login authentication is required (if the original route code does not require a logged-in request, this setting applies)
    DELETE:
        Delete manually added page routes
        ids:<array>
    :return:
"""
if request.c_method == "GET":
if request.argget.all("id"):
data = get_url()
else:
data = get_urls()
elif request.c_method == "POST":
data = add_url()
elif request.c_method == "PUT":
data = update_url()
elif request.c_method == "DELETE":
data = delete_url()
else:
data = {"msg_type": "w", "msg": METHOD_WARNING, "custom_status": 405}
return response_format(data)
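
# Illustrative usage sketch (not part of the original module): calling the
# endpoint above from an HTTP client.  The blueprint URL prefix, host, port
# and session cookie are assumptions; the query parameters mirror the
# docstring.
def _list_url_permissions_example():
    import requests  # assumes the requests package is installed

    resp = requests.get(
        "http://127.0.0.1:5000/api/admin/url/permission",  # "/api" prefix assumed
        params={"type": ["api"], "pre": 10, "page": 1, "keyword": "user"},
        cookies={"session": "<logged-in admin session cookie>"},
    )
    print(resp.json())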
| 28.351852
| 109
| 0.634226
|
4a169d9c285c53f3dcfad2fc64985dda159bb23d
| 2,483
|
py
|
Python
|
antiphishme/src/models/certs_model.py
|
TheArqsz/AntiPhishMe-backend
|
3ae38059e410152ae1976815c209829ac08f47a5
|
[
"MIT"
] | 1
|
2020-05-28T11:45:22.000Z
|
2020-05-28T11:45:22.000Z
|
antiphishme/src/models/certs_model.py
|
TheArqsz/AntiPhishMe-backend
|
3ae38059e410152ae1976815c209829ac08f47a5
|
[
"MIT"
] | 1
|
2021-03-31T19:56:26.000Z
|
2021-03-31T19:56:26.000Z
|
antiphishme/src/models/certs_model.py
|
TheArqsz/AntiPhishMe-backend
|
3ae38059e410152ae1976815c209829ac08f47a5
|
[
"MIT"
] | 2
|
2020-05-28T16:45:45.000Z
|
2021-09-07T14:16:44.000Z
|
import json
import datetime
import sqlalchemy
from antiphishme.src.helpers.consts import Const
from antiphishme.src.db_config import db
class Certs(db.Model):
__tablename__ = 'certs'
id = db.Column(db.Integer(), primary_key=True)
caid = db.Column(db.Integer(), unique=True, nullable=False)
is_bad = db.Column(db.Boolean(), default=False)
subject_organizationName = db.Column(db.String(160))
subject_countryName = db.Column(db.String(160))
issuer_commonName = db.Column(db.String(160))
registered_at = db.Column(db.DateTime())
multi_dns = db.Column(db.Integer(), nullable=False)
def json(self):
return {
'caid': self.caid,
'subject_organizationName': self.subject_organizationName,
'subject_countryName': self.subject_countryName,
'issuer_commonName': self.issuer_commonName,
'registered_at': self.registered_at,
'multi_dns': self.multi_dns,
'is_bad': self.is_bad
}
@staticmethod
def add_cert(
_caid,
_subject_organizationName,
_subject_countryName,
_issuer_commonName,
_registered_at,
_multi_dns,
_is_bad=False
):
new_cert = Certs(caid=_caid,
subject_organizationName=_subject_organizationName,
subject_countryName=_subject_countryName,
issuer_commonName=_issuer_commonName,
registered_at=_registered_at,
multi_dns=_multi_dns,
is_bad=_is_bad
)
db.session.add(new_cert)
try:
db.session.commit()
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
try:
return Certs.query.filter(Certs.caid == _caid).first().id
except AttributeError:
return -1
@staticmethod
def add_cert_td():
Certs.add_cert(4, "Google Inc", "US", "Google Internet Authority", datetime.datetime(2020, 1, 27, 2, 30, 30, 549000), 5)
Certs.add_cert(16419, Const.UNKNOWN_RESULTS_MESSAGE, Const.UNKNOWN_RESULTS_MESSAGE, "Let's Encrypt Authority X3", datetime.datetime(2020, 1, 12, 8, 2, 37, 702000), 1, _is_bad=True)
Certs.add_cert(16418, Const.UNKNOWN_RESULTS_MESSAGE, Const.UNKNOWN_RESULTS_MESSAGE, "Let's Encrypt Authority X3", datetime.datetime(2020, 1, 12, 8, 2, 37, 702000), 1, _is_bad=True)
@staticmethod
def get_all_certs():
return [Certs.json(b) for b in Certs.query.all()]
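
# Illustrative usage sketch (not part of the original module): inserting a row
# and reading it back.  The Flask app passed in is assumed to have been wired
# to `db` (see antiphishme.src.db_config); the certificate values are made up.
def _certs_usage_example(app):
    with app.app_context():
        db.create_all()
        cert_id = Certs.add_cert(
            _caid=12345,
            _subject_organizationName="Example Org",
            _subject_countryName="US",
            _issuer_commonName="Example CA",
            _registered_at=datetime.datetime(2021, 1, 1),
            _multi_dns=2,
        )
        print(cert_id, Certs.get_all_certs())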
| 36.514706
| 189
| 0.655256
|
4a169e3572aa2010c56b469e15c888c253654126
| 26,680
|
py
|
Python
|
panel/tests/test_param.py
|
rupakgoyal/panel-
|
4e1e01e1766ebfc2fc1efb409734fd51efc60c01
|
[
"BSD-3-Clause"
] | 1
|
2019-10-15T13:21:20.000Z
|
2019-10-15T13:21:20.000Z
|
panel/tests/test_param.py
|
rupakgoyal/panel-
|
4e1e01e1766ebfc2fc1efb409734fd51efc60c01
|
[
"BSD-3-Clause"
] | null | null | null |
panel/tests/test_param.py
|
rupakgoyal/panel-
|
4e1e01e1766ebfc2fc1efb409734fd51efc60c01
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import param
from bokeh.models import (
Div, Slider, Select, RangeSlider, MultiSelect, Row as BkRow,
CheckboxGroup, Toggle, Button, TextInput as BkTextInput,
Tabs as BkTabs, Column as BkColumn, TextInput)
from panel.pane import Pane, PaneBase, Matplotlib, Bokeh
from panel.layout import Tabs, Row
from panel.param import Param, ParamMethod, ParamFunction, JSONInit
from panel.widgets import LiteralInput
from panel._testing.util import mpl_available, mpl_figure
def test_instantiate_from_class():
class Test(param.Parameterized):
a = param.Number()
assert isinstance(Pane(Test), Param)
def test_instantiate_from_parameter():
class Test(param.Parameterized):
a = param.Number()
assert isinstance(Pane(Test.param.a), Param)
def test_instantiate_from_parameters():
class Test(param.Parameterized):
a = param.Number()
assert isinstance(Pane(Test.param), Param)
def test_instantiate_from_instance():
class Test(param.Parameterized):
a = param.Number()
assert isinstance(Pane(Test()), Param)
def test_instantiate_from_parameter_on_instance():
class Test(param.Parameterized):
a = param.Number()
assert isinstance(Pane(Test().param.a), Param)
def test_instantiate_from_parameters_on_instance():
class Test(param.Parameterized):
a = param.Number()
assert isinstance(Pane(Test().param), Param)
def test_param_pane_repr(document, comm):
class Test(param.Parameterized):
pass
assert repr(Pane(Test())) == 'Param(Test)'
def test_param_pane_repr_with_params(document, comm):
class Test(param.Parameterized):
a = param.Number()
b = param.Number()
assert repr(Pane(Test(), parameters=['a'])) == "Param(Test, parameters=['a'])"
def test_get_root(document, comm):
class Test(param.Parameterized):
pass
test = Test()
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
assert isinstance(model, BkColumn)
assert len(model.children) == 1
div = model.children[0]
assert isinstance(div, Div)
assert div.text == '<b>'+test.name[:-5]+'</b>'
def test_single_param(document, comm):
class Test(param.Parameterized):
a = param.Parameter(default=0)
test = Test()
test_pane = Pane(test.param.a)
model = test_pane.get_root(document, comm=comm)
assert isinstance(model, BkColumn)
assert len(model.children) == 1
widget = model.children[0]
assert isinstance(widget, TextInput)
assert widget.value == '0'
def test_get_root_tabs(document, comm):
class Test(param.Parameterized):
pass
test = Test()
test_pane = Pane(test, expand_layout=Tabs)
model = test_pane.get_root(document, comm=comm)
assert isinstance(model, BkTabs)
assert len(model.tabs) == 1
box = model.tabs[0].child
assert isinstance(box, BkColumn)
assert len(box.children) == 0
def test_number_param(document, comm):
class Test(param.Parameterized):
a = param.Number(default=1.2, bounds=(0, 5))
test = Test()
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
slider = model.children[1]
assert isinstance(slider, Slider)
assert slider.value == 1.2
assert slider.start == 0
assert slider.end == 5
assert slider.step == 0.1
assert slider.disabled == False
# Check changing param value updates widget
test.a = 3.3
assert slider.value == 3.3
# Check changing param attribute updates widget
a_param = test.param['a']
a_param.bounds = (0.1, 5.5)
assert slider.start == 0.1
assert slider.end == 5.5
a_param.constant = True
assert slider.disabled == True
# Ensure cleanup works
test_pane._cleanup(model)
a_param.constant = False
a_param.bounds = (-0.1, 3.8)
test.a = 0.5
assert slider.value == 3.3
assert slider.start == 0.1
assert slider.end == 5.5
assert slider.disabled == True
def test_boolean_param(document, comm):
class Test(param.Parameterized):
a = param.Boolean(default=False)
test = Test()
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
checkbox = model.children[1]
assert isinstance(checkbox, CheckboxGroup)
assert checkbox.labels == ['A']
assert checkbox.active == []
assert checkbox.disabled == False
# Check changing param value updates widget
test.a = True
assert checkbox.active == [0]
# Check changing param attribute updates widget
a_param = test.param['a']
a_param.constant = True
assert checkbox.disabled == True
# Ensure cleanup works
test_pane._cleanup(model)
a_param.constant = False
test.a = False
assert checkbox.active == [0]
assert checkbox.disabled == True
def test_range_param(document, comm):
class Test(param.Parameterized):
a = param.Range(default=(0.1, 0.5), bounds=(0, 1.1))
test = Test()
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
widget = model.children[1]
assert isinstance(widget, RangeSlider)
assert widget.start == 0
assert widget.end == 1.1
assert widget.value == (0.1, 0.5)
# Check changing param value updates widget
test.a = (0.2, 0.4)
assert widget.value == (0.2, 0.4)
# Check changing param attribute updates widget
a_param = test.param['a']
a_param.bounds = (0.1, 0.6)
assert widget.start == 0.1
assert widget.end == 0.6
a_param.constant = True
assert widget.disabled == True
# Ensure cleanup works
test_pane._cleanup(model)
a_param.constant = False
a_param.bounds = (-1, 1)
test.a = (0.05, 0.2)
assert widget.value == (0.2, 0.4)
assert widget.start == 0.1
assert widget.end == 0.6
assert widget.disabled == True
def test_integer_param(document, comm):
class Test(param.Parameterized):
a = param.Integer(default=2, bounds=(0, 5))
test = Test()
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
slider = model.children[1]
assert isinstance(slider, Slider)
assert slider.value == 2
assert slider.start == 0
assert slider.end == 5
assert slider.step == 1
assert slider.disabled == False
# Check changing param value updates widget
test.a = 3
assert slider.value == 3
# Check changing param attribute updates widget
a_param = test.param['a']
a_param.bounds = (1, 6)
assert slider.start == 1
assert slider.end == 6
a_param.constant = True
assert slider.disabled == True
# Ensure cleanup works
test_pane._cleanup(model)
a_param.constant = False
a_param.bounds = (-1, 7)
test.a = 1
assert slider.value == 3
assert slider.start == 1
assert slider.end == 6
assert slider.disabled == True
def test_object_selector_param(document, comm):
class Test(param.Parameterized):
a = param.ObjectSelector(default='b', objects=[1, 'b', 'c'])
test = Test()
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
slider = model.children[1]
assert isinstance(slider, Select)
assert slider.options == ['1', 'b', 'c']
assert slider.value == 'b'
assert slider.disabled == False
# Check changing param value updates widget
test.a = 1
assert slider.value == '1'
# Check changing param attribute updates widget
a_param = test.param['a']
a_param.objects = ['c', 'd', 1]
assert slider.options == ['c', 'd', '1']
a_param.constant = True
assert slider.disabled == True
# Ensure cleanup works
test_pane._cleanup(model)
a_param.constant = False
a_param.objects = [1, 'c', 'd']
test.a = 'd'
assert slider.value == '1'
assert slider.options == ['c', 'd', '1']
assert slider.disabled == True
def test_list_selector_param(document, comm):
class Test(param.Parameterized):
a = param.ListSelector(default=['b', 1], objects=[1, 'b', 'c'])
test = Test()
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
slider = model.children[1]
assert isinstance(slider, MultiSelect)
assert slider.options == ['1', 'b', 'c']
assert slider.value == ['b', '1']
assert slider.disabled == False
# Check changing param value updates widget
test.a = ['c', 1]
assert slider.value == ['c', '1']
# Check changing param attribute updates widget
a_param = test.param['a']
a_param.objects = ['c', 'd', 1]
assert slider.options == ['c', 'd', '1']
a_param.constant = True
assert slider.disabled == True
# Ensure cleanup works
test_pane._cleanup(model)
a_param.constant = False
a_param.objects = [1, 'c', 'd']
test.a = ['d']
assert slider.value == ['c', '1']
assert slider.options == ['c', 'd', '1']
assert slider.disabled == True
def test_action_param(document, comm):
class Test(param.Parameterized):
a = param.Action(lambda x: x.b.append(1))
b = param.List(default=[])
test = Test()
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
slider = model.children[1]
assert isinstance(slider, Button)
def test_explicit_params(document, comm):
class Test(param.Parameterized):
a = param.Boolean(default=False)
b = param.Integer(default=1)
test = Test()
test_pane = Pane(test, parameters=['a'])
model = test_pane.get_root(document, comm=comm)
assert len(model.children) == 2
assert isinstance(model.children[1], CheckboxGroup)
def test_param_precedence(document, comm):
class Test(param.Parameterized):
a = param.Number(default=1.2, bounds=(0, 5))
test = Test()
test_pane = Pane(test)
# Check changing precedence attribute hides and shows widget
a_param = test.param['a']
a_param.precedence = -1
assert test_pane._widgets['a'] not in test_pane._widget_box.objects
a_param.precedence = 1
assert test_pane._widgets['a'] in test_pane._widget_box.objects
def test_param_label(document, comm):
class Test(param.Parameterized):
a = param.Number(default=1.2, bounds=(0, 5), label='A')
b = param.Action(label='B')
test = Test()
test_pane = Pane(test)
# Check updating label changes widget name
a_param = test.param['a']
a_param.label = 'B'
assert test_pane._widgets['a'].name == 'B'
b_param = test.param['b']
b_param.label = 'C'
assert test_pane._widgets['b'].name == 'C'
def test_param_precedence_ordering(document, comm):
class Test(param.Parameterized):
a = param.Number(default=1.2, bounds=(0, 5), precedence=-1)
b = param.Boolean(default=True, precedence=1)
test = Test()
test_pane = Pane(test)
# Check changing precedence attribute hides and shows widget
a_param = test.param['a']
a_param.precedence = 2
assert test_pane._widget_box.objects == [test_pane._widgets[w] for w in ('name', 'b', 'a')]
a_param.precedence = 1
assert test_pane._widget_box.objects == [test_pane._widgets[w] for w in ('name', 'a', 'b')]
def test_param_step(document, comm):
class Test(param.Parameterized):
a = param.Number(default=1.2, bounds=(0, 5), step=0.1)
test = Test()
test_pane = Pane(test)
assert test_pane._widgets['a'].step == 0.1
a_param = test.param['a']
a_param.step = 0.25
assert test_pane._widgets['a'].step == 0.25
def test_replace_param_object(document, comm):
class Test(param.Parameterized):
a = param.Number(bounds=(0, 10))
pane = Param()
model = pane.get_root(document, comm=comm)
assert model.children == []
pane.object = Test()
assert len(model.children) == 2
title, widget = model.children
assert isinstance(title, Div)
assert title.text == '<b>Test</b>'
assert isinstance(widget, Slider)
assert widget.start == 0
assert widget.end == 10
def test_set_parameters(document, comm):
class Test(param.Parameterized):
a = param.Number(bounds=(0, 10))
b = param.String(default='A')
pane = Param(Test())
model = pane.get_root(document, comm=comm)
assert len(model.children) == 3
title, slider, text = model.children
assert isinstance(title, Div)
assert isinstance(slider, Slider)
assert isinstance(text, TextInput)
pane.parameters = ['b']
assert len(model.children) == 2
title, text = model.children
assert isinstance(title, Div)
assert isinstance(text, TextInput)
def test_set_display_threshold(document, comm):
class Test(param.Parameterized):
a = param.Number(bounds=(0, 10), precedence=1)
b = param.String(default='A', precedence=2)
pane = Param(Test())
model = pane.get_root(document, comm=comm)
assert len(model.children) == 3
title, slider, text = model.children
assert isinstance(title, Div)
assert isinstance(slider, Slider)
assert isinstance(text, TextInput)
pane.display_threshold = 1.5
assert len(model.children) == 2
title, text = model.children
assert isinstance(title, Div)
assert isinstance(text, TextInput)
def test_set_widgets(document, comm):
class Test(param.Parameterized):
a = param.Number(default=1, bounds=(0, 10), precedence=1)
b = param.String(default='A', precedence=2)
pane = Param(Test())
model = pane.get_root(document, comm=comm)
assert len(model.children) == 3
title, slider, text = model.children
assert isinstance(title, Div)
assert isinstance(slider, Slider)
assert isinstance(text, TextInput)
pane.widgets = {'a': LiteralInput(value=1, type=(float, int))}
assert len(model.children) == 3
title, number, text = model.children
assert isinstance(title, Div)
assert isinstance(number, TextInput)
assert isinstance(text, TextInput)
pane.widgets = {'a': {'height':100}}
assert len(model.children) == 3
title, number, text = model.children
assert isinstance(title, Div)
assert isinstance(number, Slider)
assert number.height == 100
assert isinstance(text, TextInput)
pane.widgets = {'a': {'type': LiteralInput, 'height':100}}
assert len(model.children) == 3
title, number, text = model.children
assert isinstance(title, Div)
assert isinstance(number, TextInput)
assert number.height == 100
assert isinstance(text, TextInput)
def test_set_show_name(document, comm):
class Test(param.Parameterized):
a = param.Number(bounds=(0, 10))
pane = Param(Test())
model = pane.get_root(document, comm=comm)
assert len(model.children) == 2
title, widget = model.children
assert isinstance(title, Div)
assert isinstance(widget, Slider)
pane.show_name = False
assert len(model.children) == 1
assert isinstance(model.children[0], Slider)
def test_set_show_labels(document, comm):
class Test(param.Parameterized):
a = param.Number(bounds=(0, 10))
pane = Param(Test())
model = pane.get_root(document, comm=comm)
assert len(model.children) == 2
title, widget = model.children
assert isinstance(title, Div)
assert isinstance(widget, Slider)
assert widget.title == 'A'
pane.show_labels = False
assert len(model.children) == 2
assert isinstance(model.children[1], Slider)
assert model.children[1].title == ''
def test_expand_param_subobject(document, comm):
class Test(param.Parameterized):
a = param.Parameter()
test = Test(a=Test(name='Nested'))
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
toggle = model.children[1].children[1]
assert isinstance(toggle, Toggle)
# Expand subpane
test_pane._widgets['a'][1].value = True
assert len(model.children) == 3
_, _, subpanel = test_pane.layout.objects
col = model.children[2]
assert isinstance(col, BkColumn)
assert isinstance(col, BkColumn)
assert len(col.children) == 2
div, widget = col.children
assert div.text == '<b>Nested</b>'
assert isinstance(widget, BkTextInput)
# Collapse subpanel
test_pane._widgets['a'][1].value = False
assert len(model.children) == 2
def test_switch_param_subobject(document, comm):
class Test(param.Parameterized):
a = param.ObjectSelector()
o1 = Test(name='Subobject 1')
o2 = Test(name='Subobject 2')
Test.param['a'].objects = [o1, o2, 3]
test = Test(a=o1, name='Nested')
test_pane = Pane(test)
model = test_pane.get_root(document, comm=comm)
toggle = model.children[1].children[1]
assert isinstance(toggle, Toggle)
# Expand subpane
test_pane._widgets['a'][1].value = True
assert len(model.children) == 3
_, _, subpanel = test_pane.layout.objects
col = model.children[2]
assert isinstance(col, BkColumn)
assert len(col.children) == 2
div, row = col.children
assert div.text == '<b>Subobject 1</b>'
assert isinstance(row.children[0], Select)
# Switch subobject
test_pane._widgets['a'][0].value = o2
_, _, subpanel = test_pane.layout.objects
col = model.children[2]
assert isinstance(col, BkColumn)
assert len(col.children) == 2
div, row = col.children
assert div.text == '<b>Subobject 2</b>'
assert isinstance(row.children[0], Select)
# Collapse subpanel
test_pane._widgets['a'][1].value = False
assert len(model.children) == 2
assert subpanel._models == {}
def test_expand_param_subobject_into_row(document, comm):
class Test(param.Parameterized):
a = param.Parameter()
test = Test(a=Test(name='Nested'))
row = Row()
test_pane = Pane(test, expand_layout=row)
layout = Row(test_pane, row)
model = layout.get_root(document, comm=comm)
toggle = model.children[0].children[1].children[1]
assert isinstance(toggle, Toggle)
# Expand subpane
test_pane._widgets['a'][1].value = True
assert len(model.children) == 2
subpanel = row.objects[0]
row = model.children[1]
assert isinstance(row, BkRow)
assert len(row.children) == 1
box = row.children[0]
assert isinstance(box, BkColumn)
assert len(box.children) == 2
div, widget = box.children
assert div.text == '<b>Nested</b>'
assert isinstance(widget, BkTextInput)
# Collapse subpanel
test_pane._widgets['a'][1].value = False
assert len(row.children) == 0
assert subpanel._models == {}
def test_expand_param_subobject_expand(document, comm):
class Test(param.Parameterized):
a = param.Parameter()
test = Test(a=Test(name='Nested'))
test_pane = Pane(test, expand=True, expand_button=True)
model = test_pane.get_root(document, comm=comm)
toggle = model.children[1].children[1]
assert isinstance(toggle, Toggle)
# Expand subpane
assert len(model.children) == 3
_, _, subpanel = test_pane.layout.objects
col = model.children[2]
assert isinstance(col, BkColumn)
assert len(col.children) == 2
div, widget = col.children
assert div.text == '<b>Nested</b>'
assert isinstance(widget, BkTextInput)
# Collapse subpanel
test_pane._widgets['a'][1].value = False
assert len(model.children) == 2
assert subpanel._models == {}
def test_param_subobject_expand_no_toggle(document, comm):
class Test(param.Parameterized):
a = param.Parameter()
test = Test(a=Test(name='Nested'))
test_pane = Pane(test, expand=True,
expand_button=False)
model = test_pane.get_root(document, comm=comm)
# Assert no toggle was added
assert len(model.children) == 3
# Expand subpane
_, _, subpanel = test_pane.layout.objects
div, widget = model.children[2].children
assert div.text == '<b>Nested</b>'
assert isinstance(widget, BkTextInput)
def test_expand_param_subobject_tabs(document, comm):
class Test(param.Parameterized):
abc = param.Parameter()
test = Test(abc=Test(name='Nested'), name='A')
test_pane = Pane(test, expand_layout=Tabs)
model = test_pane.get_root(document, comm=comm)
toggle = model.tabs[0].child.children[0].children[1]
assert isinstance(toggle, Toggle)
# Expand subpanel
test_pane._widgets['abc'][1].value = True
assert len(model.tabs) == 2
_, subpanel = test_pane.layout.objects
subtabs = model.tabs[1].child
assert model.tabs[1].title == 'Abc'
assert isinstance(subtabs, BkTabs)
assert len(subtabs.tabs) == 1
assert subtabs.tabs[0].title == 'Nested'
box = subtabs.tabs[0].child
assert isinstance(box, BkColumn)
assert len(box.children) == 1
widget = box.children[0]
assert isinstance(widget, BkTextInput)
# Collapse subpanel
test_pane._widgets['abc'][1].value = False
assert len(model.tabs) == 1
class View(param.Parameterized):
a = param.Integer(default=0)
b = param.Parameter()
@param.depends('a')
def view(self):
return Div(text='%d' % self.a)
@param.depends('b.param')
def subobject_view(self):
return Div(text='%d' % self.b.a)
@param.depends('a')
def mpl_view(self):
return mpl_figure()
@param.depends('a')
def mixed_view(self):
return self.view() if (self.a % 2) else self.mpl_view()
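
# Illustrative sketch (not part of the original test module): the View class
# above can be placed straight into a Panel layout, which wraps `view` (a
# param.depends method) in a ParamMethod pane so it re-renders whenever `a`
# changes.  This helper is hypothetical and is not used by the tests below.
def _serve_view_example():
    import panel as pn

    v = View(a=3)
    layout = pn.Row(v.param.a, v.view)  # widget for `a` + auto-updating view
    return layout  # layout.show() or pn.serve(layout) would display it
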
def test_get_param_function_pane_type():
test = View()
def view(a):
return Div(text='%d' % a)
assert PaneBase.get_pane_type(view) is not ParamFunction
assert PaneBase.get_pane_type(param.depends(test.param.a)(view)) is ParamFunction
def test_param_function_pane(document, comm):
test = View()
@param.depends(test.param.a)
def view(a):
return Div(text='%d' % a)
pane = Pane(view)
inner_pane = pane._pane
assert isinstance(inner_pane, Bokeh)
# Create pane
row = pane.get_root(document, comm=comm)
assert isinstance(row, BkRow)
assert len(row.children) == 1
inner_row = row.children[0]
model = inner_row.children[0]
assert pane._models[row.ref['id']][0] is inner_row
assert isinstance(model, Div)
assert model.text == '0'
# Update pane
test.a = 5
new_model = inner_row.children[0]
assert inner_pane is pane._pane
assert new_model.text == '5'
assert pane._models[row.ref['id']][0] is inner_row
# Cleanup pane
pane._cleanup(row)
assert pane._models == {}
assert inner_pane._models == {}
def test_get_param_method_pane_type():
assert PaneBase.get_pane_type(View().view) is ParamMethod
def test_param_method_pane(document, comm):
test = View()
pane = Pane(test.view)
inner_pane = pane._pane
assert isinstance(inner_pane, Bokeh)
# Create pane
row = pane.get_root(document, comm=comm)
assert isinstance(row, BkRow)
assert len(row.children) == 1
inner_row = row.children[0]
model = inner_row.children[0]
assert pane._models[row.ref['id']][0] is inner_row
assert isinstance(model, Div)
assert model.text == '0'
# Update pane
test.a = 5
new_model = inner_row.children[0]
assert inner_pane is pane._pane
assert new_model.text == '5'
assert pane._models[row.ref['id']][0] is inner_row
# Cleanup pane
pane._cleanup(row)
assert pane._models == {}
assert inner_pane._models == {}
def test_param_method_pane_subobject(document, comm):
subobject = View(name='Nested', a=42)
test = View(b=subobject)
pane = Pane(test.subobject_view)
inner_pane = pane._pane
assert isinstance(inner_pane, Bokeh)
# Create pane
row = pane.get_root(document, comm=comm)
assert isinstance(row, BkRow)
assert len(row.children) == 1
inner_row = row.children[0]
model = inner_row.children[0]
assert isinstance(model, Div)
assert model.text == '42'
# Ensure that subobject is being watched
watchers = pane._callbacks
assert any(w.inst is subobject for w in watchers)
assert pane._models[row.ref['id']][0] is inner_row
# Ensure that switching the subobject triggers update in watchers
new_subobject = View(name='Nested', a=42)
test.b = new_subobject
assert pane._models[row.ref['id']][0] is inner_row
watchers = pane._callbacks
assert not any(w.inst is subobject for w in watchers)
assert any(w.inst is new_subobject for w in watchers)
# Cleanup pane
pane._cleanup(row)
assert pane._models == {}
assert inner_pane._models == {}
@mpl_available
def test_param_method_pane_mpl(document, comm):
test = View()
pane = Pane(test.mpl_view)
inner_pane = pane._pane
assert isinstance(inner_pane, Matplotlib)
# Create pane
row = pane.get_root(document, comm=comm)
assert isinstance(row, BkRow)
assert len(row.children) == 1
inner_row = row.children[0]
model = inner_row.children[0]
assert pane._models[row.ref['id']][0] is inner_row
text = model.text
# Update pane
test.a = 5
new_model = inner_row.children[0]
assert inner_pane is pane._pane
assert new_model is model
assert new_model.text != text
assert pane._models[row.ref['id']][0] is inner_row
# Cleanup pane
pane._cleanup(row)
assert pane._models == {}
assert inner_pane._models == {}
@mpl_available
def test_param_method_pane_changing_type(document, comm):
test = View()
pane = Pane(test.mixed_view)
inner_pane = pane._pane
assert isinstance(inner_pane, Matplotlib)
# Create pane
row = pane.get_root(document, comm=comm)
assert isinstance(row, BkRow)
assert len(row.children) == 1
inner_row = row.children[0]
model = inner_row.children[0]
text = model.text
assert text.startswith('<img src')
# Update pane
test.a = 5
new_model = inner_row.children[0]
new_pane = pane._pane
assert isinstance(new_pane, Bokeh)
assert isinstance(new_model, Div)
assert new_model.text != text
# Cleanup pane
new_pane._cleanup(row)
assert new_pane._models == {}
def test_jsoninit_class_from_env_var():
os.environ['PARAM_JSON_INIT'] = '{"a": 1}'
json_init = JSONInit()
class Test(param.Parameterized):
a = param.Integer()
json_init(Test)
assert Test.a == 1
del os.environ['PARAM_JSON_INIT']
def test_jsoninit_instance_from_env_var():
os.environ['PARAM_JSON_INIT'] = '{"a": 2}'
json_init = JSONInit()
class Test(param.Parameterized):
a = param.Integer()
test = Test()
json_init(test)
assert test.a == 2
del os.environ['PARAM_JSON_INIT']
| 26.68
| 95
| 0.658808
|
4a169e4af95a6c920a56c7664af43f85dfed03af
| 3,786
|
py
|
Python
|
tests/unit/extraction_rules/test_audit_policy.py
|
luisr-escobar/dynatrace-gcp-function
|
b404873eb59bf79993b820aa80248d26882185f5
|
[
"Apache-2.0"
] | 19
|
2020-09-30T13:52:39.000Z
|
2022-02-23T12:43:53.000Z
|
tests/unit/extraction_rules/test_audit_policy.py
|
luisr-escobar/dynatrace-gcp-function
|
b404873eb59bf79993b820aa80248d26882185f5
|
[
"Apache-2.0"
] | 87
|
2020-10-02T19:59:43.000Z
|
2022-03-31T11:54:42.000Z
|
tests/unit/extraction_rules/test_audit_policy.py
|
luisr-escobar/dynatrace-gcp-function
|
b404873eb59bf79993b820aa80248d26882185f5
|
[
"Apache-2.0"
] | 13
|
2020-10-27T08:14:46.000Z
|
2022-01-09T23:45:44.000Z
|
# Copyright 2020 Dynatrace LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from datetime import datetime
from typing import NewType, Any
from lib.logs import logs_processor
from lib.logs.metadata_engine import ATTRIBUTE_GCP_PROJECT_ID, ATTRIBUTE_GCP_RESOURCE_TYPE, ATTRIBUTE_SEVERITY, \
ATTRIBUTE_CLOUD_PROVIDER, ATTRIBUTE_CONTENT, ATTRIBUTE_TIMESTAMP, \
ATTRIBUTE_DT_LOGPATH, ATTRIBUTE_AUDIT_IDENTITY, ATTRIBUTE_AUDIT_ACTION, ATTRIBUTE_AUDIT_RESULT
from unit.extraction_rules.common import TEST_LOGS_PROCESSING_CONTEXT
MonkeyPatchFixture = NewType("MonkeyPatchFixture", Any)
timestamp = datetime.utcnow().isoformat() + "Z"
# From https://cloud.google.com/vpc-service-controls/docs/troubleshooting
record = {
"insertId": "222lvajc6f7",
"logName": "projects/dynatrace-gcp-extension/logs/cloudaudit.googleapis.com%2Fpolicy",
"protoPayload": {
"@type": "type.googleapis.com/google.cloud.audit.AuditLog",
"authenticationInfo": {
"principalEmail": "someone@google.com"
},
"metadata": {
"@type": "type.googleapis.com/google.cloud.audit.VpcServiceControlAuditMetadata",
"resourceNames": [
"projects/_"
],
"violationReason": "NO_MATCHING_ACCESS_LEVEL"
},
"methodName": "google.storage.NoBillingOk",
"requestMetadata": {
"callerIp": "x.x.x.x",
"destinationAttributes": {},
"requestAttributes": {}
},
"resourceName": "projects/690885588241",
"serviceName": "storage.googleapis.com",
"status": {
"code": 7,
"details": [
{
"@type": "type.googleapis.com/google.rpc.PreconditionFailure",
"violations": [
{
"type": "VPC_SERVICE_CONTROLS"
}
]
}
],
"message": "Request is prohibited by organization's policy"
}
},
"receiveTimestamp": "2018-11-27T21:40:43.823209571Z",
"resource": {
"labels": {
"method": "google.storage.NoBillingOk",
"project_id": "dynatrace-gcp-extension",
"service": "storage.googleapis.com"
},
"type": "audited_resource"
},
"severity": "ERROR",
"timestamp": timestamp
}
expected_output_list = [
{
ATTRIBUTE_CLOUD_PROVIDER: 'gcp',
ATTRIBUTE_GCP_PROJECT_ID: 'dynatrace-gcp-extension',
ATTRIBUTE_GCP_RESOURCE_TYPE: 'audited_resource',
ATTRIBUTE_TIMESTAMP: timestamp,
ATTRIBUTE_CONTENT: json.dumps(record),
ATTRIBUTE_DT_LOGPATH: 'projects/dynatrace-gcp-extension/logs/cloudaudit.googleapis.com%2Fpolicy',
ATTRIBUTE_AUDIT_IDENTITY: 'someone@google.com',
ATTRIBUTE_AUDIT_ACTION: 'google.storage.NoBillingOk',
ATTRIBUTE_AUDIT_RESULT: 'Failed.PermissionDenied',
ATTRIBUTE_SEVERITY: 'ERROR',
}
]
def test_extraction():
for entry in expected_output_list:
actual_output = logs_processor._create_dt_log_payload(TEST_LOGS_PROCESSING_CONTEXT, entry[ATTRIBUTE_CONTENT])
assert actual_output == entry
| 37.485149
| 117
| 0.641838
|
4a169f719da28befbe6651b3d3f58f6c180ddb76
| 187,654
|
py
|
Python
|
lib/sqlalchemy/sql/compiler.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/sql/compiler.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/sql/compiler.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
# sql/compiler.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
from __future__ import annotations
import collections
import collections.abc as collections_abc
import contextlib
import itertools
import operator
import re
from time import perf_counter
from . import base
from . import coercions
from . import crud
from . import elements
from . import functions
from . import operators
from . import schema
from . import selectable
from . import sqltypes
from .base import NO_ARG
from .base import prefix_anon_map
from .elements import quoted_name
from .. import exc
from .. import util
RESERVED_WORDS = set(
[
"all",
"analyse",
"analyze",
"and",
"any",
"array",
"as",
"asc",
"asymmetric",
"authorization",
"between",
"binary",
"both",
"case",
"cast",
"check",
"collate",
"column",
"constraint",
"create",
"cross",
"current_date",
"current_role",
"current_time",
"current_timestamp",
"current_user",
"default",
"deferrable",
"desc",
"distinct",
"do",
"else",
"end",
"except",
"false",
"for",
"foreign",
"freeze",
"from",
"full",
"grant",
"group",
"having",
"ilike",
"in",
"initially",
"inner",
"intersect",
"into",
"is",
"isnull",
"join",
"leading",
"left",
"like",
"limit",
"localtime",
"localtimestamp",
"natural",
"new",
"not",
"notnull",
"null",
"off",
"offset",
"old",
"on",
"only",
"or",
"order",
"outer",
"overlaps",
"placing",
"primary",
"references",
"right",
"select",
"session_user",
"set",
"similar",
"some",
"symmetric",
"table",
"then",
"to",
"trailing",
"true",
"union",
"unique",
"user",
"using",
"verbose",
"when",
"where",
]
)
LEGAL_CHARACTERS = re.compile(r"^[A-Z0-9_$]+$", re.I)
LEGAL_CHARACTERS_PLUS_SPACE = re.compile(r"^[A-Z0-9_ $]+$", re.I)
ILLEGAL_INITIAL_CHARACTERS = {str(x) for x in range(0, 10)}.union(["$"])
FK_ON_DELETE = re.compile(
r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I
)
FK_ON_UPDATE = re.compile(
r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I
)
FK_INITIALLY = re.compile(r"^(?:DEFERRED|IMMEDIATE)$", re.I)
BIND_PARAMS = re.compile(r"(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])", re.UNICODE)
BIND_PARAMS_ESC = re.compile(r"\x5c(:[\w\$]*)(?![:\w\$])", re.UNICODE)
BIND_TEMPLATES = {
"pyformat": "%%(%(name)s)s",
"qmark": "?",
"format": "%%s",
"numeric": ":[_POSITION]",
"named": ":%(name)s",
}
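
# Illustrative sketch (not part of the original module): how the templates
# above surface in rendered SQL for two real dialects - postgresql defaults
# to the "pyformat" paramstyle, sqlite to "qmark".  The helper itself is
# made up.
def _paramstyle_rendering_example():
    from sqlalchemy import column, select
    from sqlalchemy.dialects import postgresql, sqlite

    stmt = select(column("x")).where(column("x") == 7)
    print(stmt.compile(dialect=postgresql.dialect()))  # ... WHERE x = %(x_1)s
    print(stmt.compile(dialect=sqlite.dialect()))      # ... WHERE x = ?
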
_BIND_TRANSLATE_RE = re.compile(r"[%\(\):\[\]]")
_BIND_TRANSLATE_CHARS = dict(zip("%():[]", "PAZC__"))
OPERATORS = {
# binary
operators.and_: " AND ",
operators.or_: " OR ",
operators.add: " + ",
operators.mul: " * ",
operators.sub: " - ",
operators.mod: " % ",
operators.neg: "-",
operators.lt: " < ",
operators.le: " <= ",
operators.ne: " != ",
operators.gt: " > ",
operators.ge: " >= ",
operators.eq: " = ",
operators.is_distinct_from: " IS DISTINCT FROM ",
operators.is_not_distinct_from: " IS NOT DISTINCT FROM ",
operators.concat_op: " || ",
operators.match_op: " MATCH ",
operators.not_match_op: " NOT MATCH ",
operators.in_op: " IN ",
operators.not_in_op: " NOT IN ",
operators.comma_op: ", ",
operators.from_: " FROM ",
operators.as_: " AS ",
operators.is_: " IS ",
operators.is_not: " IS NOT ",
operators.collate: " COLLATE ",
# unary
operators.exists: "EXISTS ",
operators.distinct_op: "DISTINCT ",
operators.inv: "NOT ",
operators.any_op: "ANY ",
operators.all_op: "ALL ",
# modifiers
operators.desc_op: " DESC",
operators.asc_op: " ASC",
operators.nulls_first_op: " NULLS FIRST",
operators.nulls_last_op: " NULLS LAST",
}
FUNCTIONS = {
functions.coalesce: "coalesce",
functions.current_date: "CURRENT_DATE",
functions.current_time: "CURRENT_TIME",
functions.current_timestamp: "CURRENT_TIMESTAMP",
functions.current_user: "CURRENT_USER",
functions.localtime: "LOCALTIME",
functions.localtimestamp: "LOCALTIMESTAMP",
functions.random: "random",
functions.sysdate: "sysdate",
functions.session_user: "SESSION_USER",
functions.user: "USER",
functions.cube: "CUBE",
functions.rollup: "ROLLUP",
functions.grouping_sets: "GROUPING SETS",
}
EXTRACT_MAP = {
"month": "month",
"day": "day",
"year": "year",
"second": "second",
"hour": "hour",
"doy": "doy",
"minute": "minute",
"quarter": "quarter",
"dow": "dow",
"week": "week",
"epoch": "epoch",
"milliseconds": "milliseconds",
"microseconds": "microseconds",
"timezone_hour": "timezone_hour",
"timezone_minute": "timezone_minute",
}
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: "UNION",
selectable.CompoundSelect.UNION_ALL: "UNION ALL",
selectable.CompoundSelect.EXCEPT: "EXCEPT",
selectable.CompoundSelect.EXCEPT_ALL: "EXCEPT ALL",
selectable.CompoundSelect.INTERSECT: "INTERSECT",
selectable.CompoundSelect.INTERSECT_ALL: "INTERSECT ALL",
}
RM_RENDERED_NAME = 0
RM_NAME = 1
RM_OBJECTS = 2
RM_TYPE = 3
ExpandedState = collections.namedtuple(
"ExpandedState",
[
"statement",
"additional_parameters",
"processors",
"positiontup",
"parameter_expansion",
],
)
NO_LINTING = util.symbol("NO_LINTING", "Disable all linting.", canonical=0)
COLLECT_CARTESIAN_PRODUCTS = util.symbol(
"COLLECT_CARTESIAN_PRODUCTS",
"Collect data on FROMs and cartesian products and gather "
"into 'self.from_linter'",
canonical=1,
)
WARN_LINTING = util.symbol(
"WARN_LINTING", "Emit warnings for linters that find problems", canonical=2
)
FROM_LINTING = util.symbol(
"FROM_LINTING",
"Warn for cartesian products; "
"combines COLLECT_CARTESIAN_PRODUCTS and WARN_LINTING",
canonical=COLLECT_CARTESIAN_PRODUCTS | WARN_LINTING,
)
class FromLinter(collections.namedtuple("FromLinter", ["froms", "edges"])):
def lint(self, start=None):
froms = self.froms
if not froms:
return None, None
edges = set(self.edges)
the_rest = set(froms)
if start is not None:
start_with = start
the_rest.remove(start_with)
else:
start_with = the_rest.pop()
stack = collections.deque([start_with])
while stack and the_rest:
node = stack.popleft()
the_rest.discard(node)
# comparison of nodes in edges here is based on hash equality, as
# there are "annotated" elements that match the non-annotated ones.
# to remove the need for in-python hash() calls, use native
# containment routines (e.g. "node in edge", "edge.index(node)")
to_remove = {edge for edge in edges if node in edge}
# appendleft the node in each edge that is not
# the one that matched.
stack.extendleft(edge[not edge.index(node)] for edge in to_remove)
edges.difference_update(to_remove)
# FROMS left over? boom
if the_rest:
return the_rest, start_with
else:
return None, None
def warn(self):
the_rest, start_with = self.lint()
# FROMS left over? boom
if the_rest:
froms = the_rest
if froms:
template = (
"SELECT statement has a cartesian product between "
"FROM element(s) {froms} and "
'FROM element "{start}". Apply join condition(s) '
"between each element to resolve."
)
froms_str = ", ".join(
'"{elem}"'.format(elem=self.froms[from_])
for from_ in froms
)
message = template.format(
froms=froms_str, start=self.froms[start_with]
)
util.warn(message)
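
# Illustrative sketch (not part of the original module): the linter above is
# what produces the "cartesian product" warning when an engine is created
# with enable_from_linting=True (the default in SQLAlchemy 1.4+).  The table
# layout below is made up for the example.
def _from_linting_example():
    from sqlalchemy import Column, Integer, MetaData, Table, create_engine, select

    metadata = MetaData()
    users = Table("users", metadata, Column("id", Integer, primary_key=True))
    addresses = Table("addresses", metadata, Column("user_id", Integer))
    engine = create_engine("sqlite://")
    metadata.create_all(engine)
    with engine.connect() as conn:
        # the two FROM elements share no join condition, so FromLinter.warn()
        # reports a cartesian product between "users" and "addresses"
        conn.execute(select(users.c.id, addresses.c.user_id)).all()
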
class Compiled:
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
_cached_metadata = None
_result_columns = None
schema_translate_map = None
execution_options = util.EMPTY_DICT
"""
Execution options propagated from the statement. In some cases,
sub-elements of the statement can modify these.
"""
_annotations = util.EMPTY_DICT
compile_state = None
"""Optional :class:`.CompileState` object that maintains additional
state used by the compiler.
Major executable objects such as :class:`_expression.Insert`,
:class:`_expression.Update`, :class:`_expression.Delete`,
:class:`_expression.Select` will generate this
state when compiled in order to calculate additional information about the
object. For the top level object that is to be executed, the state can be
stored here where it can also have applicability towards result set
processing.
.. versionadded:: 1.4
"""
cache_key = None
_gen_time = None
def __init__(
self,
dialect,
statement,
schema_translate_map=None,
render_schema_translate=False,
compile_kwargs=util.immutabledict(),
):
"""Construct a new :class:`.Compiled` object.
:param dialect: :class:`.Dialect` to compile against.
:param statement: :class:`_expression.ClauseElement` to be compiled.
:param schema_translate_map: dictionary of schema names to be
translated when forming the resultant SQL
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
"""
self.dialect = dialect
self.preparer = self.dialect.identifier_preparer
if schema_translate_map:
self.schema_translate_map = schema_translate_map
self.preparer = self.preparer._with_schema_translate(
schema_translate_map
)
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
self._annotations = statement._annotations
if self.can_execute:
self.execution_options = statement._execution_options
self.string = self.process(self.statement, **compile_kwargs)
if render_schema_translate:
self.string = self.preparer._render_schema_translates(
self.string, schema_translate_map
)
self._gen_time = perf_counter()
def _execute_on_connection(
self, connection, distilled_params, execution_options
):
if self.can_execute:
return connection._execute_compiled(
self, distilled_params, execution_options
)
else:
raise exc.ObjectNotExecutableError(self.statement)
def visit_unsupported_compilation(self, element, err):
raise exc.UnsupportedCompilationError(self, type(element)) from err
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ""
def construct_params(self, params=None, extracted_parameters=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
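
# Illustrative sketch (not part of the original module): obtaining a Compiled
# object from a statement and reading the two things described above - the
# generated SQL text (__str__) and the bound parameters (construct_params /
# .params).  The statement is made up for the example.
def _compiled_inspection_example():
    from sqlalchemy import bindparam, column, select

    stmt = select(column("x")).where(column("x") == bindparam("val", value=5))
    compiled = stmt.compile()  # default dialect when none is supplied
    print(str(compiled))       # e.g. "SELECT x \nWHERE x = :val"
    print(compiled.params)     # {'val': 5}
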
class TypeCompiler(util.EnsureKWArg):
"""Produces DDL specification for TypeEngine objects."""
ensure_kwarg = r"visit_\w+"
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_, **kw):
if (
type_._variant_mapping
and self.dialect.name in type_._variant_mapping
):
type_ = type_._variant_mapping[self.dialect.name]
return type_._compiler_dispatch(self, **kw)
def visit_unsupported_compilation(self, element, err, **kw):
raise exc.UnsupportedCompilationError(self, element) from err
# this was a Visitable, but to allow accurate detection of
# column elements this is actually a column element
class _CompileLabel(elements.CompilerColumnElement):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = "label"
__slots__ = "element", "name", "_alt_names"
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
def self_group(self, **kw):
return self
class SQLCompiler(Compiled):
"""Default implementation of :class:`.Compiled`.
Compiles :class:`_expression.ClauseElement` objects into SQL strings.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
isplaintext = False
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (i.e. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
ansi_bind_rules = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
subclass can set this flag to False if the target
driver/DB enforces this
"""
_textual_ordered_columns = False
"""tell the result object that the column names as rendered are important,
but they are also "ordered" vs. what is in the compiled object here.
"""
_ordered_columns = True
"""
if False, means we can't be sure the list of entries
in _result_columns is actually the rendered order. Usually
True unless using an unordered TextualSelect.
"""
_loose_column_name_matching = False
"""tell the result object that the SQL statement is textual, wants to match
up to Column objects, and may be using the ._tq_label in the SELECT rather
than the base name.
"""
_numeric_binds = False
"""
True if paramstyle is "numeric". This paramstyle is trickier than
all the others.
"""
_render_postcompile = False
"""
whether to render out POSTCOMPILE params during the compile phase.
"""
insert_single_values_expr = None
"""When an INSERT is compiled with a single set of parameters inside
a VALUES expression, the string is assigned here, where it can be
used for insert batching schemes to rewrite the VALUES expression.
.. versionadded:: 1.3.8
"""
literal_execute_params = frozenset()
"""bindparameter objects that are rendered as literal values at statement
execution time.
"""
post_compile_params = frozenset()
"""bindparameter objects that are rendered as bound parameter placeholders
at statement execution time.
"""
escaped_bind_names = util.EMPTY_DICT
"""Late escaping of bound parameter names that has to be converted
to the original name when looking in the parameter dictionary.
"""
has_out_parameters = False
"""if True, there are bindparam() objects that have the isoutparam
flag set."""
insert_prefetch = update_prefetch = ()
postfetch_lastrowid = False
"""if True, and this in insert, use cursor.lastrowid to populate
result.inserted_primary_key. """
_cache_key_bind_match = None
"""a mapping that will relate the BindParameter object we compile
to those that are part of the extracted collection of parameters
in the cache key, if we were given a cache key.
"""
positiontup = None
"""for a compiled construct that uses a positional paramstyle, will be
a sequence of strings, indicating the names of bound parameters in order.
This is used in order to render bound parameters in their correct order,
and is combined with the :attr:`_sql.Compiled.params` dictionary to
render parameters.
.. seealso::
:ref:`faq_sql_expression_string` - includes a usage example for
debugging use cases.
"""
inline = False
def __init__(
self,
dialect,
statement,
cache_key=None,
column_keys=None,
for_executemany=False,
linting=NO_LINTING,
**kwargs,
):
"""Construct a new :class:`.SQLCompiler` object.
:param dialect: :class:`.Dialect` to be used
:param statement: :class:`_expression.ClauseElement` to be compiled
:param column_keys: a list of column names to be compiled into an
INSERT or UPDATE statement.
:param for_executemany: whether INSERT / UPDATE statements should
expect that they are to be invoked in an "executemany" style,
which may impact how the statement will be expected to return the
values of defaults and autoincrement / sequences and similar.
Depending on the backend and driver in use, support for retrieving
these values may be disabled which means SQL expressions may
be rendered inline, RETURNING may not be rendered, etc.
:param kwargs: additional keyword arguments to be consumed by the
superclass.
"""
self.column_keys = column_keys
self.cache_key = cache_key
if cache_key:
self._cache_key_bind_match = ckbm = {
b.key: b for b in cache_key[1]
}
ckbm.update({b: [b] for b in cache_key[1]})
# compile INSERT/UPDATE defaults/sequences to expect executemany
# style execution, which may mean no pre-execute of defaults,
# or no RETURNING
self.for_executemany = for_executemany
self.linting = linting
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. CursorResult uses this for type processing and
# column targeting
self._result_columns = []
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self._numeric_binds = dialect.paramstyle == "numeric"
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
self.label_length = (
dialect.label_length or dialect.max_identifier_length
)
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = prefix_anon_map()
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if self.isinsert or self.isupdate or self.isdelete:
if statement._returning:
self.returning = statement._returning
if self.isinsert or self.isupdate:
if statement._inline:
self.inline = True
elif self.for_executemany and (
not self.isinsert
or (
self.dialect.insert_executemany_returning
and statement._return_defaults
)
):
self.inline = True
if self.positional and self._numeric_binds:
self._apply_numbered_params()
if self._render_postcompile:
self._process_parameters_for_postcompile(_populate_self=True)
@property
def current_executable(self):
"""Return the current 'executable' that is being compiled.
This is currently the :class:`_sql.Select`, :class:`_sql.Insert`,
:class:`_sql.Update`, :class:`_sql.Delete`,
:class:`_sql.CompoundSelect` object that is being compiled.
Specifically it's assigned to the ``self.stack`` list of elements.
When a statement like the above is being compiled, it normally
is also assigned to the ``.statement`` attribute of the
:class:`_sql.Compiler` object. However, all SQL constructs are
ultimately nestable, and this attribute should never be consulted
by a ``visit_`` method, as it is not guaranteed to be assigned
nor guaranteed to correspond to the current statement being compiled.
.. versionadded:: 1.3.21
For compatibility with previous versions, use the following
recipe::
statement = getattr(self, "current_executable", False)
if statement is False:
statement = self.stack[-1]["selectable"]
For versions 1.4 and above, ensure only .current_executable
is used; the format of "self.stack" may change.
"""
try:
return self.stack[-1]["selectable"]
except IndexError as ie:
raise IndexError("Compiler does not have a stack entry") from ie
@property
def prefetch(self):
return list(self.insert_prefetch + self.update_prefetch)
@util.memoized_property
def _global_attributes(self):
return {}
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
# To store the query to print - Dict[cte, text_query]
self.ctes = util.OrderedDict()
# Detect same CTE references - Dict[(level, name), cte]
# Level is required for supporting nesting
self.ctes_by_level_name = {}
# To retrieve key/level in ctes_by_level_name -
# Dict[cte_reference, (level, cte_name)]
self.level_name_by_cte = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns,
self._ordered_columns,
)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry["need_result_map_for_nested"] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop("need_result_map_for_nested")
self._result_columns, self._ordered_columns = (
result_columns,
ordered_columns,
)
def _apply_numbered_params(self):
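# illustrative sketch: with the "numeric" paramstyle each parameter is
# first rendered using the ":[_POSITION]" bind template; this pass
# rewrites those tokens in order of appearance, e.g.
# "x = :[_POSITION] AND y = :[_POSITION]" becomes "x = :1 AND y = :2".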
poscount = itertools.count(1)
self.string = re.sub(
r"\[_POSITION\]", lambda m: str(next(poscount)), self.string
)
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value)
for key, value in (
(
self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect)
if not bindparam.type._is_tuple_type
else tuple(
elem_type._cached_bind_processor(self.dialect)
for elem_type in bindparam.type.types
),
)
for bindparam in self.bind_names
)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(
self,
params=None,
_group_number=None,
_check=True,
extracted_parameters=None,
):
"""return a dictionary of bind parameter keys and values"""
has_escaped_names = bool(self.escaped_bind_names)
if extracted_parameters:
# relate the bound parameters collected in the original cache key
# to those collected in the incoming cache key. They will not have
# matching names but they will line up positionally in the same
# way. The parameters present in self.bind_names may be clones of
# these original cache key params in the case of DML but the .key
# will be guaranteed to match.
try:
orig_extracted = self.cache_key[1]
except TypeError as err:
raise exc.CompileError(
"This compiled object has no original cache key; "
"can't pass extracted_parameters to construct_params"
) from err
ckbm = self._cache_key_bind_match
resolved_extracted = {
bind: extracted
for b, extracted in zip(orig_extracted, extracted_parameters)
for bind in ckbm[b]
}
else:
resolved_extracted = None
if params:
pd = {}
for bindparam, name in self.bind_names.items():
escaped_name = (
self.escaped_bind_names.get(name, name)
if has_escaped_names
else name
)
if bindparam.key in params:
pd[escaped_name] = params[bindparam.key]
elif name in params:
pd[escaped_name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
else:
if resolved_extracted:
value_param = resolved_extracted.get(
bindparam, bindparam
)
else:
value_param = bindparam
if bindparam.callable:
pd[escaped_name] = value_param.effective_value
else:
pd[escaped_name] = value_param.value
return pd
else:
pd = {}
for bindparam, name in self.bind_names.items():
escaped_name = (
self.escaped_bind_names.get(name, name)
if has_escaped_names
else name
)
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
if resolved_extracted:
value_param = resolved_extracted.get(bindparam, bindparam)
else:
value_param = bindparam
if bindparam.callable:
pd[escaped_name] = value_param.effective_value
else:
pd[escaped_name] = value_param.value
return pd
@util.memoized_instancemethod
def _get_set_input_sizes_lookup(self):
dialect = self.dialect
include_types = dialect.include_set_input_sizes
exclude_types = dialect.exclude_set_input_sizes
dbapi = dialect.dbapi
def lookup_type(typ):
dbtype = typ._unwrapped_dialect_impl(dialect).get_dbapi_type(dbapi)
if (
dbtype is not None
and (exclude_types is None or dbtype not in exclude_types)
and (include_types is None or dbtype in include_types)
):
return dbtype
else:
return None
inputsizes = {}
literal_execute_params = self.literal_execute_params
for bindparam in self.bind_names:
if bindparam in literal_execute_params:
continue
if bindparam.type._is_tuple_type:
inputsizes[bindparam] = [
lookup_type(typ) for typ in bindparam.type.types
]
else:
inputsizes[bindparam] = lookup_type(bindparam.type)
return inputsizes
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present.
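For example, ``table.select().where(table.c.id == 7).compile().params``
would be roughly ``{"id_1": 7}``; the exact key names depend on the
statement.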
.. seealso::
:ref:`faq_sql_expression_string` - includes a usage example for
debugging use cases.
"""
return self.construct_params(_check=False)
def _process_parameters_for_postcompile(
self, parameters=None, _populate_self=False
):
"""handle special post compile parameters.
These include:
* "expanding" parameters -typically IN tuples that are rendered
on a per-parameter basis for an otherwise fixed SQL statement string.
* literal_binds compiled with the literal_execute flag. Used for
things like SQL Server "TOP N" where the driver does not accommodate
N as a bound parameter.
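As an illustrative sketch, an expanding parameter produced by
``column.in_([1, 2, 3])`` arrives here rendered as
``col IN (__[POSTCOMPILE_param_1])`` and is rewritten, per set of
parameter values, into individual placeholders such as
``col IN (?, ?, ?)`` on a "qmark" dialect.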
"""
if parameters is None:
parameters = self.construct_params()
expanded_parameters = {}
if self.positional:
positiontup = []
else:
positiontup = None
processors = self._bind_processors
new_processors = {}
if self.positional and self._numeric_binds:
# I'm not familiar with any DBAPI that uses 'numeric'.
# the strategy would likely be to make use of numbers greater than
# the highest number present; then for expanding parameters,
# append them to the end of the parameter list. that way
# we avoid having to renumber all the existing parameters.
raise NotImplementedError(
"'post-compile' bind parameters are not supported with "
"the 'numeric' paramstyle at this time."
)
replacement_expressions = {}
to_update_sets = {}
# notes:
# *unescaped* parameter names in:
# self.bind_names, self.binds, self._bind_processors
#
# *escaped* parameter names in:
# construct_params(), replacement_expressions
for name in (
self.positiontup if self.positional else self.bind_names.values()
):
escaped_name = (
self.escaped_bind_names.get(name, name)
if self.escaped_bind_names
else name
)
parameter = self.binds[name]
if parameter in self.literal_execute_params:
if escaped_name not in replacement_expressions:
value = parameters.pop(escaped_name)
replacement_expressions[
escaped_name
] = self.render_literal_bindparam(
parameter, render_literal_value=value
)
continue
if parameter in self.post_compile_params:
if escaped_name in replacement_expressions:
to_update = to_update_sets[escaped_name]
else:
# we are removing the parameter from parameters
# because it is a list value, which is not expected by
# TypeEngine objects that would otherwise be asked to
# process it. the single name is being replaced with
# individual numbered parameters for each value in the
# param.
values = parameters.pop(escaped_name)
leep = self._literal_execute_expanding_parameter
to_update, replacement_expr = leep(
escaped_name, parameter, values
)
to_update_sets[escaped_name] = to_update
replacement_expressions[escaped_name] = replacement_expr
if not parameter.literal_execute:
parameters.update(to_update)
if parameter.type._is_tuple_type:
new_processors.update(
(
"%s_%s_%s" % (name, i, j),
processors[name][j - 1],
)
for i, tuple_element in enumerate(values, 1)
for j, value in enumerate(tuple_element, 1)
if name in processors
and processors[name][j - 1] is not None
)
else:
new_processors.update(
(key, processors[name])
for key, value in to_update
if name in processors
)
if self.positional:
positiontup.extend(name for name, value in to_update)
expanded_parameters[name] = [
expand_key for expand_key, value in to_update
]
elif self.positional:
positiontup.append(name)
def process_expanding(m):
key = m.group(1)
expr = replacement_expressions[key]
# if POSTCOMPILE included a bind_expression, render that
# around each element
if m.group(2):
tok = m.group(2).split("~~")
be_left, be_right = tok[1], tok[3]
expr = ", ".join(
"%s%s%s" % (be_left, exp, be_right)
for exp in expr.split(", ")
)
return expr
statement = re.sub(
r"__\[POSTCOMPILE_(\S+?)(~~.+?~~)?\]",
process_expanding,
self.string,
)
expanded_state = ExpandedState(
statement,
parameters,
new_processors,
positiontup,
expanded_parameters,
)
if _populate_self:
# this is for the "render_postcompile" flag, which is not
# otherwise used internally and is for end-user debugging and
# special use cases.
self.string = expanded_state.statement
self._bind_processors.update(expanded_state.processors)
self.positiontup = expanded_state.positiontup
self.post_compile_params = frozenset()
for key in expanded_state.parameter_expansion:
bind = self.binds.pop(key)
self.bind_names.pop(bind)
for value, expanded_key in zip(
bind.value, expanded_state.parameter_expansion[key]
):
self.binds[expanded_key] = new_param = bind._with_value(
value
)
self.bind_names[new_param] = expanded_key
return expanded_state
@util.preload_module("sqlalchemy.engine.cursor")
def _create_result_map(self):
"""utility method used for unit tests only."""
cursor = util.preloaded.engine_cursor
return cursor.CursorResultMetaData._create_description_match_map(
self._result_columns
)
@util.memoized_property
def _within_exec_param_key_getter(self):
getter = self._key_getters_for_crud_column[2]
if self.escaped_bind_names:
def _get(obj):
key = getter(obj)
return self.escaped_bind_names.get(key, key)
return _get
else:
return getter
@util.memoized_property
@util.preload_module("sqlalchemy.engine.result")
def _inserted_primary_key_from_lastrowid_getter(self):
result = util.preloaded.engine_result
param_key_getter = self._within_exec_param_key_getter
table = self.statement.table
getters = [
(operator.methodcaller("get", param_key_getter(col), None), col)
for col in table.primary_key
]
autoinc_col = table._autoincrement_column
if autoinc_col is not None:
# apply type post processors to the lastrowid
proc = autoinc_col.type._cached_result_processor(
self.dialect, None
)
else:
proc = None
row_fn = result.result_tuple([col.key for col in table.primary_key])
def get(lastrowid, parameters):
"""given cursor.lastrowid value and the parameters used for INSERT,
return a "row" that represents the primary key, either by
using the "lastrowid" or by extracting values from the parameters
that were sent along with the INSERT.
"""
if proc is not None:
lastrowid = proc(lastrowid)
if lastrowid is None:
return row_fn(getter(parameters) for getter, col in getters)
else:
return row_fn(
lastrowid if col is autoinc_col else getter(parameters)
for getter, col in getters
)
return get
@util.memoized_property
@util.preload_module("sqlalchemy.engine.result")
def _inserted_primary_key_from_returning_getter(self):
result = util.preloaded.engine_result
param_key_getter = self._within_exec_param_key_getter
table = self.statement.table
ret = {col: idx for idx, col in enumerate(self.returning)}
getters = [
(operator.itemgetter(ret[col]), True)
if col in ret
else (
operator.methodcaller("get", param_key_getter(col), None),
False,
)
for col in table.primary_key
]
row_fn = result.result_tuple([col.key for col in table.primary_key])
def get(row, parameters):
return row_fn(
getter(row) if use_row else getter(parameters)
for getter, use_row in getters
)
return get
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_select_statement_grouping(self, grouping, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if self.stack and self.dialect.supports_simple_order_by_label:
compile_state = self.stack[-1]["compile_state"]
(
with_cols,
only_froms,
only_cols,
) = compile_state._label_resolve_dict
if within_columns_clause:
resolve_dict = only_froms
else:
resolve_dict = only_cols
# this can be None in the case that a _label_reference()
# was subject to a replacement operation, in which case
# the replacement of the Label element may have changed
# to something else like a ColumnClause expression.
order_by_elem = element.element._order_by_label_element
if (
order_by_elem is not None
and order_by_elem.name in resolve_dict
and order_by_elem.shares_lineage(
resolve_dict[order_by_elem.name]
)
):
kwargs[
"render_label_as_label"
] = element.element._order_by_label_element
return self.process(
element.element,
within_columns_clause=within_columns_clause,
**kwargs,
)
def visit_textual_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if not self.stack:
# compiling the element outside of the context of a SELECT
return self.process(element._text_clause)
compile_state = self.stack[-1]["compile_state"]
with_cols, only_froms, only_cols = compile_state._label_resolve_dict
try:
if within_columns_clause:
col = only_froms[element.element]
else:
col = with_cols[element.element]
except KeyError as err:
coercions._no_text_coercion(
element.element,
extra=(
"Can't resolve label reference for ORDER BY / "
"GROUP BY / DISTINCT etc."
),
exc_cls=exc.CompileError,
err=err,
)
else:
kwargs["render_label_as_label"] = col
return self.process(
col, within_columns_clause=within_columns_clause, **kwargs
)
def visit_label(
self,
label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
result_map_targets=(),
**kw,
):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (
within_columns_clause and not within_label_clause
)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname) + label._alt_names + result_map_targets,
label.type,
)
return (
label.element._compiler_dispatch(
self,
within_columns_clause=True,
within_label_clause=True,
**kw,
)
+ OPERATORS[operators.as_]
+ self.preparer.format_label(label, labelname)
)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw
)
def _fallback_column_name(self, column):
raise exc.CompileError(
"Cannot compile Column object until " "its 'name' is assigned."
)
def visit_lambda_element(self, element, **kw):
sql_element = element._resolved
return self.process(sql_element, **kw)
def visit_column(
self,
column,
add_to_result_map=None,
include_table=True,
result_map_targets=(),
ambiguous_table_name_map=None,
**kwargs,
):
name = orig_name = column.name
if name is None:
name = self._fallback_column_name(column)
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
targets = (column, name, column.key) + result_map_targets
if column._tq_label:
targets += (column._tq_label,)
add_to_result_map(name, orig_name, targets, column.type)
if is_literal:
# note we are not currently accommodating
# literal_column(quoted_name('ident', True)) here
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
effective_schema = self.preparer.schema_for_object(table)
if effective_schema:
schema_prefix = (
self.preparer.quote_schema(effective_schema) + "."
)
else:
schema_prefix = ""
tablename = table.name
if (
not effective_schema
and ambiguous_table_name_map
and tablename in ambiguous_table_name_map
):
tablename = ambiguous_table_name_map[tablename]
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + self.preparer.quote(tablename) + "." + name
def visit_collation(self, element, **kw):
return self.preparer.format_collation(element.collation)
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw["type_expression"] = typeclause
kw["identifier_preparer"] = self.preparer
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def escape_literal_column(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def visit_textclause(self, textclause, add_to_result_map=None, **kw):
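# for illustration: in text("... WHERE x = :x AND y = '\:literal'"), the
# ":x" token is replaced with a bound-parameter placeholder via
# do_bindparam(), while the escaped "\:literal" is un-escaped back to
# ":literal" afterwards.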
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
if not self.stack:
self.isplaintext = True
if add_to_result_map:
# text() object is present in the columns clause of a
# select(). Add a no-name entry to the result map so that
# row[text()] produces a result
add_to_result_map(None, None, (textclause,), sqltypes.NULLTYPE)
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam, self.post_process_text(textclause.text)
),
)
def visit_textual_select(
self, taf, compound_index=None, asfrom=False, **kw
):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = (
toplevel
or (
compound_index == 0
and entry.get("need_result_map_for_compound", False)
)
or entry.get("need_result_map_for_nested", False)
)
if populate_result_map:
self._ordered_columns = (
self._textual_ordered_columns
) = taf.positional
# enable looser result column matching when the SQL text links to
# Column objects by name only
self._loose_column_name_matching = not taf.positional and bool(
taf.column_args
)
for c in taf.column_args:
self.process(
c,
within_columns_clause=True,
add_to_result_map=self._add_to_result_map,
)
return self.process(taf.element, **kw)
def visit_null(self, expr, **kw):
return "NULL"
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return "true"
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return "false"
else:
return "0"
def _generate_delimited_list(self, elements, separator, **kw):
return separator.join(
s
for s in (c._compiler_dispatch(self, **kw) for c in elements)
if s
)
def _generate_delimited_and_list(self, clauses, **kw):
lcc, clauses = elements.BooleanClauseList._process_clauses_for_boolean(
operators.and_,
elements.True_._singleton,
elements.False_._singleton,
clauses,
)
if lcc == 1:
return clauses[0]._compiler_dispatch(self, **kw)
else:
separator = OPERATORS[operators.and_]
return separator.join(
s
for s in (c._compiler_dispatch(self, **kw) for c in clauses)
if s
)
def visit_tuple(self, clauselist, **kw):
return "(%s)" % self.visit_clauselist(clauselist, **kw)
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return self._generate_delimited_list(clauselist.clauses, sep, **kw)
def visit_case(self, clause, **kwargs):
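# for illustration: case((tbl.c.x == 2, "two"), else_="other") is
# expected to render roughly as
# "CASE WHEN tbl.x = :x_1 THEN :param_1 ELSE :param_2 END".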
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += (
"WHEN "
+ cond._compiler_dispatch(self, **kwargs)
+ " THEN "
+ result._compiler_dispatch(self, **kwargs)
+ " "
)
if clause.else_ is not None:
x += (
"ELSE " + clause.else_._compiler_dispatch(self, **kwargs) + " "
)
x += "END"
return x
def visit_type_coerce(self, type_coerce, **kw):
return type_coerce.typed_expression._compiler_dispatch(self, **kw)
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % (
cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs),
)
def _format_frame_clause(self, range_, **kw):
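# for illustration: a bound of RANGE_UNBOUNDED renders as UNBOUNDED
# PRECEDING / FOLLOWING, RANGE_CURRENT as CURRENT ROW, a negative integer
# as "<n> PRECEDING" and a positive one as "<n> FOLLOWING", with the
# value itself passed through as a literal/bound expression.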
return "%s AND %s" % (
"UNBOUNDED PRECEDING"
if range_[0] is elements.RANGE_UNBOUNDED
else "CURRENT ROW"
if range_[0] is elements.RANGE_CURRENT
else "%s PRECEDING"
% (self.process(elements.literal(abs(range_[0])), **kw),)
if range_[0] < 0
else "%s FOLLOWING"
% (self.process(elements.literal(range_[0]), **kw),),
"UNBOUNDED FOLLOWING"
if range_[1] is elements.RANGE_UNBOUNDED
else "CURRENT ROW"
if range_[1] is elements.RANGE_CURRENT
else "%s PRECEDING"
% (self.process(elements.literal(abs(range_[1])), **kw),)
if range_[1] < 0
else "%s FOLLOWING"
% (self.process(elements.literal(range_[1]), **kw),),
)
def visit_over(self, over, **kwargs):
if over.range_:
range_ = "RANGE BETWEEN %s" % self._format_frame_clause(
over.range_, **kwargs
)
elif over.rows:
range_ = "ROWS BETWEEN %s" % self._format_frame_clause(
over.rows, **kwargs
)
else:
range_ = None
return "%s OVER (%s)" % (
over.element._compiler_dispatch(self, **kwargs),
" ".join(
[
"%s BY %s"
% (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
("PARTITION", over.partition_by),
("ORDER", over.order_by),
)
if clause is not None and len(clause)
]
+ ([range_] if range_ else [])
),
)
def visit_withingroup(self, withingroup, **kwargs):
return "%s WITHIN GROUP (ORDER BY %s)" % (
withingroup.element._compiler_dispatch(self, **kwargs),
withingroup.order_by._compiler_dispatch(self, **kwargs),
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs),
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field,
extract.expr._compiler_dispatch(self, **kwargs),
)
def visit_scalar_function_column(self, element, **kw):
compiled_fn = self.visit_function(element.fn, **kw)
compiled_col = self.visit_column(element, **kw)
return "(%s).%s" % (compiled_fn, compiled_col)
def visit_function(self, func, add_to_result_map=None, **kwargs):
if add_to_result_map is not None:
add_to_result_map(func.name, func.name, (), func.type)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
text = disp(func, **kwargs)
else:
name = FUNCTIONS.get(func._deannotate().__class__, None)
if name:
if func._has_args:
name += "%(expr)s"
else:
name = func.name
name = (
self.preparer.quote(name)
if self.preparer._requires_quotes_illegal_chars(name)
or isinstance(name, elements.quoted_name)
else name
)
name = name + "%(expr)s"
text = ".".join(
[
(
self.preparer.quote(tok)
if self.preparer._requires_quotes_illegal_chars(tok)
or isinstance(name, elements.quoted_name)
else tok
)
for tok in func.packagenames
]
+ [name]
) % {"expr": self.function_argspec(func, **kwargs)}
if func._with_ordinality:
text += " WITH ORDINALITY"
return text
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence, **kw):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments."
% self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(
self, cs, asfrom=False, compound_index=None, **kwargs
):
toplevel = not self.stack
compile_state = cs._compile_state_factory(cs, self, **kwargs)
if toplevel and not self.compile_state:
self.compile_state = compile_state
compound_stmt = compile_state.statement
entry = self._default_stack_entry if toplevel else self.stack[-1]
need_result_map = toplevel or (
not compound_index
and entry.get("need_result_map_for_compound", False)
)
# indicates there is already a CompoundSelect in play
if compound_index == 0:
entry["select_0"] = cs
self.stack.append(
{
"correlate_froms": entry["correlate_froms"],
"asfrom_froms": entry["asfrom_froms"],
"selectable": cs,
"compile_state": compile_state,
"need_result_map_for_compound": need_result_map,
}
)
if compound_stmt._independent_ctes:
for cte in compound_stmt._independent_ctes:
cte._compiler_dispatch(self, **kwargs)
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(
c._compiler_dispatch(
self, asfrom=asfrom, compound_index=i, **kwargs
)
for i, c in enumerate(cs.selects)
)
)
kwargs["include_table"] = False
text += self.group_by_clause(cs, **dict(asfrom=asfrom, **kwargs))
text += self.order_by_clause(cs, **kwargs)
if cs._has_row_limiting_clause:
text += self._row_limit_clause(cs, **kwargs)
if self.ctes:
nesting_level = len(self.stack) if not toplevel else None
text = (
self._render_cte_clause(
nesting_level=nesting_level, include_following_stack=True
)
+ text
)
self.stack.pop(-1)
return text
def _row_limit_clause(self, cs, **kwargs):
if cs._fetch_clause is not None:
return self.fetch_clause(cs, **kwargs)
else:
return self.limit_clause(cs, **kwargs)
def _get_operator_dispatch(self, operator_, qualifier1, qualifier2):
attrname = "visit_%s_%s%s" % (
operator_.__name__,
qualifier1,
"_" + qualifier2 if qualifier2 else "",
)
return getattr(self, attrname, None)
def visit_unary(
self, unary, add_to_result_map=None, result_map_targets=(), **kw
):
if add_to_result_map is not None:
result_map_targets += (unary,)
kw["add_to_result_map"] = add_to_result_map
kw["result_map_targets"] = result_map_targets
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously"
)
disp = self._get_operator_dispatch(
unary.operator, "unary", "operator"
)
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw
)
elif unary.modifier:
disp = self._get_operator_dispatch(
unary.modifier, "unary", "modifier"
)
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(
unary, OPERATORS[unary.modifier], **kw
)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier"
)
def visit_truediv_binary(self, binary, operator, **kw):
if self.dialect.div_is_floordiv:
return (
self.process(binary.left, **kw)
+ " / "
# TODO: would need a fast cast again here,
# unless we want to use an implicit cast like "+ 0.0"
+ self.process(
elements.Cast(
binary.right,
binary.right.type
if binary.right.type._type_affinity is sqltypes.Numeric
else sqltypes.Numeric(),
),
**kw,
)
)
else:
return (
self.process(binary.left, **kw)
+ " / "
+ self.process(binary.right, **kw)
)
def visit_floordiv_binary(self, binary, operator, **kw):
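# for illustration: on dialects where "/" already truncates integer
# division, "a // b" with an integer right-hand side renders plainly as
# "a / b"; otherwise it is emitted as "FLOOR(a / b)".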
if (
self.dialect.div_is_floordiv
and binary.right.type._type_affinity is sqltypes.Integer
):
return (
self.process(binary.left, **kw)
+ " / "
+ self.process(binary.right, **kw)
)
else:
return "FLOOR(%s)" % (
self.process(binary.left, **kw)
+ " / "
+ self.process(binary.right, **kw)
)
def visit_is_true_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_is_false_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_not_match_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op
)
def visit_not_in_op_binary(self, binary, operator, **kw):
# The brackets are required in the NOT IN operation because the empty
# case is handled using the form "(col NOT IN (null) OR 1 = 1)".
# The presence of the OR makes the brackets required.
return "(%s)" % self._generate_generic_binary(
binary, OPERATORS[operator], **kw
)
def visit_empty_set_op_expr(self, type_, expand_op):
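# for illustration: the fragments returned here assume they will sit
# inside the parenthesis already emitted for the IN / NOT IN expression,
# e.g. "NULL) AND (1 != 1" becomes "col IN (NULL) AND (1 != 1)" once the
# enclosing parens are applied, an expression that is false for an
# empty IN.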
if expand_op is operators.not_in_op:
if len(type_) > 1:
return "(%s)) OR (1 = 1" % (
", ".join("NULL" for element in type_)
)
else:
return "NULL) OR (1 = 1"
elif expand_op is operators.in_op:
if len(type_) > 1:
return "(%s)) AND (1 != 1" % (
", ".join("NULL" for element in type_)
)
else:
return "NULL) AND (1 != 1"
else:
return self.visit_empty_set_expr(type_)
def visit_empty_set_expr(self, element_types):
raise NotImplementedError(
"Dialect '%s' does not support empty set expression."
% self.dialect.name
)
def _literal_execute_expanding_parameter_literal_binds(
self, parameter, values
):
typ_dialect_impl = parameter.type._unwrapped_dialect_impl(self.dialect)
if not values:
if typ_dialect_impl._is_tuple_type:
replacement_expression = (
"VALUES " if self.dialect.tuple_in_values else ""
) + self.visit_empty_set_op_expr(
parameter.type.types, parameter.expand_op
)
else:
replacement_expression = self.visit_empty_set_op_expr(
[parameter.type], parameter.expand_op
)
elif typ_dialect_impl._is_tuple_type or (
typ_dialect_impl._isnull
and isinstance(values[0], collections_abc.Sequence)
and not isinstance(values[0], (str, bytes))
):
replacement_expression = (
"VALUES " if self.dialect.tuple_in_values else ""
) + ", ".join(
"(%s)"
% (
", ".join(
self.render_literal_value(value, param_type)
for value, param_type in zip(
tuple_element, parameter.type.types
)
)
)
for i, tuple_element in enumerate(values)
)
else:
replacement_expression = ", ".join(
self.render_literal_value(value, parameter.type)
for value in values
)
return (), replacement_expression
def _literal_execute_expanding_parameter(self, name, parameter, values):
if parameter.literal_execute:
return self._literal_execute_expanding_parameter_literal_binds(
parameter, values
)
dialect = self.dialect
typ_dialect_impl = parameter.type._unwrapped_dialect_impl(dialect)
if (
self.dialect._bind_typing_render_casts
and typ_dialect_impl.render_bind_cast
):
def _render_bindtemplate(name):
return self.render_bind_cast(
parameter.type,
typ_dialect_impl,
self.bindtemplate % {"name": name},
)
else:
def _render_bindtemplate(name):
return self.bindtemplate % {"name": name}
if not values:
to_update = []
if typ_dialect_impl._is_tuple_type:
replacement_expression = self.visit_empty_set_op_expr(
parameter.type.types, parameter.expand_op
)
else:
replacement_expression = self.visit_empty_set_op_expr(
[parameter.type], parameter.expand_op
)
elif typ_dialect_impl._is_tuple_type or (
typ_dialect_impl._isnull
and isinstance(values[0], collections_abc.Sequence)
and not isinstance(values[0], (str, bytes))
):
assert not typ_dialect_impl._is_array
to_update = [
("%s_%s_%s" % (name, i, j), value)
for i, tuple_element in enumerate(values, 1)
for j, value in enumerate(tuple_element, 1)
]
replacement_expression = (
"VALUES " if dialect.tuple_in_values else ""
) + ", ".join(
"(%s)"
% (
", ".join(
_render_bindtemplate(
to_update[i * len(tuple_element) + j][0]
)
for j, value in enumerate(tuple_element)
)
)
for i, tuple_element in enumerate(values)
)
else:
to_update = [
("%s_%s" % (name, i), value)
for i, value in enumerate(values, 1)
]
replacement_expression = ", ".join(
_render_bindtemplate(key) for key, value in to_update
)
return to_update, replacement_expression
def visit_binary(
self,
binary,
override_operator=None,
eager_grouping=False,
from_linter=None,
lateral_from_linter=None,
**kw,
):
if from_linter and operators.is_comparison(binary.operator):
if lateral_from_linter is not None:
enclosing_lateral = kw["enclosing_lateral"]
lateral_from_linter.edges.update(
itertools.product(
binary.left._from_objects + [enclosing_lateral],
binary.right._from_objects + [enclosing_lateral],
)
)
else:
from_linter.edges.update(
itertools.product(
binary.left._from_objects, binary.right._from_objects
)
)
# don't allow "? = ?" to render
if (
self.ansi_bind_rules
and isinstance(binary.left, elements.BindParameter)
and isinstance(binary.right, elements.BindParameter)
):
kw["literal_execute"] = True
operator_ = override_operator or binary.operator
disp = self._get_operator_dispatch(operator_, "binary", None)
if disp:
return disp(binary, operator_, **kw)
else:
try:
opstring = OPERATORS[operator_]
except KeyError as err:
raise exc.UnsupportedCompilationError(self, operator_) from err
else:
return self._generate_generic_binary(
binary,
opstring,
from_linter=from_linter,
lateral_from_linter=lateral_from_linter,
**kw,
)
def visit_function_as_comparison_op_binary(self, element, operator, **kw):
return self.process(element.sql_function, **kw)
def visit_mod_binary(self, binary, operator, **kw):
if self.preparer._double_percents:
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
else:
return (
self.process(binary.left, **kw)
+ " % "
+ self.process(binary.right, **kw)
)
def visit_custom_op_binary(self, element, operator, **kw):
kw["eager_grouping"] = operator.eager_grouping
return self._generate_generic_binary(
element,
" " + self.escape_literal_column(operator.opstring) + " ",
**kw,
)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, self.escape_literal_column(operator.opstring) + " ", **kw
)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + self.escape_literal_column(operator.opstring), **kw
)
def _generate_generic_binary(
self, binary, opstring, eager_grouping=False, **kw
):
_in_binary = kw.get("_in_binary", False)
kw["_in_binary"] = True
kw["_binary_op"] = binary.operator
text = (
binary.left._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
+ opstring
+ binary.right._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
)
if _in_binary and eager_grouping:
text = "(%s)" % text
return text
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_not_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_not_like_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
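# for illustration: startswith() is emitted as a LIKE against the operand
# concatenated with a trailing literal '%', e.g. roughly
# "col LIKE <expr> || '%'" on most dialects.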
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_not_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(binary.right)
return self.visit_not_like_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_not_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_not_like_op_binary(binary, operator, **kw)
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return "%s LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_not_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s NOT LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "lower(%s) LIKE lower(%s)" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_not_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "lower(%s) NOT LIKE lower(%s)" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC " if symmetric else " BETWEEN ", **kw
)
def visit_not_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary,
" NOT BETWEEN SYMMETRIC " if symmetric else " NOT BETWEEN ",
**kw,
)
def visit_regexp_match_op_binary(self, binary, operator, **kw):
raise exc.CompileError(
"%s dialect does not support regular expressions"
% self.dialect.name
)
def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
raise exc.CompileError(
"%s dialect does not support regular expressions"
% self.dialect.name
)
def visit_regexp_replace_op_binary(self, binary, operator, **kw):
raise exc.CompileError(
"%s dialect does not support regular expression replacements"
% self.dialect.name
)
def visit_bindparam(
self,
bindparam,
within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
literal_execute=False,
render_postcompile=False,
**kwargs,
):
if not skip_bind_expression:
impl = bindparam.type.dialect_impl(self.dialect)
if impl._has_bind_expression:
bind_expression = impl.bind_expression(bindparam)
wrapped = self.process(
bind_expression,
skip_bind_expression=True,
within_columns_clause=within_columns_clause,
literal_binds=literal_binds,
literal_execute=literal_execute,
render_postcompile=render_postcompile,
**kwargs,
)
if bindparam.expanding:
# for postcompile w/ expanding, move the "wrapped" part
# of this into the inside
m = re.match(
r"^(.*)\(__\[POSTCOMPILE_(\S+?)\]\)(.*)$", wrapped
)
assert m, "unexpected format for expanding parameter"
wrapped = "(__[POSTCOMPILE_%s~~%s~~REPL~~%s~~])" % (
m.group(2),
m.group(1),
m.group(3),
)
return wrapped
if not literal_binds:
literal_execute = (
literal_execute
or bindparam.literal_execute
or (within_columns_clause and self.ansi_bind_rules)
)
post_compile = literal_execute or bindparam.expanding
else:
post_compile = False
if literal_binds:
ret = self.render_literal_bindparam(
bindparam, within_columns_clause=True, **kwargs
)
if bindparam.expanding:
ret = "(%s)" % ret
return ret
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (
(existing.unique or bindparam.unique)
and not existing.proxy_set.intersection(
bindparam.proxy_set
)
and not existing._cloned_set.intersection(
bindparam._cloned_set
)
):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name" % name
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')."
% (bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
# if we are given a cache key that we're going to match against,
# relate the bindparam here to one that is most likely present
# in the "extracted params" portion of the cache key. this is used
# to set up a positional mapping that is used to determine the
# correct parameters for a subsequent use of this compiled with
# a different set of parameter values. here, we accommodate for
# parameters that may have been cloned both before and after the cache
# key was generated.
ckbm = self._cache_key_bind_match
if ckbm:
for bp in bindparam._cloned_set:
if bp.key in ckbm:
cb = ckbm[bp.key]
ckbm[cb].append(bindparam)
if bindparam.isoutparam:
self.has_out_parameters = True
if post_compile:
if render_postcompile:
self._render_postcompile = True
if literal_execute:
self.literal_execute_params |= {bindparam}
else:
self.post_compile_params |= {bindparam}
ret = self.bindparam_string(
name,
post_compile=post_compile,
expanding=bindparam.expanding,
bindparam_type=bindparam.type,
**kwargs,
)
if bindparam.expanding:
ret = "(%s)" % ret
return ret
def render_bind_cast(self, type_, dbapi_type, sqltext):
raise NotImplementedError()
def render_literal_bindparam(
self, bindparam, render_literal_value=NO_ARG, **kw
):
if render_literal_value is not NO_ARG:
value = render_literal_value
else:
if bindparam.value is None and bindparam.callable is None:
op = kw.get("_binary_op", None)
if op and op not in (operators.is_, operators.is_not):
util.warn_limited(
"Bound parameter '%s' rendering literal NULL in a SQL "
"expression; comparisons to NULL should not use "
"operators outside of 'is' or 'is not'",
(bindparam.key,),
)
return self.process(sqltypes.NULLTYPE, **kw)
value = bindparam.effective_value
if bindparam.expanding:
leep = self._literal_execute_expanding_parameter_literal_binds
to_update, replacement_expr = leep(bindparam, value)
return replacement_expr
else:
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
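As a rough illustration, a string value such as ``O'Reilly`` would
typically come back as the quoted literal ``'O''Reilly'``, suitable for
embedding directly in the statement text.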
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value
)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
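# illustrative sketch: names longer than label_length - 6 are truncated
# to that many characters and suffixed with "_" plus a hex counter, so a
# very long label might come out roughly as "some_long_label_prefi_1".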
if len(anonname) > self.label_length - 6:
counter = self.truncated_names.get(ident_class, 1)
truncname = (
anonname[0 : max(self.label_length - 6, 0)]
+ "_"
+ hex(counter)[2:]
)
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
def _anonymize(self, name):
return name % self.anon_map
def bindparam_string(
self,
name,
positional_names=None,
post_compile=False,
expanding=False,
escaped_from=None,
bindparam_type=None,
**kw,
):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
elif not escaped_from:
if _BIND_TRANSLATE_RE.search(name):
# not quite the translate use case as we want to
# also get a quick boolean if we even found
# unusual characters in the name
new_name = _BIND_TRANSLATE_RE.sub(
lambda m: _BIND_TRANSLATE_CHARS[m.group(0)],
name,
)
escaped_from = name
name = new_name
if escaped_from:
if not self.escaped_bind_names:
self.escaped_bind_names = {}
self.escaped_bind_names[escaped_from] = name
if post_compile:
return "__[POSTCOMPILE_%s]" % name
ret = self.bindtemplate % {"name": name}
if (
bindparam_type is not None
and self.dialect._bind_typing_render_casts
):
type_impl = bindparam_type._unwrapped_dialect_impl(self.dialect)
if type_impl.render_bind_cast:
ret = self.render_bind_cast(bindparam_type, type_impl, ret)
return ret
def visit_cte(
self,
cte,
asfrom=False,
ashint=False,
fromhints=None,
visiting_cte=None,
from_linter=None,
**kwargs,
):
self._init_cte_state()
kwargs["visiting_cte"] = cte
cte_name = cte.name
if isinstance(cte_name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte_name)
is_new_cte = True
embedded_in_current_named_cte = False
_reference_cte = cte._get_reference_cte()
if _reference_cte in self.level_name_by_cte:
cte_level, _ = self.level_name_by_cte[_reference_cte]
assert _ == cte_name
else:
cte_level = len(self.stack) if cte.nesting else 1
cte_level_name = (cte_level, cte_name)
if cte_level_name in self.ctes_by_level_name:
existing_cte = self.ctes_by_level_name[cte_level_name]
embedded_in_current_named_cte = visiting_cte is existing_cte
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte is existing_cte._restates or cte is existing_cte:
is_new_cte = False
elif existing_cte is cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
existing_cte_reference_cte = existing_cte._get_reference_cte()
# TODO: determine if these assertions are correct. they
# pass for current test cases
# assert existing_cte_reference_cte is _reference_cte
# assert existing_cte_reference_cte is existing_cte
del self.level_name_by_cte[existing_cte_reference_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" % cte_name
)
if not asfrom and not is_new_cte:
return None
if cte._cte_alias is not None:
pre_alias_cte = cte._cte_alias
cte_pre_alias_name = cte._cte_alias.name
if isinstance(cte_pre_alias_name, elements._truncated_label):
cte_pre_alias_name = self._truncated_identifier(
"alias", cte_pre_alias_name
)
else:
pre_alias_cte = cte
cte_pre_alias_name = None
if is_new_cte:
self.ctes_by_level_name[cte_level_name] = cte
self.level_name_by_cte[_reference_cte] = cte_level_name
if (
"autocommit" in cte.element._execution_options
and "autocommit" not in self.execution_options
):
self.execution_options = self.execution_options.union(
{
"autocommit": cte.element._execution_options[
"autocommit"
]
}
)
if pre_alias_cte not in self.ctes:
self.visit_cte(pre_alias_cte, **kwargs)
if not cte_pre_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.element, selectable.Select):
col_source = cte.element
elif isinstance(cte.element, selectable.CompoundSelect):
col_source = cte.element.selects[0]
else:
assert False, "cte should only be against SelectBase"
# TODO: can we get at the .columns_plus_names collection
# that is already (or will be?) generated for the SELECT
# rather than calling twice?
recur_cols = [
# TODO: proxy_name is not technically safe,
# see test_cte->
# test_with_recursive_no_name_currently_buggy. not
# clear what should be done with such a case
fallback_label_name or proxy_name
for (
_,
proxy_name,
fallback_label_name,
c,
repeated,
) in (col_source._generate_columns_plus_names(True))
if not repeated
]
text += "(%s)" % (
", ".join(
self.preparer.format_label_name(
ident, anon_map=self.anon_map
)
for ident in recur_cols
)
)
if self.positional:
kwargs["positional_names"] = self.cte_positional[cte] = []
assert kwargs.get("subquery", False) is False
if not self.stack:
# toplevel, this is a stringify of the
# cte directly. just compile the inner
# the way alias() does.
return cte.element._compiler_dispatch(
self, asfrom=asfrom, **kwargs
)
else:
prefixes = self._generate_prefixes(
cte, cte._prefixes, **kwargs
)
inner = cte.element._compiler_dispatch(
self, asfrom=True, **kwargs
)
text += " AS %s\n(%s)" % (prefixes, inner)
if cte._suffixes:
text += " " + self._generate_prefixes(
cte, cte._suffixes, **kwargs
)
self.ctes[cte] = text
if asfrom:
if from_linter:
from_linter.froms[cte] = cte_name
if not is_new_cte and embedded_in_current_named_cte:
return self.preparer.format_alias(cte, cte_name)
if cte_pre_alias_name:
text = self.preparer.format_alias(cte, cte_pre_alias_name)
if self.preparer._requires_quotes(cte_name):
cte_name = self.preparer.quote(cte_name)
text += self.get_render_as_alias_suffix(cte_name)
return text
else:
return self.preparer.format_alias(cte, cte_name)
def visit_table_valued_alias(self, element, **kw):
if element._is_lateral:
return self.visit_lateral(element, **kw)
else:
return self.visit_alias(element, **kw)
def visit_table_valued_column(self, element, **kw):
return self.visit_column(element, **kw)
def visit_alias(
self,
alias,
asfrom=False,
ashint=False,
iscrud=False,
fromhints=None,
subquery=False,
lateral=False,
enclosing_alias=None,
from_linter=None,
**kwargs,
):
if lateral:
if "enclosing_lateral" not in kwargs:
# if lateral is set and enclosing_lateral is not
# present, we assume we are being called directly
# from visit_lateral() and we need to set enclosing_lateral.
assert alias._is_lateral
kwargs["enclosing_lateral"] = alias
# for lateral objects, we track a second from_linter that is...
# lateral! to the level above us.
if (
from_linter
and "lateral_from_linter" not in kwargs
and "enclosing_lateral" in kwargs
):
kwargs["lateral_from_linter"] = from_linter
if enclosing_alias is not None and enclosing_alias.element is alias:
inner = alias.element._compiler_dispatch(
self,
asfrom=asfrom,
ashint=ashint,
iscrud=iscrud,
fromhints=fromhints,
lateral=lateral,
enclosing_alias=alias,
**kwargs,
)
if subquery and (asfrom or lateral):
inner = "(%s)" % (inner,)
return inner
else:
enclosing_alias = kwargs["enclosing_alias"] = alias
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
if from_linter:
from_linter.froms[alias] = alias_name
inner = alias.element._compiler_dispatch(
self, asfrom=True, lateral=lateral, **kwargs
)
if subquery:
inner = "(%s)" % (inner,)
ret = inner + self.get_render_as_alias_suffix(
self.preparer.format_alias(alias, alias_name)
)
if alias._supports_derived_columns and alias._render_derived:
ret += "(%s)" % (
", ".join(
"%s%s"
% (
self.preparer.quote(col.name),
" %s"
% self.dialect.type_compiler.process(
col.type, **kwargs
)
if alias._render_derived_w_types
else "",
)
for col in alias.c
)
)
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(
ret, alias, fromhints[alias], iscrud
)
return ret
else:
# note we cancel the "subquery" flag here as well
return alias.element._compiler_dispatch(
self, lateral=lateral, **kwargs
)
def visit_subquery(self, subquery, **kw):
kw["subquery"] = True
return self.visit_alias(subquery, **kw)
def visit_lateral(self, lateral_, **kw):
kw["lateral"] = True
return "LATERAL %s" % self.visit_alias(lateral_, **kw)
def visit_tablesample(self, tablesample, asfrom=False, **kw):
text = "%s TABLESAMPLE %s" % (
self.visit_alias(tablesample, asfrom=True, **kw),
tablesample._get_method()._compiler_dispatch(self, **kw),
)
if tablesample.seed is not None:
text += " REPEATABLE (%s)" % (
tablesample.seed._compiler_dispatch(self, **kw)
)
return text
def visit_values(self, element, asfrom=False, from_linter=None, **kw):
kw.setdefault("literal_binds", element.literal_binds)
v = "VALUES %s" % ", ".join(
self.process(
elements.Tuple(
types=element._column_types, *elem
).self_group(),
**kw,
)
for chunk in element._data
for elem in chunk
)
if isinstance(element.name, elements._truncated_label):
name = self._truncated_identifier("values", element.name)
else:
name = element.name
if element._is_lateral:
lateral = "LATERAL "
else:
lateral = ""
if asfrom:
if from_linter:
from_linter.froms[element] = (
name if name is not None else "(unnamed VALUES element)"
)
if name:
v = "%s(%s)%s (%s)" % (
lateral,
v,
self.get_render_as_alias_suffix(self.preparer.quote(name)),
(
", ".join(
c._compiler_dispatch(
self, include_table=False, **kw
)
for c in element.columns
)
),
)
else:
v = "%s(%s)" % (lateral, v)
return v
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
if keyname is None or keyname == "*":
self._ordered_columns = False
self._textual_ordered_columns = True
if type_._is_tuple_type:
raise exc.CompileError(
"Most backends don't support SELECTing "
"from a tuple() object. If this is an ORM query, "
"consider using the Bundle object."
)
self._result_columns.append((keyname, name, objects, type_))
def _label_returning_column(self, stmt, column, column_clause_args=None):
"""Render a column with necessary labels inside of a RETURNING clause.
This method is provided for individual dialects in place of calling
the _label_select_column method directly, so that the two use cases
of RETURNING vs. SELECT can be disambiguated going forward.
.. versionadded:: 1.4.21
"""
return self._label_select_column(
None,
column,
True,
False,
{} if column_clause_args is None else column_clause_args,
)
def _label_select_column(
self,
select,
column,
populate_result_map,
asfrom,
column_clause_args,
name=None,
proxy_name=None,
fallback_label_name=None,
within_columns_clause=True,
column_is_repeated=False,
need_column_expressions=False,
):
"""produce labeled columns present in a select()."""
impl = column.type.dialect_impl(self.dialect)
if impl._has_column_expression and (
need_column_expressions or populate_result_map
):
col_expr = impl.column_expression(column)
else:
col_expr = column
if populate_result_map:
# pass an "add_to_result_map" callable into the compilation
# of embedded columns. this collects information about the
# column as it will be fetched in the result and is coordinated
# with cursor.description when the query is executed.
add_to_result_map = self._add_to_result_map
# if the SELECT statement told us this column is a repeat,
# wrap the callable with one that prevents the addition of the
# targets
if column_is_repeated:
_add_to_result_map = add_to_result_map
def add_to_result_map(keyname, name, objects, type_):
_add_to_result_map(keyname, name, (), type_)
# if we redefined col_expr for type expressions, wrap the
# callable with one that adds the original column to the targets
elif col_expr is not column:
_add_to_result_map = add_to_result_map
def add_to_result_map(keyname, name, objects, type_):
_add_to_result_map(
keyname, name, (column,) + objects, type_
)
else:
add_to_result_map = None
# this method is used by some of the dialects for RETURNING,
# which has different inputs. _label_returning_column was added
# as the better target for this now however for 1.4 we will keep
# _label_select_column directly compatible with this use case.
# these assertions right now set up the current expected inputs
assert within_columns_clause, (
"_label_select_column is only relevant within "
"the columns clause of a SELECT or RETURNING"
)
if isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr, column.name, alt_names=(column.element,)
)
else:
result_expr = col_expr
elif name:
# here, _columns_plus_names has determined there's an explicit
# label name we need to use. this is the default for
# tablenames_plus_columnnames as well as when columns are being
# deduplicated on name
assert (
proxy_name is not None
), "proxy_name is required if 'name' is passed"
result_expr = _CompileLabel(
col_expr,
name,
alt_names=(
proxy_name,
# this is a hack to allow legacy result column lookups
# to work as they did before; this goes away in 2.0.
# TODO: this only seems to be tested indirectly
# via test/orm/test_deprecations.py. should be a
# resultset test for this
column._tq_label,
),
)
else:
# determine here whether this column should be rendered in
# a labelled context or not, as we were given no required label
# name from the caller. Here we apply heuristics based on the kind
# of SQL expression involved.
if col_expr is not column:
# type-specific expression wrapping the given column,
# so we render a label
render_with_label = True
elif isinstance(column, elements.ColumnClause):
# table-bound column, we render its name as a label if we are
# inside of a subquery only
render_with_label = (
asfrom
and not column.is_literal
and column.table is not None
)
elif isinstance(column, elements.TextClause):
render_with_label = False
elif isinstance(column, elements.UnaryExpression):
render_with_label = column.wraps_column_expression or asfrom
elif (
# general class of expressions that don't have a SQL-column
# addressible name. includes scalar selects, bind parameters,
# SQL functions, others
not isinstance(column, elements.NamedColumn)
# deeper check that indicates there's no natural "name" to
# this element, which accommodates for custom SQL constructs
# that might have a ".name" attribute (but aren't SQL
# functions) but are not implementing this more recently added
# base class. in theory the "NamedColumn" check should be
# enough, however here we seek to maintain legacy behaviors
# as well.
and column._non_anon_label is None
):
render_with_label = True
else:
render_with_label = False
if render_with_label:
if not fallback_label_name:
# used by the RETURNING case right now.  We generate it
# here as third-party dialects may be referring to the
# _label_select_column method directly instead of the
# just-added _label_returning_column method
assert not column_is_repeated
fallback_label_name = column._anon_name_label
fallback_label_name = (
elements._truncated_label(fallback_label_name)
if not isinstance(
fallback_label_name, elements._truncated_label
)
else fallback_label_name
)
result_expr = _CompileLabel(
col_expr, fallback_label_name, alt_names=(proxy_name,)
)
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map,
)
return result_expr._compiler_dispatch(self, **column_clause_args)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
_default_stack_entry = util.immutabledict(
[("correlate_froms", frozenset()), ("asfrom_froms", frozenset())]
)
def _display_froms_for_select(
self, select_stmt, asfrom, lateral=False, **kw
):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
compile_state = select_stmt._compile_state_factory(select_stmt, self)
correlate_froms = entry["correlate_froms"]
asfrom_froms = entry["asfrom_froms"]
if asfrom and not lateral:
froms = compile_state._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms
),
implicit_correlate_froms=(),
)
else:
froms = compile_state._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms,
)
return froms
translate_select_structure = None
"""if not ``None``, should be a callable which accepts ``(select_stmt,
**kw)`` and returns a select object. this is used for structural changes
mostly to accommodate for LIMIT/OFFSET schemes
"""
def visit_select(
self,
select_stmt,
asfrom=False,
insert_into=False,
fromhints=None,
compound_index=None,
select_wraps_for=None,
lateral=False,
from_linter=None,
**kwargs,
):
assert select_wraps_for is None, (
"SQLAlchemy 1.4 requires use of "
"the translate_select_structure hook for structural "
"translations of SELECT objects"
)
# initial setup of SELECT. the compile_state_factory may now
# be creating a totally different SELECT from the one that was
# passed in. for ORM use this will convert from an ORM-state
# SELECT to a regular "Core" SELECT. other composed operations
# such as computation of joins will be performed.
kwargs["within_columns_clause"] = False
compile_state = select_stmt._compile_state_factory(
select_stmt, self, **kwargs
)
kwargs[
"ambiguous_table_name_map"
] = compile_state._ambiguous_table_name_map
select_stmt = compile_state.statement
toplevel = not self.stack
if toplevel and not self.compile_state:
self.compile_state = compile_state
is_embedded_select = compound_index is not None or insert_into
# translate step for Oracle, SQL Server which often need to
# restructure the SELECT to allow for LIMIT/OFFSET and possibly
# other conditions
if self.translate_select_structure:
new_select_stmt = self.translate_select_structure(
select_stmt, asfrom=asfrom, **kwargs
)
# if SELECT was restructured, maintain a link to the originals
# and assemble a new compile state
if new_select_stmt is not select_stmt:
compile_state_wraps_for = compile_state
select_wraps_for = select_stmt
select_stmt = new_select_stmt
compile_state = select_stmt._compile_state_factory(
select_stmt, self, **kwargs
)
select_stmt = compile_state.statement
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = need_column_expressions = (
toplevel
or entry.get("need_result_map_for_compound", False)
or entry.get("need_result_map_for_nested", False)
)
# indicates there is a CompoundSelect in play and we are not the
# first select
if compound_index:
populate_result_map = False
# this was first proposed as part of #3372; however, it is not
# reached in current tests and could possibly be an assertion
# instead.
if not populate_result_map and "add_to_result_map" in kwargs:
del kwargs["add_to_result_map"]
froms = self._setup_select_stack(
select_stmt, compile_state, entry, asfrom, lateral, compound_index
)
column_clause_args = kwargs.copy()
column_clause_args.update(
{"within_label_clause": False, "within_columns_clause": False}
)
text = "SELECT " # we're off to a good start !
if select_stmt._hints:
hint_text, byfrom = self._setup_select_hints(select_stmt)
if hint_text:
text += hint_text + " "
else:
byfrom = None
if select_stmt._independent_ctes:
for cte in select_stmt._independent_ctes:
cte._compiler_dispatch(self, **kwargs)
if select_stmt._prefixes:
text += self._generate_prefixes(
select_stmt, select_stmt._prefixes, **kwargs
)
text += self.get_select_precolumns(select_stmt, **kwargs)
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c
for c in [
self._label_select_column(
select_stmt,
column,
populate_result_map,
asfrom,
column_clause_args,
name=name,
proxy_name=proxy_name,
fallback_label_name=fallback_label_name,
column_is_repeated=repeated,
need_column_expressions=need_column_expressions,
)
for (
name,
proxy_name,
fallback_label_name,
column,
repeated,
) in compile_state.columns_plus_names
]
if c is not None
]
if populate_result_map and select_wraps_for is not None:
# if this select was generated from translate_select,
# rewrite the targeted columns in the result map
translate = dict(
zip(
[
name
for (
key,
proxy_name,
fallback_label_name,
name,
repeated,
) in compile_state.columns_plus_names
],
[
name
for (
key,
proxy_name,
fallback_label_name,
name,
repeated,
) in compile_state_wraps_for.columns_plus_names
],
)
)
self._result_columns = [
(key, name, tuple(translate.get(o, o) for o in obj), type_)
for key, name, obj, type_ in self._result_columns
]
text = self._compose_select_body(
text,
select_stmt,
compile_state,
inner_columns,
froms,
byfrom,
toplevel,
kwargs,
)
if select_stmt._statement_hints:
per_dialect = [
ht
for (dialect_name, ht) in select_stmt._statement_hints
if dialect_name in ("*", self.dialect.name)
]
if per_dialect:
text += " " + self.get_statement_hint_text(per_dialect)
if self.ctes:
# In a compound query, CTEs are shared at the compound level
if not is_embedded_select:
nesting_level = len(self.stack) if not toplevel else None
text = (
self._render_cte_clause(nesting_level=nesting_level) + text
)
if select_stmt._suffixes:
text += " " + self._generate_prefixes(
select_stmt, select_stmt._suffixes, **kwargs
)
self.stack.pop(-1)
return text
def _setup_select_hints(self, select):
byfrom = dict(
[
(
from_,
hinttext
% {"name": from_._compiler_dispatch(self, ashint=True)},
)
for (from_, dialect), hinttext in select._hints.items()
if dialect in ("*", self.dialect.name)
]
)
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
def _setup_select_stack(
self, select, compile_state, entry, asfrom, lateral, compound_index
):
correlate_froms = entry["correlate_froms"]
asfrom_froms = entry["asfrom_froms"]
if compound_index == 0:
entry["select_0"] = select
elif compound_index:
select_0 = entry["select_0"]
numcols = len(select_0._all_selected_columns)
if len(compile_state.columns_plus_names) != numcols:
raise exc.CompileError(
"All selectables passed to "
"CompoundSelect must have identical numbers of "
"columns; select #%d has %d columns, select "
"#%d has %d"
% (
1,
numcols,
compound_index + 1,
len(select._all_selected_columns),
)
)
if asfrom and not lateral:
froms = compile_state._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms
),
implicit_correlate_froms=(),
)
else:
froms = compile_state._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms,
)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
"asfrom_froms": new_correlate_froms,
"correlate_froms": all_correlate_froms,
"selectable": select,
"compile_state": compile_state,
}
self.stack.append(new_entry)
return froms
def _compose_select_body(
self,
text,
select,
compile_state,
inner_columns,
froms,
byfrom,
toplevel,
kwargs,
):
text += ", ".join(inner_columns)
if self.linting & COLLECT_CARTESIAN_PRODUCTS:
from_linter = FromLinter({}, set())
warn_linting = self.linting & WARN_LINTING
if toplevel:
self.from_linter = from_linter
else:
from_linter = None
warn_linting = False
if froms:
text += " \nFROM "
if select._hints:
text += ", ".join(
[
f._compiler_dispatch(
self,
asfrom=True,
fromhints=byfrom,
from_linter=from_linter,
**kwargs,
)
for f in froms
]
)
else:
text += ", ".join(
[
f._compiler_dispatch(
self,
asfrom=True,
from_linter=from_linter,
**kwargs,
)
for f in froms
]
)
else:
text += self.default_from()
if select._where_criteria:
t = self._generate_delimited_and_list(
select._where_criteria, from_linter=from_linter, **kwargs
)
if t:
text += " \nWHERE " + t
if warn_linting:
from_linter.warn()
if select._group_by_clauses:
text += self.group_by_clause(select, **kwargs)
if select._having_criteria:
t = self._generate_delimited_and_list(
select._having_criteria, **kwargs
)
if t:
text += " \nHAVING " + t
if select._order_by_clauses:
text += self.order_by_clause(select, **kwargs)
if select._has_row_limiting_clause:
text += self._row_limit_clause(select, **kwargs)
if select._for_update_arg is not None:
text += self.for_update_clause(select, **kwargs)
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(
self,
nesting_level=None,
include_following_stack=False,
):
"""
include_following_stack
Also render the nesting CTEs on the next stack. Useful for
SQL structures like UNION or INSERT that can wrap SELECT
statements containing nesting CTEs.
"""
if not self.ctes:
return ""
if nesting_level and nesting_level > 1:
ctes = util.OrderedDict()
for cte in list(self.ctes.keys()):
cte_level, cte_name = self.level_name_by_cte[
cte._get_reference_cte()
]
is_rendered_level = cte_level == nesting_level or (
include_following_stack and cte_level == nesting_level + 1
)
if not (cte.nesting and is_rendered_level):
continue
ctes[cte] = self.ctes[cte]
else:
ctes = self.ctes
if not ctes:
return ""
ctes_recursive = any([cte.recursive for cte in ctes])
if self.positional:
self.positiontup = (
sum([self.cte_positional[cte] for cte in ctes], [])
+ self.positiontup
)
cte_text = self.get_cte_preamble(ctes_recursive) + " "
cte_text += ", \n".join([txt for txt in ctes.values()])
cte_text += "\n "
if nesting_level and nesting_level > 1:
for cte in list(ctes.keys()):
cte_level, cte_name = self.level_name_by_cte[
cte._get_reference_cte()
]
del self.ctes[cte]
del self.ctes_by_level_name[(cte_level, cte_name)]
del self.level_name_by_cte[cte._get_reference_cte()]
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
if select._distinct_on:
util.warn_deprecated(
"DISTINCT ON is currently supported only by the PostgreSQL "
"dialect. Use of DISTINCT ON for other backends is currently "
"silently ignored, however this usage is deprecated, and will "
"raise CompileError in a future release for all backends "
"that do not support this syntax.",
version="1.4",
)
return "DISTINCT " if select._distinct else ""
def group_by_clause(self, select, **kw):
"""allow dialects to customize how GROUP BY is rendered."""
group_by = self._generate_delimited_list(
select._group_by_clauses, OPERATORS[operators.comma_op], **kw
)
if group_by:
return " GROUP BY " + group_by
else:
return ""
def order_by_clause(self, select, **kw):
"""allow dialects to customize how ORDER BY is rendered."""
order_by = self._generate_delimited_list(
select._order_by_clauses, OPERATORS[operators.comma_op], **kw
)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select, **kw):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler."
)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
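# Illustrative sketch (not part of the original source): for a statement
# like select(table).limit(5).offset(10), this default implementation
# appends roughly
#
#     LIMIT :param_1 OFFSET :param_2
#
# and, when only an OFFSET is present, first emits "LIMIT -1" so that
# backends following this generic syntax still accept the statement.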
def fetch_clause(self, select, **kw):
text = ""
if select._offset_clause is not None:
text += "\n OFFSET %s ROWS" % self.process(
select._offset_clause, **kw
)
if select._fetch_clause is not None:
text += "\n FETCH FIRST %s%s ROWS %s" % (
self.process(select._fetch_clause, **kw),
" PERCENT" if select._fetch_clause_options["percent"] else "",
"WITH TIES"
if select._fetch_clause_options["with_ties"]
else "ONLY",
)
return text
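# Illustrative sketch (not part of the original source): a statement such
# as select(table).offset(2).fetch(5, percent=True, with_ties=True) would
# have this method append roughly
#
#     OFFSET :param_1 ROWS
#     FETCH FIRST :param_2 PERCENT ROWS WITH TIES
#
# while a plain .fetch(5) ends with "ROWS ONLY" instead.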
def visit_table(
self,
table,
asfrom=False,
iscrud=False,
ashint=False,
fromhints=None,
use_schema=True,
from_linter=None,
ambiguous_table_name_map=None,
**kwargs,
):
if from_linter:
from_linter.froms[table] = table.fullname
if asfrom or ashint:
effective_schema = self.preparer.schema_for_object(table)
if use_schema and effective_schema:
ret = (
self.preparer.quote_schema(effective_schema)
+ "."
+ self.preparer.quote(table.name)
)
else:
ret = self.preparer.quote(table.name)
if (
not effective_schema
and ambiguous_table_name_map
and table.name in ambiguous_table_name_map
):
anon_name = self._truncated_identifier(
"alias", ambiguous_table_name_map[table.name]
)
ret = ret + self.get_render_as_alias_suffix(
self.preparer.format_alias(None, anon_name)
)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(
ret, table, fromhints[table], iscrud
)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, from_linter=None, **kwargs):
if from_linter:
from_linter.edges.update(
itertools.product(
join.left._from_objects, join.right._from_objects
)
)
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " JOIN "
return (
join.left._compiler_dispatch(
self, asfrom=True, from_linter=from_linter, **kwargs
)
+ join_type
+ join.right._compiler_dispatch(
self, asfrom=True, from_linter=from_linter, **kwargs
)
+ " ON "
# TODO: likely need asfrom=True here?
+ join.onclause._compiler_dispatch(
self, from_linter=from_linter, **kwargs
)
)
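# Illustrative sketch (not part of the original source): assuming tables
# "a" and "b", the three join flavors handled above render roughly as
#
#     a.join(b, a.c.id == b.c.a_id)             ->  a JOIN b ON a.id = b.a_id
#     a.outerjoin(b, a.c.id == b.c.a_id)        ->  a LEFT OUTER JOIN b ON ...
#     a.join(b, a.c.id == b.c.a_id, full=True)  ->  a FULL OUTER JOIN b ON ...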
def _setup_crud_hints(self, stmt, table_text):
dialect_hints = dict(
[
(table, hint_text)
for (table, dialect), hint_text in stmt._hints.items()
if dialect in ("*", self.dialect.name)
]
)
if stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text, stmt.table, dialect_hints[stmt.table], True
)
return dialect_hints, table_text
def visit_insert(self, insert_stmt, **kw):
compile_state = insert_stmt._compile_state_factory(
insert_stmt, self, **kw
)
insert_stmt = compile_state.statement
toplevel = not self.stack
if toplevel:
self.isinsert = True
if not self.compile_state:
self.compile_state = compile_state
self.stack.append(
{
"correlate_froms": set(),
"asfrom_froms": set(),
"selectable": insert_stmt,
}
)
crud_params = crud._get_crud_params(
self, insert_stmt, compile_state, **kw
)
if (
not crud_params
and not self.dialect.supports_default_values
and not self.dialect.supports_default_metavalue
and not self.dialect.supports_empty_insert
):
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support empty "
"inserts." % self.dialect.name
)
if compile_state._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." % self.dialect.name
)
crud_params_single = crud_params[0]
else:
crud_params_single = crud_params
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(
insert_stmt, insert_stmt._prefixes, **kw
)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
_, table_text = self._setup_crud_hints(insert_stmt, table_text)
if insert_stmt._independent_ctes:
for cte in insert_stmt._independent_ctes:
cte._compiler_dispatch(self, **kw)
text += table_text
if crud_params_single or not supports_default_values:
text += " (%s)" % ", ".join(
[expr for c, expr, value in crud_params_single]
)
if self.returning or insert_stmt._returning:
returning_clause = self.returning_clause(
insert_stmt, self.returning or insert_stmt._returning
)
if self.returning_precedes_values:
text += " " + returning_clause
else:
returning_clause = None
if insert_stmt.select is not None:
# placed here by crud.py
select_text = self.process(
self.stack[-1]["insert_from_select"], insert_into=True, **kw
)
if self.ctes and self.dialect.cte_follows_insert:
nesting_level = len(self.stack) if not toplevel else None
text += " %s%s" % (
self._render_cte_clause(
nesting_level=nesting_level,
include_following_stack=True,
),
select_text,
)
else:
text += " %s" % select_text
elif not crud_params and supports_default_values:
text += " DEFAULT VALUES"
elif compile_state._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)"
% (", ".join(value for c, expr, value in crud_param_set))
for crud_param_set in crud_params
)
)
else:
insert_single_values_expr = ", ".join(
[value for c, expr, value in crud_params]
)
text += " VALUES (%s)" % insert_single_values_expr
if toplevel and insert_stmt._post_values_clause is None:
# don't assign insert_single_values_expr if _post_values_clause
# is present.  What this means concretely is that the
# "fast insert executemany helper" won't be used; in other
# words we won't convert "executemany()" of many parameter
# sets into a single INSERT with many elements in VALUES.
# We can't apply that optimization safely if for example the
# statement includes a clause like "ON CONFLICT DO UPDATE"
self.insert_single_values_expr = insert_single_values_expr
if insert_stmt._post_values_clause is not None:
post_values_clause = self.process(
insert_stmt._post_values_clause, **kw
)
if post_values_clause:
text += " " + post_values_clause
if returning_clause and not self.returning_precedes_values:
text += " " + returning_clause
if self.ctes and not self.dialect.cte_follows_insert:
nesting_level = len(self.stack) if not toplevel else None
text = (
self._render_cte_clause(
nesting_level=nesting_level, include_following_stack=True
)
+ text
)
self.stack.pop(-1)
return text
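# Illustrative sketch (not part of the original source): assuming a table
# "t" with columns "a" and "b", a multi-values insert such as
#
#     insert(t).values([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
#
# takes the _has_multi_parameters branch above and renders roughly as
#
#     INSERT INTO t (a, b) VALUES (:a, :b), (:a_m1, :b_m1)
#
# whereas a single-row values() call produces one parenthesized VALUES
# group and records it in self.insert_single_values_expr for the
# "fast insert executemany helper" mentioned above.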
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
kw["asfrom"] = True
return from_table._compiler_dispatch(self, iscrud=True, **kw)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within UPDATE"
)
def visit_update(self, update_stmt, **kw):
compile_state = update_stmt._compile_state_factory(
update_stmt, self, **kw
)
update_stmt = compile_state.statement
toplevel = not self.stack
if toplevel:
self.isupdate = True
if not self.compile_state:
self.compile_state = compile_state
extra_froms = compile_state._extra_froms
is_multitable = bool(extra_froms)
if is_multitable:
# main table might be a JOIN
main_froms = set(selectable._from_objects(update_stmt.table))
render_extra_froms = [
f for f in extra_froms if f not in main_froms
]
correlate_froms = main_froms.union(extra_froms)
else:
render_extra_froms = []
correlate_froms = {update_stmt.table}
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": update_stmt,
}
)
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(
update_stmt, update_stmt._prefixes, **kw
)
table_text = self.update_tables_clause(
update_stmt, update_stmt.table, render_extra_froms, **kw
)
crud_params = crud._get_crud_params(
self, update_stmt, compile_state, **kw
)
if update_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
update_stmt, table_text
)
else:
dialect_hints = None
if update_stmt._independent_ctes:
for cte in update_stmt._independent_ctes:
cte._compiler_dispatch(self, **kw)
text += table_text
text += " SET "
text += ", ".join(expr + "=" + value for c, expr, value in crud_params)
if self.returning or update_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning
)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
render_extra_froms,
dialect_hints,
**kw,
)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._where_criteria:
t = self._generate_delimited_and_list(
update_stmt._where_criteria, **kw
)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if (
self.returning or update_stmt._returning
) and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning
)
if self.ctes:
nesting_level = len(self.stack) if not toplevel else None
text = self._render_cte_clause(nesting_level=nesting_level) + text
self.stack.pop(-1)
return text
def delete_extra_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
DELETE..FROM clause.
This can be used to implement DELETE..USING for example.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within DELETE"
)
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
return from_table._compiler_dispatch(self, asfrom=True, iscrud=True)
def visit_delete(self, delete_stmt, **kw):
compile_state = delete_stmt._compile_state_factory(
delete_stmt, self, **kw
)
delete_stmt = compile_state.statement
toplevel = not self.stack
if toplevel:
self.isdelete = True
if not self.compile_state:
self.compile_state = compile_state
extra_froms = compile_state._extra_froms
correlate_froms = {delete_stmt.table}.union(extra_froms)
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": delete_stmt,
}
)
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(
delete_stmt, delete_stmt._prefixes, **kw
)
text += "FROM "
table_text = self.delete_table_clause(
delete_stmt, delete_stmt.table, extra_froms
)
if delete_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
delete_stmt, table_text
)
else:
dialect_hints = None
if delete_stmt._independent_ctes:
for cte in delete_stmt._independent_ctes:
cte._compiler_dispatch(self, **kw)
text += table_text
if delete_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning
)
if extra_froms:
extra_from_text = self.delete_extra_from_clause(
delete_stmt,
delete_stmt.table,
extra_froms,
dialect_hints,
**kw,
)
if extra_from_text:
text += " " + extra_from_text
if delete_stmt._where_criteria:
t = self._generate_delimited_and_list(
delete_stmt._where_criteria, **kw
)
if t:
text += " WHERE " + t
if delete_stmt._returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning
)
if self.ctes:
nesting_level = len(self.stack) if not toplevel else None
text = self._render_cte_clause(nesting_level=nesting_level) + text
self.stack.pop(-1)
return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
class StrSQLCompiler(SQLCompiler):
"""A :class:`.SQLCompiler` subclass which allows a small selection
of non-standard SQL features to render into a string value.
The :class:`.StrSQLCompiler` is invoked whenever a Core expression
element is directly stringified without calling upon the
:meth:`_expression.ClauseElement.compile` method.
It can render a limited set
of non-standard SQL constructs to assist in basic stringification,
however for more substantial custom or dialect-specific SQL constructs,
it will be necessary to make use of
:meth:`_expression.ClauseElement.compile`
directly.
.. seealso::
:ref:`faq_sql_expression_string`
"""
def _fallback_column_name(self, column):
return "<name unknown>"
@util.preload_module("sqlalchemy.engine.url")
def visit_unsupported_compilation(self, element, err, **kw):
if element.stringify_dialect != "default":
url = util.preloaded.engine_url
dialect = url.URL.create(element.stringify_dialect).get_dialect()()
compiler = dialect.statement_compiler(dialect, None)
if not isinstance(compiler, StrSQLCompiler):
return compiler.process(element)
return super(StrSQLCompiler, self).visit_unsupported_compilation(
element, err
)
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_sequence(self, seq, **kw):
return "<next sequence value: %s>" % self.preparer.format_sequence(seq)
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in base._select_iterables(returning_cols)
]
return "RETURNING " + ", ".join(columns)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
kw["asfrom"] = True
return "FROM " + ", ".join(
t._compiler_dispatch(self, fromhints=from_hints, **kw)
for t in extra_froms
)
def delete_extra_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
kw["asfrom"] = True
return ", " + ", ".join(
t._compiler_dispatch(self, fromhints=from_hints, **kw)
for t in extra_froms
)
def visit_empty_set_expr(self, type_):
return "SELECT 1 WHERE 1!=1"
def get_from_hint_text(self, table, text):
return "[%s]" % text
def visit_regexp_match_op_binary(self, binary, operator, **kw):
return self._generate_generic_binary(binary, " <regexp> ", **kw)
def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
return self._generate_generic_binary(binary, " <not regexp> ", **kw)
def visit_regexp_replace_op_binary(self, binary, operator, **kw):
replacement = binary.modifiers["replacement"]
return "<regexp replace>(%s, %s, %s)" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
replacement._compiler_dispatch(self, **kw),
)
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(
self.dialect, None, schema_translate_map=self.schema_translate_map
)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
def construct_params(self, params=None, extracted_parameters=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ""
else:
table, sch = path[-1], path[0]
context.setdefault("table", table)
context.setdefault("schema", sch)
context.setdefault("fullname", preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
def visit_create_schema(self, create, **kw):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop, **kw):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create, **kw):
table = create.element
preparer = self.preparer
text = "\nCREATE "
if table._prefixes:
text += " ".join(table._prefixes) + " "
text += "TABLE "
if create.if_not_exists:
text += "IF NOT EXISTS "
text += preparer.format_table(table) + " "
create_table_suffix = self.create_table_suffix(table)
if create_table_suffix:
text += create_table_suffix + " "
text += "("
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(
create_column, first_pk=column.primary_key and not first_pk
)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
raise exc.CompileError(
"(in table '%s', column '%s'): %s"
% (table.description, column.name, ce.args[0])
) from ce
const = self.create_table_constraints(
table,
_include_foreign_key_constraints=create.include_foreign_key_constraints, # noqa
)
if const:
text += separator + "\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
def visit_create_column(self, create, first_pk=False, **kw):
column = create.element
if column.system:
return None
text = self.get_column_specification(column, first_pk=first_pk)
const = " ".join(
self.process(constraint) for constraint in column.constraints
)
if const:
text += " " + const
return text
def create_table_constraints(
self, table, _include_foreign_key_constraints=None, **kw
):
# On some databases the order is significant: visit PK first, then the
# other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
all_fkcs = table.foreign_key_constraints
if _include_foreign_key_constraints is not None:
omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
else:
omit_fkcs = set()
constraints.extend(
[
c
for c in table._sorted_constraints
if c is not table.primary_key and c not in omit_fkcs
]
)
return ", \n\t".join(
p
for p in (
self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None
or constraint._create_rule(self)
)
and (
not self.dialect.supports_alter
or not getattr(constraint, "use_alter", False)
)
)
if p is not None
)
def visit_drop_table(self, drop, **kw):
text = "\nDROP TABLE "
if drop.if_exists:
text += "IF EXISTS "
return text + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop, **kw):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError(
"Index '%s' is not associated " "with any table." % index.name
)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True, **kw
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
if index.name is None:
raise exc.CompileError(
"CREATE INDEX requires that the index have a name"
)
text += "INDEX "
if create.if_not_exists:
text += "IF NOT EXISTS "
text += "%s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=include_schema),
preparer.format_table(
index.table, use_schema=include_table_schema
),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
return text
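# Illustrative sketch (not part of the original source): assuming
#
#     Index("ix_account_email", account.c.email,
#           func.lower(account.c.name), unique=True)
#
# a CreateIndex construct would be rendered by this method roughly as
#
#     CREATE UNIQUE INDEX ix_account_email ON account (email, lower(name))
#
# with the index expressions compiled using literal_binds and without
# table qualification.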
def visit_drop_index(self, drop, **kw):
index = drop.element
if index.name is None:
raise exc.CompileError(
"DROP INDEX requires that the index have a name"
)
text = "\nDROP INDEX "
if drop.if_exists:
text += "IF EXISTS "
return text + self._prepared_index_name(index, include_schema=True)
def _prepared_index_name(self, index, include_schema=False):
if index.table is not None:
effective_schema = self.preparer.schema_for_object(index.table)
else:
effective_schema = None
if include_schema and effective_schema:
schema_name = self.preparer.quote_schema(effective_schema)
else:
schema_name = None
index_name = self.preparer.format_index(index)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
def visit_add_constraint(self, create, **kw):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element),
)
def visit_set_table_comment(self, create, **kw):
return "COMMENT ON TABLE %s IS %s" % (
self.preparer.format_table(create.element),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_table_comment(self, drop, **kw):
return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table(
drop.element
)
def visit_set_column_comment(self, create, **kw):
return "COMMENT ON COLUMN %s IS %s" % (
self.preparer.format_column(
create.element, use_table=True, use_schema=True
),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_column_comment(self, drop, **kw):
return "COMMENT ON COLUMN %s IS NULL" % self.preparer.format_column(
drop.element, use_table=True
)
def get_identity_options(self, identity_options):
text = []
if identity_options.increment is not None:
text.append("INCREMENT BY %d" % identity_options.increment)
if identity_options.start is not None:
text.append("START WITH %d" % identity_options.start)
if identity_options.minvalue is not None:
text.append("MINVALUE %d" % identity_options.minvalue)
if identity_options.maxvalue is not None:
text.append("MAXVALUE %d" % identity_options.maxvalue)
if identity_options.nominvalue is not None:
text.append("NO MINVALUE")
if identity_options.nomaxvalue is not None:
text.append("NO MAXVALUE")
if identity_options.cache is not None:
text.append("CACHE %d" % identity_options.cache)
if identity_options.order is not None:
text.append("ORDER" if identity_options.order else "NO ORDER")
if identity_options.cycle is not None:
text.append("CYCLE" if identity_options.cycle else "NO CYCLE")
return " ".join(text)
def visit_create_sequence(self, create, prefix=None, **kw):
text = "CREATE SEQUENCE %s" % self.preparer.format_sequence(
create.element
)
if prefix:
text += prefix
if create.element.start is None:
create.element.start = self.dialect.default_sequence_base
options = self.get_identity_options(create.element)
if options:
text += " " + options
return text
def visit_drop_sequence(self, drop, **kw):
return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop, **kw):
constraint = drop.element
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
else:
formatted_name = None
if formatted_name is None:
raise exc.CompileError(
"Can't emit DROP CONSTRAINT for constraint %r; "
"it has no name" % drop.element
)
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
formatted_name,
drop.cascade and " CASCADE" or "",
)
def get_column_specification(self, column, **kwargs):
colspec = (
self.preparer.format_column(column)
+ " "
+ self.dialect.type_compiler.process(
column.type, type_expression=column
)
)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.computed is not None:
colspec += " " + self.process(column.computed)
if (
column.identity is not None
and self.dialect.supports_identity_columns
):
colspec += " " + self.process(column.identity)
if not column.nullable and (
not column.identity or not self.dialect.supports_identity_columns
):
colspec += " NOT NULL"
return colspec
def create_table_suffix(self, table):
return ""
def post_create_table(self, table):
return ""
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, str):
return self.sql_compiler.render_literal_value(
column.server_default.arg, sqltypes.STRINGTYPE
)
else:
return self.sql_compiler.process(
column.server_default.arg, literal_binds=True
)
else:
return None
def visit_table_or_column_check_constraint(self, constraint, **kw):
if constraint.is_column_level:
return self.visit_column_check_constraint(constraint)
else:
return self.visit_check_constraint(constraint)
def visit_check_constraint(self, constraint, **kw):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(
constraint.sqltext, include_table=False, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint, **kw):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(
constraint.sqltext, include_table=False, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint, **kw):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name)
for c in (
constraint.columns_autoinc_first
if constraint._implicit_generated
else constraint.columns
)
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint, **kw):
preparer = self.preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
remote_table = list(constraint.elements)[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
", ".join(
preparer.quote(f.parent.name) for f in constraint.elements
),
self.define_constraint_remote_table(
constraint, remote_table, preparer
),
", ".join(
preparer.quote(f.column.name) for f in constraint.elements
),
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
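# Illustrative sketch (not part of the original source): assuming a child
# table declared with
#
#     ForeignKeyConstraint(["parent_id"], ["parent.id"],
#                          name="fk_child_parent", ondelete="CASCADE")
#
# this method would render the constraint roughly as
#
#     CONSTRAINT fk_child_parent FOREIGN KEY(parent_id)
#         REFERENCES parent (id) ON DELETE CASCADE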
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint, **kw):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
", ".join(self.preparer.quote(c.name) for c in constraint)
)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % self.preparer.validate_sql_phrase(
constraint.ondelete, FK_ON_DELETE
)
if constraint.onupdate is not None:
text += " ON UPDATE %s" % self.preparer.validate_sql_phrase(
constraint.onupdate, FK_ON_UPDATE
)
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % self.preparer.validate_sql_phrase(
constraint.initially, FK_INITIALLY
)
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
def visit_computed_column(self, generated, **kw):
text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
if generated.persisted is True:
text += " STORED"
elif generated.persisted is False:
text += " VIRTUAL"
return text
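# Illustrative sketch (not part of the original source): a column declared
# as Column("total", Integer, Computed("price * qty", persisted=True))
# would have its DDL fragment rendered by this method as
#
#     GENERATED ALWAYS AS (price * qty) STORED
#
# with "VIRTUAL" emitted instead when persisted=False, and neither keyword
# when persistence is left unspecified.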
def visit_identity_column(self, identity, **kw):
text = "GENERATED %s AS IDENTITY" % (
"ALWAYS" if identity.always else "BY DEFAULT",
)
options = self.get_identity_options(identity)
if options:
text += " (%s)" % options
return text
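# Illustrative sketch (not part of the original source): a column using
# Identity(always=True, start=100) would have its DDL fragment rendered
# by this method as
#
#     GENERATED ALWAYS AS IDENTITY (START WITH 100)
#
# while Identity() with no options renders simply as
# "GENERATED BY DEFAULT AS IDENTITY".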
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_, **kw):
return "FLOAT"
def visit_REAL(self, type_, **kw):
return "REAL"
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % {"precision": type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % {
"precision": type_.precision,
"scale": type_.scale,
}
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % {"precision": type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % {
"precision": type_.precision,
"scale": type_.scale,
}
def visit_INTEGER(self, type_, **kw):
return "INTEGER"
def visit_SMALLINT(self, type_, **kw):
return "SMALLINT"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP"
def visit_DATETIME(self, type_, **kw):
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
return "TIME"
def visit_CLOB(self, type_, **kw):
return "CLOB"
def visit_NCLOB(self, type_, **kw):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
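# Illustrative sketch (not part of the original source): the helper above
# means that, for example, String(30, collation="utf8") rendered through
# visit_VARCHAR produces
#
#     VARCHAR(30) COLLATE "utf8"
#
# while a length-less String() renders as plain VARCHAR.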
def visit_CHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_, **kw):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_, **kw):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_, **kw):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_, **kw):
return "BLOB"
def visit_BINARY(self, type_, **kw):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_, **kw):
return "BOOLEAN"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_BOOLEAN(type_, **kw)
def visit_time(self, type_, **kw):
return self.visit_TIME(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_DATETIME(type_, **kw)
def visit_date(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_BIGINT(type_, **kw)
def visit_small_integer(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_integer(self, type_, **kw):
return self.visit_INTEGER(type_, **kw)
def visit_real(self, type_, **kw):
return self.visit_REAL(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_numeric(self, type_, **kw):
return self.visit_NUMERIC(type_, **kw)
def visit_string(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_unicode(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_enum(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_null(self, type_, **kw):
raise exc.CompileError(
"Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_
)
def visit_type_decorator(self, type_, **kw):
return self.process(type_.type_engine(self.dialect), **kw)
def visit_user_defined(self, type_, **kw):
return type_.get_col_spec(**kw)
class StrSQLTypeCompiler(GenericTypeCompiler):
def process(self, type_, **kw):
try:
_compiler_dispatch = type_._compiler_dispatch
except AttributeError:
return self._visit_unknown(type_, **kw)
else:
return _compiler_dispatch(self, **kw)
def __getattr__(self, key):
if key.startswith("visit_"):
return self._visit_unknown
else:
raise AttributeError(key)
def _visit_unknown(self, type_, **kw):
if type_.__class__.__name__ == type_.__class__.__name__.upper():
return type_.__class__.__name__
else:
return repr(type_)
def visit_null(self, type_, **kw):
return "NULL"
def visit_user_defined(self, type_, **kw):
try:
get_col_spec = type_.get_col_spec
except AttributeError:
return repr(type_)
else:
return get_col_spec(**kw)
class IdentifierPreparer:
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
schema_for_object = operator.attrgetter("schema")
"""Return the .schema attribute for an object.
For the default IdentifierPreparer, the schema for an object is always
the value of the ".schema" attribute. If the preparer is replaced
with one that has a non-empty schema_translate_map, the value of the
".schema" attribute is rendered as a symbol that will be converted to a
real schema name from the mapping post-compile.
"""
def __init__(
self,
dialect,
initial_quote='"',
final_quote=None,
escape_quote='"',
quote_case_sensitive_collations=True,
omit_schema=False,
):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
Prevent prepending schema name. Useful for databases that do
not support schemas.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self.quote_case_sensitive_collations = quote_case_sensitive_collations
self._strings = {}
self._double_percents = self.dialect.paramstyle in (
"format",
"pyformat",
)
def _with_schema_translate(self, schema_translate_map):
prep = self.__class__.__new__(self.__class__)
prep.__dict__.update(self.__dict__)
def symbol_getter(obj):
name = obj.schema
if name in schema_translate_map and obj._use_schema_map:
if name is not None and ("[" in name or "]" in name):
raise exc.CompileError(
"Square bracket characters ([]) not supported "
"in schema translate name '%s'" % name
)
return quoted_name(
"__[SCHEMA_%s]" % (name or "_none"), quote=False
)
else:
return obj.schema
prep.schema_for_object = symbol_getter
return prep
def _render_schema_translates(self, statement, schema_translate_map):
d = schema_translate_map
if None in d:
d["_none"] = d[None]
def replace(m):
name = m.group(2)
effective_schema = d[name]
if not effective_schema:
effective_schema = self.dialect.default_schema_name
if not effective_schema:
# TODO: no coverage here
raise exc.CompileError(
"Dialect has no default schema name; can't "
"use None as dynamic schema target."
)
return self.quote_schema(effective_schema)
return re.sub(r"(__\[SCHEMA_([^\]]+)\])", replace, statement)
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
value = value.replace(self.escape_quote, self.escape_to_quote)
if self._double_percents:
value = value.replace("%", "%%")
return value
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def validate_sql_phrase(self, element, reg):
"""keyword sequence filter.
a filter for elements that are intended to represent keyword sequences,
such as "INITIALLY", "INITIALLY DEFERRED", etc. no special characters
should be present.
.. versionadded:: 1.3
"""
if element is not None and not reg.match(element):
raise exc.CompileError(
"Unexpected SQL phrase: %r (matching against %r)"
% (element, reg.pattern)
)
return element
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return (
self.initial_quote
+ self._escape_identifier(value)
+ self.final_quote
)
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (
lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(str(value))
or (lc_value != value)
)
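# Illustrative sketch (not part of the original source): with the default
# rule set, identifiers such as "select" (a reserved word), "user name"
# (illegal characters), "2fast" (illegal initial character) and
# "MixedCase" (not already lower case) would all require quoting, while
# "plain_name" would not.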
def _requires_quotes_illegal_chars(self, value):
"""Return True if the given identifier requires quoting, but
not taking case convention into account."""
return not self.legal_characters.match(str(value))
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema name.
The name is quoted if it is a reserved word, contains quote-necessary
characters, or is an instance of :class:`.quoted_name` which includes
``quote`` set to ``True``.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
:param schema: string schema name
:param force: unused
.. deprecated:: 0.9
The :paramref:`.IdentifierPreparer.quote_schema.force`
parameter is deprecated and will be removed in a future
release. This flag has no effect on the behavior of the
:meth:`.IdentifierPreparer.quote` method; please refer to
:class:`.quoted_name`.
"""
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote_schema.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name().",
# deprecated 0.9. warning from 1.3
version="0.9",
)
return self.quote(schema)
def quote(self, ident, force=None):
"""Conditionally quote an identifier.
The identifier is quoted if it is a reserved word, contains
quote-necessary characters, or is an instance of
:class:`.quoted_name` which includes ``quote`` set to ``True``.
Subclasses can override this to provide database-dependent
quoting behavior for identifier names.
:param ident: string identifier
:param force: unused
.. deprecated:: 0.9
The :paramref:`.IdentifierPreparer.quote.force`
parameter is deprecated and will be removed in a future
release. This flag has no effect on the behavior of the
:meth:`.IdentifierPreparer.quote` method; please refer to
:class:`.quoted_name`.
"""
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name().",
# deprecated 0.9. warning from 1.3
version="0.9",
)
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
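# Rough usage sketch (assuming a dialect whose reserved-word list includes ORDER):
#   preparer.quote("order")   -> '"order"'   (reserved word, gets quoted)
#   preparer.quote("mytable") -> 'mytable'   (legal lowercase name, left as-is)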
def format_collation(self, collation_name):
if self.quote_case_sensitive_collations:
return self.quote(collation_name)
else:
return collation_name
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
effective_schema = self.schema_for_object(sequence)
if (
not self.omit_schema
and use_schema
and effective_schema is not None
):
name = self.quote_schema(effective_schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
# Running the savepoint name through quoting is unnecessary
# for all known dialects. This is here to support potential
# third party use cases
ident = name or savepoint.ident
if self._requires_quotes(ident):
ident = self.quote_identifier(ident)
return ident
@util.preload_module("sqlalchemy.sql.naming")
def format_constraint(self, constraint, _alembic_quote=True):
naming = util.preloaded.sql_naming
if constraint.name is elements._NONE_NAME:
name = naming._constraint_name_for_table(
constraint, constraint.table
)
if name is None:
return None
else:
name = constraint.name
if constraint.__visit_name__ == "index":
return self.truncate_and_render_index_name(
name, _alembic_quote=_alembic_quote
)
else:
return self.truncate_and_render_constraint_name(
name, _alembic_quote=_alembic_quote
)
def truncate_and_render_index_name(self, name, _alembic_quote=True):
# calculate these at format time so that ad-hoc changes
# to dialect.max_identifier_length etc. can be reflected
# as IdentifierPreparer is long lived
max_ = (
self.dialect.max_index_name_length
or self.dialect.max_identifier_length
)
return self._truncate_and_render_maxlen_name(
name, max_, _alembic_quote
)
def truncate_and_render_constraint_name(self, name, _alembic_quote=True):
# calculate these at format time so that ad-hoc changes
# to dialect.max_identifier_length etc. can be reflected
# as IdentifierPreparer is long lived
max_ = (
self.dialect.max_constraint_name_length
or self.dialect.max_identifier_length
)
return self._truncate_and_render_maxlen_name(
name, max_, _alembic_quote
)
def _truncate_and_render_maxlen_name(self, name, max_, _alembic_quote):
if isinstance(name, elements._truncated_label):
if len(name) > max_:
name = name[0 : max_ - 8] + "_" + util.md5_hex(name)[-4:]
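# (over-long names keep their first max_ - 8 characters plus "_" and a 4-hex-digit
# md5 suffix, so truncated auto-generated labels stay unique within the length limit)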
else:
self.dialect.validate_identifier(name)
if not _alembic_quote:
return name
else:
return self.quote(name)
def format_index(self, index):
return self.format_constraint(index)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and effective_schema:
result = self.quote_schema(effective_schema) + "." + result
return result
def format_schema(self, name):
"""Prepare a quoted schema name."""
return self.quote(name)
def format_label_name(
self,
name,
anon_map=None,
):
"""Prepare a quoted column name."""
if anon_map is not None and isinstance(
name, elements._truncated_label
):
name = name.apply_map(anon_map)
return self.quote(name)
def format_column(
self,
column,
use_table=False,
name=None,
table_name=None,
use_schema=False,
anon_map=None,
):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if anon_map is not None and isinstance(
name, elements._truncated_label
):
name = name.apply_map(anon_map)
if not getattr(column, "is_literal", False):
if use_table:
return (
self.format_table(
column.table, use_schema=use_schema, name=table_name
)
+ "."
+ self.quote(name)
)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return (
self.format_table(
column.table, use_schema=use_schema, name=table_name
)
+ "."
+ name
)
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and effective_schema:
return (
self.quote_schema(effective_schema),
self.format_table(table, use_schema=False),
)
else:
return (self.format_table(table, use_schema=False),)
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = [
re.escape(s)
for s in (
self.initial_quote,
self.final_quote,
self._escape_identifier(self.final_quote),
)
]
r = re.compile(
r"(?:"
r"(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s"
r"|([^\.]+))(?=\.|$))+"
% {"initial": initial, "final": final, "escaped": escaped_final}
)
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [
self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]
]
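# Sketch: for a double-quote preparer, unformat_identifiers('myschema."my.table"')
# yields ['myschema', 'my.table'] - quoted components keep their embedded dots.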
| 33.92768
| 92
| 0.561816
|
4a16a00f15b2f0e72ec4ec89b32f6998084234f9
| 17,939
|
py
|
Python
|
train.py
|
Shreeyak/pytorch-deeplabv3-alphapilot
|
30db7cfc8bf699253308d28c19f03e6249aea9a1
|
[
"MIT"
] | null | null | null |
train.py
|
Shreeyak/pytorch-deeplabv3-alphapilot
|
30db7cfc8bf699253308d28c19f03e6249aea9a1
|
[
"MIT"
] | null | null | null |
train.py
|
Shreeyak/pytorch-deeplabv3-alphapilot
|
30db7cfc8bf699253308d28c19f03e6249aea9a1
|
[
"MIT"
] | null | null | null |
import argparse
import glob
import os
import socket
import timeit
from collections import OrderedDict
from datetime import datetime
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from PIL.ImageMath import eval
from tqdm import tqdm
import imgaug as ia
import oyaml
# PyTorch includes
import torch
import torch.nn as nn
import torch.optim as optim
from attrdict import AttrDict
# Custom includes
from dataloaders import utils
from dataloaders.alphapilot import AlphaPilotSegmentation
from imgaug import augmenters as iaa
# from dataloaders import custom_transforms as tr
from networks import deeplab_resnet, deeplab_xception, unet
from tensorboardX import SummaryWriter
from termcolor import colored
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
# from inference import labels
# from train import labels
# parser = argparse.ArgumentParser()
# parser.add_argument("-b", "--batch_size", required=True, type=int, help="Num of images per batch for training")
# args = parser.parse_args()
###################### Load Config File #############################
CONFIG_FILE_PATH = 'config/config.yaml'
with open(CONFIG_FILE_PATH) as fd:
config_yaml = oyaml.safe_load(fd) # Returns an ordered dict. Used for printing. safe_load also works with PyYAML >= 6, where bare load() requires a Loader.
config = AttrDict(config_yaml)
print(colored('Config being used for training:\n{}\n\n'.format(oyaml.dump(config_yaml)), 'green'))
# Setting parameters
nEpochs = 100 # Number of epochs for training
resume_epoch = 0 # Default is 0, change if want to resume
p = OrderedDict() # Parameters to include in report
p['trainBatchSize'] = config.train.batchSize # Training batch size
testBatchSize = 1 # Testing batch size
useTest = True # See evolution of the test set when training
nTestInterval = 1 # Run on test set every nTestInterval epochs
snapshot = 2 # Store a model every snapshot epochs
p['nAveGrad'] = 1 # Average the gradient of several iterations
p['lr'] = 1e-6 # Learning rate
p['wd'] = 5e-2 # Weight decay
p['momentum'] = 0.9 # Momentum
p['epoch_size'] = 1 # How many epochs to change learning rate
p['Model'] = 'deeplab' # Choose model: unet or deeplab
backbone = 'xception' # For deeplab only: use xception or resnet as the feature extractor
num_of_classes = 2
imsize = 512 # 256 or 512
output_stride = 8 # 8 or 16; 8 is better. Output stride of the deeplab model: a smaller stride gives higher-resolution feature maps at extra compute cost.
numInputChannels = 3
def save_test_img(inputs, outputs, ii):
fig = plt.figure()
ax0 = plt.subplot(121)
ax1 = plt.subplot(122)
# Input RGB img
rgb_img = inputs[0]
# inv_normalize = transforms.Normalize(
# mean=[-0.5 / 0.5, -0.5 / 0.5, -0.5 / 0.5],
# std=[1 / 0.5, 1 / 0.5, 1 / 0.5]
# )
# rgb_img = inv_normalize(rgb_img)
rgb_img = rgb_img.detach().cpu().numpy()
rgb_img = np.transpose(rgb_img, (1, 2, 0))
# Inference Result
predictions = torch.max(outputs[:1], 1)[1].detach().cpu().numpy()
output_rgb = utils.decode_seg_map_sequence(predictions)
output_rgb = output_rgb.numpy()
output_rgb = np.transpose(output_rgb[0], (1, 2, 0))
# Create plot
ax0.imshow(rgb_img)
ax0.set_title('Source RGB Image') # subplot 211 title
ax1.imshow(output_rgb)
ax1.set_title('Inference result')
fig.savefig('data/results/current_training_model/%04d-results.png' % (ii))
plt.close('all')
save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
if resume_epoch != 0:
runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
run_id = int(runs[-1].split('_')[-1]) if runs else 0
else:
runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0
print('run id: ', run_id)
save_dir = os.path.join(save_dir_root, 'run', 'run_{:02d}'.format(run_id))
# Network definition
if p['Model'] == 'deeplab':
if backbone == 'xception':
net = deeplab_xception.DeepLabv3_plus(nInputChannels=numInputChannels, n_classes=num_of_classes, os=output_stride, pretrained=True)
elif backbone == 'resnet':
net = deeplab_resnet.DeepLabv3_plus(nInputChannels=numInputChannels, n_classes=num_of_classes, os=output_stride, pretrained=True)
else:
raise NotImplementedError
modelName = 'deeplabv3plus-' + backbone
# Use the following optimizer
optimizer = optim.SGD(net.parameters(), lr=p['lr'], momentum=p['momentum'], weight_decay=p['wd'])
p['optimizer'] = str(optimizer)
# Use the following loss function
criterion = utils.cross_entropy2d
elif p['Model'] == 'unet':
net = unet.Unet(num_classes=num_of_classes)
modelName = 'unet'
# Use the following optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=0.0001, weight_decay=0.0001)
exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
p['optimizer'] = str(optimizer)
# Use the following loss function
criterion = nn.CrossEntropyLoss(reduction='sum')  # equivalent to the deprecated size_average=False, reduce=True
else:
raise NotImplementedError
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#criterion = criterion.to(device) #TODO: IS THIS NEEDED?
# Enable Multi-GPU training
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
# dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
net = nn.DataParallel(net)
if resume_epoch == 0:
print("Training deeplabv3+ from scratch...")
else:
print("Initializing weights from: {}...".format(
os.path.join(save_dir, 'models', modelName + '_epoch-' + str(resume_epoch - 1) + '.pth')))
net.load_state_dict(
torch.load(os.path.join(save_dir, 'models', modelName + '_epoch-' + str(resume_epoch - 1) + '.pth'),
map_location=lambda storage, loc: storage)) # Load all tensors onto the CPU
net.to(device)
if resume_epoch != nEpochs:
# Logging into Tensorboard
log_dir = os.path.join(save_dir, 'models', datetime.now().strftime(
'%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)
augs_train = iaa.Sequential([
# Geometric Augs
iaa.Resize((imsize, imsize), 0)
# iaa.Fliplr(0.5),
# iaa.Flipud(0.5),
# iaa.Rot90((0, 4)),
# # Blur and Noise
# iaa.Sometimes(0.10, iaa.OneOf([iaa.GaussianBlur(sigma=(1.5, 2.5), name="gaus_blur"),
# iaa.MotionBlur(k=13, angle=[0, 180, 270, 360], direction=[-1.0, 1.0],
# name='motion_blur'),
# ])),
# # Color, Contrast, etc
# iaa.Sometimes(0.30, iaa.CoarseDropout(0.05, size_px=(2, 4), per_channel=0.5, min_size=2, name='dropout')),
# iaa.SomeOf((0, None), [ iaa.Sometimes(0.15, iaa.GammaContrast((0.5, 1.5), name="contrast")),
# iaa.Sometimes(0.15, iaa.Multiply((0.40, 1.60), per_channel=1.0, name="multiply")),
# iaa.Sometimes(0.15, iaa.AddToHueAndSaturation((-30, 30), name="hue_sat")),
# ]),
# # Affine
# iaa.Sometimes(0.10, iaa.Affine(scale={"x": (0.5, 0.7), "y": 1.0})),
# iaa.Sometimes(0.10, iaa.Affine(scale=(0.5, 0.7))),
])
augs_test = iaa.Sequential([
# Geometric Augs
iaa.Resize((imsize, imsize), interpolation='cubic'),
])
# db_train = AlphaPilotSegmentation(
# input_dir=config.train.datasets.images, label_dir=config.train.datasets.labels,
# transform=augs_train,
# input_only=["gaus_blur", "motion_blur", "dropout", "contrast", "multiply", "hue_sat"]
# )
db_train_list = []
for dataset in config.train.datasets:
db = AlphaPilotSegmentation(input_dir=dataset.images, label_dir=dataset.labels,
transform=augs_train, input_only=None)
train_size = int(config.train.percentageDataForTraining * len(db))
db = torch.utils.data.Subset(db, range(train_size))
db_train_list.append(db)
db_train = torch.utils.data.ConcatDataset(db_train_list)
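# db_train now spans every configured training folder as one dataset; the
# DataLoader below shuffles uniformly across all of them.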
for dataset in config.eval.datasetsSynthetic:
db_validation = AlphaPilotSegmentation(
input_dir=dataset.images, label_dir=dataset.labels,
transform=augs_train,
input_only=None
)
db_test_list = []
for dataset in config.eval.datasetsReal:
db_test = AlphaPilotSegmentation(input_dir=dataset.images, transform=augs_test, input_only=None)
db_test_list.append(db_test)
print('size db_train, db_val: ', len(db_train), len(db_validation))
trainloader = DataLoader(db_train, batch_size=p['trainBatchSize'], shuffle=True, num_workers=4, drop_last=True)
validationloader = DataLoader(db_validation, batch_size=p['trainBatchSize'], shuffle=False, num_workers=4, drop_last=True)
testloader = DataLoader(db_test, batch_size=p['trainBatchSize'], shuffle=False, num_workers=4, drop_last=True)
utils.generate_param_report(os.path.join(save_dir, exp_name + '.txt'), p)
num_img_tr = len(trainloader)
num_img_val = len(validationloader)
num_img_ts = len(testloader)
running_loss_tr = 0.0
running_loss_val = 0.0
running_loss_ts = 0.0
aveGrad = 0
global_step = 0
print("Training Network")
# Main Training and Testing Loop
for epoch in range(resume_epoch, nEpochs):
start_time = timeit.default_timer()
#TODO: plot the learning rate
if p['Model'] == 'unet':
exp_lr_scheduler.step()
else:
if epoch % p['epoch_size'] == p['epoch_size'] - 1:
lr_ = utils.lr_poly(p['lr'], epoch, nEpochs, 0.9)
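# utils.lr_poly is assumed to implement the usual DeepLab "poly" schedule:
#   lr = base_lr * (1 - epoch / nEpochs) ** 0.9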
print('(poly lr policy) learning rate: ', lr_)
optimizer = optim.SGD(net.parameters(), lr=lr_, momentum=p['momentum'], weight_decay=p['wd'])
net.train()
for ii, sample_batched in enumerate(tqdm(trainloader)):
inputs, labels, sample_filename = sample_batched
inputs = inputs.to(device)
labels = labels.to(device)
global_step += 1
# print('iter_num: ', ii + 1, '/', num_img_tr)
writer.add_scalar('Epoch Num', epoch, global_step)
torch.set_grad_enabled(True)
outputs = net.forward(inputs)
labels = labels.squeeze(1)
loss = criterion(outputs, labels, size_average=False, batch_average=True)
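# size_average/batch_average match the signature of utils.cross_entropy2d (deeplab branch);
# the unet branch's nn.CrossEntropyLoss would not accept these keyword arguments.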
running_loss_tr += loss.item()
# Print stuff
if ii % num_img_tr == (num_img_tr - 1):
running_loss_tr = running_loss_tr / num_img_tr
writer.add_scalar('data/total_loss_epoch', running_loss_tr, global_step)
print('[Epoch: %d, numImages: %5d]' % (epoch, ii * p['trainBatchSize'] + inputs.shape[0]))
print('Loss: %f' % running_loss_tr)
running_loss_tr = 0
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Backward the averaged gradient
loss /= p['nAveGrad']
loss.backward()
aveGrad += 1
# Update the weights once in p['nAveGrad'] forward passes
if aveGrad % p['nAveGrad'] == 0:
writer.add_scalar('data/total_loss_iter', loss.item(), global_step)
optimizer.step()
optimizer.zero_grad()
aveGrad = 0
# Show 10 * 3 images results each epoch
if num_img_tr < 10:
plot_per_iter = num_img_tr
else:
plot_per_iter = 10
if ii % (num_img_tr // plot_per_iter) == 0:
img_tensor = torch.squeeze((inputs[:3].clone().cpu().data), 0)
output_tensor = torch.squeeze(utils.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy()).type(torch.FloatTensor), 0)
label_tensor = torch.squeeze(utils.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy()).type(torch.FloatTensor), 0)
images = []
for img, output, label in zip(img_tensor, output_tensor, label_tensor):
images.append(img)
images.append(output)
images.append(label)
grid_image = make_grid(images, 3, normalize=True, scale_each=True)
writer.add_image('Train', grid_image, global_step)
# Save the model
# TODO : bring the model to cpu before saving
if (epoch % snapshot) == snapshot - 1:
torch.save(net.state_dict(), os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth'))
print("Save model at {}\n".format(os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth')))
# One testing epoch
if useTest and epoch % nTestInterval == (nTestInterval - 1):
net.eval()
images_list = []
dataloader_list = [validationloader, testloader]
for dataloader in dataloader_list:
total_iou = 0.0
if dataloader == validationloader:
print("Validation Running")
if dataloader == testloader:
print("Testing Running")
for ii, sample_batched in enumerate(tqdm(dataloader)):
inputs, labels, sample_filename = sample_batched
# Forward pass of the mini-batch
inputs = inputs.to(device)
labels = labels.to(device)
with torch.no_grad():
outputs = net.forward(inputs)
predictions = torch.max(outputs, 1)[1]
labels = labels.squeeze(1)
loss = criterion(outputs, labels)
# run validation dataset
if dataloader == validationloader:
# print('iter_num: ', ii + 1, '/', num_img_val)
running_loss_val += loss.item()
_total_iou, per_class_iou, num_images_per_class = utils.get_iou(predictions, labels, n_classes=num_of_classes)
total_iou += _total_iou
# Print stuff
if ii % num_img_val == num_img_val - 1:
miou = total_iou / (ii * p['trainBatchSize'] + inputs.shape[0])
running_loss_val = running_loss_val / num_img_val
print('Validation:')
print('[Epoch: %d, numImages: %5d]' % (epoch, ii * p['trainBatchSize'] + inputs.shape[0]))
writer.add_scalar('data/val_loss_epoch', running_loss_val, global_step)
writer.add_scalar('data/val_miour', miou, global_step)
print('Loss: %f' % running_loss_val)
print('MIoU: %f\n' % miou)
running_loss_val = 0
print(inputs.shape)
# Show 10 * 2 images results each epoch
img_tensor = (inputs[:2].clone().cpu().data)
output_tensor = utils.decode_seg_map_sequence(torch.max(outputs[:2], 1)[1].detach().cpu().numpy()).type(torch.FloatTensor)
label_tensor = utils.decode_seg_map_sequence(torch.squeeze(labels[:2], 1).detach().cpu().numpy()).type(torch.FloatTensor)
images_list = []
for i in range(0, 2):
images_list.append(img_tensor[i])
images_list.append(output_tensor[i])
images_list.append(label_tensor[i])
grid_image = make_grid(images_list, 3, normalize=True, scale_each=True)
writer.add_image('Validation', grid_image, global_step)
if dataloader == testloader:
# print('iter_num: ', ii + 1, '/', num_img_ts)
running_loss_ts += loss.item()
_total_iou, per_class_iou, num_images_per_class = utils.get_iou(predictions, labels, n_classes=num_of_classes)
total_iou += _total_iou
# print stuff
save_test_img(inputs, outputs, ii)
if ii % num_img_ts == num_img_ts - 1:
# Calculate the loss and plot the graph
miou = total_iou / (ii * p['trainBatchSize'] + inputs.shape[0])
running_loss_ts = running_loss_ts / num_img_ts
print('Test:')
print('[Epoch: %d, numImages: %5d]' % (epoch, ii * testBatchSize + inputs.shape[0]))
writer.add_scalar('data/test_loss_epoch', running_loss_ts, global_step)
writer.add_scalar('data/test_miour', miou, global_step)
print('Loss: %f' % running_loss_ts)
print('MIoU: %f\n' % miou)
running_loss_ts = 0
# Show 10 * 3 images results each epoch
img_tensor = inputs[:1].clone().cpu().data
output_tensor = utils.decode_seg_map_sequence(torch.max(outputs[:1], 1)[1].detach().cpu().numpy()).type(torch.FloatTensor)
label_tensor = utils.decode_seg_map_sequence(torch.squeeze(labels[:1], 1).detach().cpu().numpy()).type(torch.FloatTensor)
images_list.append(img_tensor[0])
images_list.append(output_tensor[0])
images_list.append(label_tensor[0])
grid_image = make_grid(images_list, 3, normalize=True, scale_each=True)  # TODO: Probably shouldn't scale each. And should give min-max range for normalization.
writer.add_image('Test', grid_image, global_step)
writer.close()
| 40.585973
| 183
| 0.61213
|
4a16a01bee25ace29ff1ce09407ef8f7f370d7c9
| 10,033
|
py
|
Python
|
pywikibot/families/wikipedia_family.py
|
Amitie10g/pywikibot
|
9c1ee13eb3e5077f929eaeef4959b76b0151b616
|
[
"MIT"
] | null | null | null |
pywikibot/families/wikipedia_family.py
|
Amitie10g/pywikibot
|
9c1ee13eb3e5077f929eaeef4959b76b0151b616
|
[
"MIT"
] | null | null | null |
pywikibot/families/wikipedia_family.py
|
Amitie10g/pywikibot
|
9c1ee13eb3e5077f929eaeef4959b76b0151b616
|
[
"MIT"
] | null | null | null |
"""Family module for Wikipedia."""
#
# (C) Pywikibot team, 2004-2021
#
# Distributed under the terms of the MIT license.
#
from pywikibot import family
# The Wikimedia family that is known as Wikipedia, the Free Encyclopedia
class Family(family.SubdomainFamily, family.WikimediaFamily):
"""Family module for Wikipedia."""
name = 'wikipedia'
closed_wikis = [
# See:
# https://noc.wikimedia.org/conf/highlight.php?file=dblists/closed.dblist
'aa', 'cho', 'ho', 'hz', 'ii', 'kj', 'kr', 'lrc', 'mh', 'mus', 'ng',
'ten',
]
removed_wikis = [
# See:
# https://noc.wikimedia.org/conf/highlight.php?file=dblists/deleted.dblist
'dk', 'ru-sib', 'tlh', 'tokipona', 'zh_cn', 'zh_tw',
]
languages_by_size = [
'en', 'ceb', 'sv', 'de', 'fr', 'nl', 'ru', 'es', 'it', 'arz', 'pl',
'ja', 'vi', 'war', 'zh', 'ar', 'uk', 'pt', 'fa', 'ca', 'sr', 'id',
'no', 'ko', 'fi', 'hu', 'cs', 'tr', 'sh', 'ce', 'zh-min-nan', 'ro',
'tt', 'eu', 'ms', 'eo', 'he', 'hy', 'bg', 'da', 'azb', 'sk', 'kk',
'min', 'et', 'be', 'hr', 'el', 'simple', 'lt', 'az', 'gl', 'sl', 'ur',
'nn', 'ka', 'hi', 'th', 'ta', 'uz', 'la', 'cy', 'ast', 'vo', 'mk',
'zh-yue', 'bn', 'lv', 'tg', 'my', 'af', 'mg', 'bs', 'oc', 'sq', 'nds',
'mr', 'ky', 'ml', 'be-tarask', 'te', 'new', 'br', 'sw', 'vec', 'pms',
'jv', 'pnb', 'ht', 'su', 'lb', 'ba', 'ga', 'szl', 'is', 'ku', 'lmo',
'cv', 'fy', 'tl', 'wuu', 'an', 'sco', 'diq', 'ckb', 'pa', 'yo', 'ne',
'bar', 'io', 'gu', 'als', 'kn', 'scn', 'bpy', 'ia', 'qu', 'mn', 'avk',
'xmf', 'nv', 'si', 'bat-smg', 'or', 'gd', 'cdo', 'ilo', 'yi', 'am',
'sd', 'bug', 'os', 'frr', 'nap', 'crh', 'hsb', 'ha', 'map-bms', 'mai',
'fo', 'sah', 'li', 'mzn', 'ps', 'eml', 'gor', 'ace', 'bcl', 'sa',
'ban', 'wa', 'zh-classical', 'lij', 'zu', 'mhr', 'mrj', 'shn', 'hif',
'mni', 'as', 'hak', 'roa-tara', 'hyw', 'pam', 'km', 'ie', 'nso', 'rue',
'se', 'bh', 'nds-nl', 'vls', 'sn', 'so', 'mi', 'myv', 'nah', 'sat',
'sc', 'vep', 'lld', 'gan', 'glk', 'kab', 'tk', 'co', 'fiu-vro', 'bo',
'ab', 'kv', 'csb', 'frp', 'pcd', 'kw', 'ug', 'udm', 'gv', 'ay', 'ary',
'zea', 'nrm', 'gn', 'bjn', 'mt', 'lez', 'lfn', 'stq', 'smn', 'lo',
'mwl', 'skr', 'olo', 'rm', 'lad', 'fur', 'gom', 'ang', 'koi', 'ext',
'tyv', 'dsb', 'dty', 'ln', 'cbk-zam', 'dv', 'ksh', 'rw', 'gag', 'bxr',
'pfl', 'ig', 'av', 'pi', 'pag', 'awa', 'haw', 'tay', 'pap', 'krc',
'xal', 'szy', 'za', 'pdc', 'kaa', 'inh', 'atj', 'to', 'arc', 'jam',
'tpi', 'kbp', 'na', 'wo', 'kbd', 'tcy', 'mdf', 'nov', 'ki', 'tet',
'dag', 'lg', 'bi', 'jbo', 'roa-rup', 'fj', 'kg', 'tw', 'lbe', 'xh',
'ty', 'mnw', 'nqo', 'trv', 'shi', 'srn', 'om', 'gcr', 'sm', 'ks',
'chr', 'ltg', 'alt', 'cu', 'nia', 'pih', 'ny', 'got', 'st', 'mad',
'ami', 'kl', 'rmy', 'tn', 'bm', 'ts', 'chy', 've', 'rn', 'tum', 'iu',
'ak', 'ss', 'ch', 'pnt', 'ady', 'ik', 'ee', 'ff', 'din', 'sg', 'dz',
'ti', 'pwn', 'cr',
]
# Sites we want to edit but not count as real languages
test_codes = ['test', 'test2']
# Templates that indicate a category redirect
# Redirects to these templates are automatically included
category_redirect_templates = {
'_default': (),
'ar': ('تحويل تصنيف',),
'ary': ('Category redirect',),
'arz': ('تحويل تصنيف',),
'bn': ('বিষয়শ্রেণী পুনর্নির্দেশ',),
'bs': ('Category redirect',),
'cs': ('Zastaralá kategorie',),
'da': ('Kategoriomdirigering',),
'en': ('Category redirect',),
'es': ('Categoría redirigida',),
'eu': ('Kategoria birzuzendu',),
'fa': ('رده بهتر',),
'fr': ('Catégorie redirigée',),
'gv': ('Aastiurey ronney',),
'hi': ('श्रेणी अनुप्रेषित',),
'hu': ('Kat-redir',),
'id': ('Alih kategori',),
'ja': ('Category redirect',),
'ko': ('분류 넘겨주기',),
'mk': ('Премести категорија',),
'ml': ('Category redirect',),
'ms': ('Pengalihan kategori',),
'mt': ('Rindirizzament kategorija',),
'ne': ('श्रेणी अनुप्रेषण',),
'no': ('Kategoriomdirigering',),
'pt': ('Redirecionamento de categoria',),
'ro': ('Redirect categorie',),
'ru': ('Переименованная категория',),
'sco': ('Category redirect',),
'sh': ('Prekat',),
'simple': ('Category redirect',),
'sl': ('Preusmeritev kategorije',),
'sr': ('Category redirect',),
'sq': ('Kategori e zhvendosur',),
'sv': ('Kategoriomdirigering',),
'tl': ('Category redirect',),
'tr': ('Kategori yönlendirme',),
'uk': ('Categoryredirect',),
'ur': ('زمرہ رجوع مکرر',),
'vi': ('Đổi hướng thể loại',),
'yi': ('קאטעגאריע אריבערפירן',),
'zh': ('分类重定向',),
'zh-yue': ('分類彈去',),
}
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/BPI#Current_implementation
# & https://meta.wikimedia.org/wiki/Special:WikiSets/2
cross_allowed = [
'ab', 'ace', 'ady', 'af', 'ak', 'als', 'am', 'an', 'ang', 'ar', 'arc',
'arz', 'as', 'ast', 'atj', 'av', 'ay', 'az', 'ba', 'bar', 'bat-smg',
'bcl', 'be', 'be-tarask', 'bg', 'bh', 'bi', 'bjn', 'bm', 'bo', 'bpy',
'bug', 'bxr', 'ca', 'cbk-zam', 'cdo', 'ce', 'ceb', 'ch', 'chr', 'chy',
'ckb', 'co', 'cr', 'crh', 'cs', 'csb', 'cu', 'cv', 'cy', 'da', 'diq',
'dsb', 'dty', 'dz', 'ee', 'el', 'eml', 'en', 'eo', 'et', 'eu', 'ext',
'fa', 'ff', 'fi', 'fj', 'fo', 'frp', 'frr', 'fur', 'ga', 'gag', 'gan',
'gd', 'glk', 'gn', 'gom', 'gor', 'got', 'gu', 'gv', 'ha', 'hak', 'haw',
'he', 'hi', 'hif', 'hr', 'hsb', 'ht', 'hu', 'hy', 'ia', 'ie', 'ig',
'ik', 'ilo', 'inh', 'io', 'iu', 'ja', 'jam', 'jbo', 'jv', 'ka', 'kaa',
'kab', 'kbd', 'kg', 'ki', 'kk', 'kl', 'km', 'kn', 'ko', 'koi', 'krc',
'ks', 'ku', 'kv', 'kw', 'ky', 'la', 'lad', 'lb', 'lbe', 'lez', 'lfn',
'lg', 'li', 'lij', 'lmo', 'ln', 'lo', 'lt', 'ltg', 'lv', 'map-bms',
'mdf', 'meta', 'mg', 'mhr', 'mi', 'mk', 'ml', 'mn', 'mrj', 'ms', 'mwl',
'my', 'myv', 'mzn', 'na', 'nah', 'nap', 'nds-nl', 'ne', 'new', 'nl',
'no', 'nov', 'nrm', 'nso', 'nv', 'ny', 'oc', 'olo', 'om', 'or', 'os',
'pa', 'pag', 'pam', 'pap', 'pdc', 'pfl', 'pi', 'pih', 'pms', 'pnb',
'pnt', 'ps', 'qu', 'rm', 'rmy', 'rn', 'roa-rup', 'roa-tara', 'ru',
'rue', 'rw', 'sa', 'sah', 'sc', 'scn', 'sco', 'sd', 'se', 'sg', 'sh',
'shn', 'si', 'simple', 'sk', 'sm', 'sn', 'so', 'srn', 'ss', 'st',
'stq', 'su', 'sv', 'sw', 'szl', 'ta', 'tcy', 'te', 'tet', 'tg', 'th',
'ti', 'tk', 'tl', 'tn', 'to', 'tpi', 'tr', 'ts', 'tt', 'tum', 'tw',
'ty', 'tyv', 'udm', 'ug', 'uz', 've', 'vec', 'vep', 'vls', 'vo', 'wa',
'war', 'wo', 'xal', 'xh', 'xmf', 'yi', 'yo', 'za', 'zea', 'zh',
'zh-classical', 'zh-min-nan', 'zh-yue', 'zu',
]
# Languages that used to be coded in iso-8859-1
latin1old = {
'af', 'bs', 'co', 'cs', 'da', 'de', 'en', 'es', 'et', 'eu', 'fi', 'fr',
'fy', 'ga', 'gl', 'ia', 'id', 'it', 'la', 'lt', 'lv', 'mi', 'mr', 'na',
'nds', 'nl', 'no', 'pt', 'simple', 'sl', 'sv', 'sw', 'test', 'tt',
'uk', 'vi', 'vo'
}
# Subpages for documentation.
# TODO: List is incomplete, to be completed for missing languages.
# TODO: Remove comments for appropriate pages
doc_subpages = {
'_default': (('/doc', ),
['arz', 'bn', 'cs', 'da', 'en', 'es', 'hr', 'hu', 'id',
'ilo', 'ja', 'ms', 'pt', 'ro', 'ru', 'simple', 'sh',
'vi', 'zh']
),
'ar': ('/شرح', '/doc', ),
'ary': ('/توثيق', '/شرح', '/doc', ),
'bs': ('/dok', ),
'ca': ('/ús', ),
'de': ('Doku', '/Meta'),
'dsb': ('/Dokumentacija', ),
'eu': ('txantiloi dokumentazioa', '/dok'),
'fa': ('/doc', '/توضیحات'),
# fi: no idea how to handle this type of subpage at :Metasivu:
'fi': ((), ),
'fr': ('/Documentation',),
'hsb': ('/Dokumentacija', ),
'it': ('/Man', ),
'ka': ('/ინფო', ),
'ko': ('/설명문서', ),
'no': ('/dok', ),
'nn': ('/dok', ),
'pl': ('/opis', ),
'sk': ('/Dokumentácia', ),
'sr': ('/док', ),
'sv': ('/dok', ),
'uk': ('/Документація', ),
'ur': ('/doc', '/دستاویز'),
}
# Templates that indicate an edit should be avoided
edit_restricted_templates = {
'ar': ('تحرر',),
'ary': ('كاتبدل دابا',),
'arz': ('بتتطور',),
'bs': ('Izmjena u toku',),
'cs': ('Pracuje se',),
'de': ('Inuse', 'In use', 'In bearbeitung', 'Inbearbeitung',),
'en': ('Inuse', 'In use'),
'fa': ('ویرایش',),
'fr': ('En cours',),
'he': ('בעבודה',),
'hr': ('Radovi',),
'hy': ('Խմբագրում եմ',),
'sr': ('Радови у току', 'Рут',),
'test': ('In use',),
'ur': ('زیر ترمیم',),
'zh': ('Inuse',),
}
# Archive templates that indicate an edit of non-archive bots
# should be avoided
archived_page_templates = {
'ar': ('أرشيف نقاش',),
'arz': ('صفحة ارشيف',),
'cs': ('Archiv', 'Archiv Wikipedie', 'Archiv diskuse',
'Archivace start', 'Posloupnost archivů', 'Rfa-archiv-start',
'Rfc-archiv-start',),
'de': ('Archiv',),
}
def encodings(self, code):
"""Return a list of historical encodings for a specific site."""
# Historic compatibility
if code == 'pl':
return 'utf-8', 'iso8859-2'
if code == 'ru':
return 'utf-8', 'iso8859-5'
if code in self.latin1old:
return 'utf-8', 'iso-8859-1'
return super().encodings(code)
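# e.g. encodings('pl') yields ('utf-8', 'iso8859-2'); codes not listed above
# simply fall through to the parent family's UTF-8 default.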
| 43.812227
| 82
| 0.424399
|
4a16a0db65c77bfab77ff3fb0b8633bc3bcb9ed3
| 2,808
|
py
|
Python
|
main.py
|
Sech1/Longest_Path_Dag
|
b22158a5e2345791a8a87e326145aab1d42627f0
|
[
"CECILL-B"
] | null | null | null |
main.py
|
Sech1/Longest_Path_Dag
|
b22158a5e2345791a8a87e326145aab1d42627f0
|
[
"CECILL-B"
] | null | null | null |
main.py
|
Sech1/Longest_Path_Dag
|
b22158a5e2345791a8a87e326145aab1d42627f0
|
[
"CECILL-B"
] | null | null | null |
import networkx as nx
from graph import *
def main():
graphs = read_in_files()
for graph in graphs:
dag_sp(graph, graph.sources, 14)
def read_in_files():
graphs = list()
files = ['input/BAinCS.gml', 'input/BSinCS.gml', 'input/BSinECE.gml']
for file in files:
with open(file) as file:
gml = file.read()
graph = nx.parse_gml(gml)
print(graph)
graph = nx.convert_node_labels_to_integers(graph)
num_nodes = int(graph.number_of_nodes())
out_graph = Graph(num_nodes)
out_graph.sources = list(graph.nodes)
for u, v, a in graph.edges(data=True):
out_graph.graph_edges_weights[u].append([v, 1])
graphs.append(out_graph)
return graphs
def topological_sort(graph, current_vertex, visited, stack):
# creating a list of bools for visited, index 0 is null because we're starting at 1
# visited = [False] * (graph.vertices + 1)
# creating a recursion stack to detect for cycle 0 is null because we're starting at 1
# top_list = []
visited[current_vertex] = True
stack = stack
if current_vertex in graph.graph_edges_weights.keys():
for node, weight in graph.graph_edges_weights[current_vertex]:
if not visited[node]:
topological_sort(graph, node, visited, stack)
stack.append(current_vertex)
def dag_sp(graph, source_node_list, target_node):
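# Despite the "sp" name, the "<" relaxation below keeps the larger distance, so this
# computes the LONGEST path (unit edge weights) from each source to target_node.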
out_list = defaultdict(list)
for source in source_node_list:
visited = [False] * int(graph.vertices + 1)
stack = []
for i in range(1, graph.vertices + 1):
if not visited[i]:
topological_sort(graph, source, visited, stack)
distance = [float("-inf")] * int(graph.vertices + 1)
distance[source] = 0
shortest_path = [-1] * int(graph.vertices)
while stack:
index = stack.pop()
for node, weight in graph.graph_edges_weights[index]:
if distance[node] < distance[index] + weight:
distance[node] = distance[index] + weight
shortest_path[node] = int(index)
for k in range(target_node, int(target_node + 1)):
print_recursive(shortest_path, k, out_list, source)
source_longest = max(out_list, key=lambda x: len(out_list[x]))
longest_path = out_list[source_longest]
print("DAG SP Longest Path Output: ")
print("SOURCE: " + str(source_longest))
print("It's path is:")
print(longest_path)
print('\n')
def print_recursive(shortest_path, vertex, out_list=None, source=None):
if vertex < 0:
return
print_recursive(shortest_path, shortest_path[vertex], out_list, source)
out_list[source].append(int(vertex))
if __name__ == '__main__':
main()
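# Toy sanity check of the relaxation (unit weights): with edges 0->1, 1->2 and 0->2,
# the longest path from 0 to 2 is 0->1->2 of length 2, which the "<" relaxation
# above recovers by preferring the larger of the two candidate distances.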
| 32.275862
| 90
| 0.631766
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.