"""
Definition of the :class:`NativeRegistration` class.
"""
from pathlib import Path
from typing import Tuple
from typing import Union
import nibabel as nib
from brain_parts.parcellation.parcellations import (
Parcellation as parcellation_manager,
)
from nilearn.image.resampling import resample_to_img
from nipype.interfaces.base import TraitError
from tqdm import tqdm
from dmriprep_analyses.manager import DmriprepManager
from dmriprep_analyses.registrations.messages import REFERENCE_FILE_MISSING
from dmriprep_analyses.registrations.utils import DEFAULT_PARCELLATION_NAMING
from dmriprep_analyses.registrations.utils import PROBSEG_THRESHOLD
from dmriprep_analyses.registrations.utils import QUERIES
from dmriprep_analyses.registrations.utils import TRANSFORMS
class NativeRegistration(DmriprepManager):
QUERIES = QUERIES
    #: Default naming for native parcellation outputs
DEFAULT_PARCELLATION_NAMING = DEFAULT_PARCELLATION_NAMING
#: Types of transformations
TRANSFORMS = TRANSFORMS
#: Default probability segmentations' threshold
PROBSEG_THRESHOLD = PROBSEG_THRESHOLD
def __init__(
self,
base_dir: Path,
participant_labels: Union[str, list] = None,
) -> None:
super().__init__(base_dir, participant_labels)
self.parcellation_manager = parcellation_manager()
def initiate_subject(
self, participant_label: str
) -> Tuple[dict, Path, Path]:
"""
        Query the participant's initially required files.
Parameters
----------
participant_label : str
Specific participant's label to be queried
Returns
-------
Tuple[dict,Path,Path]
A tuple of required files for parcellation registration.
"""
return [
grabber(participant_label, queries=self.QUERIES)
for grabber in [
self.get_transforms,
self.get_reference,
self.get_probseg,
]
]
def build_output_dictionary(
self,
parcellation_scheme: str,
reference: Path,
reference_type: str,
) -> dict:
"""
Based on a *reference* image,
reconstruct output names for native parcellation naming.
        Parameters
        ----------
        parcellation_scheme : str
            The parcellation scheme (atlas) used to name the outputs.
        reference : Path
            The reference image.
        reference_type : str
            The reference image type (either "anat" or "dwi").
Returns
-------
        dict
            A dictionary with keys "whole_brain" and "gm_cropped" and their
            corresponding paths.
"""
basic_query = dict(
atlas=parcellation_scheme,
resolution=reference_type,
**self.DEFAULT_PARCELLATION_NAMING.copy(),
)
outputs = dict()
for key, label in zip(["whole_brain", "gm_cropped"], ["", "GM"]):
query = basic_query.copy()
query["label"] = label
outputs[key] = self.data_grabber.build_path(reference, query)
return outputs
def register_to_anatomical(
self,
parcellation_scheme: str,
participant_label: str,
probseg_threshold: float = None,
force: bool = False,
    ) -> Tuple[Path, Path]:
"""
Register a *parcellation scheme* from standard to native anatomical space. # noqa
Parameters
----------
parcellation_scheme : str
A string representing existing key within *self.parcellation_manager.parcellations*.
participant_label : str
Specific participant's label
probseg_threshold : float, optional
Threshold for probability segmentation masking, by default None
force : bool, optional
Whether to re-write existing files, by default False
        Returns
        -------
        Tuple[Path, Path]
            Paths to the "whole_brain" and "gm_cropped" native-space parcellations. # noqa
        """
transforms, reference, gm_probseg = self.initiate_subject(
participant_label
)
whole_brain, gm_cropped = [
self.build_output_dictionary(
parcellation_scheme, reference, "anat"
).get(key)
for key in ["whole_brain", "gm_cropped"]
]
self.parcellation_manager.register_parcellation_scheme(
parcellation_scheme,
participant_label,
reference,
transforms.get("mni2native"),
whole_brain,
force=force,
)
self.parcellation_manager.crop_to_probseg(
parcellation_scheme,
participant_label,
whole_brain,
gm_probseg,
gm_cropped,
masking_threshold=probseg_threshold or self.PROBSEG_THRESHOLD,
force=force,
)
return whole_brain, gm_cropped
def register_dwi(
self,
parcellation_scheme: str,
participant_label: str,
session: str,
anatomical_whole_brain: Path,
anatomical_gm_cropped: Path,
force: bool = False,
):
"""
Resample parcellation scheme from anatomical to DWI space.
Parameters
----------
parcellation_scheme : str
A string representing existing key within *self.parcellation_manager.parcellations*. # noqa
        participant_label : str
            Specific participant's label
        session : str
            The session for which a DWI reference is located
anatomical_whole_brain : Path
Participant's whole-brain parcellation scheme in anatomical space
anatomical_gm_cropped : Path
Participant's GM-cropped parcellation scheme in anatomical space
        force : bool, optional
            Whether to re-write existing files, by default False
        Returns
        -------
        Tuple[Path, Path]
            Paths to the "whole_brain" and "gm_cropped" DWI-space parcellations.
        """
reference = self.get_reference(
participant_label,
"dwi",
{"session": session},
queries=self.QUERIES,
)
if not reference:
raise FileNotFoundError(
REFERENCE_FILE_MISSING.format(
participant_label=participant_label
)
)
whole_brain, gm_cropped = [
self.build_output_dictionary(
parcellation_scheme, reference, "dwi"
).get(key)
for key in ["whole_brain", "gm_cropped"]
]
for source, target in zip(
[anatomical_whole_brain, anatomical_gm_cropped],
[whole_brain, gm_cropped],
):
if not target.exists() or force:
img = resample_to_img(
str(source), str(reference), interpolation="nearest"
)
nib.save(img, target)
return whole_brain, gm_cropped
def run_single_subject(
self,
parcellation_scheme: str,
participant_label: str,
session: Union[str, list] = None,
probseg_threshold: float = None,
force: bool = False,
) -> dict:
"""
Parameters
----------
parcellation_scheme : str
A string representing existing key within *self.parcellation_manager.parcellations*. # noqa
participant_label : str
Specific participant's label
session : Union[str, list], optional
Specific sessions available for *participant_label*, by default None # noqa
probseg_threshold : float, optional
Threshold for probability segmentation masking, by default None
force : bool, optional
Whether to re-write existing files, by default False
Returns
-------
dict
            A dictionary with "anat" and the available (or requested) sessions
            as keys, and the corresponding native parcellations as values.
"""
outputs = {}
anat_whole_brain, anat_gm_cropped = self.register_to_anatomical(
parcellation_scheme, participant_label, probseg_threshold, force
)
outputs["anat"] = {
"whole_brain": anat_whole_brain,
"gm_cropped": anat_gm_cropped,
}
sessions = self.subjects.get(participant_label) or session
if isinstance(sessions, str):
sessions = [sessions]
for session in sessions:
whole_brain, gm_cropped = self.register_dwi(
parcellation_scheme,
participant_label,
session,
anat_whole_brain,
anat_gm_cropped,
force,
)
outputs[session] = {
"whole_brain": whole_brain,
"gm_cropped": gm_cropped,
}
return outputs
def run_dataset(
self,
parcellation_scheme: str,
participant_label: Union[str, list] = None,
probseg_threshold: float = None,
force: bool = False,
):
"""
Register *parcellation_scheme* to all available (or requested) subjects' native space.
Parameters
----------
parcellation_scheme : str
A string representing existing key within *self.parcellation_manager.parcellations*. # noqa
participant_label : Union[str, list], optional
Specific subject/s within the dataset to run, by default None
probseg_threshold : float, optional
Threshold for probability segmentation masking, by default None
        force : bool, optional
            Whether to remove existing products and generate new ones, by default False # noqa
        Returns
        -------
        dict
            A dictionary keyed by participant label, holding each participant's native parcellations.
        """
native_parcellations = {}
if participant_label:
if isinstance(participant_label, str):
participant_labels = [participant_label]
elif isinstance(participant_label, list):
participant_labels = participant_label
else:
participant_labels = list(sorted(self.subjects.keys()))
for participant_label in tqdm(participant_labels):
try:
native_parcellations[
participant_label
] = self.run_single_subject(
parcellation_scheme,
participant_label,
probseg_threshold=probseg_threshold,
force=force,
)
except (FileNotFoundError, TraitError):
continue
return native_parcellations
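
# Minimal usage sketch (illustrative only, not part of the original module):
# the derivatives path and parcellation-scheme key below are hypothetical
# placeholders for whatever exists in your dataset and parcellation manager.
if __name__ == "__main__":
    registration = NativeRegistration(Path("/path/to/dmriprep/derivatives"))
    native_parcellations = registration.run_dataset(
        "brainnetome",  # hypothetical key in parcellation_manager.parcellations
        force=False,
    )
    print(native_parcellations)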
|
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import repackage
repackage.add("../../azure-enterprise-scale-ml/esml/common/")
import azureml.core
from azureml.core.authentication import AzureCliAuthentication
from esml import ESMLProject
from baselayer_azure_ml import AutoMLFactory, azure_metric_regression, azure_metric_classification
print("SDK Version:", azureml.core.VERSION)
p = ESMLProject.get_project_from_env_command_line()
p.describe()
cli_auth = AzureCliAuthentication()
ws = p.get_workspace_from_config(cli_auth)  # Reads the current environment's (dev, test, prod) config.json | Use CLI auth if MLOps
p.init(ws) # Automapping from datalake to Azure ML datasets, prints status
# FEATURE ENGINEERING
# Feature engineering: Bronze 2 Gold - working with Azure ML Datasets with the Bronze, Silver, Gold concept
print("DEMO MLOPS FOLDER settings - remove this after you copy this folder")
esml_dataset = p.DatasetByName("ds01_diabetes") # Get dataset
df_bronze = esml_dataset.Bronze.to_pandas_dataframe()
p.save_silver(esml_dataset,df_bronze) #Bronze -> Silver
df = esml_dataset.Silver.to_pandas_dataframe()
df_filtered = df[df.AGE > 0.015]
# SAVE GOLD - last step that must happen
gold_train = p.save_gold(df_filtered)  # Silver -> Gold
|
'''
This file defines the testing module. This needs the following:
1. The system under test
2. The specification or the function which we are trying to minimize
3. Domains of the uncertainties
'''
from .optimizers import *
from .func_tree import *
from .utils import *
from sklearn.decomposition import KernelPCA
import numpy as np
import copy
import GPy
class test_module:
def __init__(self, sut, bounds, spec=None,f_tree=None, optimizer=None,
normalizer=False,seed=None, **kwargs):
self.system_under_test = sut
# Choosing the optimizer function
if spec is None:
self.f_acqu = f_tree
else:
self.spec = spec
# To implement parser to convert from specification to the function f
self.bounds = bounds
self.normalizer=normalizer
self.seed=seed
if 'cost_model' in kwargs:
self.cost_model = kwargs['cost_model']
else:
self.cost_model = lambda x: 1
# Choosing the optimizers
if 'opt_name' in kwargs:
            self.optimizer = select_opt(kwargs['opt_name'])(bounds, **kwargs)
elif optimizer is None:
self.optimizer = sample_opt(bounds=bounds, cost=self.cost_model)
else:
self.optimizer = optimizer
# Number of samples for initializing GPs
if 'init_sample' in kwargs:
self.init_sample = kwargs['init_sample']
else:
self.init_sample = 2*len(bounds)
# Model GPs for the smooth functions
if 'with_smooth' in kwargs:
self.with_smooth = kwargs['with_smooth']
else:
self.with_smooth = True
# Model GPs for the top level requirement, potentially modeling
# non-smooth function
if 'with_ns' in kwargs:
self.with_ns = kwargs['with_ns']
else:
self.with_ns = False
# Random sampling
if 'with_random' in kwargs:
self.with_random = kwargs['with_random']
else:
self.with_random = False
# Exploration weight for GP-LCB
if 'exp_weight' in kwargs:
self.k = kwargs['exp_weight']
else:
self.k = 10
        # Number of optimizer restarts for GP hyperparameter optimization
if 'optimize_restarts' in kwargs:
self.optimize_restarts = kwargs['optimize_restarts']
else:
self.optimize_restarts = 1
# Search in lower dimension
if 'low_dim' in kwargs:
self.using_kpca=True
self.low_dim = kwargs['low_dim']
if 'kernel_type' in kwargs:
self.kernel = kwargs['kernel_type'](self.low_dim)
elif 'kernel' in kwargs:
self.kernel = kwargs['kernel']
self.using_kpca = True
self.low_dim = self.kernel.input_dim
else:
self.using_kpca=False
if 'kernel_type' in kwargs:
self.kernel = kwargs['kernel_type'](len(bounds))
else:
self.kernel = GPy.kern.Matern32(len(bounds), ARD=True)
if self.using_kpca:
if isinstance(self.optimizer, lbfgs_opt) or \
isinstance(self.optimizer, direct_opt):
print('Can use only sample_opt or delta_opt!')
print('Changing optimizer to sample_opt!')
self.optimizer = sample_opt(bounds, **kwargs)
# Sending in pre sampled data
if 'X' in kwargs:
self.X = kwargs['X']
else:
self.X = []
def initialize(self):
if len(self.X) == 0:
X = sample_from(self.init_sample, self.bounds)
self.X = X
trajs = []
for x in self.X:
trajs.append(self.system_under_test(x))
Y = self.f_acqu.eval_robustness(trajs)
if self.with_smooth:
self.smooth_X = copy.deepcopy(self.X)
if self.using_kpca:
self.kpca_s = KernelPCA(kernel='rbf', fit_inverse_transform=True,
copy_X=True, n_components=self.low_dim)
X_s = self.kpca_s.fit_transform(self.smooth_X)
else:
X_s = self.smooth_X
self.f_acqu.init_GPs(X_s, trajs,
kernel=copy.deepcopy(self.kernel),
optimize_restarts=self.optimize_restarts,
normalizer=self.normalizer)
if self.with_ns:
self.ns_X = copy.deepcopy(self.X)
if self.using_kpca:
self.kpca_ns = KernelPCA(kernel='rbf', fit_inverse_transform=True,
copy_X=True, n_components=self.low_dim)
X_ns = self.kpca_ns.fit_transform(self.ns_X)
else:
X_ns = copy.deepcopy(self.ns_X)
self.ns_GP = GPy.models.GPRegression(X_ns, Y,
kernel=copy.deepcopy(self.kernel),
normalizer=self.normalizer)
self.ns_GP.optimize_restarts(self.optimize_restarts)
if self.with_random:
self.random_X = copy.deepcopy(self.X)
self.random_Y = Y
def run_BO(self, iters_BO):
for ib in range(iters_BO):
print('BO iteration:', ib)
if self.with_smooth:
def f(x):
if self.using_kpca:
x_s = self.kpca_s.transform(x)
else:
x_s = x
if isinstance(self.optimizer, lbfgs_opt):
df = self.f_acqu.eval_df(x_s, k = self.k)
else:
df=None
return self.f_acqu.evaluate(x_s, k=self.k), df
x,f= self.optimizer.optimize(f=lambda x:f(x)[0],
df = lambda x:f(x)[1])
self.smooth_X = np.vstack((self.smooth_X, np.atleast_2d(x)))
trajs = [self.system_under_test(x_i) for x_i in x]
if self.using_kpca:
X_s = self.kpca_s.fit_transform(self.smooth_X)
else:
X_s = self.smooth_X
self.f_acqu.update_GPs(X_s, trajs,
optimize_restarts=self.optimize_restarts)
if self.with_ns:
def f(X):
if self.using_kpca:
X_ns = self.kpca_ns.transform(X)
else:
X_ns = X
m,v = self.ns_GP.predict(X_ns)
if isinstance(self.optimizer, lbfgs_opt):
dm,dv = self.ns_GP.predictive_gradients(X_ns)
dm = dm[:,:,0]
df = dm - (self.k/2)*(dv/np.sqrt(v))
else:
df =None
return m - self.k*np.sqrt(v), df
x,f = self.optimizer.optimize(f=lambda x: f(x)[0],
df = lambda x:f(x)[1])
trajs = [self.system_under_test(x_i) for x_i in x]
f_x = self.f_acqu.eval_robustness(trajs)
self.ns_X = np.vstack((self.ns_X, np.atleast_2d(x)))
if self.using_kpca:
X_ns = self.kpca_ns.fit_transform(self.ns_X)
else:
X_ns = self.ns_X
self.ns_GP.set_XY(X_ns,
np.vstack((self.ns_GP.Y, np.atleast_2d(f_x))))
self.ns_GP.optimize_restarts(self.optimize_restarts)
if self.with_random:
if self.seed is not None:
np.random.seed(self.seed)
sample_from(self.init_sample, self.bounds)
rand_x = sample_from(iters_BO, self.bounds)
trajs = []
for x in rand_x:
trajs.append(self.system_under_test(x))
self.random_X = np.vstack((self.random_X, rand_x))
rand_y = self.f_acqu.eval_robustness(trajs)
self.random_Y = np.vstack((self.random_Y, rand_y))
if self.with_smooth:
vals = self.f_acqu.find_GP_func()
self.smooth_min_val = np.array(vals).min()
self.smooth_min_loc = np.array(vals).argmin()
self.smooth_min_x = self.smooth_X[self.smooth_min_loc]
self.smooth_count = np.sum(np.array(vals) < 0)
self.smooth_ce = np.flatnonzero(np.array(vals) < 0)
if self.with_ns:
self.ns_min_val = self.ns_GP.Y.min()
self.ns_min_loc = self.ns_GP.Y.argmin()
self.ns_min_x = self.ns_GP.X[self.ns_min_loc]
self.ns_count = np.sum(self.ns_GP.Y < 0)
self.ns_ce = np.flatnonzero(self.ns_GP.Y < 0)
if self.with_random:
self.rand_min_val = self.random_Y.min()
self.rand_min_loc = self.random_Y.argmin()
self.rand_min_x = self.random_X[self.rand_min_loc]
self.rand_count = np.sum(self.random_Y < 0)
self.rand_ce = np.flatnonzero(self.random_Y < 0)
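
# Illustrative usage sketch (kept as comments because the func_tree requirement
# object and the system under test are project-specific assumptions here):
# `simulate` would map an input vector to a trajectory, and `my_requirement`
# stands in for a node built with the func_tree helpers (it must expose
# eval_robustness, init_GPs, update_GPs, evaluate and eval_df).
#
#   bounds = [(-1.0, 1.0), (-1.0, 1.0)]
#   tm = test_module(simulate, bounds, f_tree=my_requirement,
#                    init_sample=10, with_smooth=True, with_random=True, seed=0)
#   tm.initialize()
#   tm.run_BO(iters_BO=20)
#   print(tm.smooth_min_val, tm.smooth_min_x)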
|
import argparse
from suzieq.cli.sqcmds import *
from suzieq.cli.sqcmds import context_commands
from suzieq.cli.sqcmds import sqcmds_all
from suzieq.cli.sq_nubia_context import NubiaSuzieqContext
from suzieq.cli.sq_nubia_statusbar import NubiaSuzieqStatusBar
from nubia import PluginInterface, CompletionDataSource
from nubia.internal.blackcmd import CommandBlacklist
from nubia.internal.cmdbase import AutoCommand
class NubiaSuzieqPlugin(PluginInterface):
"""
The PluginInterface class is a way to customize nubia for every customer
    use case. It allows custom argument validation, control over command
loading, custom context objects, and much more.
"""
def create_context(self):
"""
Must create an object that inherits from `Context` parent class.
The plugin can return a custom context but it has to inherit from the
correct parent class.
"""
return NubiaSuzieqContext()
def validate_args(self, args):
"""
This will be executed when starting nubia, the args passed is a
dict-like object that contains the argparse result after parsing the
command line arguments. The plugin can choose to update the context
with the values, and/or decide to raise `ArgsValidationError` with
the error message.
"""
pass
def get_commands(self):
cmds = [AutoCommand(getattr(globals()[x], x))
for x in sqcmds_all if not x.startswith('_')]
cmds.append(AutoCommand(context_commands.set_ctxt))
cmds.append(AutoCommand(context_commands.clear_ctxt))
return cmds
def get_opts_parser(self, add_help=True):
"""
        Builds the ArgumentParser that will be passed to Nubia; use this to
build your list of arguments that you want for your shell.
"""
opts_parser = argparse.ArgumentParser(
description="Suzieq CLI",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=add_help,
)
opts_parser.add_argument(
"--config", "-c", default="", type=str, help="Configuration File"
)
opts_parser.add_argument(
"--verbose",
"-v",
action="count",
default=0,
help="Increase verbosity, can be specified " "multiple times",
)
opts_parser.add_argument(
"--stderr",
"-s",
action="store_true",
default=True,
help="By default the logging output goes to stderr "
"Enable this feature to send it to a temporary logfile"
)
# we only support pandas now, so we don't want this option
# opts_parser.add_argument(
# "--use-engine", "-e", help="Which analysis engine to use", default="pandas"
# )
return opts_parser
def get_completion_datasource_for_global_argument(self, argument):
if argument == "--config":
return ConfigFileCompletionDataSource()
if argument == "--use-engine":
return ConfigEngineCompletionDataSource()
return None
def create_usage_logger(self, context):
"""
        Override this and return your own usage logger.
Must be a subtype of UsageLoggerInterface.
"""
return None
def get_status_bar(self, context):
"""
This returns the StatusBar object that handles the bottom status bar
and the right-side per-line status
"""
return NubiaSuzieqStatusBar(context)
def getBlacklistPlugin(self):
blacklister = CommandBlacklist()
blacklister.add_blocked_command("topcpu")
blacklister.add_blocked_command("topmem")
return blacklister
class ConfigFileCompletionDataSource(CompletionDataSource):
def get_all(self):
return ["/tmp/c1", "/tmp/c2"]
class ConfigEngineCompletionDataSource(CompletionDataSource):
def get_all(self):
return ["pandas"]
|
from django.db import models
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
import os
from django.urls import reverse
class standard(models.Model):
name = models.CharField(max_length=100, unique=True)
slug = models.SlugField(null=True,blank=True)
description = models.TextField(max_length=550,blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def save_subject_image(instance,filename):
upload_to = 'images'
ext = filename.split('.')[-1]
#get filename
    if instance.subject_id:
        filename = 'Subject_Pictures/{}.{}'.format(instance.subject_id, ext)
return os.path.join(upload_to, filename)
class subject(models.Model):
subject_id =models.CharField(max_length=100,unique=True)
name = models.CharField(max_length=100)
slug = models.SlugField(null=True,blank=True)
standard = models.ForeignKey(standard,on_delete = models.CASCADE, related_name='subjects')
image = models.ImageField(upload_to = save_subject_image,blank=True,verbose_name ='subject image')
description = models.TextField(max_length=550, blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.subject_id)
super().save(*args, **kwargs)
def save_lesson_files(instance,filename):
upload_to = 'images'
ext = filename.split('.')[-1]
#get filename
if instance.lesson_id:
filename = 'lesson_files/{}/{}.{}'.format(instance.lesson_id,instance.lesson_id,ext)
if os.path.exists(filename):
new_name = str(instance.lesson_id) + str('1')
filename = 'lesson_images/{}/{}.{}'.format(instance.lesson_id, new_name,ext)
return os.path.join(upload_to, filename)
class lesson(models.Model):
lesson_id = models.CharField(max_length=100, unique=True)
standard = models.ForeignKey(standard,on_delete=models.CASCADE)
created_by = models.ForeignKey(User,on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
subject= models.ForeignKey(subject, on_delete=models.CASCADE, related_name='lessons')
name = models.CharField(max_length=150)
position = models.PositiveSmallIntegerField(verbose_name= 'Chapter No')
slug = models.SlugField(null=True,blank=True)
video = models.FileField(upload_to=save_lesson_files,verbose_name='video',blank=True,null=True)
ppt = models.FileField(upload_to=save_lesson_files,verbose_name='ppt',blank=True)
notes = models.FileField(upload_to=save_lesson_files,verbose_name='notes',blank=True)
class Meta:
ordering = ['position']
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse("curriculum:lesson_list", kwargs={'slug':self.subject.slug, 'standard':self.standard.slug})
class comment(models.Model):
lesson_name = models.ForeignKey(lesson,null=True,on_delete=models.CASCADE,related_name='comments')
comment_name = models.CharField(max_length=150, blank=True)
#reply = models.ForeignKey("Comment",null=True, blank = True, on_delete = models.CASCADE, related_name='replies')
author = models.ForeignKey(User,on_delete=models.CASCADE)
body = models.TextField(max_length=500)
date_added = models.DateTimeField(auto_now_add=True)
def save(self, *args, **kwargs):
self.comment_name = slugify("Comment by"+"-"+str(self.author)+str(self.date_added))
super().save(*args, **kwargs)
def __str__(self):
return self.comment_name
class Meta:
ordering = ['-date_added']
class reply(models.Model):
comment_name = models.ForeignKey(comment,on_delete=models.CASCADE,related_name='replies')
reply_body = models.TextField(max_length=500)
author = models.ForeignKey(User, on_delete=models.CASCADE)
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "reply to "+str(self.comment_name.comment_name)
|
from django.shortcuts import render
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenview(ObtainAuthToken):
"""Create a new auth token for user"""
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
"""Manage the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
"""Retrieve and return authentication user"""
return self.request.user
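
# Illustrative URL wiring (would normally live in the app's urls.py; the route
# names below are assumptions, not taken from the project):
#   from django.urls import path
#   from user import views
#
#   urlpatterns = [
#       path('create/', views.CreateUserView.as_view(), name='create'),
#       path('token/', views.CreateTokenview.as_view(), name='token'),
#       path('me/', views.ManageUserView.as_view(), name='me'),
#   ]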
|
import requests
import numpy as np
import collections
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from PIL import Image
from io import BytesIO
class Image_Data:
image = None
@property
def Array(self) -> np.ndarray:
"""
Return image array (RGB)
"""
return self.image
@property
def Color_Hex(self) -> list:
hex = []
def convert_RGB2HEX(color):
return "#{:02x}{:02x}{:02x}".format(int(color[0]), int(color[1]), int(color[2]))
image = self.image
image_height = len(image)
for y in range(image_height):
for x in image[y]:
hex.append(convert_RGB2HEX(x))
return hex
def __init__(self, image_path: str):
if 'http' in image_path:
# Online image
image_req = requests.get(image_path, stream=True)
            if image_req.status_code == 200:
                self.image = np.array(Image.open(BytesIO(image_req.content)))
            else:
                raise ValueError(
                    "Could not fetch image (HTTP {}): {}".format(
                        image_req.status_code, image_path))
        else:
            # Local image
            self.image = np.array(Image.open(image_path))
def show(self):
Image.fromarray(self.image, 'RGB').show()
class Color:
color = []
@property
def Total(self) -> int:
return len(self.color)
@property
def Count(self) -> dict:
"""
Return total unique color
"""
color_count = dict(collections.Counter(self.color))
# Sort dict by highest value
color_count = {
key: value for key, value in sorted(color_count.items(), key=lambda x: x[1], reverse=True)
}
return color_count
@property
def Listed_Count(self) -> list[dict]:
"""
Return total unique color in list of dictionary
"""
list_colors = []
colors = self.Count.items()
        # Wrap each (color, count) pair in its own single-entry dict
        for key, val in colors:
            list_colors.append({key: val})
return list_colors
def __init__(self, color: list):
self.color = color
def plot(self, min_value = 1):
"""
Plot color data with value more than min_value
"""
color_count = self.Count
color_count = {key : value for key, value in color_count.items() if value >= min_value}
color = list(color_count.keys())
count = list(color_count.values())
bar_colors = color
# Draw plot
#fig_width = len(color)
#fig_height
figure = plt.figure('Color Distribution', tight_layout=True)
plt.barh(color, count, color=bar_colors, edgecolor='#aaaaaa')
plt.title('Color Distribution')
plt.ylabel('Color')
plt.xlabel('Count')
plt.show()
# Render figure
canvas = FigureCanvas(figure)
canvas.draw()
width, height = figure.get_size_inches() * figure.get_dpi()
image = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)
return image
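
# Minimal usage sketch (the image URL below is a hypothetical placeholder):
if __name__ == "__main__":
    img = Image_Data("https://example.com/sample.png")
    colors = Color(img.Color_Hex)
    print("pixels counted:", colors.Total)
    # Plot colors occurring at least 50 times and grab the rendered RGB array
    chart = colors.plot(min_value=50)
    print("chart array shape:", chart.shape)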
|
from pymongo import MongoClient
client = MongoClient()
# carPricingDB = client["carPricing"]
# firstOffersCollection = carPricingDB.create_collection("firstOffers")
# firstOffersCollection.insert_one({"item":"initialone"})
carPricingDB = client.carPricing
firstOffersCollection = carPricingDB.firstOffers
firstOffersCollection.insert_one(
{"item": "canvas",
"qty": 100,
"tags": ["cotton"],
"size": {"h": 28, "w": 35.5, "uom": "cm"}})
coss = firstOffersCollection.find({"item":"canvas"})[0]
print(coss)
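# For a single document, find_one() is an equivalent, slightly clearer query
# (same collection and filter as above; shown here only for illustration):
print(firstOffersCollection.find_one({"item": "canvas"}))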
|
# coding: utf-8
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from __future__ import print_function
import xml.dom.minidom as minidom
import os
# import PIL
import numpy as np
import scipy.sparse
import subprocess
try:
import cPickle
except ImportError:
import pickle as cPickle
import math
import glob
import uuid
import scipy.io as sio
import xml.etree.ElementTree as ET
from .imdb import imdb
from .imdb import ROOT_DIR
from . import ds_utils
from .adas_eval import adas_eval
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from model.utils.config import cfg
# <<<< obsolete
class adas(imdb):
def __init__(self, image_set, year, devkit_path=None, sub_type='car'):
imdb.__init__(self, 'adas_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = os.path.join(self._devkit_path, 'ADAS' + self._year)
if sub_type == 'car':
self._classes = ('__background__', #always index 0
'car',)
elif sub_type == 'tired':
self._classes = ('__background__', #always index 0
'o','s','w')
self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
# self._roidb_handler = self.selective_search_roidb
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': False,
'matlab_eval': False,
'rpn_file': None,
'min_size': 2}
assert os.path.exists(self._devkit_path), \
'ADASdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_id_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return i
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /ADASdevkit2007/ADAS2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL ADAS is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'ADASdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
print(cache_file)
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote ss roidb to {}'.format(cache_file))
return roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = cPickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in range(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL ADAS
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
# if not self.config['use_diff']:
# # Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# # if len(non_diff_objs) != len(objs):
# # print 'Removed {} difficult objects'.format(
# # len(objs) - len(non_diff_objs))
# objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
diffc = obj.find('difficult')
            difficult = 0 if diffc is None else int(diffc.text)
ishards[ix] = difficult
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_ishard': ishards,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_adas_results_file_template(self):
# ADASdevkit/results/ADAS2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
filedir = os.path.join(self._devkit_path, 'results', 'ADAS' + self._year, 'Main')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_adas_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} ADAS results file'.format(cls))
filename = self._get_adas_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
continue
# the ADASdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'ADAS' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'ADAS' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_adas_results_file_template().format(cls)
rec, prec, ap = adas_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'ADASdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'adas_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print('Running:\n{}'.format(cmd))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_adas_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_adas_results_file_template().format(cls)
os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
d = adas('trainval', '2017')
res = d.roidb
from IPython import embed;
embed()
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for lots of functionality provided by L{twisted.internet}.
"""
from __future__ import division, absolute_import
import os
import sys
import time
from twisted.python.compat import _PY3
from twisted.trial import unittest
from twisted.internet import reactor, protocol, error, abstract, defer
from twisted.internet import interfaces, base
try:
from twisted.internet import ssl
except ImportError:
ssl = None
if ssl and not ssl.supported:
ssl = None
from twisted.internet.defer import Deferred
if not _PY3:
from twisted.python import util
class ThreePhaseEventTests(unittest.TestCase):
"""
Tests for the private implementation helpers for system event triggers.
"""
def setUp(self):
"""
Create a trigger, an argument, and an event to be used by tests.
"""
self.trigger = lambda x: None
self.arg = object()
self.event = base._ThreePhaseEvent()
def test_addInvalidPhase(self):
"""
L{_ThreePhaseEvent.addTrigger} should raise L{KeyError} when called
with an invalid phase.
"""
self.assertRaises(
KeyError,
self.event.addTrigger, 'xxx', self.trigger, self.arg)
def test_addBeforeTrigger(self):
"""
L{_ThreePhaseEvent.addTrigger} should accept C{'before'} as a phase, a
callable, and some arguments and add the callable with the arguments to
the before list.
"""
self.event.addTrigger('before', self.trigger, self.arg)
self.assertEqual(
self.event.before,
[(self.trigger, (self.arg,), {})])
def test_addDuringTrigger(self):
"""
L{_ThreePhaseEvent.addTrigger} should accept C{'during'} as a phase, a
callable, and some arguments and add the callable with the arguments to
the during list.
"""
self.event.addTrigger('during', self.trigger, self.arg)
self.assertEqual(
self.event.during,
[(self.trigger, (self.arg,), {})])
def test_addAfterTrigger(self):
"""
L{_ThreePhaseEvent.addTrigger} should accept C{'after'} as a phase, a
callable, and some arguments and add the callable with the arguments to
the after list.
"""
self.event.addTrigger('after', self.trigger, self.arg)
self.assertEqual(
self.event.after,
[(self.trigger, (self.arg,), {})])
def test_removeTrigger(self):
"""
L{_ThreePhaseEvent.removeTrigger} should accept an opaque object
previously returned by L{_ThreePhaseEvent.addTrigger} and remove the
associated trigger.
"""
handle = self.event.addTrigger('before', self.trigger, self.arg)
self.event.removeTrigger(handle)
self.assertEqual(self.event.before, [])
def test_removeNonexistentTrigger(self):
"""
L{_ThreePhaseEvent.removeTrigger} should raise L{ValueError} when given
an object not previously returned by L{_ThreePhaseEvent.addTrigger}.
"""
self.assertRaises(ValueError, self.event.removeTrigger, object())
def test_removeRemovedTrigger(self):
"""
L{_ThreePhaseEvent.removeTrigger} should raise L{ValueError} the second
time it is called with an object returned by
L{_ThreePhaseEvent.addTrigger}.
"""
handle = self.event.addTrigger('before', self.trigger, self.arg)
self.event.removeTrigger(handle)
self.assertRaises(ValueError, self.event.removeTrigger, handle)
def test_removeAlmostValidTrigger(self):
"""
L{_ThreePhaseEvent.removeTrigger} should raise L{ValueError} if it is
given a trigger handle which resembles a valid trigger handle aside
from its phase being incorrect.
"""
self.assertRaises(
KeyError,
self.event.removeTrigger, ('xxx', self.trigger, (self.arg,), {}))
def test_fireEvent(self):
"""
L{_ThreePhaseEvent.fireEvent} should call I{before}, I{during}, and
I{after} phase triggers in that order.
"""
events = []
self.event.addTrigger('after', events.append, ('first', 'after'))
self.event.addTrigger('during', events.append, ('first', 'during'))
self.event.addTrigger('before', events.append, ('first', 'before'))
self.event.addTrigger('before', events.append, ('second', 'before'))
self.event.addTrigger('during', events.append, ('second', 'during'))
self.event.addTrigger('after', events.append, ('second', 'after'))
self.assertEqual(events, [])
self.event.fireEvent()
self.assertEqual(events,
[('first', 'before'), ('second', 'before'),
('first', 'during'), ('second', 'during'),
('first', 'after'), ('second', 'after')])
def test_asynchronousBefore(self):
"""
L{_ThreePhaseEvent.fireEvent} should wait for any L{Deferred} returned
by a I{before} phase trigger before proceeding to I{during} events.
"""
events = []
beforeResult = Deferred()
self.event.addTrigger('before', lambda: beforeResult)
self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.assertEqual(events, [])
self.event.fireEvent()
self.assertEqual(events, [])
beforeResult.callback(None)
self.assertEqual(events, ['during', 'after'])
def test_beforeTriggerException(self):
"""
If a before-phase trigger raises a synchronous exception, it should be
logged and the remaining triggers should be run.
"""
events = []
class DummyException(Exception):
pass
def raisingTrigger():
raise DummyException()
self.event.addTrigger('before', raisingTrigger)
self.event.addTrigger('before', events.append, 'before')
self.event.addTrigger('during', events.append, 'during')
self.event.fireEvent()
self.assertEqual(events, ['before', 'during'])
errors = self.flushLoggedErrors(DummyException)
self.assertEqual(len(errors), 1)
def test_duringTriggerException(self):
"""
If a during-phase trigger raises a synchronous exception, it should be
logged and the remaining triggers should be run.
"""
events = []
class DummyException(Exception):
pass
def raisingTrigger():
raise DummyException()
self.event.addTrigger('during', raisingTrigger)
self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.event.fireEvent()
self.assertEqual(events, ['during', 'after'])
errors = self.flushLoggedErrors(DummyException)
self.assertEqual(len(errors), 1)
def test_synchronousRemoveAlreadyExecutedBefore(self):
"""
If a before-phase trigger tries to remove another before-phase trigger
which has already run, a warning should be emitted.
"""
events = []
def removeTrigger():
self.event.removeTrigger(beforeHandle)
beforeHandle = self.event.addTrigger('before', events.append, ('first', 'before'))
self.event.addTrigger('before', removeTrigger)
self.event.addTrigger('before', events.append, ('second', 'before'))
self.assertWarns(
DeprecationWarning,
"Removing already-fired system event triggers will raise an "
"exception in a future version of Twisted.",
__file__,
self.event.fireEvent)
self.assertEqual(events, [('first', 'before'), ('second', 'before')])
def test_synchronousRemovePendingBefore(self):
"""
If a before-phase trigger removes another before-phase trigger which
has not yet run, the removed trigger should not be run.
"""
events = []
self.event.addTrigger(
'before', lambda: self.event.removeTrigger(beforeHandle))
beforeHandle = self.event.addTrigger(
'before', events.append, ('first', 'before'))
self.event.addTrigger('before', events.append, ('second', 'before'))
self.event.fireEvent()
self.assertEqual(events, [('second', 'before')])
def test_synchronousBeforeRemovesDuring(self):
"""
If a before-phase trigger removes a during-phase trigger, the
during-phase trigger should not be run.
"""
events = []
self.event.addTrigger(
'before', lambda: self.event.removeTrigger(duringHandle))
duringHandle = self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.event.fireEvent()
self.assertEqual(events, ['after'])
def test_asynchronousBeforeRemovesDuring(self):
"""
If a before-phase trigger returns a L{Deferred} and later removes a
during-phase trigger before the L{Deferred} fires, the during-phase
trigger should not be run.
"""
events = []
beforeResult = Deferred()
self.event.addTrigger('before', lambda: beforeResult)
duringHandle = self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.event.fireEvent()
self.event.removeTrigger(duringHandle)
beforeResult.callback(None)
self.assertEqual(events, ['after'])
def test_synchronousBeforeRemovesConspicuouslySimilarDuring(self):
"""
If a before-phase trigger removes a during-phase trigger which is
identical to an already-executed before-phase trigger aside from their
phases, no warning should be emitted and the during-phase trigger
should not be run.
"""
events = []
def trigger():
events.append('trigger')
self.event.addTrigger('before', trigger)
self.event.addTrigger(
'before', lambda: self.event.removeTrigger(duringTrigger))
duringTrigger = self.event.addTrigger('during', trigger)
self.event.fireEvent()
self.assertEqual(events, ['trigger'])
def test_synchronousRemovePendingDuring(self):
"""
If a during-phase trigger removes another during-phase trigger which
has not yet run, the removed trigger should not be run.
"""
events = []
self.event.addTrigger(
'during', lambda: self.event.removeTrigger(duringHandle))
duringHandle = self.event.addTrigger(
'during', events.append, ('first', 'during'))
self.event.addTrigger(
'during', events.append, ('second', 'during'))
self.event.fireEvent()
self.assertEqual(events, [('second', 'during')])
def test_triggersRunOnce(self):
"""
A trigger should only be called on the first call to
L{_ThreePhaseEvent.fireEvent}.
"""
events = []
self.event.addTrigger('before', events.append, 'before')
self.event.addTrigger('during', events.append, 'during')
self.event.addTrigger('after', events.append, 'after')
self.event.fireEvent()
self.event.fireEvent()
self.assertEqual(events, ['before', 'during', 'after'])
def test_finishedBeforeTriggersCleared(self):
"""
The temporary list L{_ThreePhaseEvent.finishedBefore} should be emptied
and the state reset to C{'BASE'} before the first during-phase trigger
executes.
"""
events = []
def duringTrigger():
events.append('during')
self.assertEqual(self.event.finishedBefore, [])
self.assertEqual(self.event.state, 'BASE')
self.event.addTrigger('before', events.append, 'before')
self.event.addTrigger('during', duringTrigger)
self.event.fireEvent()
self.assertEqual(events, ['before', 'during'])
class SystemEventTests(unittest.TestCase):
"""
Tests for the reactor's implementation of the C{fireSystemEvent},
C{addSystemEventTrigger}, and C{removeSystemEventTrigger} methods of the
L{IReactorCore} interface.
@ivar triggers: A list of the handles to triggers which have been added to
the reactor.
"""
def setUp(self):
"""
Create an empty list in which to store trigger handles.
"""
self.triggers = []
def tearDown(self):
"""
Remove all remaining triggers from the reactor.
"""
while self.triggers:
trigger = self.triggers.pop()
try:
reactor.removeSystemEventTrigger(trigger)
except (ValueError, KeyError):
pass
def addTrigger(self, event, phase, func):
"""
Add a trigger to the reactor and remember it in C{self.triggers}.
"""
t = reactor.addSystemEventTrigger(event, phase, func)
self.triggers.append(t)
return t
def removeTrigger(self, trigger):
"""
Remove a trigger by its handle from the reactor and from
C{self.triggers}.
"""
reactor.removeSystemEventTrigger(trigger)
self.triggers.remove(trigger)
def _addSystemEventTriggerTest(self, phase):
eventType = 'test'
events = []
def trigger():
events.append(None)
self.addTrigger(phase, eventType, trigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, [None])
def test_beforePhase(self):
"""
L{IReactorCore.addSystemEventTrigger} should accept the C{'before'}
phase and not call the given object until the right event is fired.
"""
self._addSystemEventTriggerTest('before')
def test_duringPhase(self):
"""
L{IReactorCore.addSystemEventTrigger} should accept the C{'during'}
phase and not call the given object until the right event is fired.
"""
self._addSystemEventTriggerTest('during')
def test_afterPhase(self):
"""
L{IReactorCore.addSystemEventTrigger} should accept the C{'after'}
phase and not call the given object until the right event is fired.
"""
self._addSystemEventTriggerTest('after')
def test_unknownPhase(self):
"""
L{IReactorCore.addSystemEventTrigger} should reject phases other than
C{'before'}, C{'during'}, or C{'after'}.
"""
eventType = 'test'
self.assertRaises(
KeyError, self.addTrigger, 'xxx', eventType, lambda: None)
def test_beforePreceedsDuring(self):
"""
L{IReactorCore.addSystemEventTrigger} should call triggers added to the
C{'before'} phase before it calls triggers added to the C{'during'}
phase.
"""
eventType = 'test'
events = []
def beforeTrigger():
events.append('before')
def duringTrigger():
events.append('during')
self.addTrigger('before', eventType, beforeTrigger)
self.addTrigger('during', eventType, duringTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, ['before', 'during'])
def test_duringPreceedsAfter(self):
"""
L{IReactorCore.addSystemEventTrigger} should call triggers added to the
C{'during'} phase before it calls triggers added to the C{'after'}
phase.
"""
eventType = 'test'
events = []
def duringTrigger():
events.append('during')
def afterTrigger():
events.append('after')
self.addTrigger('during', eventType, duringTrigger)
self.addTrigger('after', eventType, afterTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, ['during', 'after'])
def test_beforeReturnsDeferred(self):
"""
If a trigger added to the C{'before'} phase of an event returns a
L{Deferred}, the C{'during'} phase should be delayed until it is called
back.
"""
triggerDeferred = Deferred()
eventType = 'test'
events = []
def beforeTrigger():
return triggerDeferred
def duringTrigger():
events.append('during')
self.addTrigger('before', eventType, beforeTrigger)
self.addTrigger('during', eventType, duringTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, [])
triggerDeferred.callback(None)
self.assertEqual(events, ['during'])
def test_multipleBeforeReturnDeferred(self):
"""
If more than one trigger added to the C{'before'} phase of an event
return L{Deferred}s, the C{'during'} phase should be delayed until they
are all called back.
"""
firstDeferred = Deferred()
secondDeferred = Deferred()
eventType = 'test'
events = []
def firstBeforeTrigger():
return firstDeferred
def secondBeforeTrigger():
return secondDeferred
def duringTrigger():
events.append('during')
self.addTrigger('before', eventType, firstBeforeTrigger)
self.addTrigger('before', eventType, secondBeforeTrigger)
self.addTrigger('during', eventType, duringTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, [])
firstDeferred.callback(None)
self.assertEqual(events, [])
secondDeferred.callback(None)
self.assertEqual(events, ['during'])
def test_subsequentBeforeTriggerFiresPriorBeforeDeferred(self):
"""
If a trigger added to the C{'before'} phase of an event calls back a
L{Deferred} returned by an earlier trigger in the C{'before'} phase of
the same event, the remaining C{'before'} triggers for that event
should be run and any further L{Deferred}s waited on before proceeding
to the C{'during'} events.
"""
eventType = 'test'
events = []
firstDeferred = Deferred()
secondDeferred = Deferred()
def firstBeforeTrigger():
return firstDeferred
def secondBeforeTrigger():
firstDeferred.callback(None)
def thirdBeforeTrigger():
events.append('before')
return secondDeferred
def duringTrigger():
events.append('during')
self.addTrigger('before', eventType, firstBeforeTrigger)
self.addTrigger('before', eventType, secondBeforeTrigger)
self.addTrigger('before', eventType, thirdBeforeTrigger)
self.addTrigger('during', eventType, duringTrigger)
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, ['before'])
secondDeferred.callback(None)
self.assertEqual(events, ['before', 'during'])
def test_removeSystemEventTrigger(self):
"""
A trigger removed with L{IReactorCore.removeSystemEventTrigger} should
not be called when the event fires.
"""
eventType = 'test'
events = []
def firstBeforeTrigger():
events.append('first')
def secondBeforeTrigger():
events.append('second')
self.addTrigger('before', eventType, firstBeforeTrigger)
self.removeTrigger(
self.addTrigger('before', eventType, secondBeforeTrigger))
self.assertEqual(events, [])
reactor.fireSystemEvent(eventType)
self.assertEqual(events, ['first'])
def test_removeNonExistentSystemEventTrigger(self):
"""
Passing an object to L{IReactorCore.removeSystemEventTrigger} which was
not returned by a previous call to
L{IReactorCore.addSystemEventTrigger} or which has already been passed
to C{removeSystemEventTrigger} should result in L{TypeError},
L{KeyError}, or L{ValueError} being raised.
"""
b = self.addTrigger('during', 'test', lambda: None)
self.removeTrigger(b)
self.assertRaises(
TypeError, reactor.removeSystemEventTrigger, None)
self.assertRaises(
ValueError, reactor.removeSystemEventTrigger, b)
self.assertRaises(
KeyError,
reactor.removeSystemEventTrigger,
(b[0], ('xxx',) + b[1][1:]))
def test_interactionBetweenDifferentEvents(self):
"""
L{IReactorCore.fireSystemEvent} should behave the same way for a
particular system event regardless of whether Deferreds are being
waited on for a different system event.
"""
events = []
firstEvent = 'first-event'
firstDeferred = Deferred()
def beforeFirstEvent():
events.append(('before', 'first'))
return firstDeferred
def afterFirstEvent():
events.append(('after', 'first'))
secondEvent = 'second-event'
secondDeferred = Deferred()
def beforeSecondEvent():
events.append(('before', 'second'))
return secondDeferred
def afterSecondEvent():
events.append(('after', 'second'))
self.addTrigger('before', firstEvent, beforeFirstEvent)
self.addTrigger('after', firstEvent, afterFirstEvent)
self.addTrigger('before', secondEvent, beforeSecondEvent)
self.addTrigger('after', secondEvent, afterSecondEvent)
self.assertEqual(events, [])
# After this, firstEvent should be stuck before 'during' waiting for
# firstDeferred.
reactor.fireSystemEvent(firstEvent)
self.assertEqual(events, [('before', 'first')])
# After this, secondEvent should be stuck before 'during' waiting for
# secondDeferred.
reactor.fireSystemEvent(secondEvent)
self.assertEqual(events, [('before', 'first'), ('before', 'second')])
# After this, firstEvent should have finished completely, but
# secondEvent should be at the same place.
firstDeferred.callback(None)
self.assertEqual(events, [('before', 'first'), ('before', 'second'),
('after', 'first')])
# After this, secondEvent should have finished completely.
secondDeferred.callback(None)
self.assertEqual(events, [('before', 'first'), ('before', 'second'),
('after', 'first'), ('after', 'second')])
class TimeTests(unittest.TestCase):
"""
Tests for the IReactorTime part of the reactor.
"""
def test_seconds(self):
"""
L{twisted.internet.reactor.seconds} should return something
like a number.
1. This test specifically does not assert any relation to the
"system time" as returned by L{time.time} or
L{twisted.python.runtime.seconds}, because at some point we
may find a better option for scheduling calls than
wallclock-time.
2. This test *also* does not assert anything about the type of
the result, because operations may not return ints or
floats: For example, datetime-datetime == timedelta(0).
"""
now = reactor.seconds()
self.assertEqual(now-now+now, now)
def test_callLaterUsesReactorSecondsInDelayedCall(self):
"""
L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}
should use the reactor's seconds factory
to produce the time at which the DelayedCall will be called.
"""
oseconds = reactor.seconds
reactor.seconds = lambda: 100
try:
call = reactor.callLater(5, lambda: None)
self.assertEqual(call.getTime(), 105)
finally:
reactor.seconds = oseconds
def test_callLaterUsesReactorSecondsAsDelayedCallSecondsFactory(self):
"""
L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}
should propagate its own seconds factory
to the DelayedCall to use as its own seconds factory.
"""
oseconds = reactor.seconds
reactor.seconds = lambda: 100
try:
call = reactor.callLater(5, lambda: None)
self.assertEqual(call.seconds(), 100)
finally:
reactor.seconds = oseconds
def test_callLater(self):
"""
Test that a DelayedCall really calls the function it is
supposed to call.
"""
d = Deferred()
reactor.callLater(0, d.callback, None)
d.addCallback(self.assertEqual, None)
return d
def test_cancelDelayedCall(self):
"""
Test that when a DelayedCall is cancelled it does not run.
"""
called = []
def function():
called.append(None)
call = reactor.callLater(0, function)
call.cancel()
# Schedule a call in two "iterations" to check to make sure that the
# above call never ran.
d = Deferred()
def check():
try:
self.assertEqual(called, [])
except:
d.errback()
else:
d.callback(None)
reactor.callLater(0, reactor.callLater, 0, check)
return d
def test_cancelCancelledDelayedCall(self):
"""
Test that cancelling a DelayedCall which has already been cancelled
raises the appropriate exception.
"""
call = reactor.callLater(0, lambda: None)
call.cancel()
self.assertRaises(error.AlreadyCancelled, call.cancel)
def test_cancelCalledDelayedCallSynchronous(self):
"""
Test that cancelling a DelayedCall in the DelayedCall's function as
that function is being invoked by the DelayedCall raises the
appropriate exception.
"""
d = Deferred()
def later():
try:
self.assertRaises(error.AlreadyCalled, call.cancel)
except:
d.errback()
else:
d.callback(None)
call = reactor.callLater(0, later)
return d
def test_cancelCalledDelayedCallAsynchronous(self):
"""
Test that cancelling a DelayedCall after it has run its function
raises the appropriate exception.
"""
d = Deferred()
def check():
try:
self.assertRaises(error.AlreadyCalled, call.cancel)
except:
d.errback()
else:
d.callback(None)
def later():
reactor.callLater(0, check)
call = reactor.callLater(0, later)
return d
def testCallLaterTime(self):
d = reactor.callLater(10, lambda: None)
try:
self.assertTrue(d.getTime() - (time.time() + 10) < 1)
finally:
d.cancel()
def testDelayedCallStringification(self):
# Mostly just make sure str() isn't going to raise anything for
# DelayedCalls within reason.
dc = reactor.callLater(0, lambda x, y: None, 'x', y=10)
str(dc)
dc.reset(5)
str(dc)
dc.cancel()
str(dc)
dc = reactor.callLater(0, lambda: None, x=[({'hello': u'world'}, 10j), reactor], *range(10))
str(dc)
dc.cancel()
str(dc)
def calledBack(ignored):
str(dc)
d = Deferred().addCallback(calledBack)
dc = reactor.callLater(0, d.callback, None)
str(dc)
return d
def testDelayedCallSecondsOverride(self):
"""
Test that the C{seconds} argument to DelayedCall gets used instead of
the default timing function, if it is not None.
"""
def seconds():
return 10
dc = base.DelayedCall(5, lambda: None, (), {}, lambda dc: None,
lambda dc: None, seconds)
self.assertEqual(dc.getTime(), 5)
dc.reset(3)
self.assertEqual(dc.getTime(), 13)
class CallFromThreadStopsAndWakeUpTests(unittest.TestCase):
def testWakeUp(self):
# Make sure other threads can wake up the reactor
d = Deferred()
def wake():
time.sleep(0.1)
# callFromThread will call wakeUp for us
reactor.callFromThread(d.callback, None)
reactor.callInThread(wake)
return d
if interfaces.IReactorThreads(reactor, None) is None:
testWakeUp.skip = "Nothing to wake up for without thread support"
def _stopCallFromThreadCallback(self):
self.stopped = True
def _callFromThreadCallback(self, d):
reactor.callFromThread(self._callFromThreadCallback2, d)
reactor.callLater(0, self._stopCallFromThreadCallback)
def _callFromThreadCallback2(self, d):
try:
self.assertTrue(self.stopped)
except:
# Send the error to the deferred
d.errback()
else:
d.callback(None)
def testCallFromThreadStops(self):
"""
Ensure that callFromThread from inside a callFromThread
callback doesn't sit in an infinite loop and lets other
things happen too.
"""
self.stopped = False
d = defer.Deferred()
reactor.callFromThread(self._callFromThreadCallback, d)
return d
class DelayedTests(unittest.TestCase):
def setUp(self):
self.finished = 0
self.counter = 0
self.timers = {}
self.deferred = defer.Deferred()
def tearDown(self):
for t in self.timers.values():
t.cancel()
def checkTimers(self):
l1 = self.timers.values()
l2 = list(reactor.getDelayedCalls())
# There should be at least the calls we put in. There may be other
# calls that are none of our business and that we should ignore,
# though.
missing = []
for dc in l1:
if dc not in l2:
missing.append(dc)
if missing:
self.finished = 1
self.assertFalse(missing, "Should have been missing no calls, instead "
+ "was missing " + repr(missing))
def callback(self, tag):
del self.timers[tag]
self.checkTimers()
def addCallback(self, tag):
self.callback(tag)
self.addTimer(15, self.callback)
def done(self, tag):
self.finished = 1
self.callback(tag)
self.deferred.callback(None)
def addTimer(self, when, callback):
self.timers[self.counter] = reactor.callLater(when * 0.01, callback,
self.counter)
self.counter += 1
self.checkTimers()
def testGetDelayedCalls(self):
if not hasattr(reactor, "getDelayedCalls"):
return
# This is not a race because we don't do anything which might call
# the reactor until we have all the timers set up. If we did, this
# test might fail on slow systems.
self.checkTimers()
self.addTimer(35, self.done)
self.addTimer(20, self.callback)
self.addTimer(30, self.callback)
which = self.counter
self.addTimer(29, self.callback)
self.addTimer(25, self.addCallback)
self.addTimer(26, self.callback)
self.timers[which].cancel()
del self.timers[which]
self.checkTimers()
self.deferred.addCallback(lambda x : self.checkTimers())
return self.deferred
def test_active(self):
"""
L{IDelayedCall.active} returns False once the call has run.
"""
dcall = reactor.callLater(0.01, self.deferred.callback, True)
self.assertTrue(dcall.active())
def checkDeferredCall(success):
self.assertFalse(dcall.active())
return success
self.deferred.addCallback(checkDeferredCall)
return self.deferred
resolve_helper = """
from __future__ import print_function
import %(reactor)s
%(reactor)s.install()
from twisted.internet import reactor
class Foo:
def __init__(self):
reactor.callWhenRunning(self.start)
self.timer = reactor.callLater(3, self.failed)
def start(self):
reactor.resolve('localhost').addBoth(self.done)
def done(self, res):
print('done', res)
reactor.stop()
def failed(self):
print('failed')
self.timer = None
reactor.stop()
f = Foo()
reactor.run()
"""
class ChildResolveProtocol(protocol.ProcessProtocol):
def __init__(self, onCompletion):
self.onCompletion = onCompletion
def connectionMade(self):
self.output = []
self.error = []
def outReceived(self, out):
self.output.append(out)
def errReceived(self, err):
self.error.append(err)
def processEnded(self, reason):
self.onCompletion.callback((reason, self.output, self.error))
self.onCompletion = None
class ResolveTests(unittest.TestCase):
def testChildResolve(self):
# I've seen problems with reactor.run under gtk2reactor. Spawn a
# child which just does reactor.resolve after the reactor has
# started, fail if it does not complete in a timely fashion.
helperPath = os.path.abspath(self.mktemp())
with open(helperPath, 'w') as helperFile:
# Eeueuuggg
reactorName = reactor.__module__
helperFile.write(resolve_helper % {'reactor': reactorName})
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
helperDeferred = Deferred()
helperProto = ChildResolveProtocol(helperDeferred)
reactor.spawnProcess(helperProto, sys.executable, ("python", "-u", helperPath), env)
def cbFinished(result):
(reason, output, error) = result
# If the output is "done 127.0.0.1\n" we don't really care what
# else happened.
output = b''.join(output)
if output != b'done 127.0.0.1\n':
self.fail((
"The child process failed to produce the desired results:\n"
" Reason for termination was: %r\n"
" Output stream was: %r\n"
" Error stream was: %r\n") % (reason.getErrorMessage(), output, b''.join(error)))
helperDeferred.addCallback(cbFinished)
return helperDeferred
if not interfaces.IReactorProcess(reactor, None):
ResolveTests.skip = (
"cannot run test: reactor doesn't support IReactorProcess")
class CallFromThreadTests(unittest.TestCase):
"""
Task scheduling from threads tests.
"""
if interfaces.IReactorThreads(reactor, None) is None:
skip = "Nothing to test without thread support"
def setUp(self):
self.counter = 0
self.deferred = Deferred()
def schedule(self, *args, **kwargs):
"""
Override in subclasses.
"""
reactor.callFromThread(*args, **kwargs)
def test_lotsOfThreadsAreScheduledCorrectly(self):
"""
L{IReactorThreads.callFromThread} can be used to schedule a large
number of calls in the reactor thread.
"""
def addAndMaybeFinish():
self.counter += 1
if self.counter == 100:
self.deferred.callback(True)
for i in range(100):
self.schedule(addAndMaybeFinish)
return self.deferred
def test_threadsAreRunInScheduledOrder(self):
"""
Callbacks should be invoked in the order they were scheduled.
"""
order = []
def check(_):
self.assertEqual(order, [1, 2, 3])
self.deferred.addCallback(check)
self.schedule(order.append, 1)
self.schedule(order.append, 2)
self.schedule(order.append, 3)
self.schedule(reactor.callFromThread, self.deferred.callback, None)
return self.deferred
def test_scheduledThreadsNotRunUntilReactorRuns(self):
"""
Scheduled tasks should not be run until the reactor starts running.
"""
def incAndFinish():
self.counter = 1
self.deferred.callback(True)
self.schedule(incAndFinish)
# Callback shouldn't have fired yet.
self.assertEqual(self.counter, 0)
return self.deferred
class MyProtocol(protocol.Protocol):
"""
Sample protocol.
"""
class MyFactory(protocol.Factory):
"""
Sample factory.
"""
protocol = MyProtocol
class ProtocolTests(unittest.TestCase):
def testFactory(self):
factory = MyFactory()
protocol = factory.buildProtocol(None)
self.assertEqual(protocol.factory, factory)
self.assertIsInstance(protocol, factory.protocol)
class DummyProducer(object):
"""
Very uninteresting producer implementation used by tests to ensure the
right methods are called by the consumer with which it is registered.
@type events: C{list} of C{str}
@ivar events: The producer/consumer related events which have happened to
this producer. Strings in this list may be C{'resume'}, C{'stop'}, or
C{'pause'}. Elements are added as they occur.
"""
def __init__(self):
self.events = []
def resumeProducing(self):
self.events.append('resume')
def stopProducing(self):
self.events.append('stop')
def pauseProducing(self):
self.events.append('pause')
class SillyDescriptor(abstract.FileDescriptor):
"""
A descriptor whose data buffer gets filled very fast.
Useful for testing FileDescriptor's IConsumer interface, since
the data buffer fills as soon as at least four characters are
written to it, and gets emptied in a single doWrite() cycle.
"""
bufferSize = 3
connected = True
def writeSomeData(self, data):
"""
Always write all data.
"""
return len(data)
def startWriting(self):
"""
Do nothing: bypass the reactor.
"""
stopWriting = startWriting
class ReentrantProducer(DummyProducer):
"""
Similar to L{DummyProducer}, but with a resumeProducing method which calls
back into an L{IConsumer} method of the consumer against which it is
registered.
@ivar consumer: The consumer with which this producer has been or will
be registered.
@ivar methodName: The name of the method to call on the consumer inside
C{resumeProducing}.
@ivar methodArgs: The arguments to pass to the consumer method invoked in
C{resumeProducing}.
"""
def __init__(self, consumer, methodName, *methodArgs):
super(ReentrantProducer, self).__init__()
self.consumer = consumer
self.methodName = methodName
self.methodArgs = methodArgs
def resumeProducing(self):
super(ReentrantProducer, self).resumeProducing()
getattr(self.consumer, self.methodName)(*self.methodArgs)
class ProducerTests(unittest.TestCase):
"""
Test abstract.FileDescriptor's consumer interface.
"""
def test_doubleProducer(self):
"""
Verify that registering a non-streaming producer invokes its
resumeProducing() method and that you can only register one producer
at a time.
"""
fd = abstract.FileDescriptor()
fd.connected = 1
dp = DummyProducer()
fd.registerProducer(dp, 0)
self.assertEqual(dp.events, ['resume'])
self.assertRaises(RuntimeError, fd.registerProducer, DummyProducer(), 0)
def test_unconnectedFileDescriptor(self):
"""
Verify that registering a producer when the connection has already
been closed invokes its stopProducing() method.
"""
fd = abstract.FileDescriptor()
fd.disconnected = 1
dp = DummyProducer()
fd.registerProducer(dp, 0)
self.assertEqual(dp.events, ['stop'])
def _dontPausePullConsumerTest(self, methodName):
"""
Pull consumers don't get their C{pauseProducing} method called if the
descriptor buffer fills up.
@param methodName: Either 'write' or 'writeSequence', indicating
which transport method to write data to.
"""
descriptor = SillyDescriptor()
producer = DummyProducer()
descriptor.registerProducer(producer, streaming=False)
self.assertEqual(producer.events, ['resume'])
del producer.events[:]
# Fill up the descriptor's write buffer so we can observe whether or
# not it pauses its producer in that case.
if methodName == "writeSequence":
descriptor.writeSequence([b'1', b'2', b'3', b'4'])
else:
descriptor.write(b'1234')
self.assertEqual(producer.events, [])
def test_dontPausePullConsumerOnWrite(self):
"""
Verify that FileDescriptor does not call producer.pauseProducing() on a
non-streaming pull producer in response to a L{IConsumer.write} call
which results in a full write buffer. Issue #2286.
"""
return self._dontPausePullConsumerTest('write')
def test_dontPausePullConsumerOnWriteSequence(self):
"""
Like L{test_dontPausePullConsumerOnWrite}, but for a call to
C{writeSequence} rather than L{IConsumer.write}.
C{writeSequence} is not part of L{IConsumer}, but
L{abstract.FileDescriptor} has supported consumery behavior in response
to calls to L{writeSequence} forever.
"""
return self._dontPausePullConsumerTest('writeSequence')
def _reentrantStreamingProducerTest(self, methodName):
descriptor = SillyDescriptor()
if methodName == "writeSequence":
data = [b's', b'p', b'am']
else:
data = b"spam"
producer = ReentrantProducer(descriptor, methodName, data)
descriptor.registerProducer(producer, streaming=True)
# Start things off by filling up the descriptor's buffer so it will
# pause its producer.
getattr(descriptor, methodName)(data)
# Sanity check - make sure that worked.
self.assertEqual(producer.events, ['pause'])
del producer.events[:]
# After one call to doWrite, the buffer has been emptied so the
# FileDescriptor should resume its producer. That will result in an
# immediate call to FileDescriptor.write which will again fill the
# buffer and result in the producer being paused.
descriptor.doWrite()
self.assertEqual(producer.events, ['resume', 'pause'])
del producer.events[:]
# After a second call to doWrite, the exact same thing should have
# happened. Prior to the bugfix for which this test was written,
# FileDescriptor would have incorrectly believed its producer was
# already resumed (it was paused) and so not resume it again.
descriptor.doWrite()
self.assertEqual(producer.events, ['resume', 'pause'])
def test_reentrantStreamingProducerUsingWrite(self):
"""
Verify that FileDescriptor tracks producer's paused state correctly.
Issue #811, fixed in revision r12857.
"""
return self._reentrantStreamingProducerTest('write')
def test_reentrantStreamingProducerUsingWriteSequence(self):
"""
Like L{test_reentrantStreamingProducerUsingWrite}, but for calls to
C{writeSequence}.
C{writeSequence} is B{not} part of L{IConsumer}, however
C{abstract.FileDescriptor} has supported consumery behavior in response
to calls to C{writeSequence} forever.
"""
return self._reentrantStreamingProducerTest('writeSequence')
class PortStringificationTests(unittest.TestCase):
if interfaces.IReactorTCP(reactor, None) is not None:
def testTCP(self):
p = reactor.listenTCP(0, protocol.ServerFactory())
portNo = p.getHost().port
self.assertNotEqual(str(p).find(str(portNo)), -1,
"%d not found in %s" % (portNo, p))
return p.stopListening()
if interfaces.IReactorUDP(reactor, None) is not None:
def testUDP(self):
p = reactor.listenUDP(0, protocol.DatagramProtocol())
portNo = p.getHost().port
self.assertNotEqual(str(p).find(str(portNo)), -1,
"%d not found in %s" % (portNo, p))
return p.stopListening()
if interfaces.IReactorSSL(reactor, None) is not None and ssl:
def testSSL(self, ssl=ssl):
pem = util.sibpath(__file__, 'server.pem')
p = reactor.listenSSL(0, protocol.ServerFactory(), ssl.DefaultOpenSSLContextFactory(pem, pem))
portNo = p.getHost().port
self.assertNotEqual(str(p).find(str(portNo)), -1,
"%d not found in %s" % (portNo, p))
return p.stopListening()
if _PY3:
testSSL.skip = ("Re-enable once the Python 3 SSL port is done.")
|
import setuptools
with open('README.md') as file:
readme = file.read()
name = 'aio4chan'
module = __import__(name)
version = module.__version__
author = 'Exahilosys'
url = f'https://github.com/{author}/{name}'
download_url = f'{url}/archive/v{version}.tar.gz'
setuptools.setup(
name = name,
version = version,
author = author,
url = url,
download_url = download_url,
packages = setuptools.find_packages(),
license = 'MIT',
description = 'API wrapper for 4chan.',
long_description = readme,
long_description_content_type = 'text/markdown',
include_package_data = True,
install_requires = ['aiohttp'],
py_modules = [name],
classifiers = [
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
)
|
# coding: utf-8
"""
Signing Today Web
*Signing Today* is the perfect Digital Signature Gateway. Whenever in Your workflow You need to add one or more Digital Signatures to Your document, *Signing Today* is the right choice. You prepare Your documents, *Signing Today* takes care of all the rest: send invitations (`signature tickets`) to signers, collects their signatures, send You back the signed document. Integrating *Signing Today* in Your existing applications is very easy. Just follow these API specifications and get inspired by the many examples presented hereafter. # noqa: E501
The version of the OpenAPI document: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import signing_today_client
from signing_today_client.api.bit4id_pathgroup_digital_signature_transactions_api import Bit4idPathgroupDigitalSignatureTransactionsApi # noqa: E501
from signing_today_client.rest import ApiException
class TestBit4idPathgroupDigitalSignatureTransactionsApi(unittest.TestCase):
"""Bit4idPathgroupDigitalSignatureTransactionsApi unit test stubs"""
def setUp(self):
self.api = signing_today_client.api.bit4id_pathgroup_digital_signature_transactions_api.Bit4idPathgroupDigitalSignatureTransactionsApi() # noqa: E501
def tearDown(self):
pass
def test_d_s_ts_get(self):
"""Test case for d_s_ts_get
Retrieve DSTs # noqa: E501
"""
pass
def test_d_s_ts_post(self):
"""Test case for d_s_ts_post
Create a new DST # noqa: E501
"""
pass
def test_d_st_id_audit_get(self):
"""Test case for d_st_id_audit_get
Retrieve the audit records associated to the DST # noqa: E501
"""
pass
def test_d_st_id_delete(self):
"""Test case for d_st_id_delete
Delete a DST # noqa: E501
"""
pass
def test_d_st_id_fill_patch(self):
"""Test case for d_st_id_fill_patch
Fill a form of a DST # noqa: E501
"""
pass
def test_d_st_id_get(self):
"""Test case for d_st_id_get
Retrieve a DST # noqa: E501
"""
pass
def test_d_st_id_instantiate_post(self):
"""Test case for d_st_id_instantiate_post
Instantiate a DST from a template # noqa: E501
"""
pass
def test_d_st_id_modify_post(self):
"""Test case for d_st_id_modify_post
Modify a published DST template # noqa: E501
"""
pass
def test_d_st_id_notify_post(self):
"""Test case for d_st_id_notify_post
Send notifications for a DST # noqa: E501
"""
pass
def test_d_st_id_publish_post(self):
"""Test case for d_st_id_publish_post
Publish a DST # noqa: E501
"""
pass
def test_d_st_id_put(self):
"""Test case for d_st_id_put
Update a DST # noqa: E501
"""
pass
def test_d_st_id_replace_post(self):
"""Test case for d_st_id_replace_post
Replace a rejected DST # noqa: E501
"""
pass
def test_d_st_id_sign_doc_id_sign_id_get(self):
"""Test case for d_st_id_sign_doc_id_sign_id_get
Return the address for signing # noqa: E501
"""
pass
def test_d_st_id_templatize_post(self):
"""Test case for d_st_id_templatize_post
Create a template from a DST # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
import os
import sys
import traceback
from _pydev_bundle.pydev_imports import xmlrpclib, _queue, Exec
from _pydev_bundle._pydev_calltip_util import get_description
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle.pydevd_constants import (IS_JYTHON, dict_iter_items, NEXT_VALUE_SEPARATOR, Null,
get_global_debugger)
import signal
from contextlib import contextmanager
from _pydev_bundle import pydev_log
try:
import cStringIO as StringIO # may not always be available @UnusedImport
except:
try:
import StringIO # @Reimport
except:
import io as StringIO
# =======================================================================================================================
# BaseStdIn
# =======================================================================================================================
class BaseStdIn:
def __init__(self, original_stdin=sys.stdin, *args, **kwargs):
try:
self.encoding = sys.stdin.encoding
except:
# Not sure if it's available in all Python versions...
pass
self.original_stdin = original_stdin
try:
self.errors = sys.stdin.errors # Who knew? sys streams have an errors attribute!
except:
# Not sure if it's available in all Python versions...
pass
def readline(self, *args, **kwargs):
# sys.stderr.write('Cannot readline out of the console evaluation\n') -- don't show anything
# This could happen if the user had called input('enter number') <-- upon entering this, that message would appear,
# which is not something we want.
return '\n'
def write(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def flush(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def read(self, *args, **kwargs):
# in the interactive interpreter, a read and a readline are the same.
return self.readline()
def close(self, *args, **kwargs):
pass # expected in StdIn
def __iter__(self):
# BaseStdIn would not be considered as Iterable in Python 3 without explicit `__iter__` implementation
return self.original_stdin.__iter__()
def __getattr__(self, item):
# it's called if the attribute wasn't found
if hasattr(self.original_stdin, item):
return getattr(self.original_stdin, item)
raise AttributeError("%s has no attribute %s" % (self.original_stdin, item))
# =======================================================================================================================
# StdIn
# =======================================================================================================================
class StdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, interpreter, host, client_port, original_stdin=sys.stdin):
BaseStdIn.__init__(self, original_stdin)
self.interpreter = interpreter
self.client_port = client_port
self.host = host
def readline(self, *args, **kwargs):
# Ok, callback into the client to get the new input
try:
server = xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
requested_input = server.RequestInput()
if not requested_input:
return '\n' # Yes, a readline must return something (otherwise we can get an EOFError on the input() call).
else:
# readline should end with '\n' (not doing so makes IPython 5 remove the last *valid* character).
requested_input += '\n'
return requested_input
except KeyboardInterrupt:
raise # Let KeyboardInterrupt go through -- #PyDev-816: Interrupting infinite loop in the Interactive Console
except:
return '\n'
def close(self, *args, **kwargs):
pass # expected in StdIn
#=======================================================================================================================
# DebugConsoleStdIn
#=======================================================================================================================
class DebugConsoleStdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, py_db, original_stdin):
'''
:param py_db:
If None, get_global_debugger() is used.
'''
BaseStdIn.__init__(self, original_stdin)
self._py_db = py_db
self._in_notification = 0
def __send_input_requested_message(self, is_started):
try:
py_db = self._py_db
if py_db is None:
py_db = get_global_debugger()
cmd = py_db.cmd_factory.make_input_requested_message(is_started)
py_db.writer.add_command(cmd)
except Exception:
pydev_log.exception()
@contextmanager
def notify_input_requested(self):
self._in_notification += 1
if self._in_notification == 1:
self.__send_input_requested_message(True)
try:
yield
finally:
self._in_notification -= 1
if self._in_notification == 0:
self.__send_input_requested_message(False)
def readline(self, *args, **kwargs):
with self.notify_input_requested():
return self.original_stdin.readline(*args, **kwargs)
def read(self, *args, **kwargs):
with self.notify_input_requested():
return self.original_stdin.read(*args, **kwargs)
class CodeFragment:
def __init__(self, text, is_single_line=True):
self.text = text
self.is_single_line = is_single_line
def append(self, code_fragment):
self.text = self.text + "\n" + code_fragment.text
if not code_fragment.is_single_line:
self.is_single_line = False
# =======================================================================================================================
# BaseInterpreterInterface
# =======================================================================================================================
class BaseInterpreterInterface:
def __init__(self, mainThread, connect_status_queue=None):
self.mainThread = mainThread
self.interruptable = False
self.exec_queue = _queue.Queue(0)
self.buffer = None
self.banner_shown = False
self.connect_status_queue = connect_status_queue
self.mpl_modules_for_patching = {}
self.init_mpl_modules_for_patching()
def build_banner(self):
return 'print({0})\n'.format(repr(self.get_greeting_msg()))
def get_greeting_msg(self):
return 'PyDev console: starting.\n'
def init_mpl_modules_for_patching(self):
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
self.mpl_modules_for_patching = {
"matplotlib": lambda: activate_matplotlib(self.enableGui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab
}
def need_more_for_code(self, source):
# PyDev-502: PyDev 3.9 F2 doesn't support backslash continuations
# Strangely even the IPython console is_complete said it was complete
# even with a continuation char at the end.
if source.endswith('\\'):
return True
if hasattr(self.interpreter, 'is_complete'):
return not self.interpreter.is_complete(source)
try:
# At this point, it should always be single.
# If we don't do this, things as:
#
# for i in range(10): print(i)
#
# (in a single line) don't work.
# Note that it won't give an error and code will be None (so, it'll
# use execMultipleLines in the next call in this case).
symbol = 'single'
code = self.interpreter.compile(source, '<input>', symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
return False
if code is None:
# Case 2
return True
# Case 3
return False
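# Illustrative sketch of the three cases above, assuming self.interpreter.compile
# behaves like codeop.CommandCompiler (as it does for code.InteractiveInterpreter):
#   compile('x === 1', '<input>', 'single')  -> raises SyntaxError  (case 1: invalid, no more input wanted)
#   compile('if x:', '<input>', 'single')    -> None                (case 2: statement incomplete, need more)
#   compile('x = 1', '<input>', 'single')    -> code object         (case 3: complete)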
def need_more(self, code_fragment):
if self.buffer is None:
self.buffer = code_fragment
else:
self.buffer.append(code_fragment)
return self.need_more_for_code(self.buffer.text)
def create_std_in(self, debugger=None, original_std_in=None):
if debugger is None:
return StdIn(self, self.host, self.client_port, original_stdin=original_std_in)
else:
return DebugConsoleStdIn(py_db=debugger, original_stdin=original_std_in)
def add_exec(self, code_fragment, debugger=None):
# In case sys.excepthook called, use original excepthook #PyDev-877: Debug console freezes with Python 3.5+
# (showtraceback does it on python 3.5 onwards)
sys.excepthook = sys.__excepthook__
try:
original_in = sys.stdin
try:
help = None
if 'pydoc' in sys.modules:
pydoc = sys.modules['pydoc'] # Don't import it if it still is not there.
if hasattr(pydoc, 'help'):
# You never know how the API will be changed, so let's code defensively here
help = pydoc.help
if not hasattr(help, 'input'):
help = None
except:
# Just ignore any error here
pass
more = False
try:
sys.stdin = self.create_std_in(debugger, original_in)
try:
if help is not None:
# This will enable the help() function to work.
try:
try:
help.input = sys.stdin
except AttributeError:
help._input = sys.stdin
except:
help = None
if not self._input_error_printed:
self._input_error_printed = True
sys.stderr.write('\nError when trying to update pydoc.help.input\n')
sys.stderr.write('(help() may not work -- please report this as a bug in the pydev bugtracker).\n\n')
traceback.print_exc()
try:
self.start_exec()
if hasattr(self, 'debugger'):
self.debugger.enable_tracing()
more = self.do_add_exec(code_fragment)
if hasattr(self, 'debugger'):
self.debugger.disable_tracing()
self.finish_exec(more)
finally:
if help is not None:
try:
try:
help.input = original_in
except AttributeError:
help._input = original_in
except:
pass
finally:
sys.stdin = original_in
except SystemExit:
raise
except:
traceback.print_exc()
finally:
sys.__excepthook__ = sys.excepthook
return more
def do_add_exec(self, codeFragment):
'''
Subclasses should override.
@return: more (True if more input is needed to complete the statement and False if the statement is complete).
'''
raise NotImplementedError()
def get_namespace(self):
'''
Subclasses should override.
@return: dict with namespace.
'''
raise NotImplementedError()
def __resolve_reference__(self, text):
"""
:type text: str
"""
obj = None
if '.' not in text:
try:
obj = self.get_namespace()[text]
except KeyError:
pass
if obj is None:
try:
obj = self.get_namespace()['__builtins__'][text]
except:
pass
if obj is None:
try:
obj = getattr(self.get_namespace()['__builtins__'], text, None)
except:
pass
else:
try:
last_dot = text.rindex('.')
parent_context = text[0:last_dot]
res = pydevd_vars.eval_in_context(parent_context, self.get_namespace(), self.get_namespace())
obj = getattr(res, text[last_dot + 1:])
except:
pass
return obj
def getDescription(self, text):
try:
obj = self.__resolve_reference__(text)
if obj is None:
return ''
return get_description(obj)
except:
return ''
def do_exec_code(self, code, is_single_line):
try:
code_fragment = CodeFragment(code, is_single_line)
more = self.need_more(code_fragment)
if not more:
code_fragment = self.buffer
self.buffer = None
self.exec_queue.put(code_fragment)
return more
except:
traceback.print_exc()
return False
def execLine(self, line):
return self.do_exec_code(line, True)
def execMultipleLines(self, lines):
if IS_JYTHON:
more = False
for line in lines.split('\n'):
more = self.do_exec_code(line, True)
return more
else:
return self.do_exec_code(lines, False)
def interrupt(self):
self.buffer = None # Also clear the buffer when it's interrupted.
try:
if self.interruptable:
called = False
try:
# Fix for #PyDev-500: Console interrupt can't interrupt on sleep
if os.name == 'posix':
# On Linux we can't interrupt 0 as in Windows because it's
# actually owned by a process -- on the good side, signals
# work much better on Linux!
os.kill(os.getpid(), signal.SIGINT)
called = True
elif os.name == 'nt':
# Stupid windows: sending a Ctrl+C to a process given its pid
# is absurdly difficult.
# There are utilities to make it work such as
# http://www.latenighthacking.com/projects/2003/sendSignal/
# but fortunately for us, it seems Python does allow a CTRL_C_EVENT
# for the current process in Windows if pid 0 is passed... if we needed
# to send a signal to another process the approach would be
# much more difficult.
# Still, note that CTRL_C_EVENT is only Python 2.7 onwards...
# Also, this doesn't seem to be documented anywhere!? (stumbled
# upon it by chance after digging quite a lot).
os.kill(0, signal.CTRL_C_EVENT)
called = True
except:
# Many things to go wrong (from CTRL_C_EVENT not being there
# to failing import signal)... if that's the case, ask for
# forgiveness and go on to the approach which will interrupt
# the main thread (but it'll only work when it's executing some Python
# code -- not on sleep() for instance).
pass
if not called:
if hasattr(thread, 'interrupt_main'): # Jython doesn't have it
thread.interrupt_main()
else:
self.mainThread._thread.interrupt() # Jython
self.finish_exec(False)
return True
except:
traceback.print_exc()
return False
def close(self):
sys.exit(0)
def start_exec(self):
self.interruptable = True
def get_server(self):
if getattr(self, 'host', None) is not None:
return xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
else:
return None
server = property(get_server)
def ShowConsole(self):
server = self.get_server()
if server is not None:
server.ShowConsole()
def finish_exec(self, more):
self.interruptable = False
server = self.get_server()
if server is not None:
return server.NotifyFinished(more)
else:
return True
def getFrame(self):
xml = StringIO.StringIO()
hidden_ns = self.get_ipython_hidden_vars_dict()
xml.write("<xml>")
xml.write(pydevd_xml.frame_vars_to_xml(self.get_namespace(), hidden_ns))
xml.write("</xml>")
return xml.getvalue()
def getVariable(self, attributes):
xml = StringIO.StringIO()
xml.write("<xml>")
val_dict = pydevd_vars.resolve_compound_var_object_fields(self.get_namespace(), attributes)
if val_dict is None:
val_dict = {}
keys = val_dict.keys()
for k in keys:
val = val_dict[k]
evaluate_full_value = pydevd_xml.should_evaluate_full_value(val)
xml.write(pydevd_vars.var_to_xml(val, k, evaluate_full_value=evaluate_full_value))
xml.write("</xml>")
return xml.getvalue()
def getArray(self, attr, roffset, coffset, rows, cols, format):
name = attr.split("\t")[-1]
array = pydevd_vars.eval_in_context(name, self.get_namespace(), self.get_namespace())
return pydevd_vars.table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format)
def evaluate(self, expression):
xml = StringIO.StringIO()
xml.write("<xml>")
result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace())
xml.write(pydevd_vars.var_to_xml(result, expression))
xml.write("</xml>")
return xml.getvalue()
def loadFullValue(self, seq, scope_attrs):
"""
Evaluate full value for async Console variables in a separate thread and send results to IDE side
:param seq: id of command
:param scope_attrs: a sequence of variables with their attributes separated by NEXT_VALUE_SEPARATOR
(i.e.: obj\tattr1\tattr2NEXT_VALUE_SEPARATORobj2\tattr1\tattr2)
:return:
"""
frame_variables = self.get_namespace()
var_objects = []
vars = scope_attrs.split(NEXT_VALUE_SEPARATOR)
for var_attrs in vars:
if '\t' in var_attrs:
name, attrs = var_attrs.split('\t', 1)
else:
name = var_attrs
attrs = None
if name in frame_variables:
var_object = pydevd_vars.resolve_var_object(frame_variables[name], attrs)
var_objects.append((var_object, name))
else:
var_object = pydevd_vars.eval_in_context(name, frame_variables, frame_variables)
var_objects.append((var_object, name))
from _pydevd_bundle.pydevd_comm import GetValueAsyncThreadConsole
t = GetValueAsyncThreadConsole(self.get_server(), seq, var_objects)
t.start()
def changeVariable(self, attr, value):
def do_change_variable():
Exec('%s=%s' % (attr, value), self.get_namespace(), self.get_namespace())
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_change_variable)
def connectToDebugger(self, debuggerPort, debugger_options=None):
'''
Used to show the console with a variables view connection.
Mainly, monkey-patches things in the debugger structure so that the debugger protocol works.
'''
if debugger_options is None:
debugger_options = {}
env_key = "PYDEVD_EXTRA_ENVS"
if env_key in debugger_options:
for (env_name, value) in dict_iter_items(debugger_options[env_key]):
existing_value = os.environ.get(env_name, None)
if existing_value:
os.environ[env_name] = "%s%c%s" % (existing_value, os.path.pathsep, value)
else:
os.environ[env_name] = value
if env_name == "PYTHONPATH":
sys.path.append(value)
del debugger_options[env_key]
def do_connect_to_debugger():
try:
# Try to import the packages needed to attach the debugger
import pydevd
from _pydev_imps._pydev_saved_modules import threading
except:
# This happens on Jython embedded in host eclipse
traceback.print_exc()
sys.stderr.write('pydevd is not available, cannot connect\n')
from _pydevd_bundle.pydevd_constants import set_thread_id
from _pydev_bundle import pydev_localhost
set_thread_id(threading.currentThread(), "console_main")
VIRTUAL_FRAME_ID = "1" # matches PyStackFrameConsole.java
VIRTUAL_CONSOLE_ID = "console_main" # matches PyThreadConsole.java
f = FakeFrame()
f.f_back = None
f.f_globals = {} # As globals=locals here, let's simply let it empty (and save a bit of network traffic).
f.f_locals = self.get_namespace()
self.debugger = pydevd.PyDB()
self.debugger.add_fake_frame(thread_id=VIRTUAL_CONSOLE_ID, frame_id=VIRTUAL_FRAME_ID, frame=f)
try:
pydevd.apply_debugger_options(debugger_options)
self.debugger.connect(pydev_localhost.get_localhost(), debuggerPort)
self.debugger.prepare_to_run()
self.debugger.disable_tracing()
except:
traceback.print_exc()
sys.stderr.write('Failed to connect to target debugger.\n')
# Register to process commands when idle
self.debugrunning = False
try:
import pydevconsole
pydevconsole.set_debug_hook(self.debugger.process_internal_commands)
except:
traceback.print_exc()
sys.stderr.write('Version of Python does not support debuggable Interactive Console.\n')
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_connect_to_debugger)
return ('connect complete',)
def handshake(self):
if self.connect_status_queue is not None:
self.connect_status_queue.put(True)
return "PyCharm"
def get_connect_status_queue(self):
return self.connect_status_queue
def hello(self, input_str):
# Don't care what the input string is
return ("Hello eclipse",)
def enableGui(self, guiname):
''' Enable the GUI specified in guiname (see inputhook for list).
As with IPython, enabling multiple GUIs isn't an error, but
only the last one's main loop runs and it may not work
'''
def do_enable_gui():
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("PyDev console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_enable_gui)
def get_ipython_hidden_vars_dict(self):
return None
# =======================================================================================================================
# FakeFrame
# =======================================================================================================================
class FakeFrame:
'''
Used to show the console with a variables view connection.
A class to be used as a mock of a frame.
'''
|
from .writer import saveMeshTracks
from .reader import loadMeshTracks
from .meshdata import Track, Mesh
|
"""Utilities for setting up a project's settings.
The default way to use this is to import and call :func:`init_settings`
in a project's settings module:
# project/top_level_package/settings.py
from arcutils.settings import init_settings
init_settings()
This adds a few default settings for bootstrapping purposes and then
loads the project's local settings--the django-local-settings variety.
Pass ``local_settings=False`` to :func:`init_settings` if the project
doesn't use django-local-settings.
"""
import base64
import inspect
import ipaddress
import os
import pkg_resources
from datetime import datetime
from django import VERSION as DJANGO_VERSION
from django.conf import settings as django_settings
from django.utils import timezone
from local_settings import NO_DEFAULT, load_and_check_settings, LocalSetting, SecretSetting
from local_settings.settings import DottedAccessDict, Settings as LocalSettings
ARCUTILS_PACKAGE_DIR = pkg_resources.resource_filename('arcutils', '')
class _InternalIPsType:
"""Used to construct a convenient INTERNAL_IPS setting for dev.
An *instance* of this type considers any standard loopback or
private IP address a valid internal IP address.
"""
def __contains__(self, addr):
addr = ipaddress.ip_address(addr)
return addr.is_loopback or addr.is_private
INTERNAL_IPS = _InternalIPsType()
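# Illustrative sketch (not part of the original module): how membership tests
# on the INTERNAL_IPS object above behave. The addresses are made up for the
# example; any loopback or RFC 1918 private address is considered internal.
#
#     >>> '127.0.0.1' in INTERNAL_IPS   # loopback
#     True
#     >>> '10.0.0.5' in INTERNAL_IPS    # private
#     True
#     >>> '8.8.8.8' in INTERNAL_IPS     # public
#     False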
def init_settings(settings=None, local_settings=True, prompt=None, quiet=None, package_level=0,
stack_level=2, drop=(), settings_processors=()):
"""Initialize project settings.
Basic Usage
===========
By default, it's assumed that the project is structured like so,
with the settings module in the top level package::
project/
package/
__init__.py
settings.py
README
setup.py
It's also assumed that :func:`init_settings` will be called from the
global scope of the project's settings module::
# package/settings.py
from arcutils.settings import init_settings
init_settings()
A few default settings that are commonly used in local settings
files will be added (if not explicitly set before calling this
function):
- ARCUTILS_PACKAGE_DIR
- PACKAGE (top level project package)
- PACKAGE_DIR (top level project package directory)
- ROOT_DIR (project directory; should only be used in dev)
- START_TIME (current date/time; will be an "aware" UTC datetime
object if the project has time zone support enabled)
If the project has additional local settings, they must be defined
*before* this function is called.
Advanced Usage
==============
Generally, you won't need to pass ``settings``, but if you do, it
should be a dict of settings as you'd get from calling ``globals()``
in the project's settings module.
If the settings module is in a sub-package, ``package_level`` will
need to be adjusted accordingly. If :func:`init_settings` is being
called from another function, ``stack_level`` will have to be
adjusted accordingly. See :func:`derive_top_level_package_name` for
more info about these args.
The ``PACKAGE``, ``PACKAGE_DIR``, and ``ROOT_DIR`` settings will be
derived based on the location of the settings module this function
is called from. If this isn't working, ensure the ``package_level``
and ``stack_level`` options are correct; or, set the ``PACKAGE``
setting explicitly before calling this function::
PACKAGE = 'quickticket'
init_settings()
``PACKAGE_DIR`` and ``ROOT_DIR`` can also be set explicitly if
necessary.
.. note:: If the package name and related settings can't be derived
automatically, that indicates a bug in this function.
To drop unused default settings, specify a list of such settings via
the ``drop`` arg.
To process settings in any custom manner needed, pass a list of
functions via ``settings_processors``. Each processor will be passed
the settings to be manipulated as necessary.
"""
settings = settings if settings is not None else get_module_globals(stack_level)
if not settings.get('ARCUTILS_PACKAGE_DIR'):
settings['ARCUTILS_PACKAGE_DIR'] = ARCUTILS_PACKAGE_DIR
if not settings.get('PACKAGE'):
# The default value for PACKAGE is derived by figuring out where
# init_settings was called from in terms of package and scope.
settings['PACKAGE'] = derive_top_level_package_name(package_level, stack_level)
if not settings.get('PACKAGE_DIR'):
# The default value for PACKAGE_DIR is simply the directory
# corresponding to PACKAGE.
settings['PACKAGE_DIR'] = pkg_resources.resource_filename(settings['PACKAGE'], '')
if not settings.get('ROOT_DIR'):
# The default value for ROOT_DIR is the directory N levels up
# from PACKAGE_DIR, where N is equal to the package depth of the
# top level package. Note that in most cases N is 1; it will be
# greater than 1 when the top level package is contained in a
# namespace package.
package_depth = len(settings['PACKAGE'].split('.'))
parts = os.path.split(settings['PACKAGE_DIR'])
root_dir = os.path.join(*parts[:package_depth])
settings['ROOT_DIR'] = root_dir
if local_settings:
init_local_settings(settings, prompt=prompt, quiet=quiet)
# NOTE: We can't simply use Django's timezone.now() here because it
# accesses settings.USE_TZ, but at this point the settings
# may not be considered fully configured by Django, so we have
# to do this to avoid an ImproperlyConfigured exception.
use_tz = settings.get('USE_TZ', False)
now = datetime.utcnow().replace(tzinfo=timezone.utc) if use_tz else datetime.now()
settings.setdefault('START_TIME', now)
# Remove the MIDDLEWARE_CLASSES setting on Django >= 1.10, but only
# if the MIDDLEWARE setting is present *and* set.
if DJANGO_VERSION[:2] >= (1, 10):
if settings.get('MIDDLEWARE'):
settings.pop('MIDDLEWARE_CLASSES', None)
# Drop irrelevant settings.
for name in drop:
del settings[name]
for processor in settings_processors:
processor(settings)
return settings
def init_local_settings(settings, prompt=None, quiet=None):
"""Initialize the local settings defined in ``settings``.
Args:
settings (dict): A dict of settings as you'd get from calling
``globals()`` in a Django settings module.
quiet (bool): Squelch standard out when loading local settings.
.. note:: If your project has additional local settings, they must
be defined *before* this function is called.
"""
suggested_secret_key = base64.b64encode(os.urandom(64)).decode('utf-8')
defaults = {
'DEBUG': LocalSetting(False),
'ADMINS': LocalSetting([]),
'ALLOWED_HOSTS': LocalSetting([]),
'GOOGLE': {
'analytics': {
'tracking_id': LocalSetting(
None, doc='Enter Google Analytics tracking ID (UA-NNNNNNNN-N)'
),
},
},
'MANAGERS': LocalSetting([]),
'SECRET_KEY': SecretSetting(doc='Suggested: "{suggested_secret_key}"'.format(**locals())),
'DATABASES': {
'default': {
'ENGINE': LocalSetting('django.db.backends.postgresql'),
'NAME': LocalSetting(settings.get('PACKAGE', NO_DEFAULT)),
'USER': LocalSetting(''),
'PASSWORD': SecretSetting(),
'HOST': LocalSetting(''),
},
},
}
for k, v in defaults.items():
settings.setdefault(k, v)
settings.update(load_and_check_settings(settings, prompt=prompt, quiet=quiet))
def get_setting(name, default=NO_DEFAULT, settings=None):
"""Get setting for ``name``, falling back to ``default`` if passed.
``name`` should be a string like 'ARC.cdn.hosts' or 'X.Y.0'. The
name is split on dots into path segments, then the settings are
traversed like this:
- Set current value to django.conf.settings.{first segment}
- For each other segment
- Get current_value[segment] if current value is a dict
- Get current_value[int(segment)] if current value is a list
If the setting isn't found, the ``default`` value will be returned
if specified; otherwise, a ``KeyError`` will be raised.
``settings`` can be used to retrieve the setting from a settings
object other than the default ``django.conf.settings``.
:class:`local_settings.settings.DottedAccessDict` is used to
implement this functionality. See the django-local-settings project
for more details about settings traversal.
"""
if settings is None:
settings = django_settings
if not isinstance(settings, LocalSettings):
settings = DottedAccessDict(get_settings_dict(settings))
return settings.get_dotted(name, default)
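# Illustrative sketch (hypothetical setting names, invented for the example and
# not defined by this module):
#
#     # Suppose the project's settings module defines:
#     #     ARC = {'cdn': {'hosts': ['cdn.example.com']}}
#     >>> get_setting('ARC.cdn.hosts.0')
#     'cdn.example.com'
#     >>> get_setting('ARC.cdn.nope', default=None) is None
#     True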
class PrefixedSettings:
"""Read-only settings for a given ``prefix``.
Args:
prefix: An upper case setting name such as "CAS" or "LDAP"
defaults: A dict of defaults for the prefix
The idea is to make it easy to fetch sub-settings within a given
package.
For example::
>>> DEFAULT_CAS_SETTINGS = {
... 'base_url': 'https://example.com/cas/',
... # plus a bunch more CAS settings...
... }
>>> cas_settings = PrefixedSettings('CAS', DEFAULT_CAS_SETTINGS)
>>> cas_settings.get('base_url')
'https://example.com/cas/'
>>> cas_settings.get('logout_path', default='/default/logout/path')
'/default/logout/path'
See the ``cas``, ``ldap``, and ``masquerade`` packages for concrete
examples of how this is used.
"""
def __init__(self, prefix, defaults=None, settings=None):
defaults = get_settings_dict(defaults)
settings = get_settings_dict(settings if settings is not None else django_settings)
self.__prefix = prefix
self.__defaults = DottedAccessDict(defaults)
self.__settings = DottedAccessDict(settings)
def get(self, name, default=NO_DEFAULT):
"""Get setting for configured ``prefix``.
Args:
name: setting name without ``prefix``
default: value to use if setting isn't present in the
project's settings or in the ``defaults``
Returns:
object: Value of setting
Attempt to get setting from:
1. Project settings for ``prefix``
2. Default settings from ``defaults``
3. ``default`` arg
Raises:
KeyError: When the setting isn't found in the project's
settings or in the ``defaults`` and no fallback is
passed via the ``default`` keyword arg
"""
qualified_name = '{prefix}.{name}'.format(prefix=self.__prefix, name=name)
try:
return self.__settings.get_dotted(qualified_name)
except KeyError:
return self.__defaults.get_dotted(name, default=default)
def __getitem__(self, key):
return PrefixedSettings.get(self, key, NO_DEFAULT)
# Internal helper functions
def get_settings_dict(settings):
"""For a given settings object, return a dict.
Args:
settings (object): Usually either a Django settings object or
a dict; can also be a sequence that can be converted to
a dict or some other non-dict mapping
Returns:
empty dict: ``settings`` is ``None``
vars(settings._wrapped): ``settings`` is (or appears to be)
a Django settings object
dict(settings): ``settings`` is any other type of object
"""
if settings is None:
return {}
if hasattr(settings, '_wrapped'):
# A Django settings object
# TODO: Find a better way to check for Django settings?
return vars(settings._wrapped)
return dict(settings)
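# Quick illustration of the branches documented above (plain Python values; a
# configured Django settings object would instead hit the _wrapped branch and
# return vars(settings._wrapped)):
#
#     >>> get_settings_dict(None)
#     {}
#     >>> get_settings_dict([('DEBUG', True)])
#     {'DEBUG': True}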
def derive_top_level_package_name(package_level=0, stack_level=1):
"""Return top level package name.
Args:
package_level (int): How many package levels down the caller
is. 0 indicates this function is being called from the top
level package, 1 indicates that it's being called from a
sub-package, etc.
stack_level (int): How many levels down the stack the caller is
from here. 1 indicates this function is being called from
module scope, 2 indicates this function is being called from
another function, etc.
This will first get the package name of the module containing the
caller. ``package_level`` segments will be then be chopped off of
the package name.
If this is called from a sub-package, ``package_level`` will have to
be adjusted accordingly (add 1 for each sub-package).
If this is called indirectly (e.g., via :func:`init_settings`)
``stack_level`` will have to be adjusted accordingly (add 1 for each
nested function).
"""
assert package_level >= 0, 'Package level should be greater than or equal to 0'
assert stack_level > 0, 'Stack level should be greater than 0'
frame = inspect.stack()[stack_level][0]
package = frame.f_globals['__package__']
package = package.rsplit('.', package_level)[0]
return package
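# Worked example of the package_level arithmetic above (package names are
# hypothetical, chosen only to make the docstring concrete):
#
#     init_settings() called from quickticket/settings.py (module scope):
#         __package__ == 'quickticket', package_level=0  -> 'quickticket'
#     init_settings() called from quickticket/config/settings.py:
#         __package__ == 'quickticket.config', package_level=1 -> 'quickticket'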
def get_module_globals(stack_level=2):
frame = inspect.stack()[stack_level][0]
return frame.f_globals
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift import gettext_ as _
from urllib import unquote
from swift.account.utils import account_listing_response
from swift.common.request_helpers import get_listing_content_type
from swift.common.utils import public
from swift.common.constraints import check_metadata, MAX_ACCOUNT_NAME_LENGTH
from swift.common.http import HTTP_NOT_FOUND, HTTP_GONE
from swift.proxy.controllers.base import Controller, clear_info_cache
from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed
class AccountController(Controller):
"""WSGI controller for account requests"""
server_type = 'Account'
def __init__(self, app, account_name, **kwargs):
Controller.__init__(self, app)
self.account_name = unquote(account_name)
if not self.app.allow_account_management:
self.allowed_methods.remove('PUT')
self.allowed_methods.remove('DELETE')
def GETorHEAD(self, req):
"""Handler for HTTP GET/HEAD requests."""
if len(self.account_name) > MAX_ACCOUNT_NAME_LENGTH:
resp = HTTPBadRequest(request=req)
resp.body = 'Account name length of %d longer than %d' % \
(len(self.account_name), MAX_ACCOUNT_NAME_LENGTH)
return resp
partition, nodes = self.app.account_ring.get_nodes(self.account_name)
resp = self.GETorHEAD_base(
req, _('Account'), self.app.account_ring, partition,
req.path_info.rstrip('/'))
if resp.status_int == HTTP_NOT_FOUND:
if resp.headers.get('X-Account-Status', '').lower() == 'deleted':
resp.status = HTTP_GONE
elif self.app.account_autocreate:
resp = account_listing_response(self.account_name, req,
get_listing_content_type(req))
if not req.environ.get('swift_owner', False):
for key in self.app.swift_owner_headers:
if key in resp.headers:
del resp.headers[key]
return resp
@public
def PUT(self, req):
"""HTTP PUT request handler."""
if not self.app.allow_account_management:
return HTTPMethodNotAllowed(
request=req,
headers={'Allow': ', '.join(self.allowed_methods)})
error_response = check_metadata(req, 'account')
if error_response:
return error_response
if len(self.account_name) > MAX_ACCOUNT_NAME_LENGTH:
resp = HTTPBadRequest(request=req)
resp.body = 'Account name length of %d longer than %d' % \
(len(self.account_name), MAX_ACCOUNT_NAME_LENGTH)
return resp
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'PUT',
req.path_info, [headers] * len(accounts))
return resp
@public
def POST(self, req):
"""HTTP POST request handler."""
if len(self.account_name) > MAX_ACCOUNT_NAME_LENGTH:
resp = HTTPBadRequest(request=req)
resp.body = 'Account name length of %d longer than %d' % \
(len(self.account_name), MAX_ACCOUNT_NAME_LENGTH)
return resp
error_response = check_metadata(req, 'account')
if error_response:
return error_response
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'POST',
req.path_info, [headers] * len(accounts))
if resp.status_int == HTTP_NOT_FOUND and self.app.account_autocreate:
self.autocreate_account(req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'POST',
req.path_info, [headers] * len(accounts))
return resp
@public
def DELETE(self, req):
"""HTTP DELETE request handler."""
# Extra safety in case someone typos a query string for an
# account-level DELETE request that was really meant to be caught by
# some middleware.
if req.query_string:
return HTTPBadRequest(request=req)
if not self.app.allow_account_management:
return HTTPMethodNotAllowed(
request=req,
headers={'Allow': ', '.join(self.allowed_methods)})
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'DELETE',
req.path_info, [headers] * len(accounts))
return resp
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 21:03:57 2020
@author: Mehul
"""
#importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
import warnings
from matplotlib import style
from collections import Counter
from math import sqrt
style.use('fivethirtyeight')
#defining knn function
def k_nearest_neighbors(data,predict,k=3):
distances=[]
if len(data) >= k:
#not an error, just a warning: the algorithm still works, but ideally k should exceed the number of voting groups
warnings.warn('The value of k is not greater than the number of voting groups.')
for group in data:
#data is a dictionary of lists with different groups of classes
for features in data[group]:
#features represent the points in the dataset
#original way
#euclidean_distance=sqrt((features[0]-predict[0])**2+(features[1]-predict[1])**2)
#faster way
euclidean_distance=np.linalg.norm(np.array(features)-np.array(predict))
distances.append([euclidean_distance,group])
#once we have all the distances, only the k smallest matter
#we populate the list of votes with the classes of the k nearest neighbours of the prediction point
votes=[i[1] for i in sorted(distances)[:k] ]
#using counter we calculate the most common out of the nearest neighbors
vote_result=Counter(votes).most_common(1)[0][0]
#we can also report a confidence: the fraction of the k votes that agree with the prediction
#confidence=Counter(votes).most_common(1)[0][1]/k
return vote_result
def accuracy_of_result(train_set,test_set):
#initialising the counters
correct=0
total=0
#testing and finding accuracy
for group in test_set:
for data in test_set[group]:
#iterating through all the data in a class
result=k_nearest_neighbors(train_set,data,k=5)
if (group==result):
correct=correct+1
total=total+1
accuracy=correct/total
return accuracy
'''
#trial data
#our data is in the form of a dictionary of lists
dataset={'k':[[1,2],[2,3,],[3,1]],'r':[[6,5],[7,7],[8,6]]}
new_features=[5,7]
#plotting the data
plt.scatter(new_features[0],new_features[1],s=50)
for i in dataset:
for j in dataset[i]:
print(j)
plt.scatter(j[0],j[1],s=100,color=i)
#applying knn model
result=k_nearest_neighbors(dataset,new_features,k=3)#result represents the class the prediction point belongs to
#plotting the prediction
plt.scatter(new_features[0],new_features[1],s=50,color=result)
for i in dataset:
for j in dataset[i]:
print(j)
plt.scatter(j[0],j[1],s=100,color=i)
'''
#Implementing the model on the test dataset
#importing the dataset
dataset=pd.read_csv('breast-cancer-wisconsin.data.txt')
#replacing missing values with a large outlier number
dataset.replace('?',-99999,inplace=True)
dataset.drop(['id'],axis=1,inplace=True)
dataset=dataset.astype(float).values.tolist()
#shuffling the data to include some randomness
#this does not change the relationship between the data points
#this is what can be used for cross-validation
random.shuffle(dataset)
#splitting the dataset into test set and train set
test_size=0.2
#the train set and the test set are dictionary of lists
train_set={2:[],4:[]}
test_set={2:[],4:[]}
#slicing the data into train_data and test_data
train_data=dataset[:-int(test_size*len(dataset))] #all the data upto the last 20%
test_data=dataset[-int(test_size*len(dataset)):] #the last 20%
#populating the dictionary
#here we take the data from the train_data and the test_data and use it to populate our dictionaries
for i in train_data:
train_set[i[-1]].append(i[:-1])# i[-1] represents the class of the particular row
for i in test_data:
test_set[i[-1]].append(i[:-1])# i[-1] represents the class of the particular row
#getting the accuracy of our knn model on the dataset
print('Accuracy of the result:',accuracy_of_result(train_set,test_set))
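#Minimal usage sketch (assumes the train_set/test_set dictionaries built above;
#mirrors the commented-out confidence line inside k_nearest_neighbors): classify
#one sample and report the fraction of the k votes that agree with the prediction.
def predict_with_confidence(train_set, sample, k=5):
    distances = []
    for group in train_set:
        for features in train_set[group]:
            distances.append([np.linalg.norm(np.array(features) - np.array(sample)), group])
    votes = [d[1] for d in sorted(distances)[:k]]
    label, count = Counter(votes).most_common(1)[0]
    return label, count / k
#example call (uses the first test sample of class 2, the benign class):
#label, confidence = predict_with_confidence(train_set, test_set[2][0], k=5)
#print(label, confidence)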
|
# -*- coding:utf-8 -*-
from mongoengine import (IntField, DateTimeField, StringField, ReferenceField, DictField)
from model import BaseModel
# from ext import db
class Account(BaseModel):
name = StringField(max_length=5000, null=False)
tel = IntField(null=False)
password = StringField(max_length=5000, null=False)
head_img_key = StringField(max_length=5000, null=False)
meta = {'collection': 'account'}
|
import autofit as af
import autolens as al
from test_autolens.integration.tests.interferometer import runner
test_type = "lens_only"
test_name = "lens_x2_light__hyper"
data_type = "lens_x2_light"
data_resolution = "sma"
def make_pipeline(
name,
phase_folders,
real_space_shape_2d=(100, 100),
real_space_pixel_scales=(0.1, 0.1),
non_linear_class=af.MultiNest,
):
class LensPlaneGalaxyX2Phase(al.PhaseInterferometer):
def customize_priors(self, results):
self.galaxies.lens_0.light.centre_0 = -1.0
self.galaxies.lens_0.light.centre_1 = -1.0
self.galaxies.lens_1.light.centre_0 = 1.0
self.galaxies.lens_1.light.centre_1 = 1.0
phase1 = LensPlaneGalaxyX2Phase(
phase_name="phase_1",
phase_folders=phase_folders,
galaxies=dict(
lens_0=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
lens_1=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
),
real_space_shape_2d=real_space_shape_2d,
real_space_pixel_scales=real_space_pixel_scales,
non_linear_class=non_linear_class,
)
phase1.optimizer.const_efficiency_mode = True
phase1.optimizer.n_live_points = 40
phase1.optimizer.sampling_efficiency = 0.8
phase1 = phase1.extend_with_multiple_hyper_phases(hyper_galaxy=True)
phase2 = al.PhaseInterferometer(
phase_name="phase_2",
phase_folders=phase_folders,
galaxies=dict(
lens_0=al.GalaxyModel(
redshift=0.5,
light=phase1.result.model.galaxies.lens_0.light,
hyper_galaxy=phase1.result.hyper_combined.instance.galaxies.lens_0.hyper_galaxy,
),
lens_1=al.GalaxyModel(
redshift=0.5,
light=phase1.result.model.galaxies.lens_1.light,
hyper_galaxy=phase1.result.hyper_combined.instance.galaxies.lens_1.hyper_galaxy,
),
),
real_space_shape_2d=real_space_shape_2d,
real_space_pixel_scales=real_space_pixel_scales,
non_linear_class=non_linear_class,
)
phase2.optimizer.const_efficiency_mode = True
phase2.optimizer.n_live_points = 40
phase2.optimizer.sampling_efficiency = 0.8
return al.PipelineDataset(name, phase1, phase2)
if __name__ == "__main__":
import sys
runner.run(sys.modules[__name__])
|
from django.test import TestCase, override_settings
from model_bakery import baker
from rest_framework.test import APIClient
from accounts.models import User
from core.models import CoreSettings
from rest_framework.authtoken.models import Token
class TacticalTestCase(TestCase):
def authenticate(self):
self.john = User(username="john")
self.john.set_password("hunter2")
self.john.save()
self.alice = User(username="alice")
self.alice.set_password("hunter2")
self.alice.save()
self.client_setup()
self.client.force_authenticate(user=self.john)
def setup_agent_auth(self, agent):
agent_user = User.objects.create_user(
username=agent.agent_id, password=User.objects.make_random_password(60)
)
Token.objects.create(user=agent_user)
def client_setup(self):
self.client = APIClient()
# fixes tests waiting 2 minutes for mesh token to appear
@override_settings(
MESH_TOKEN_KEY="41410834b8bb4481446027f87d88ec6f119eb9aa97860366440b778540c7399613f7cabfef4f1aa5c0bd9beae03757e17b2e990e5876b0d9924da59bdf24d3437b3ed1a8593b78d65a72a76c794160d9"
)
def setup_coresettings(self):
self.coresettings = CoreSettings.objects.create()
def check_not_authenticated(self, method, url):
self.client.logout()
switch = {
"get": self.client.get(url),
"post": self.client.post(url),
"put": self.client.put(url),
"patch": self.client.patch(url),
"delete": self.client.delete(url),
}
r = switch.get(method)
self.assertEqual(r.status_code, 401)
def create_checks(self, policy=None, agent=None, script=None):
if not policy and not agent:
return
# will create 1 of every check and associate it with the policy object passed
check_recipes = [
"checks.diskspace_check",
"checks.ping_check",
"checks.cpuload_check",
"checks.memory_check",
"checks.winsvc_check",
"checks.script_check",
"checks.eventlog_check",
]
checks = list()
for recipe in check_recipes:
if not script:
checks.append(baker.make_recipe(recipe, policy=policy, agent=agent))
else:
checks.append(
baker.make_recipe(recipe, policy=policy, agent=agent, script=script)
)
return checks
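# Usage sketch (hypothetical test module built on this base class; the URL is
# illustrative only):
#   class TestAgentViews(TacticalTestCase):
#       def setUp(self):
#           self.authenticate()
#           self.setup_coresettings()
#       def test_requires_auth(self):
#           self.check_not_authenticated("get", "/agents/")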
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Union
import numpy as np
import scipy.signal
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent, PolicyGradientRescaler
from rl_coach.architectures.tensorflow_components.heads.policy_head import PolicyHeadParameters
from rl_coach.architectures.tensorflow_components.heads.v_head import VHeadParameters
from rl_coach.architectures.tensorflow_components.middlewares.fc_middleware import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, \
AgentParameters
from rl_coach.logger import screen
from rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters
from rl_coach.spaces import DiscreteActionSpace
from rl_coach.utils import last_sample
from rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedderParameters
class ActorCriticAlgorithmParameters(AlgorithmParameters):
def __init__(self):
super().__init__()
self.policy_gradient_rescaler = PolicyGradientRescaler.A_VALUE
self.apply_gradients_every_x_episodes = 5
self.beta_entropy = 0
self.num_steps_between_gradient_updates = 5000 # this is called t_max in all the papers
self.gae_lambda = 0.96
self.estimate_state_value_using_gae = False
class ActorCriticNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
self.middleware_parameters = FCMiddlewareParameters()
self.heads_parameters = [VHeadParameters(), PolicyHeadParameters()]
self.loss_weights = [0.5, 1.0]
self.rescale_gradient_from_head_by_factor = [1, 1]
self.optimizer_type = 'Adam'
self.clip_gradients = 40.0
self.async_training = True
class ActorCriticAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=ActorCriticAlgorithmParameters(),
exploration=None, #TODO this should be different for continuous (ContinuousEntropyExploration)
# and discrete (CategoricalExploration) action spaces.
memory=SingleEpisodeBufferParameters(),
networks={"main": ActorCriticNetworkParameters()})
@property
def path(self):
return 'rl_coach.agents.actor_critic_agent:ActorCriticAgent'
# Actor Critic - https://arxiv.org/abs/1602.01783
class ActorCriticAgent(PolicyOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.last_gradient_update_step_idx = 0
self.action_advantages = self.register_signal('Advantages')
self.state_values = self.register_signal('Values')
self.value_loss = self.register_signal('Value Loss')
self.policy_loss = self.register_signal('Policy Loss')
# Discounting function used to calculate discounted returns.
def discount(self, x, gamma):
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
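# Note: lfilter([1], [1, -gamma], x[::-1])[::-1] computes the reverse discounted
# cumulative sum y[t] = x[t] + gamma * y[t+1], i.e. the usual discounted-return
# recursion. Quick illustrative check: discount([1., 1., 1.], 0.9) -> [2.71, 1.9, 1.].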
def get_general_advantage_estimation_values(self, rewards, values):
# values contain n+1 elements (t ... t+n), rewards contain n elements (t ... t+n-1)
bootstrap_extended_rewards = np.array(rewards.tolist() + [values[-1]])
# Approximation based calculation of GAE (mathematically correct only when Tmax = inf,
# although in practice works even in much smaller Tmax values, e.g. 20)
deltas = rewards + self.ap.algorithm.discount * values[1:] - values[:-1]
gae = self.discount(deltas, self.ap.algorithm.discount * self.ap.algorithm.gae_lambda)
if self.ap.algorithm.estimate_state_value_using_gae:
discounted_returns = np.expand_dims(gae + values[:-1], -1)
else:
discounted_returns = np.expand_dims(np.array(self.discount(bootstrap_extended_rewards,
self.ap.algorithm.discount)), 1)[:-1]
return gae, discounted_returns
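# For reference (generalized advantage estimation): delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
# and A_t = sum_{l >= 0} (gamma * lambda)^l * delta_{t+l}, which is exactly the
# discounted sum of the deltas computed above via self.discount(deltas, gamma * lambda).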
def learn_from_batch(self, batch):
# batch contains a list of episodes to learn from
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
# get the values for the current states
result = self.networks['main'].online_network.predict(batch.states(network_keys))
current_state_values = result[0]
self.state_values.add_sample(current_state_values)
# the targets for the state value estimator
num_transitions = batch.size
state_value_head_targets = np.zeros((num_transitions, 1))
# estimate the advantage function
action_advantages = np.zeros((num_transitions, 1))
if self.policy_gradient_rescaler == PolicyGradientRescaler.A_VALUE:
if batch.game_overs()[-1]:
R = 0
else:
R = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]
for i in reversed(range(num_transitions)):
R = batch.rewards()[i] + self.ap.algorithm.discount * R
state_value_head_targets[i] = R
action_advantages[i] = R - current_state_values[i]
elif self.policy_gradient_rescaler == PolicyGradientRescaler.GAE:
# get bootstraps
bootstrapped_value = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0]
values = np.append(current_state_values, bootstrapped_value)
if batch.game_overs()[-1]:
values[-1] = 0
# get general discounted returns table
gae_values, state_value_head_targets = self.get_general_advantage_estimation_values(batch.rewards(), values)
action_advantages = np.vstack(gae_values)
else:
screen.warning("WARNING: The requested policy gradient rescaler is not available")
action_advantages = action_advantages.squeeze(axis=-1)
actions = batch.actions()
if not isinstance(self.spaces.action, DiscreteActionSpace) and len(actions.shape) < 2:
actions = np.expand_dims(actions, -1)
# train
result = self.networks['main'].online_network.accumulate_gradients({**batch.states(network_keys),
'output_1_0': actions},
[state_value_head_targets, action_advantages])
# logging
total_loss, losses, unclipped_grads = result[:3]
self.action_advantages.add_sample(action_advantages)
self.unclipped_grads.add_sample(unclipped_grads)
self.value_loss.add_sample(losses[0])
self.policy_loss.add_sample(losses[1])
return total_loss, losses, unclipped_grads
def get_prediction(self, states):
tf_input_state = self.prepare_batch_for_inference(states, "main")
return self.networks['main'].online_network.predict(tf_input_state)[1:] # index 0 is the state value
|
""" parquet compat """
from __future__ import annotations
import io
import os
from typing import Any
from warnings import catch_warnings
from pandas._typing import (
FilePath,
ReadBuffer,
StorageOptions,
WriteBuffer,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas import (
DataFrame,
MultiIndex,
get_option,
)
from pandas.core.shared_docs import _shared_docs
from pandas.util.version import Version
from pandas.io.common import (
IOHandles,
get_handle,
is_fsspec_url,
is_url,
stringify_path,
)
def get_engine(engine: str) -> BaseImpl:
"""return our implementation"""
if engine == "auto":
engine = get_option("io.parquet.engine")
if engine == "auto":
# try engines in this order
engine_classes = [PyArrowImpl, FastParquetImpl]
error_msgs = ""
for engine_class in engine_classes:
try:
return engine_class()
except ImportError as err:
error_msgs += "\n - " + str(err)
raise ImportError(
"Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
"A suitable version of "
"pyarrow or fastparquet is required for parquet "
"support.\n"
"Trying to import the above resulted in these errors:"
f"{error_msgs}"
)
if engine == "pyarrow":
return PyArrowImpl()
elif engine == "fastparquet":
return FastParquetImpl()
raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
def _get_path_or_handle(
path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
fs: Any,
storage_options: StorageOptions = None,
mode: str = "rb",
is_dir: bool = False,
) -> tuple[
FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any
]:
"""File handling for PyArrow."""
path_or_handle = stringify_path(path)
if is_fsspec_url(path_or_handle) and fs is None:
fsspec = import_optional_dependency("fsspec")
fs, path_or_handle = fsspec.core.url_to_fs(
path_or_handle, **(storage_options or {})
)
elif storage_options and (not is_url(path_or_handle) or mode != "rb"):
# can't write to a remote url
# without making use of fsspec at the moment
raise ValueError("storage_options passed with buffer, or non-supported URL")
handles = None
if (
not fs
and not is_dir
and isinstance(path_or_handle, str)
and not os.path.isdir(path_or_handle)
):
# use get_handle only when we are very certain that it is not a directory
# fsspec resources can also point to directories
# this branch is used for example when reading from non-fsspec URLs
handles = get_handle(
path_or_handle, mode, is_text=False, storage_options=storage_options
)
fs = None
path_or_handle = handles.handle
return path_or_handle, handles, fs
class BaseImpl:
@staticmethod
def validate_dataframe(df: DataFrame):
if not isinstance(df, DataFrame):
raise ValueError("to_parquet only supports IO with DataFrames")
# must have value column names for all index levels (strings only)
if isinstance(df.columns, MultiIndex):
if not all(
x.inferred_type in {"string", "empty"} for x in df.columns.levels
):
raise ValueError(
"""
parquet must have string column names for all values in
each level of the MultiIndex
"""
)
else:
if df.columns.inferred_type not in {"string", "empty"}:
raise ValueError("parquet must have string column names")
# index level names must be strings
valid_names = all(
isinstance(name, str) for name in df.index.names if name is not None
)
if not valid_names:
raise ValueError("Index level names must be strings")
def write(self, df: DataFrame, path, compression, **kwargs):
raise AbstractMethodError(self)
def read(self, path, columns=None, **kwargs):
raise AbstractMethodError(self)
class PyArrowImpl(BaseImpl):
def __init__(self):
import_optional_dependency(
"pyarrow", extra="pyarrow is required for parquet support."
)
import pyarrow.parquet
# import utils to register the pyarrow extension types
import pandas.core.arrays._arrow_utils # noqa:F401
self.api = pyarrow
def write(
self,
df: DataFrame,
path: FilePath | WriteBuffer[bytes],
compression: str | None = "snappy",
index: bool | None = None,
storage_options: StorageOptions = None,
partition_cols: list[str] | None = None,
**kwargs,
):
self.validate_dataframe(df)
from_pandas_kwargs: dict[str, Any] = {"schema": kwargs.pop("schema", None)}
if index is not None:
from_pandas_kwargs["preserve_index"] = index
table = self.api.Table.from_pandas(df, **from_pandas_kwargs)
path_or_handle, handles, kwargs["filesystem"] = _get_path_or_handle(
path,
kwargs.pop("filesystem", None),
storage_options=storage_options,
mode="wb",
is_dir=partition_cols is not None,
)
try:
if partition_cols is not None:
# writes to multiple files under the given path
self.api.parquet.write_to_dataset(
table,
path_or_handle,
compression=compression,
partition_cols=partition_cols,
**kwargs,
)
else:
# write to single output file
self.api.parquet.write_table(
table, path_or_handle, compression=compression, **kwargs
)
finally:
if handles is not None:
handles.close()
def read(
self,
path,
columns=None,
use_nullable_dtypes=False,
storage_options: StorageOptions = None,
**kwargs,
):
kwargs["use_pandas_metadata"] = True
to_pandas_kwargs = {}
if use_nullable_dtypes:
import pandas as pd
mapping = {
self.api.int8(): pd.Int8Dtype(),
self.api.int16(): pd.Int16Dtype(),
self.api.int32(): pd.Int32Dtype(),
self.api.int64(): pd.Int64Dtype(),
self.api.uint8(): pd.UInt8Dtype(),
self.api.uint16(): pd.UInt16Dtype(),
self.api.uint32(): pd.UInt32Dtype(),
self.api.uint64(): pd.UInt64Dtype(),
self.api.bool_(): pd.BooleanDtype(),
self.api.string(): pd.StringDtype(),
}
to_pandas_kwargs["types_mapper"] = mapping.get
manager = get_option("mode.data_manager")
if manager == "array":
to_pandas_kwargs["split_blocks"] = True # type: ignore[assignment]
path_or_handle, handles, kwargs["filesystem"] = _get_path_or_handle(
path,
kwargs.pop("filesystem", None),
storage_options=storage_options,
mode="rb",
)
try:
result = self.api.parquet.read_table(
path_or_handle, columns=columns, **kwargs
).to_pandas(**to_pandas_kwargs)
if manager == "array":
result = result._as_manager("array", copy=False)
return result
finally:
if handles is not None:
handles.close()
class FastParquetImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of fastparquet
# we need to import on first use
fastparquet = import_optional_dependency(
"fastparquet", extra="fastparquet is required for parquet support."
)
self.api = fastparquet
def write(
self,
df: DataFrame,
path,
compression="snappy",
index=None,
partition_cols=None,
storage_options: StorageOptions = None,
**kwargs,
):
self.validate_dataframe(df)
# thriftpy/protocol/compact.py:339:
# DeprecationWarning: tostring() is deprecated.
# Use tobytes() instead.
if "partition_on" in kwargs and partition_cols is not None:
raise ValueError(
"Cannot use both partition_on and "
"partition_cols. Use partition_cols for partitioning data"
)
elif "partition_on" in kwargs:
partition_cols = kwargs.pop("partition_on")
if partition_cols is not None:
kwargs["file_scheme"] = "hive"
# cannot use get_handle as write() does not accept file buffers
path = stringify_path(path)
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
# if filesystem is provided by fsspec, file must be opened in 'wb' mode.
kwargs["open_with"] = lambda path, _: fsspec.open(
path, "wb", **(storage_options or {})
).open()
elif storage_options:
raise ValueError(
"storage_options passed with file object or non-fsspec file path"
)
with catch_warnings(record=True):
self.api.write(
path,
df,
compression=compression,
write_index=index,
partition_on=partition_cols,
**kwargs,
)
def read(
self, path, columns=None, storage_options: StorageOptions = None, **kwargs
):
parquet_kwargs: dict[str, Any] = {}
use_nullable_dtypes = kwargs.pop("use_nullable_dtypes", False)
if Version(self.api.__version__) >= Version("0.7.1"):
# We are disabling nullable dtypes for fastparquet pending discussion
parquet_kwargs["pandas_nulls"] = False
if use_nullable_dtypes:
raise ValueError(
"The 'use_nullable_dtypes' argument is not supported for the "
"fastparquet engine"
)
path = stringify_path(path)
handles = None
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
if Version(self.api.__version__) > Version("0.6.1"):
parquet_kwargs["fs"] = fsspec.open(
path, "rb", **(storage_options or {})
).fs
else:
parquet_kwargs["open_with"] = lambda path, _: fsspec.open(
path, "rb", **(storage_options or {})
).open()
elif isinstance(path, str) and not os.path.isdir(path):
# use get_handle only when we are very certain that it is not a directory
# fsspec resources can also point to directories
# this branch is used for example when reading from non-fsspec URLs
handles = get_handle(
path, "rb", is_text=False, storage_options=storage_options
)
path = handles.handle
parquet_file = self.api.ParquetFile(path, **parquet_kwargs)
result = parquet_file.to_pandas(columns=columns, **kwargs)
if handles is not None:
handles.close()
return result
@doc(storage_options=_shared_docs["storage_options"])
def to_parquet(
df: DataFrame,
path: FilePath | WriteBuffer[bytes] | None = None,
engine: str = "auto",
compression: str | None = "snappy",
index: bool | None = None,
storage_options: StorageOptions = None,
partition_cols: list[str] | None = None,
**kwargs,
) -> bytes | None:
"""
Write a DataFrame to the parquet format.
Parameters
----------
df : DataFrame
path : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function. If None, the result is
returned as bytes. If a string, it will be used as Root Directory path
when writing a partitioned dataset. The engine fastparquet does not
accept file-like objects.
.. versionchanged:: 1.2.0
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}},
default 'snappy'. Name of the compression to use. Use ``None``
for no compression. The supported compression methods actually
depend on which engine is used. For 'pyarrow', 'snappy', 'gzip',
'brotli', 'lz4', 'zstd' are all supported. For 'fastparquet',
only 'gzip' and 'snappy' are supported.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output. If
``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
partition_cols : str or list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
{storage_options}
.. versionadded:: 1.2.0
kwargs
Additional keyword arguments passed to the engine
Returns
-------
bytes if no path argument is provided else None
"""
if isinstance(partition_cols, str):
partition_cols = [partition_cols]
impl = get_engine(engine)
path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path
impl.write(
df,
path_or_buf,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
if path is None:
assert isinstance(path_or_buf, io.BytesIO)
return path_or_buf.getvalue()
else:
return None
@doc(storage_options=_shared_docs["storage_options"])
def read_parquet(
path,
engine: str = "auto",
columns=None,
storage_options: StorageOptions = None,
use_nullable_dtypes: bool = False,
**kwargs,
):
"""
Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : str, path object or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gs, and file. For file URLs, a host is expected. A local file could be:
``file://localhost/path/to/table.parquet``.
A file URL can also be a path to a directory that contains multiple
partitioned parquet files. Both pyarrow and fastparquet support
paths to directories as well as file URLs. A directory path could be:
``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
columns : list, default=None
If not None, only these columns will be read from the file.
{storage_options}
.. versionadded:: 1.3.0
use_nullable_dtypes : bool, default False
If True, use dtypes that use ``pd.NA`` as missing value indicator
for the resulting DataFrame. (only applicable for the ``pyarrow``
engine)
As new dtypes are added that support ``pd.NA`` in the future, the
output with this option will change to use those dtypes.
Note: this is an experimental option, and behaviour (e.g. additional
support dtypes) may change without notice.
.. versionadded:: 1.2.0
**kwargs
Any additional kwargs are passed to the engine.
Returns
-------
DataFrame
"""
impl = get_engine(engine)
return impl.read(
path,
columns=columns,
storage_options=storage_options,
use_nullable_dtypes=use_nullable_dtypes,
**kwargs,
)
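# Illustrative round-trip sketch (not part of this module; assumes the pyarrow
# engine is installed):
#   import pandas as pd
#   df = pd.DataFrame({"a": [1, 2, 3]})
#   buf = df.to_parquet(path=None)             # returns bytes, since path is None
#   restored = pd.read_parquet(io.BytesIO(buf))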
|
from builtins import zip
from builtins import range
from builtins import object
import re
import csv
import unicodecsv
from bs4 import BeautifulSoup
from openelex.base.load import BaseLoader
from openelex.models import RawResult
from openelex.lib.text import ocd_type_id, slugify
from .datasource import Datasource
class LoadResults(object):
"""Entry point for data loading.
Determines appropriate loader for file and triggers load process.
"""
def run(self, mapping):
election_id = mapping['election']
if '2002' in election_id:
loader = MDLoader2002()
elif '2000' in election_id and 'primary' in election_id:
loader = MDLoader2000Primary()
elif '2008' in election_id and 'special' in election_id:
loader = MDLoader2008Special()
else:
loader = MDLoader()
loader.run(mapping)
class CountyOCDMixin(object):
"""
Loader mixin that adds convenience method for generating county-level
OCD IDs
"""
def _get_county_ocd_id(self, jurisdiction):
"""
Build an OCD ID for a county-level jurisdiction when the mapping
reflects the state OCD ID.
"""
# Baltimore City is treated like a county in the results, but we
# should use the city's OCD ID
if jurisdiction == "Baltimore City":
ocd_id = "{}/place:baltimore".format(self.mapping['ocd_id'])
else:
ocd_id = "{}/county:{}".format(self.mapping['ocd_id'],
ocd_type_id(jurisdiction))
return ocd_id
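# Illustrative outputs (assuming self.mapping['ocd_id'] is
# 'ocd-division/country:us/state:md'; the exact county slug comes from ocd_type_id):
#   _get_county_ocd_id("Baltimore City") -> 'ocd-division/country:us/state:md/place:baltimore'
#   _get_county_ocd_id("Anne Arundel")   -> something like '.../state:md/county:anne_arundel'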
class MDBaseLoader(CountyOCDMixin, BaseLoader):
datasource = Datasource()
target_offices = set([
'President - Vice Pres',
'President and Vice President of the United States',
'U.S. Senator',
'U.S. Congress',
'Representative in Congress',
'Governor / Lt. Governor',
'Comptroller',
'Attorney General',
'State Senator',
'House of Delegates',
])
district_offices = set([
'U.S. Congress',
'Representative in Congress',
'State Senator',
"House of Delegates",
])
def _skip_row(self, row):
"""
Should this row be skipped?
This should be implemented in subclasses.
"""
return False
class MDLoader(MDBaseLoader):
"""
Parse Maryland election results for the 2000 general election and
all elections after 2002.
"""
def load(self):
with self._file_handle as csvfile:
results = []
reader = unicodecsv.DictReader(csvfile)
for row in reader:
# Skip non-target offices
if self._skip_row(row):
continue
elif 'state_legislative' in self.source:
results.extend(self._prep_state_leg_results(row))
elif 'precinct' in self.source:
results.append(self._prep_precinct_result(row))
else:
results.append(self._prep_county_result(row))
RawResult.objects.insert(results)
def _skip_row(self, row):
if row['Office Name'] == None:
return True
return row['Office Name'].strip() not in self.target_offices
def _build_contest_kwargs(self, row, primary_type):
kwargs = {
'office': row['Office Name'].strip(),
'district': row['Office District'].strip(),
}
# Add party if it's a primary
#TODO: QUESTION: Should semi-closed also have party?
if primary_type == 'closed':
kwargs['primary_party'] = row['Party'].strip()
return kwargs
def _build_candidate_kwargs(self, row):
try:
full_name = row['Candidate Name'].strip()
except KeyError:
# 2000 results use "Candidate" for the column name
full_name = row['Candidate'].strip()
slug = slugify(full_name, substitute='-')
kwargs = {
'full_name': full_name,
#TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
'name_slug': slug,
}
return kwargs
def _base_kwargs(self, row):
"Build base set of kwargs for RawResult"
# TODO: Can this just be called once?
kwargs = self._build_common_election_kwargs()
contest_kwargs = self._build_contest_kwargs(row, kwargs['primary_type'])
candidate_kwargs = self._build_candidate_kwargs(row)
kwargs.update(contest_kwargs)
kwargs.update(candidate_kwargs)
return kwargs
def _get_state_ocd_id(self):
"""
Get the state portion of the mapping's OCD ID
This is necessary because the mappings for some files have OCD IDs
like 'ocd-division/country:us/state:md/sldl:all'. We need to extract
the state portion, 'ocd-division/country:us/state:md' to build OCD
IDs for lower jurisdictions.
"""
bits = []
state_bit = "state:"+ self.state
for bit in self.mapping['ocd_id'].split('/'):
bits.append(bit)
if bit == state_bit:
break
return '/'.join(bits)
def _prep_state_leg_results(self, row):
kwargs = self._base_kwargs(row)
kwargs.update({
'reporting_level': 'state_legislative',
'winner': row['Winner'].strip(),
'write_in': self._writein(row),
'party': row['Party'].strip(),
})
try:
kwargs['write_in'] = row['Write-In?'].strip() # at the contest-level
except KeyError as e:
pass
results = []
for field, val in list(row.items()):
clean_field = field.strip()
# Legislative fields prefixed with LEGS
if not clean_field.startswith('LEGS'):
continue
kwargs.update({
'jurisdiction': clean_field,
# Remove the "LEGS " from the ocd_id. This is a somewhat
# transformy action, but do it here in order to make the OCD IDs
# as usable as possible when we bake out raw results
'ocd_id': "{}/sldl:{}".format(self._get_state_ocd_id(),
ocd_type_id(clean_field.replace("LEGS ", ""))),
'votes': self._votes(val),
})
results.append(RawResult(**kwargs))
return results
def _prep_county_result(self, row):
kwargs = self._base_kwargs(row)
vote_brkdown_fields = [
('election_day', 'Election Night Votes'),
('absentee', 'Absentees Votes'),
('provisional', 'Provisional Votes'),
('second_absentee', '2nd Absentees Votes'),
]
vote_breakdowns = {}
for field, key in vote_brkdown_fields:
try:
vote_breakdowns[field] = self._votes(row[key].strip())
except KeyError:
pass
kwargs.update({
'reporting_level': 'county',
'jurisdiction': self.mapping['name'],
'ocd_id': self.mapping['ocd_id'],
'party': row['Party'].strip(),
'votes': self._votes(row['Total Votes']),
'vote_breakdowns': vote_breakdowns,
})
if (kwargs['office'] not in self.district_offices
and kwargs['district'] != ''):
kwargs['reporting_level'] = 'congressional_district_by_county'
kwargs['reporting_district'] = kwargs['district']
del kwargs['district']
return RawResult(**kwargs)
def _prep_precinct_result(self, row):
kwargs = self._base_kwargs(row)
precinct = "%s-%s" % (row['Election District'], row['Election Precinct'].strip())
ocd_id = "{}/precinct:{}".format(self.mapping['ocd_id'],
ocd_type_id(precinct))
kwargs.update({
'reporting_level': 'precinct',
'jurisdiction': precinct,
'parent_jurisdiction': self.mapping['name'],
'ocd_id': ocd_id,
'party': row['Party'].strip(),
'votes': self._votes(row['Election Night Votes']),
'votes_type': 'election_day',
'winner': row['Winner'],
'write_in': self._writein(row),
})
return RawResult(**kwargs)
def _votes(self, val):
"""
Returns cleaned version of votes or 0 if it's a non-numeric value.
"""
if val.strip() == '':
return 0
try:
return int(float(val))
except ValueError:
# Couldn't convert the value from a string
return 0
def _writein(self, row):
# sometimes write-in field not present
try:
write_in = row['Write-In?'].strip()
except KeyError:
write_in = None
return write_in
class MDLoader2002(MDBaseLoader):
"""
Loads Maryland results for 2002.
Format:
Maryland results for 2002 are in a delimited text file where the delimiter
is '|'.
Fields:
0: Office
1: Office District - '-' is used to denote null values
2: County
3: Last Name - "zz998" is used for write-in candidates
4: Middle Name - "\\N" is used to denote null values
5: First Name - "Other Write-Ins" is used for write-in candidates
6: Party
7: Winner - Value is 0 or 1
8: UNKNOWN - Values are "(Vote for One)", "(Vote for No More Than Three)", etc.
9: Votes
10: UNKNOWN - Values are "\\N" for every row
Sample row:
House of Delegates |32 |Anne Arundel County |Burton |W. |Robert |Republican | 0|(Vote for No More Than Three) | 1494|\\N
Notes:
In the general election file, there are rows for judges and for
"Statewide Ballot Questions". The columns in these rows are shifted over,
but we can ignore these rows since we're not interested in these offices.
"""
def load(self):
headers = [
'office',
'district',
'jurisdiction',
'family_name',
'additional_name',
'given_name',
'party',
'winner',
'vote_type',
'votes',
'fill2'
]
self._common_kwargs = self._build_common_election_kwargs()
self._common_kwargs['reporting_level'] = 'county'
# Store result instances for bulk loading
results = []
with self._file_handle as csvfile:
reader = unicodecsv.DictReader(csvfile, fieldnames=headers, delimiter='|')
for row in reader:
if self._skip_row(row):
continue
rr_kwargs = self._common_kwargs.copy()
if rr_kwargs['primary_type'] == 'closed':
rr_kwargs['primary_party'] = row['party'].strip()
rr_kwargs.update(self._build_contest_kwargs(row))
rr_kwargs.update(self._build_candidate_kwargs(row))
jurisdiction = row['jurisdiction'].strip()
rr_kwargs.update({
'party': row['party'].strip(),
'jurisdiction': jurisdiction,
'ocd_id': self._get_county_ocd_id(jurisdiction),
'office': row['office'].strip(),
'district': row['district'].strip(),
'votes': int(row['votes'].strip()),
})
results.append(RawResult(**rr_kwargs))
RawResult.objects.insert(results)
def _skip_row(self, row):
return row['office'].strip() not in self.target_offices
def _build_contest_kwargs(self, row):
return {
'office': row['office'].strip(),
'district': row['district'].strip(),
}
def _build_candidate_kwargs(self, row):
return {
'family_name': row['family_name'].strip(),
'given_name': row['given_name'].strip(),
'additional_name': row['additional_name'].strip(),
}
class MDLoader2000Primary(MDBaseLoader):
office_choices = [
"President and Vice President of the United States",
"U.S. Senator",
"Representative in Congress",
"Judge of the Circuit Court",
"Female Delegates and Alternate to the Democratic National Convention",
"Female Delegates to the Democratic National Convention",
"Male Delegates to the Democratic National Convention",
"Male Delegates and Alternate to the Democratic National Convention",
"Delegates to the Republican National Convention",
]
def load(self):
candidates = {}
winner_name = None
results = []
last_office = None
last_party = None
last_district = None
common_kwargs = self._build_common_election_kwargs()
with self._file_handle as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if not len(row):
continue # Skip blank lines
# determine if this is a row with an office
office, party, district = self._parse_header(row)
if office:
# It's a header row
if office in self.target_offices:
# It's an office we care about. Save the office and
# party for the next row
last_office = office
last_party = party
last_district = district
else:
last_office = None
last_party = None
last_district = None
elif last_office and row[0] == '':
# Candidate name row
candidates, winner_name = self._parse_candidates(row)
elif last_office: # has to be a county result
new_results = self._parse_results(row, last_office,
last_party, last_district,
candidates, winner_name, common_kwargs)
results.extend(new_results)
RawResult.objects.insert(results)
def _parse_header(self, row):
"""
Returns a tuple of office and party and congressional district
if the row is a header.
Returns (None, None, None) for a non-header row.
Note that the district doesn't represent the district of the office
"""
office = self._parse_office(row)
if office:
party = self._parse_party(row)
district = self._parse_district(row)
else:
party = None
district = None
return office, party, district
def _parse_office(self, row):
for o in self.office_choices:
if o in row[0]:
return o
return None
def _parse_party(self, row):
if 'Democratic' in row[0]:
return 'Democratic'
elif 'Republican' in row[0]:
return 'Republican'
else:
return None
def _parse_district(self, row):
if 'District' not in row[0]:
return None
return re.search(r'(\d+)', row[0]).groups(0)[0]
def _parse_candidates(self, row):
candidates = []
winner = None  # avoids an UnboundLocalError when no cell is marked 'Winner'
for col in row:
if col != '':
full_name = col.strip()
if 'Winner' in full_name:
# Trim winner from candidate name
full_name, remainder = full_name.split(' Winner')
winner = full_name
candidates.append(full_name)
return candidates, winner
# TODO: QUESTION: How to handle "Uncommitted to any ..." values
def _parse_results(self, row, office, party, district, candidates,
winner_name, common_kwargs):
results = []
cols = [x.strip() for x in row if x != '']
county = cols[0].strip()
cand_results = list(zip(candidates, cols[1:]))
for cand, votes in cand_results:
result_kwargs = common_kwargs.copy()
result_kwargs.update({
'jurisdiction': county,
'ocd_id': self._get_county_ocd_id(county),
'office': office,
'party': party,
'full_name': cand,
'votes': int(votes),
})
if result_kwargs['primary_type'] == 'closed':
result_kwargs['primary_party'] = party
if office == "Representative in Congress":
# In the case of U.S. representatives, the district represents
# the office district. In all other cases, it just
# represents the level of result aggregation.
result_kwargs['district'] = district
if cand == winner_name:
result_kwargs['winner'] = 'Winner'
# Try to figure out if this is a case where results are
# provided by congressional district split by county and
# record this.
result_kwargs['reporting_level'] = self._get_reporting_level(district)
if result_kwargs['reporting_level'] == 'congressional_district_by_county':
result_kwargs['reporting_district'] = district
results.append(RawResult(**result_kwargs))
return results
def _get_reporting_level(self, district):
"""
Returns the reporting level based on the value of the results' district.
This deals with the way in which results for 2000 primaries are
returned broken down by both congressional district, split by county.
"""
if district:
return "congressional_district_by_county"
else:
return "county"
class MDLoader2008Special(CountyOCDMixin, BaseLoader):
"""
Loader for the Maryland 2008 4th Congressional District Special election results
"""
datasource = Datasource()
def load(self):
table = self._get_html_table()
rows = self._parse_html_table(table)
winner_name = self._parse_winner_name(table)
candidate_attrs = self._parse_candidates_and_parties(rows[0],
winner_name)
results = self._parse_results(rows[1:3], candidate_attrs)
RawResult.objects.insert(results)
def _get_html_table(self):
soup = BeautifulSoup(self._file_handle, 'html.parser')
return soup.find(text=re.compile("Donna Edwards")).parent.parent.parent
def _parse_html_table(self, table):
rows = []
for tr in table.find_all('tr'):
rows.append(self._parse_html_table_row(tr))
return rows
def _parse_html_table_row(self, tr):
row = []
cells = tr.find_all('th') + tr.find_all('td')
for cell in cells:
row.append(cell.text.strip())
return row
def _parse_winner_name(self, table):
cell = table.select('th > img')[0].parent
return self._parse_name(cell.text.strip())
def _parse_candidates_and_parties(self, row, winner_name):
candidate_attrs = []
for cell in row[1:]:
# The first cell (the "County" header) was skipped via row[1:] above
attrs = {
'full_name': self._parse_name(cell),
'party': self._parse_party(cell),
'write_in': self._parse_write_in(cell),
}
if attrs['full_name'] == winner_name:
attrs['contest_winner'] = True
candidate_attrs.append(attrs)
return candidate_attrs
def _parse_name(self, s):
if s == "Other Write-Ins":
return s
# We know that all the candidate names are just first and last names
bits = re.split(r'\s', s)
return ' '.join(bits[:2])
def _parse_party(self, s):
if s == "Other Write-Ins":
return None
bits = re.split(r'\s', s)
return bits[2]
def _parse_write_in(self, s):
if s == "Other Write-Ins":
return s
elif "Write-In" in s:
return "Write-In"
else:
return ""
def _parse_results(self, rows, candidate_attrs):
# These raw result attributes will be the same for every result.
common_kwargs = self._build_common_election_kwargs()
common_kwargs.update({
'office': "Representative in Congress",
'district': '4',
'reporting_level': "county",
})
results = []
for row in rows:
county = row[0]
for i in range(1, len(row)):
kwargs = common_kwargs.copy()
kwargs.update(candidate_attrs[i-1])
kwargs['jurisdiction'] = county
kwargs['ocd_id'] = self._get_county_ocd_id(county)
kwargs['votes'] = self._parse_votes(row[i])
results.append(RawResult(**kwargs))
return results
def _parse_votes(self, s):
return int(s.split(' ')[0].replace(',', ''))
|
# -*- coding: utf-8 -*-
"""
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import re
from django.template import base
# from typing import List
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
DEBUG = False
ALLOWED_HOSTS = [] # type: List[str]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admindocs",
"django.contrib.sites",
"django.contrib.humanize",
{% if cookiecutter.install_allauth == "y" %}
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.facebook",
"allauth.socialaccount.providers.google",
{% endif %}
"{{cookiecutter.project_slug}}.apps.core.apps.CoreConfig",
{%- if cookiecutter.install_allauth == "y" %}
"{{cookiecutter.project_slug}}.apps.myauth",
"{{cookiecutter.project_slug}}.apps.profile.apps.ProfileConfig",
{%- endif %}
{%- if cookiecutter.install_rq == "y" %}
"django_rq",
"rq_scheduler",
"django_redis",
{%- endif %}
{%- if cookiecutter.install_wagtail == "y" %}
"wagtail.contrib.forms",
"wagtail.contrib.redirects",
"wagtail.embeds",
"wagtail.sites",
"wagtail.users",
"wagtail.snippets",
"wagtail.documents",
"wagtail.images",
"wagtail.search",
"wagtail.admin",
"wagtail.core",
"modelcluster",
"taggit",
{%- endif %}
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"{{cookiecutter.project_slug}}.contrib.request.global_middleware",
{%- if cookiecutter.install_allauth == "y" %}
"{{cookiecutter.project_slug}}.apps.profile.middleware.ProfileMiddleware",
{%- endif %}
{% if cookiecutter.install_wagtail == "y" %}
"wagtail.core.middleware.SiteMiddleware",
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
{% endif -%}
] # List[str]
ROOT_URLCONF = "{{cookiecutter.project_slug}}.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": ["{{cookiecutter.project_slug}}/templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django_settings_export.settings_export",
],
},
},
]
WSGI_APPLICATION = "{{cookiecutter.project_slug}}.wsgi.application"
{% if cookiecutter.install_allauth == "y" -%}
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
{%- endif %}
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Pacific/Auckland"
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, "static"),
]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s [%(asctime)s] %(name)s.%(funcName)s:%(lineno)s| %(message)s"
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"loggers": {
"{{cookiecutter.project_slug}}": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
"django": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
},
}
SITE_ID = 1
STATIC_ROOT = os.path.join(BASE_DIR, ".static")
GTM_ID = None
SETTINGS_EXPORT = [
"DEBUG",
"GTM_ID",
]
MEDIA_ROOT = os.path.join(BASE_DIR, "tmp")
MEDIA_URL = "/media/"
MEDIA_SECRET = "change_me_in_prod"
# this allows Django template tags to span multiple lines.
# http://zachsnow.com/#!/blog/2016/multiline-template-tags-django/
base.tag_re = re.compile(base.tag_re.pattern, re.DOTALL)
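# Illustrative effect (template syntax, not Python): a tag like the one below can
# now span multiple lines, which the default tag regex would not match:
#   {% if user.is_authenticated
#         and user.profile.is_complete %} ... {% endif %}
# The attribute names above are hypothetical.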
{%- if cookiecutter.install_rq == "y" %}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": os.environ.get("REDIS_URL", "redis://127.0.0.1:6379/1"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient"
}
},
}
RQ_QUEUES = {
"default": {
"USE_REDIS_CACHE": "default",
"ASYNC": True
},
}
RQ_SHOW_ADMIN_LINK = True
{% endif %}
STATIC_LOGGING = {
"env": os.environ.get("DJANGO_SETTINGS_MODULE", "").split(".")[-1]
}
{% if cookiecutter.install_wagtail == "y" %}
WAGTAIL_SITE_NAME = "{{cookiecutter.project_slug}}"
{% endif -%}
{%- if cookiecutter.install_allauth == "y" %}
# DJANGO ALLAUTH CONFIG: http://django-allauth.readthedocs.io/en/stable/configuration.html
ACCOUNT_USER_DISPLAY = "allauth.account.utils.user_email"
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_ADAPTER = "{{cookiecutter.project_slug}}.apps.myauth.adapters.AccountAdapter"
LOGIN_URL = "account_login"
LOGIN_REDIRECT_URL = "profile:edit"
LOGOUT_REDIRECT_URL = "account_login"
{% endif -%}
|
""" If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below 1000.
"""
def mul_sum(a: int=3, b: int=5):
max_num = 1000
all_nums = [x for x in range(1, max_num) if (x % a == 0) or (x % b == 0)]
return sum(all_nums)
if __name__ == "__main__":
result = mul_sum()
print(result)
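#Equivalent closed-form sketch using inclusion-exclusion over arithmetic series
#(illustrative only; assumes a and b are coprime so lcm(a, b) == a * b):
def mul_sum_closed(limit: int = 1000, a: int = 3, b: int = 5) -> int:
    def series_sum(n: int) -> int:
        m = (limit - 1) // n
        return n * m * (m + 1) // 2
    return series_sum(a) + series_sum(b) - series_sum(a * b)
#both approaches agree: mul_sum() == mul_sum_closed() == 233168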
|
from ..adapter import CustomSocialAccountAdapter
def test_authentication_error_logs(mocker):
mocker.patch(
"allauth.socialaccount.adapter.DefaultSocialAccountAdapter.authentication_error"
) # noqa
error = mocker.patch("{{cookiecutter.project_slug}}.multisalesforce.adapter.logger.error")
adapter = CustomSocialAccountAdapter()
adapter.authentication_error()
assert error.called
|
from typing import Optional
from pydantic import BaseSettings
class Settings(BaseSettings):
APP_ENDPOINT: str = 'localhost:8080'
CONFIG_PATH: Optional[str] = None
DATACENTER_ID: int = 0
WORKER_ID: int = 0
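# Usage sketch (hypothetical values): pydantic's BaseSettings reads matching
# environment variables (case-insensitive by default), e.g.
#   DATACENTER_ID=3 WORKER_ID=7 python app.py
# would give Settings().DATACENTER_ID == 3 and Settings().WORKER_ID == 7.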
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'offline.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
import os
from PIL import Image
#Sanity check: make sure we are working in the expected directory
rep_cour=os.getcwd()
if rep_cour!=r"C:\Documents and Settings\Administrateur\Bureau\ISN/trait_img":
os.chdir(r"C:\Documents and Settings\Administrateur\Bureau\ISN/trait_img")
print(os.getcwd())
print("Everything is in order!")
#Image parameters + display
nom_image=("img_base.pgm")
img_in=Image.open(nom_image)
print("Image name:",nom_image)
print("Image format:",img_in.format)
print("Image size:",img_in.size)
print("Image mode:",img_in.mode)
#img_in.show()
#Creating a thresholded (black-and-white) copy
taille=img_in.size
col=taille[0]
lgn=taille[1]
img_out=Image.new(img_in.mode,img_in.size)
y=0
x=0
while (x<col):
while(y<lgn):
p=img_in.getpixel((x,y))
if p<175:
img_out.putpixel((x,y),p)
else:
img_out.putpixel((x,y),255)
y=y+1
p=0
y=0
x=x+1
nom_copie_image=("img_copie.pgm")
img_out.save(nom_copie_image)
img_in_1=Image.open(nom_copie_image)
#img_in_1.show()
#Creating a negative copy
img_out=Image.new(img_in.mode,img_in.size)
y=0
x=0
while (x<col):
while(y<lgn):
p=img_in.getpixel((x,y))
p=255-p
img_out.putpixel((x,y),p)
y=y+1
p=0
y=0
x=x+1
nom_copie_image=("img_copie_negatif.pgm")
img_out.save(nom_copie_image)
img_in_2=Image.open(nom_copie_image)
#img_in_2.show()
#Creating a half-size (reduced) copy
img_out=Image.new(img_in.mode,(int(col/2)+1,int(lgn/2)+1))
y=0
x=0
y1=0
x1=0
while (x<col):
while(y<lgn):
p=img_in.getpixel((x,y))
img_out.putpixel((x1,y1),p)
y=y+2
y1=y1+1
p=0
y1=0
y=0
x1=x1+1
x=x+2
nom_copie_image=("img_copie_reduc.pgm")
img_out.save(nom_copie_image)
img_in_3=Image.open(nom_copie_image)
#img_in_3.show()
#Creating a "photomaton" (photo-booth tiling) copy
img_out=Image.new(img_in.mode,img_in.size)
y=0
x=0
y1=0
x1=0
while (x<col):
while(y<lgn):
p=img_in.getpixel((x,y))
img_out.putpixel((x1+int(col/2),y1),p)
img_out.putpixel((x1,y1+int(lgn/2)),p)
img_out.putpixel((x1,y1),p)
img_out.putpixel((x1+int(col/2),y1+int(lgn/2)),p)
y=y+2
y1=y1+1
p=0
y1=0
y=0
x1=x1+1
x=x+2
nom_copie_image=("img_copie_photomaton.pgm")
img_out.save(nom_copie_image)
img_in_4=Image.open(nom_copie_image)
#img_in_4.show()
#Creating an edge-detection copy
img_out=Image.new(img_in.mode,img_in.size)
y=1
x=1
while (x<col-1):
while(y<lgn-1):
#getpixel takes only the coordinate tuple; the four neighbours give a simple gradient
b=img_in.getpixel((x+1,y))
c=img_in.getpixel((x,y+1))
d=img_in.getpixel((x-1,y))
e=img_in.getpixel((x,y-1))
t=((b-d)**2+(c-e)**2)**0.5
if t>25:
p=255
else:
p=0
img_out.putpixel((x,y),p)
y=y+1
p=0
y=0
x=x+1
nom_copie_image=("img_copie_effetbord.pgm")
img_out.save(nom_copie_image)
img_in_5=Image.open(nom_copie_image)
img_in_5.show()
|
# -*- coding: utf-8 -*-
"""Test sequences for graphiness.
"""
# Copyright (C) 2004-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import heapq
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
                        'Pieter Swart (swart@lanl.gov)',
                        'Dan Schult (dschult@colgate.edu)',
                        'Joel Miller (joel.c.miller.research@gmail.com)',
                        'Ben Edwards',
                        'Brian Cloteaux <brian.cloteaux@nist.gov>'])
__all__ = ['is_graphical',
'is_multigraphical',
'is_pseudographical',
'is_digraphical',
'is_valid_degree_sequence_erdos_gallai',
'is_valid_degree_sequence_havel_hakimi',
]
def is_graphical(sequence, method='eg'):
"""Returns True if sequence is a valid degree sequence.
A degree sequence is valid if some graph can realize it.
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
method : "eg" | "hh"
The method used to validate the degree sequence.
"eg" corresponds to the Erdős-Gallai algorithm, and
"hh" to the Havel-Hakimi algorithm.
Returns
-------
valid : bool
True if the sequence is a valid degree sequence and False if not.
Examples
--------
>>> G = nx.path_graph(4)
>>> sequence = (d for n, d in G.degree())
>>> nx.is_graphical(sequence)
True
References
----------
Erdős-Gallai
[EG1960]_, [choudum1986]_
Havel-Hakimi
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
if method == 'eg':
valid = is_valid_degree_sequence_erdos_gallai(list(sequence))
elif method == 'hh':
valid = is_valid_degree_sequence_havel_hakimi(list(sequence))
else:
msg = "`method` must be 'eg' or 'hh'"
raise nx.NetworkXException(msg)
return valid
def _basic_graphical_tests(deg_sequence):
# Sort and perform some simple tests on the sequence
if not nx.utils.is_list_of_ints(deg_sequence):
raise nx.NetworkXUnfeasible
p = len(deg_sequence)
num_degs = [0] * p
dmax, dmin, dsum, n = 0, p, 0, 0
for d in deg_sequence:
# Reject if degree is negative or larger than the sequence length
if d < 0 or d >= p:
raise nx.NetworkXUnfeasible
# Process only the non-zero integers
elif d > 0:
dmax, dmin, dsum, n = max(dmax, d), min(dmin, d), dsum + d, n + 1
num_degs[d] += 1
# Reject sequence if it has odd sum or is oversaturated
if dsum % 2 or dsum > n * (n - 1):
raise nx.NetworkXUnfeasible
return dmax, dmin, dsum, n, num_degs
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation proceeds using the Havel-Hakimi theorem.
Worst-case run time is $O(s)$ where $s$ is the sum of the sequence.
Parameters
----------
deg_sequence : list
A list of integers where each element specifies the degree of a node
in a graph.
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
The ZZ condition says that for the sequence d if
.. math::
|d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}
then d is graphical. This was shown in Theorem 6 in [1]_.
References
----------
.. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
try:
dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
return True
modstubs = [0] * (dmax + 1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while num_degs[dmax] == 0:
dmax -= 1
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n - 1:
return False
# Remove largest stub in list
num_degs[dmax], n = num_degs[dmax] - 1, n - 1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while num_degs[k] == 0:
k -= 1
num_degs[k], n = num_degs[k] - 1, n - 1
if k > 1:
modstubs[mslen] = k - 1
mslen += 1
# Add back to the list any non-zero stubs that were removed
for i in range(mslen):
stub = modstubs[i]
num_degs[stub], n = num_degs[stub] + 1, n + 1
return True
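# Illustrative doctest-style check (added note, not part of the original module):
# the star K_{1,3} has degree sequence [3, 1, 1, 1], which Havel-Hakimi accepts,
# while [3, 3, 1, 1] is rejected because two degree-3 nodes cannot both be
# realized on only four vertices without parallel edges.
# >>> is_valid_degree_sequence_havel_hakimi([3, 1, 1, 1])
# True
# >>> is_valid_degree_sequence_havel_hakimi([3, 3, 1, 1])
# False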
def is_valid_degree_sequence_erdos_gallai(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation is done using the Erdős-Gallai theorem [EG1960]_.
Parameters
----------
deg_sequence : list
A list of integers
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
This implementation uses an equivalent form of the Erdős-Gallai criterion.
Worst-case run time is $O(n)$ where $n$ is the length of the sequence.
Specifically, a sequence d is graphical if and only if the
sum of the sequence is even and for all strong indices k in the sequence,
.. math::
        \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_j,k)
              = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )
A strong index k is any index where d_k >= k and the value n_j is the
number of occurrences of j in d. The maximal strong index is called the
Durfee index.
This particular rearrangement comes from the proof of Theorem 3 in [2]_.
The ZZ condition says that for the sequence d if
.. math::
|d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}
then d is graphical. This was shown in Theorem 6 in [2]_.
References
----------
.. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
Discrete Mathematics, 265, pp. 417-420 (2003).
.. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[EG1960]_, [choudum1986]_
"""
try:
dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1):
return True
# Perform the EG checks using the reformulation of Zverovich and Zverovich
k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
for dk in range(dmax, dmin - 1, -1):
if dk < k + 1: # Check if already past Durfee index
return True
if num_degs[dk] > 0:
run_size = num_degs[dk] # Process a run of identical-valued degrees
if dk < k + run_size: # Check if end of run is past Durfee index
run_size = dk - k # Adjust back to Durfee index
sum_deg += run_size * dk
for v in range(run_size):
sum_nj += num_degs[k + v]
sum_jnj += (k + v) * num_degs[k + v]
k += run_size
if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj:
return False
return True
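# Worked example (added note, not part of the original module): for d = [3, 3, 2, 2]
# the largest strong (Durfee) index is k = 2, and
# d_1 + d_2 = 6 <= k(k-1) + min(d_3, 2) + min(d_4, 2) = 2 + 2 + 2 = 6,
# so the Erdős-Gallai inequality holds and the sequence is graphical.
# >>> is_valid_degree_sequence_erdos_gallai([3, 3, 2, 2])
# True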
def is_multigraphical(sequence):
"""Returns True if some multigraph can realize the sequence.
Parameters
----------
    sequence : list or iterable container
        A sequence of integer node degrees
Returns
-------
valid : bool
        True if the sequence is a multigraphic degree sequence and False if not.
Notes
-----
The worst-case run time is $O(n)$ where $n$ is the length of the sequence.
References
----------
.. [1] S. L. Hakimi. "On the realizability of a set of integers as
degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
(1962).
"""
deg_sequence = list(sequence)
if not nx.utils.is_list_of_ints(deg_sequence):
return False
dsum, dmax = 0, 0
for d in deg_sequence:
if d < 0:
return False
dsum, dmax = dsum + d, max(dmax, d)
if dsum % 2 or dsum < 2 * dmax:
return False
return True
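# Illustrative check (added note, not part of the original module): a multigraph
# allows parallel edges, so the only requirements are an even sum and that the
# largest degree does not exceed the sum of the others.
# >>> is_multigraphical([4, 2, 2])
# True
# >>> is_multigraphical([4, 1, 1])
# False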
def is_pseudographical(sequence):
"""Returns True if some pseudograph can realize the sequence.
Every nonnegative integer sequence with an even sum is pseudographical
(see [1]_).
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
Returns
-------
valid : bool
True if the sequence is a pseudographic degree sequence and False if not.
Notes
-----
    The worst-case run time is $O(n)$ where $n$ is the length of the sequence.
References
----------
.. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
pp. 778-782 (1976).
"""
s = list(sequence)
if not nx.utils.is_list_of_ints(s):
return False
return sum(s) % 2 == 0 and min(s) >= 0
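# Illustrative check (added note, not part of the original module): self-loops make
# any nonnegative sequence with an even sum realizable, e.g. a single node of
# degree 4 is just two self-loops.
# >>> is_pseudographical([4])
# True
# >>> is_pseudographical([3])
# False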
def is_digraphical(in_sequence, out_sequence):
r"""Returns True if some directed graph can realize the in- and out-degree
sequences.
Parameters
----------
in_sequence : list or iterable container
A sequence of integer node in-degrees
out_sequence : list or iterable container
A sequence of integer node out-degrees
Returns
-------
valid : bool
        True if the in- and out-degree sequences are digraphical, False if not.
Notes
-----
This algorithm is from Kleitman and Wang [1]_.
The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the
sum and length of the sequences respectively.
References
----------
.. [1] D.J. Kleitman and D.L. Wang
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
"""
in_deg_sequence = list(in_sequence)
out_deg_sequence = list(out_sequence)
if not nx.utils.is_list_of_ints(in_deg_sequence):
return False
if not nx.utils.is_list_of_ints(out_deg_sequence):
return False
# Process the sequences and form two heaps to store degree pairs with
# either zero or non-zero out degrees
sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
maxn = max(nin, nout)
maxin = 0
if maxn == 0:
return True
stubheap, zeroheap = [], []
for n in range(maxn):
in_deg, out_deg = 0, 0
if n < nout:
out_deg = out_deg_sequence[n]
if n < nin:
in_deg = in_deg_sequence[n]
if in_deg < 0 or out_deg < 0:
return False
sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
if in_deg > 0:
stubheap.append((-1 * out_deg, -1 * in_deg))
elif out_deg > 0:
zeroheap.append(-1 * out_deg)
if sumin != sumout:
return False
heapq.heapify(stubheap)
heapq.heapify(zeroheap)
modstubs = [(0, 0)] * (maxin + 1)
# Successively reduce degree sequence by removing the maximum out degree
while stubheap:
# Take the first value in the sequence with non-zero in degree
(freeout, freein) = heapq.heappop(stubheap)
freein *= -1
if freein > len(stubheap) + len(zeroheap):
return False
# Attach out stubs to the nodes with the most in stubs
mslen = 0
for i in range(freein):
if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
stubout = heapq.heappop(zeroheap)
stubin = 0
else:
(stubout, stubin) = heapq.heappop(stubheap)
if stubout == 0:
return False
# Check if target is now totally connected
if stubout + 1 < 0 or stubin < 0:
modstubs[mslen] = (stubout + 1, stubin)
mslen += 1
# Add back the nodes to the heap that still have available stubs
for i in range(mslen):
stub = modstubs[i]
if stub[1] < 0:
heapq.heappush(stubheap, stub)
else:
heapq.heappush(zeroheap, stub[0])
if freeout < 0:
heapq.heappush(zeroheap, freeout)
return True
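# Illustrative check (added note, not part of the original module): a directed
# 3-cycle gives every node in-degree 1 and out-degree 1, so the paired sequences
# are digraphical; making the in- and out-degree sums unequal fails immediately.
# >>> is_digraphical([1, 1, 1], [1, 1, 1])
# True
# >>> is_digraphical([1, 1, 1], [2, 1, 1])
# False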
|
import logging
import operator
import time
import traceback
from pathlib import Path
from typing import List, Type, Set, Tuple, Optional
from PyQt5.QtCore import QEvent, Qt, pyqtSignal
from PyQt5.QtGui import QIcon, QWindowStateChangeEvent, QCursor
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QCheckBox, QHeaderView, QToolBar, \
QLabel, QPlainTextEdit, QProgressBar, QPushButton, QComboBox, QApplication, QListView, QSizePolicy, \
QMenu, QHBoxLayout
from bauh.api import user
from bauh.api.abstract.cache import MemoryCache
from bauh.api.abstract.context import ApplicationContext
from bauh.api.abstract.controller import SoftwareManager, SoftwareAction
from bauh.api.abstract.model import SoftwarePackage
from bauh.api.abstract.view import MessageType
from bauh.api.http import HttpClient
from bauh.api.paths import LOGS_DIR
from bauh.commons.html import bold
from bauh.context import set_theme
from bauh.stylesheet import read_all_themes_metadata, ThemeMetadata
from bauh.view.core.config import CoreConfigManager
from bauh.view.core.tray_client import notify_tray
from bauh.view.qt import dialog, commons, qt_utils
from bauh.view.qt.about import AboutDialog
from bauh.view.qt.apps_table import PackagesTable, UpgradeToggleButton
from bauh.view.qt.commons import sum_updates_displayed
from bauh.view.qt.components import new_spacer, IconButton, QtComponentsManager, to_widget, QSearchBar, \
QCustomMenuAction, QCustomToolbar
from bauh.view.qt.dialog import ConfirmationDialog
from bauh.view.qt.history import HistoryDialog
from bauh.view.qt.info import InfoDialog
from bauh.view.qt.root import RootDialog
from bauh.view.qt.screenshots import ScreenshotsDialog
from bauh.view.qt.settings import SettingsWindow
from bauh.view.qt.thread import UpgradeSelected, RefreshApps, UninstallPackage, DowngradePackage, ShowPackageInfo, \
ShowPackageHistory, SearchPackages, InstallPackage, AnimateProgress, NotifyPackagesReady, FindSuggestions, \
ListWarnings, \
AsyncAction, LaunchPackage, ApplyFilters, CustomSoftwareAction, ShowScreenshots, CustomAction, \
NotifyInstalledLoaded, \
IgnorePackageUpdates, SaveTheme, StartAsyncAction
from bauh.view.qt.view_model import PackageView, PackageViewStatus
from bauh.view.util import util, resource
from bauh.view.util.translation import I18n
DARK_ORANGE = '#FF4500'
# action ids
ACTION_APPLY_FILTERS = 1
ACTION_SEARCH = 2
ACTION_INSTALL = 3
ACTION_UNINSTALL = 4
ACTION_INFO = 5
ACTION_HISTORY = 6
ACTION_DOWNGRADE = 7
ACTION_UPGRADE = 8
ACTION_LAUNCH = 9
ACTION_CUSTOM_ACTION = 10
ACTION_SCREENSHOTS = 11
ACTION_IGNORE_UPDATES = 12
# components ids
SEARCH_BAR = 1
BT_INSTALLED = 2
BT_REFRESH = 3
BT_SUGGESTIONS = 4
BT_UPGRADE = 5
CHECK_UPDATES = 6
CHECK_APPS = 7
COMBO_TYPES = 8
COMBO_CATEGORIES = 9
INP_NAME = 10
CHECK_DETAILS = 11
BT_SETTINGS = 12
BT_CUSTOM_ACTIONS = 13
BT_ABOUT = 14
BT_THEMES = 15
# component groups ids
GROUP_FILTERS = 1
GROUP_VIEW_INSTALLED = 2
GROUP_VIEW_SEARCH = 3
GROUP_UPPER_BAR = 4
GROUP_LOWER_BTS = 5
class ManageWindow(QWidget):
signal_user_res = pyqtSignal(bool)
signal_root_password = pyqtSignal(bool, str)
signal_table_update = pyqtSignal()
signal_stop_notifying = pyqtSignal()
def __init__(self, i18n: I18n, icon_cache: MemoryCache, manager: SoftwareManager, screen_size, config: dict,
context: ApplicationContext, http_client: HttpClient, logger: logging.Logger, icon: QIcon):
super(ManageWindow, self).__init__()
self.setObjectName('manage_window')
self.comp_manager = QtComponentsManager()
self.i18n = i18n
self.logger = logger
self.manager = manager
self.working = False # restrict the number of threaded actions
self.installed_loaded = False # used to control the state when the interface is set to not load the apps on startup
        self.pkgs = []  # packages currently loaded in the table
self.pkgs_available = [] # all packages loaded in memory
self.pkgs_installed = [] # cached installed packages
self.display_limit = config['ui']['table']['max_displayed']
self.icon_cache = icon_cache
self.screen_size = screen_size
self.config = config
self.context = context
self.http_client = http_client
self.icon_app = icon
self.setWindowIcon(self.icon_app)
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.toolbar_status = QToolBar()
self.toolbar_status.setObjectName('toolbar_status')
self.toolbar_status.addWidget(new_spacer())
self.label_status = QLabel()
self.label_status.setObjectName('label_status')
self.label_status.setText('')
self.toolbar_status.addWidget(self.label_status)
self.search_bar = QSearchBar(search_callback=self.search)
self.search_bar.set_placeholder(i18n['window_manage.search_bar.placeholder'] + "...")
self.search_bar.set_tooltip(i18n['window_manage.search_bar.tooltip'])
self.search_bar.set_button_tooltip(i18n['window_manage.search_bar.button_tooltip'])
self.comp_manager.register_component(SEARCH_BAR, self.search_bar, self.toolbar_status.addWidget(self.search_bar))
self.toolbar_status.addWidget(new_spacer())
self.layout.addWidget(self.toolbar_status)
self.toolbar_filters = QWidget()
self.toolbar_filters.setObjectName('table_filters')
self.toolbar_filters.setLayout(QHBoxLayout())
self.toolbar_filters.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.toolbar_filters.setContentsMargins(0, 0, 0, 0)
self.check_updates = QCheckBox()
self.check_updates.setObjectName('check_updates')
self.check_updates.setCursor(QCursor(Qt.PointingHandCursor))
self.check_updates.setText(self.i18n['updates'].capitalize())
self.check_updates.stateChanged.connect(self._handle_updates_filter)
self.check_updates.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.check_updates)
self.comp_manager.register_component(CHECK_UPDATES, self.check_updates)
self.check_apps = QCheckBox()
self.check_apps.setObjectName('check_apps')
self.check_apps.setCursor(QCursor(Qt.PointingHandCursor))
self.check_apps.setText(self.i18n['manage_window.checkbox.only_apps'])
self.check_apps.setChecked(True)
self.check_apps.stateChanged.connect(self._handle_filter_only_apps)
self.check_apps.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.check_apps)
self.comp_manager.register_component(CHECK_APPS, self.check_apps)
self.any_type_filter = 'any'
self.cache_type_filter_icons = {}
self.combo_filter_type = QComboBox()
self.combo_filter_type.setObjectName('combo_types')
self.combo_filter_type.setCursor(QCursor(Qt.PointingHandCursor))
self.combo_filter_type.setView(QListView())
self.combo_filter_type.view().setCursor(QCursor(Qt.PointingHandCursor))
self.combo_filter_type.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.combo_filter_type.setEditable(True)
self.combo_filter_type.lineEdit().setReadOnly(True)
self.combo_filter_type.lineEdit().setAlignment(Qt.AlignCenter)
self.combo_filter_type.activated.connect(self._handle_type_filter)
self.combo_filter_type.addItem('--- {} ---'.format(self.i18n['type'].capitalize()), self.any_type_filter)
self.combo_filter_type.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.combo_filter_type)
self.comp_manager.register_component(COMBO_TYPES, self.combo_filter_type)
self.any_category_filter = 'any'
self.combo_categories = QComboBox()
self.combo_categories.setObjectName('combo_categories')
self.combo_categories.setCursor(QCursor(Qt.PointingHandCursor))
self.combo_categories.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.combo_categories.view().setCursor(QCursor(Qt.PointingHandCursor))
self.combo_categories.setEditable(True)
self.combo_categories.lineEdit().setReadOnly(True)
self.combo_categories.lineEdit().setAlignment(Qt.AlignCenter)
self.combo_categories.activated.connect(self._handle_category_filter)
self.combo_categories.sizePolicy().setRetainSizeWhenHidden(True)
self.combo_categories.addItem('--- {} ---'.format(self.i18n['category'].capitalize()), self.any_category_filter)
self.toolbar_filters.layout().addWidget(self.combo_categories)
self.comp_manager.register_component(COMBO_CATEGORIES, self.combo_categories)
self.input_name = QSearchBar(search_callback=self.begin_apply_filters)
self.input_name.palette().swap(self.combo_categories.palette())
self.input_name.setObjectName('name_filter')
self.input_name.set_placeholder(self.i18n['manage_window.name_filter.placeholder'] + '...')
self.input_name.set_tooltip(self.i18n['manage_window.name_filter.tooltip'])
self.input_name.set_button_tooltip(self.i18n['manage_window.name_filter.button_tooltip'])
self.input_name.sizePolicy().setRetainSizeWhenHidden(True)
self.toolbar_filters.layout().addWidget(self.input_name)
self.comp_manager.register_component(INP_NAME, self.input_name)
self.toolbar_filters.layout().addWidget(new_spacer())
toolbar_bts = []
bt_inst = QPushButton()
bt_inst.setObjectName('bt_installed')
bt_inst.setProperty('root', 'true')
bt_inst.setCursor(QCursor(Qt.PointingHandCursor))
bt_inst.setToolTip(self.i18n['manage_window.bt.installed.tooltip'])
bt_inst.setText(self.i18n['manage_window.bt.installed.text'].capitalize())
bt_inst.clicked.connect(self._begin_loading_installed)
bt_inst.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(bt_inst)
self.toolbar_filters.layout().addWidget(bt_inst)
self.comp_manager.register_component(BT_INSTALLED, bt_inst)
bt_ref = QPushButton()
bt_ref.setObjectName('bt_refresh')
bt_ref.setProperty('root', 'true')
bt_ref.setCursor(QCursor(Qt.PointingHandCursor))
bt_ref.setToolTip(i18n['manage_window.bt.refresh.tooltip'])
bt_ref.setText(self.i18n['manage_window.bt.refresh.text'])
bt_ref.clicked.connect(self.begin_refresh_packages)
bt_ref.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(bt_ref)
self.toolbar_filters.layout().addWidget(bt_ref)
self.comp_manager.register_component(BT_REFRESH, bt_ref)
self.bt_upgrade = QPushButton()
self.bt_upgrade.setProperty('root', 'true')
self.bt_upgrade.setObjectName('bt_upgrade')
self.bt_upgrade.setCursor(QCursor(Qt.PointingHandCursor))
self.bt_upgrade.setToolTip(i18n['manage_window.bt.upgrade.tooltip'])
self.bt_upgrade.setText(i18n['manage_window.bt.upgrade.text'])
self.bt_upgrade.clicked.connect(self.upgrade_selected)
self.bt_upgrade.sizePolicy().setRetainSizeWhenHidden(True)
toolbar_bts.append(self.bt_upgrade)
self.toolbar_filters.layout().addWidget(self.bt_upgrade)
self.comp_manager.register_component(BT_UPGRADE, self.bt_upgrade)
# setting all buttons to the same size:
bt_biggest_size = 0
for bt in toolbar_bts:
bt_width = bt.sizeHint().width()
if bt_width > bt_biggest_size:
bt_biggest_size = bt_width
for bt in toolbar_bts:
bt_width = bt.sizeHint().width()
if bt_biggest_size > bt_width:
bt.setFixedWidth(bt_biggest_size)
self.layout.addWidget(self.toolbar_filters)
self.table_container = QWidget()
self.table_container.setObjectName('table_container')
self.table_container.setContentsMargins(0, 0, 0, 0)
self.table_container.setLayout(QVBoxLayout())
self.table_container.layout().setContentsMargins(0, 0, 0, 0)
self.table_apps = PackagesTable(self, self.icon_cache, download_icons=bool(self.config['download']['icons']))
self.table_apps.change_headers_policy()
self.table_container.layout().addWidget(self.table_apps)
self.layout.addWidget(self.table_container)
self.toolbar_console = QWidget()
self.toolbar_console.setObjectName('console_toolbar')
self.toolbar_console.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.toolbar_console.setLayout(QHBoxLayout())
self.toolbar_console.setContentsMargins(0, 0, 0, 0)
self.check_details = QCheckBox()
self.check_details.setObjectName('check_details')
self.check_details.setCursor(QCursor(Qt.PointingHandCursor))
self.check_details.setText(self.i18n['manage_window.checkbox.show_details'])
self.check_details.stateChanged.connect(self._handle_console)
self.toolbar_console.layout().addWidget(self.check_details)
self.comp_manager.register_component(CHECK_DETAILS, self.check_details)
self.toolbar_console.layout().addWidget(new_spacer())
self.label_displayed = QLabel()
self.label_displayed.setObjectName('apps_displayed')
self.label_displayed.setCursor(QCursor(Qt.WhatsThisCursor))
self.label_displayed.setToolTip(self.i18n['manage_window.label.apps_displayed.tip'])
self.toolbar_console.layout().addWidget(self.label_displayed)
self.label_displayed.hide()
self.layout.addWidget(self.toolbar_console)
self.textarea_details = QPlainTextEdit(self)
self.textarea_details.setObjectName('textarea_details')
self.textarea_details.setProperty('console', 'true')
self.textarea_details.resize(self.table_apps.size())
self.layout.addWidget(self.textarea_details)
self.textarea_details.setVisible(False)
self.textarea_details.setReadOnly(True)
self.toolbar_substatus = QToolBar()
self.toolbar_substatus.setObjectName('toolbar_substatus')
self.toolbar_substatus.addWidget(new_spacer())
self.label_substatus = QLabel()
self.label_substatus.setObjectName('label_substatus')
self.label_substatus.setCursor(QCursor(Qt.WaitCursor))
self.toolbar_substatus.addWidget(self.label_substatus)
self.toolbar_substatus.addWidget(new_spacer())
self.layout.addWidget(self.toolbar_substatus)
self._change_label_substatus('')
self.thread_update = self._bind_async_action(UpgradeSelected(self.manager, context.internet_checker, self.i18n), finished_call=self._finish_upgrade_selected)
self.thread_refresh = self._bind_async_action(RefreshApps(self.manager), finished_call=self._finish_refresh_packages, only_finished=True)
self.thread_uninstall = self._bind_async_action(UninstallPackage(self.manager, self.icon_cache, self.i18n), finished_call=self._finish_uninstall)
self.thread_show_info = self._bind_async_action(ShowPackageInfo(self.manager), finished_call=self._finish_show_info)
self.thread_show_history = self._bind_async_action(ShowPackageHistory(self.manager, self.i18n), finished_call=self._finish_show_history)
self.thread_search = self._bind_async_action(SearchPackages(self.manager), finished_call=self._finish_search, only_finished=True)
self.thread_downgrade = self._bind_async_action(DowngradePackage(self.manager, self.i18n), finished_call=self._finish_downgrade)
self.thread_suggestions = self._bind_async_action(FindSuggestions(man=self.manager), finished_call=self._finish_load_suggestions, only_finished=True)
self.thread_launch = self._bind_async_action(LaunchPackage(self.manager), finished_call=self._finish_launch_package, only_finished=False)
self.thread_custom_action = self._bind_async_action(CustomAction(manager=self.manager, i18n=self.i18n), finished_call=self._finish_execute_custom_action)
self.thread_screenshots = self._bind_async_action(ShowScreenshots(self.manager), finished_call=self._finish_show_screenshots)
self.thread_apply_filters = ApplyFilters()
self.thread_apply_filters.signal_finished.connect(self._finish_apply_filters)
self.thread_apply_filters.signal_table.connect(self._update_table_and_upgrades)
self.signal_table_update.connect(self.thread_apply_filters.stop_waiting)
self.thread_install = InstallPackage(manager=self.manager, icon_cache=self.icon_cache, i18n=self.i18n)
self._bind_async_action(self.thread_install, finished_call=self._finish_install)
self.thread_animate_progress = AnimateProgress()
self.thread_animate_progress.signal_change.connect(self._update_progress)
self.thread_notify_pkgs_ready = NotifyPackagesReady()
self.thread_notify_pkgs_ready.signal_changed.connect(self._update_package_data)
self.thread_notify_pkgs_ready.signal_finished.connect(self._update_state_when_pkgs_ready)
self.signal_stop_notifying.connect(self.thread_notify_pkgs_ready.stop_working)
self.thread_ignore_updates = IgnorePackageUpdates(manager=self.manager)
self._bind_async_action(self.thread_ignore_updates, finished_call=self.finish_ignore_updates)
self.thread_reload = StartAsyncAction(delay_in_milis=5)
self.thread_reload.signal_start.connect(self._reload)
self.container_bottom = QWidget()
self.container_bottom.setObjectName('container_bottom')
self.container_bottom.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.container_bottom.setLayout(QHBoxLayout())
self.container_bottom.layout().setContentsMargins(0, 0, 0, 0)
self.container_bottom.layout().addWidget(new_spacer())
if config['suggestions']['enabled']:
bt_sugs = IconButton(action=lambda: self._begin_load_suggestions(filter_installed=True),
i18n=i18n,
tooltip=self.i18n['manage_window.bt.suggestions.tooltip'])
bt_sugs.setObjectName('suggestions')
self.container_bottom.layout().addWidget(bt_sugs)
self.comp_manager.register_component(BT_SUGGESTIONS, bt_sugs)
bt_themes = IconButton(self.show_themes,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_themes.tip'])
bt_themes.setObjectName('themes')
self.container_bottom.layout().addWidget(bt_themes)
self.comp_manager.register_component(BT_THEMES, bt_themes)
self.custom_actions = [a for a in manager.gen_custom_actions()]
bt_custom_actions = IconButton(action=self.show_custom_actions,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_custom_actions.tip'])
bt_custom_actions.setObjectName('custom_actions')
bt_custom_actions.setVisible(bool(self.custom_actions))
self.container_bottom.layout().addWidget(bt_custom_actions)
self.comp_manager.register_component(BT_CUSTOM_ACTIONS, bt_custom_actions)
bt_settings = IconButton(action=self.show_settings,
i18n=self.i18n,
tooltip=self.i18n['manage_window.bt_settings.tooltip'])
bt_settings.setObjectName('settings')
self.container_bottom.layout().addWidget(bt_settings)
self.comp_manager.register_component(BT_SETTINGS, bt_settings)
bt_about = IconButton(action=self._show_about,
i18n=self.i18n,
tooltip=self.i18n['manage_window.settings.about'])
bt_about.setObjectName('about')
self.container_bottom.layout().addWidget(bt_about)
self.comp_manager.register_component(BT_ABOUT, bt_about)
self.layout.addWidget(self.container_bottom)
self.container_progress = QCustomToolbar(spacing=0, policy_height=QSizePolicy.Fixed)
self.container_progress.setObjectName('container_progress')
self.container_progress.add_space()
self.progress_bar = QProgressBar()
self.progress_bar.setObjectName('progress_manage')
self.progress_bar.setCursor(QCursor(Qt.WaitCursor))
self.progress_bar.setTextVisible(False)
self.container_progress.add_widget(self.progress_bar)
self.container_progress.add_space()
self.layout.addWidget(self.container_progress)
qt_utils.centralize(self)
self.filter_only_apps = True
self.type_filter = self.any_type_filter
self.category_filter = self.any_category_filter
self.filter_updates = False
self._maximized = False
self.progress_controll_enabled = True
self.recent_uninstall = False
self.types_changed = False
self.dialog_about = None
self.load_suggestions = bool(config['suggestions']['enabled'])
self.suggestions_requested = False
self.first_refresh = True
self.thread_warnings = ListWarnings(man=manager, i18n=i18n)
self.thread_warnings.signal_warnings.connect(self._show_warnings)
self.settings_window = None
self.search_performed = False
self.thread_save_theme = SaveTheme(theme_key='')
self.thread_load_installed = NotifyInstalledLoaded()
self.thread_load_installed.signal_loaded.connect(self._finish_loading_installed)
self.setMinimumHeight(int(screen_size.height() * 0.5))
self.setMinimumWidth(int(screen_size.width() * 0.6))
self._register_groups()
def _register_groups(self):
filters = (CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME)
self.comp_manager.register_group(GROUP_FILTERS, False, *filters)
self.comp_manager.register_group(GROUP_VIEW_SEARCH, False,
COMBO_CATEGORIES, COMBO_TYPES, INP_NAME, # filters
BT_INSTALLED, BT_SUGGESTIONS) # buttons
self.comp_manager.register_group(GROUP_VIEW_INSTALLED, False,
BT_REFRESH, BT_UPGRADE, # buttons
*filters)
self.comp_manager.register_group(GROUP_UPPER_BAR, False,
CHECK_APPS, CHECK_UPDATES, COMBO_CATEGORIES, COMBO_TYPES, INP_NAME,
BT_INSTALLED, BT_SUGGESTIONS, BT_REFRESH, BT_UPGRADE)
self.comp_manager.register_group(GROUP_LOWER_BTS, False, BT_SUGGESTIONS, BT_THEMES, BT_CUSTOM_ACTIONS, BT_SETTINGS, BT_ABOUT)
def update_custom_actions(self):
self.custom_actions = [a for a in self.manager.gen_custom_actions()]
def _update_process_progress(self, val: int):
if self.progress_controll_enabled:
self.thread_animate_progress.set_progress(val)
def _change_status(self, status: str = None):
if status:
self.label_status.setText(status + '...')
self.label_status.setCursor(QCursor(Qt.WaitCursor))
else:
self.label_status.setText('')
self.label_status.unsetCursor()
def _set_table_enabled(self, enabled: bool):
self.table_apps.setEnabled(enabled)
if enabled:
self.table_container.unsetCursor()
else:
self.table_container.setCursor(QCursor(Qt.WaitCursor))
def begin_apply_filters(self):
self.stop_notifying_package_states()
self._begin_action(action_label=self.i18n['manage_window.status.filtering'],
action_id=ACTION_APPLY_FILTERS)
self.comp_manager.disable_visible_from_groups(GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self.comp_manager.set_component_read_only(INP_NAME, True)
self.thread_apply_filters.filters = self._gen_filters()
self.thread_apply_filters.pkgs = self.pkgs_available
self.thread_apply_filters.start()
self.setFocus(Qt.NoFocusReason)
def _finish_apply_filters(self):
self._finish_action(ACTION_APPLY_FILTERS)
self.update_bt_upgrade()
def stop_notifying_package_states(self):
if self.thread_notify_pkgs_ready.isRunning():
self.signal_stop_notifying.emit()
self.thread_notify_pkgs_ready.wait(1000)
def _update_table_and_upgrades(self, pkgs_info: dict):
self._update_table(pkgs_info=pkgs_info, signal=True)
if self.pkgs:
self._update_state_when_pkgs_ready()
self.stop_notifying_package_states()
self.thread_notify_pkgs_ready.pkgs = self.pkgs
self.thread_notify_pkgs_ready.work = True
self.thread_notify_pkgs_ready.start()
def _bind_async_action(self, action: AsyncAction, finished_call, only_finished: bool = False) -> AsyncAction:
action.signal_finished.connect(finished_call)
if not only_finished:
action.signal_confirmation.connect(self._ask_confirmation)
action.signal_output.connect(self._update_action_output)
action.signal_message.connect(self._show_message)
action.signal_status.connect(self._change_label_status)
action.signal_substatus.connect(self._change_label_substatus)
action.signal_progress.connect(self._update_process_progress)
action.signal_progress_control.connect(self.set_progress_controll)
action.signal_root_password.connect(self._pause_and_ask_root_password)
self.signal_user_res.connect(action.confirm)
self.signal_root_password.connect(action.set_root_password)
return action
def _ask_confirmation(self, msg: dict):
self.thread_animate_progress.pause()
extra_widgets = [to_widget(comp=c, i18n=self.i18n) for c in msg['components']] if msg.get('components') else None
diag = ConfirmationDialog(title=msg['title'],
body=msg['body'],
i18n=self.i18n,
widgets=extra_widgets,
confirmation_label=msg['confirmation_label'],
deny_label=msg['deny_label'],
deny_button=msg['deny_button'],
window_cancel=msg['window_cancel'],
confirmation_button=msg.get('confirmation_button', True))
diag.ask()
res = diag.confirmed
self.thread_animate_progress.animate()
self.signal_user_res.emit(res)
def _pause_and_ask_root_password(self):
self.thread_animate_progress.pause()
valid, password = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
self.thread_animate_progress.animate()
self.signal_root_password.emit(valid, password)
def _show_message(self, msg: dict):
self.thread_animate_progress.pause()
dialog.show_message(title=msg['title'], body=msg['body'], type_=msg['type'])
self.thread_animate_progress.animate()
def _show_warnings(self, warnings: List[str]):
if warnings:
dialog.show_message(title=self.i18n['warning'].capitalize(), body='<p>{}</p>'.format('<br/><br/>'.join(warnings)), type_=MessageType.WARNING)
def show(self):
super(ManageWindow, self).show()
if not self.thread_warnings.isFinished():
self.thread_warnings.start()
qt_utils.centralize(self)
def verify_warnings(self):
self.thread_warnings.start()
def _begin_loading_installed(self):
if self.installed_loaded:
self.search_bar.clear()
self.input_name.set_text('')
self._begin_action(self.i18n['manage_window.status.installed'])
self._handle_console_option(False)
self.comp_manager.set_components_visible(False)
self.suggestions_requested = False
self.search_performed = False
self.thread_load_installed.start()
else:
self.load_suggestions = False
self.begin_refresh_packages()
def _finish_loading_installed(self):
self._finish_action()
self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True)
self.update_pkgs(new_pkgs=None, as_installed=True)
self._hide_filters_no_packages()
self._update_bts_installed_and_suggestions()
self._set_lower_buttons_visible(True)
self._reorganize()
def _update_bts_installed_and_suggestions(self):
available_types = len(self.manager.get_managed_types())
self.comp_manager.set_component_visible(BT_INSTALLED, available_types > 0 and any([self.suggestions_requested, self.search_performed]))
self.comp_manager.set_component_visible(BT_SUGGESTIONS, available_types > 0)
def _hide_filters_no_packages(self):
if not self.pkgs:
self.comp_manager.set_group_visible(GROUP_FILTERS, False)
def _show_about(self):
if self.dialog_about is None:
self.dialog_about = AboutDialog(self.config)
self.dialog_about.show()
def _handle_updates_filter(self, status: int):
self.filter_updates = status == 2
self.begin_apply_filters()
def _handle_filter_only_apps(self, status: int):
self.filter_only_apps = status == 2
self.begin_apply_filters()
def _handle_type_filter(self, idx: int):
self.type_filter = self.combo_filter_type.itemData(idx)
self.combo_filter_type.adjustSize()
self.begin_apply_filters()
def _handle_category_filter(self, idx: int):
self.category_filter = self.combo_categories.itemData(idx)
self.begin_apply_filters()
def _update_state_when_pkgs_ready(self):
if self.progress_bar.isVisible():
return
self._reload_categories()
self._reorganize()
def _update_package_data(self, idx: int):
if self.table_apps.isEnabled():
pkg = self.pkgs[idx]
pkg.status = PackageViewStatus.READY
self.table_apps.update_package(pkg)
def _reload_categories(self):
categories = set()
for p in self.pkgs_available:
if p.model.categories:
for c in p.model.categories:
if c:
cat = c.strip().lower()
if cat:
categories.add(cat)
if categories:
self._update_categories(categories, keep_selected=True)
def changeEvent(self, e: QEvent):
if isinstance(e, QWindowStateChangeEvent):
self._maximized = self.isMaximized()
self.table_apps.change_headers_policy(maximized=self._maximized)
def _handle_console(self, checked: bool):
if checked:
self.textarea_details.show()
else:
self.textarea_details.hide()
def _handle_console_option(self, enable: bool):
if enable:
self.textarea_details.clear()
self.comp_manager.set_component_visible(CHECK_DETAILS, enable)
self.check_details.setChecked(False)
self.textarea_details.hide()
def begin_refresh_packages(self, pkg_types: Optional[Set[Type[SoftwarePackage]]] = None):
self.search_bar.clear()
self._begin_action(self.i18n['manage_window.status.refreshing'])
self.comp_manager.set_components_visible(False)
self._handle_console_option(False)
self.suggestions_requested = False
self.search_performed = False
self.thread_refresh.pkg_types = pkg_types
self.thread_refresh.start()
def _finish_refresh_packages(self, res: dict, as_installed: bool = True):
self._finish_action()
self._set_lower_buttons_visible(True)
self.comp_manager.set_component_visible(SEARCH_BAR, True)
if self.search_performed or self.suggestions_requested:
self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True)
else:
self.comp_manager.set_group_visible(GROUP_VIEW_INSTALLED, True)
if self.update_pkgs(res['installed'], as_installed=as_installed, types=res['types']):
self._hide_filters_no_packages()
self._update_bts_installed_and_suggestions()
self._reorganize()
self.load_suggestions = False
self.types_changed = False
def load_without_packages(self):
self.load_suggestions = False
self._handle_console_option(False)
self._finish_refresh_packages({'installed': None, 'types': None}, as_installed=False)
def _begin_load_suggestions(self, filter_installed: bool):
self.search_bar.clear()
self._begin_action(self.i18n['manage_window.status.suggestions'])
self._handle_console_option(False)
self.comp_manager.set_components_visible(False)
self.suggestions_requested = True
self.thread_suggestions.filter_installed = filter_installed
self.thread_suggestions.start()
def _finish_load_suggestions(self, res: dict):
self._finish_search(res)
def begin_uninstall(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.UNINSTALL, pkg)
if not proceed:
return
self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.uninstalling'], pkg.model.name),
action_id=ACTION_UNINSTALL)
self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self._handle_console_option(True)
self.thread_uninstall.pkg = pkg
self.thread_uninstall.root_pwd = pwd
self.thread_uninstall.start()
def _finish_uninstall(self, res: dict):
self._finish_action(action_id=ACTION_UNINSTALL)
if res['success']:
src_pkg = res['pkg']
if self._can_notify_user():
util.notify_user('{} ({}) {}'.format(src_pkg.model.name, src_pkg.model.get_type(), self.i18n['uninstalled']))
if res['removed']:
for list_idx, pkg_list in enumerate((self.pkgs_available, self.pkgs, self.pkgs_installed)):
if pkg_list:
removed_idxs = []
for pkgv_idx, pkgv in enumerate(pkg_list):
if len(removed_idxs) == len(res['removed']):
break
for model in res['removed']:
if pkgv.model == model:
if list_idx == 0: # updates the model
pkgv.update_model(model)
if not self.search_performed or list_idx == 2: # always from the installed packages
removed_idxs.append(pkgv_idx)
if self.search_performed and list_idx == 1: # only for displayed
self.table_apps.update_package(pkgv, change_update_col=True)
break # as the model has been found, stops the loop
if removed_idxs:
# updating the list
removed_idxs.sort()
for decrement, pkg_idx in enumerate(removed_idxs):
del pkg_list[pkg_idx - decrement]
                            if list_idx == 1:  # updates the rows if the current list represents the displayed packages
for decrement, idx in enumerate(removed_idxs):
self.table_apps.removeRow(idx - decrement)
self._update_table_indexes()
self.update_bt_upgrade()
self.update_custom_actions()
self._show_console_checkbox_if_output()
notify_tray()
else:
self._show_console_errors()
if self._can_notify_user():
util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.uninstall.failed']))
def _update_table_indexes(self):
if self.pkgs:
for new_idx, pkgv in enumerate(self.pkgs): # updating the package indexes
pkgv.table_index = new_idx
def begin_launch_package(self, pkg: PackageView):
self._begin_action(action_label=self.i18n['manage_window.status.running_app'].format(pkg.model.name),
action_id=ACTION_LAUNCH)
self.comp_manager.disable_visible()
self.thread_launch.pkg = pkg
self.thread_launch.start()
def _finish_launch_package(self, success: bool):
self._finish_action(action_id=ACTION_LAUNCH)
def _can_notify_user(self):
return bool(self.config['system']['notifications']) and (self.isHidden() or self.isMinimized())
def _change_label_status(self, status: str):
self.label_status.setText(status)
def _change_label_substatus(self, substatus: str):
self.label_substatus.setText('<p>{}</p>'.format(substatus))
if not substatus:
self.toolbar_substatus.hide()
elif not self.toolbar_substatus.isVisible() and self.progress_bar.isVisible():
self.toolbar_substatus.show()
def _reorganize(self):
if not self._maximized:
self.table_apps.change_headers_policy(QHeaderView.Stretch)
self.table_apps.change_headers_policy()
self._resize(accept_lower_width=len(self.pkgs) > 0)
def _update_table(self, pkgs_info: dict, signal: bool = False):
self.pkgs = pkgs_info['pkgs_displayed']
if pkgs_info['not_installed'] == 0:
update_check = sum_updates_displayed(pkgs_info) > 0
else:
update_check = False
self.table_apps.update_packages(self.pkgs, update_check_enabled=update_check)
if not self._maximized:
self.label_displayed.show()
self.table_apps.change_headers_policy(QHeaderView.Stretch)
self.table_apps.change_headers_policy()
self._resize(accept_lower_width=len(self.pkgs) > 0)
if len(self.pkgs) == 0 and len(self.pkgs_available) == 0:
self.label_displayed.setText('')
else:
self.label_displayed.setText('{} / {}'.format(len(self.pkgs), len(self.pkgs_available)))
else:
self.label_displayed.hide()
if signal:
self.signal_table_update.emit()
def update_bt_upgrade(self, pkgs_info: dict = None):
show_bt_upgrade = False
if not any([self.suggestions_requested, self.search_performed]) and (not pkgs_info or pkgs_info['not_installed'] == 0):
for pkg in (pkgs_info['pkgs_displayed'] if pkgs_info else self.pkgs):
if not pkg.model.is_update_ignored() and pkg.update_checked:
show_bt_upgrade = True
break
self.comp_manager.set_component_visible(BT_UPGRADE, show_bt_upgrade)
if show_bt_upgrade:
self._reorganize()
def change_update_state(self, pkgs_info: dict, trigger_filters: bool = True, keep_selected: bool = False):
self.update_bt_upgrade(pkgs_info)
if pkgs_info['updates'] > 0:
if pkgs_info['not_installed'] == 0:
if not self.comp_manager.is_visible(CHECK_UPDATES):
self.comp_manager.set_component_visible(CHECK_UPDATES, True)
if not self.filter_updates and not keep_selected:
self._change_checkbox(self.check_updates, True, 'filter_updates', trigger_filters)
if pkgs_info['napp_updates'] > 0 and self.filter_only_apps and not keep_selected:
self._change_checkbox(self.check_apps, False, 'filter_only_apps', trigger_filters)
else:
if not keep_selected:
self._change_checkbox(self.check_updates, False, 'filter_updates', trigger_filters)
self.comp_manager.set_component_visible(CHECK_UPDATES, False)
def _change_checkbox(self, checkbox: QCheckBox, checked: bool, attr: str = None, trigger: bool = True):
if not trigger:
checkbox.blockSignals(True)
checkbox.setChecked(checked)
if not trigger:
setattr(self, attr, checked)
checkbox.blockSignals(False)
def _gen_filters(self, ignore_updates: bool = False) -> dict:
return {
'only_apps': False if self.search_performed else self.filter_only_apps,
'type': self.type_filter,
'category': self.category_filter,
'updates': False if ignore_updates else self.filter_updates,
'name': self.input_name.text().lower() if self.input_name.text() else None,
'display_limit': None if self.filter_updates else self.display_limit
}
def update_pkgs(self, new_pkgs: Optional[List[SoftwarePackage]], as_installed: bool, types: Optional[Set[type]] = None, ignore_updates: bool = False, keep_filters: bool = False) -> bool:
self.input_name.set_text('')
pkgs_info = commons.new_pkgs_info()
filters = self._gen_filters(ignore_updates=ignore_updates)
if new_pkgs is not None:
old_installed = None
if as_installed:
old_installed = self.pkgs_installed
self.pkgs_installed = []
for pkg in new_pkgs:
app_model = PackageView(model=pkg, i18n=self.i18n)
commons.update_info(app_model, pkgs_info)
commons.apply_filters(app_model, filters, pkgs_info)
if old_installed and types:
for pkgv in old_installed:
if pkgv.model.__class__ not in types:
commons.update_info(pkgv, pkgs_info)
commons.apply_filters(pkgv, filters, pkgs_info)
else: # use installed
for pkgv in self.pkgs_installed:
commons.update_info(pkgv, pkgs_info)
commons.apply_filters(pkgv, filters, pkgs_info)
if pkgs_info['apps_count'] == 0:
if self.load_suggestions or self.types_changed:
if as_installed:
self.pkgs_installed = pkgs_info['pkgs']
self._begin_load_suggestions(filter_installed=False)
self.load_suggestions = False
return False
else:
if not keep_filters:
self._change_checkbox(self.check_apps, False, 'filter_only_apps', trigger=False)
self.check_apps.setCheckable(False)
else:
if not keep_filters:
self.check_apps.setCheckable(True)
self._change_checkbox(self.check_apps, True, 'filter_only_apps', trigger=False)
self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._update_categories(pkgs_info['categories'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._update_type_filters(pkgs_info['available_types'], keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self._apply_filters(pkgs_info, ignore_updates=ignore_updates)
self.change_update_state(pkgs_info=pkgs_info, trigger_filters=False, keep_selected=keep_filters and bool(pkgs_info['pkgs_displayed']))
self.pkgs_available = pkgs_info['pkgs']
if as_installed:
self.pkgs_installed = pkgs_info['pkgs']
self.pkgs = pkgs_info['pkgs_displayed']
self._update_table(pkgs_info=pkgs_info)
if new_pkgs:
self.stop_notifying_package_states()
self.thread_notify_pkgs_ready.work = True
self.thread_notify_pkgs_ready.pkgs = self.pkgs
self.thread_notify_pkgs_ready.start()
self._resize(accept_lower_width=bool(self.pkgs_installed))
if self.first_refresh:
qt_utils.centralize(self)
self.first_refresh = False
if not self.installed_loaded and as_installed:
self.installed_loaded = True
return True
def _apply_filters(self, pkgs_info: dict, ignore_updates: bool):
pkgs_info['pkgs_displayed'] = []
filters = self._gen_filters(ignore_updates=ignore_updates)
for pkgv in pkgs_info['pkgs']:
commons.apply_filters(pkgv, filters, pkgs_info)
def _clean_combo_types(self):
if self.combo_filter_type.count() > 1:
for _ in range(self.combo_filter_type.count() - 1):
self.combo_filter_type.removeItem(1)
def _update_type_filters(self, available_types: dict = None, keep_selected: bool = False):
if available_types is None:
self.comp_manager.set_component_visible(COMBO_TYPES, self.combo_filter_type.count() > 2)
else:
keeping_selected = keep_selected and available_types and self.type_filter in available_types
if not keeping_selected:
self.type_filter = self.any_type_filter
if not available_types:
self._clean_combo_types()
if available_types:
self._clean_combo_types()
sel_type = -1
for idx, item in enumerate(available_types.items()):
app_type, icon_path, label = item[0], item[1]['icon'], item[1]['label']
icon = self.cache_type_filter_icons.get(app_type)
if not icon:
icon = QIcon(icon_path)
self.cache_type_filter_icons[app_type] = icon
self.combo_filter_type.addItem(icon, label, app_type)
if keeping_selected and app_type == self.type_filter:
sel_type = idx + 1
self.combo_filter_type.blockSignals(True)
self.combo_filter_type.setCurrentIndex(sel_type if sel_type > -1 else 0)
self.combo_filter_type.blockSignals(False)
self.comp_manager.set_component_visible(COMBO_TYPES, len(available_types) > 1)
else:
self.comp_manager.set_component_visible(COMBO_TYPES, False)
def _update_categories(self, categories: Set[str] = None, keep_selected: bool = False):
if categories is None:
self.comp_manager.set_component_visible(COMBO_CATEGORIES, self.combo_categories.count() > 1)
else:
keeping_selected = keep_selected and categories and self.category_filter in categories
if not keeping_selected:
self.category_filter = self.any_category_filter
if categories:
if self.combo_categories.count() > 1:
for _ in range(self.combo_categories.count() - 1):
self.combo_categories.removeItem(1)
selected_cat = -1
cat_list = list(categories)
cat_list.sort()
for idx, c in enumerate(cat_list):
self.__add_category(c)
if keeping_selected and c == self.category_filter:
selected_cat = idx + 1
self.combo_categories.blockSignals(True)
self.combo_categories.setCurrentIndex(selected_cat if selected_cat > -1 else 0)
self.combo_categories.blockSignals(False)
self.comp_manager.set_component_visible(COMBO_CATEGORIES, True)
else:
self.comp_manager.set_component_visible(COMBO_CATEGORIES, False)
def __add_category(self, category: str):
i18n_cat = self.i18n.get('category.{}'.format(category), self.i18n.get(category, category))
self.combo_categories.addItem(i18n_cat.capitalize(), category)
def _get_current_categories(self) -> Set[str]:
if self.combo_categories.count() > 1:
return {self.combo_categories.itemData(idx) for idx in range(self.combo_categories.count()) if idx > 0}
def _resize(self, accept_lower_width: bool = True):
table_width = self.table_apps.get_width()
toolbar_width = self.toolbar_filters.sizeHint().width()
topbar_width = self.toolbar_status.sizeHint().width()
new_width = max(table_width, toolbar_width, topbar_width)
        new_width *= 1.05  # extra width to account for the table's upgrade toggle buttons rather than the toolbar buttons
if (self.pkgs and accept_lower_width) or new_width > self.width():
self.resize(int(new_width), self.height())
def set_progress_controll(self, enabled: bool):
self.progress_controll_enabled = enabled
def upgrade_selected(self):
body = QWidget()
body.setLayout(QHBoxLayout())
body.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
body.layout().addWidget(QLabel(self.i18n['manage_window.upgrade_all.popup.body']))
body.layout().addWidget(UpgradeToggleButton(pkg=None, root=self, i18n=self.i18n, clickable=False))
if ConfirmationDialog(title=self.i18n['manage_window.upgrade_all.popup.title'],
i18n=self.i18n, body=None,
widgets=[body]).ask():
self._begin_action(action_label=self.i18n['manage_window.status.upgrading'],
action_id=ACTION_UPGRADE)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_update.pkgs = self.pkgs
self.thread_update.start()
def _finish_upgrade_selected(self, res: dict):
self._finish_action()
if res.get('id'):
output = self.textarea_details.toPlainText()
if output:
try:
Path(UpgradeSelected.UPGRADE_LOGS_DIR).mkdir(parents=True, exist_ok=True)
logs_path = '{}/{}.log'.format(UpgradeSelected.UPGRADE_LOGS_DIR, res['id'])
with open(logs_path, 'w+') as f:
f.write(output)
self.textarea_details.appendPlainText('\n*Upgrade summary generated at: {}'.format(UpgradeSelected.SUMMARY_FILE.format(res['id'])))
self.textarea_details.appendPlainText('*Upgrade logs generated at: {}'.format(logs_path))
except:
traceback.print_exc()
if res['success']:
self.comp_manager.remove_saved_state(ACTION_UPGRADE)
self.begin_refresh_packages(pkg_types=res['types'])
self._show_console_checkbox_if_output()
if self._can_notify_user():
util.notify_user('{} {}'.format(res['updated'], self.i18n['notification.update_selected.success']))
notify_tray()
else:
self.comp_manager.restore_state(ACTION_UPGRADE)
self._show_console_errors()
if self._can_notify_user():
util.notify_user(self.i18n['notification.update_selected.failed'])
self.update_custom_actions()
def _show_console_errors(self):
if self.textarea_details.toPlainText():
self.check_details.setChecked(True)
else:
self._handle_console_option(False)
self.comp_manager.set_component_visible(CHECK_DETAILS, False)
def _update_action_output(self, output: str):
self.textarea_details.appendPlainText(output)
def _begin_action(self, action_label: str, action_id: int = None):
self.thread_animate_progress.stop = False
self.thread_animate_progress.start()
self.progress_bar.setVisible(True)
if action_id is not None:
self.comp_manager.save_states(action_id, only_visible=True)
self._set_table_enabled(False)
self.comp_manager.set_component_visible(SEARCH_BAR, False)
self._change_status(action_label)
def _set_lower_buttons_visible(self, visible: bool):
self.comp_manager.set_group_visible(GROUP_LOWER_BTS, visible)
if visible:
self.comp_manager.set_component_visible(BT_CUSTOM_ACTIONS, bool(self.custom_actions))
def _finish_action(self, action_id: int = None):
self.thread_animate_progress.stop = True
self.thread_animate_progress.wait(msecs=1000)
self.progress_bar.setVisible(False)
self.progress_bar.setValue(0)
self.progress_bar.setTextVisible(False)
if action_id is not None:
self.comp_manager.restore_state(action_id)
self.comp_manager.set_component_visible(SEARCH_BAR, True)
self._change_status()
self._change_label_substatus('')
self._set_table_enabled(True)
self.progress_controll_enabled = True
def begin_downgrade(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.DOWNGRADE, pkg)
if not proceed:
return
self._begin_action(action_label='{} {}'.format(self.i18n['manage_window.status.downgrading'], pkg.model.name),
action_id=ACTION_DOWNGRADE)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_downgrade.pkg = pkg
self.thread_downgrade.root_pwd = pwd
self.thread_downgrade.start()
def _finish_downgrade(self, res: dict):
self._finish_action()
if res['success']:
self.comp_manager.remove_saved_state(ACTION_DOWNGRADE)
if self._can_notify_user():
util.notify_user('{} {}'.format(res['app'], self.i18n['downgraded']))
self.begin_refresh_packages(pkg_types={res['app'].model.__class__} if len(self.pkgs) > 1 else None)
self._show_console_checkbox_if_output()
self.update_custom_actions()
notify_tray()
else:
self.comp_manager.restore_state(ACTION_DOWNGRADE)
self._show_console_errors()
if self._can_notify_user():
util.notify_user(self.i18n['notification.downgrade.failed'])
def begin_show_info(self, pkg: dict):
self._begin_action(self.i18n['manage_window.status.info'], action_id=ACTION_INFO)
self.comp_manager.disable_visible()
self.thread_show_info.pkg = pkg
self.thread_show_info.start()
def _finish_show_info(self, pkg_info: dict):
self._finish_action(action_id=ACTION_INFO)
if pkg_info:
if len(pkg_info) > 1:
dialog_info = InfoDialog(pkg_info=pkg_info, icon_cache=self.icon_cache,
i18n=self.i18n, screen_size=self.screen_size)
dialog_info.exec_()
else:
dialog.show_message(title=self.i18n['warning'].capitalize(),
body=self.i18n['manage_window.info.no_info'].format(bold(pkg_info['__app__'].model.name)),
type_=MessageType.WARNING)
def begin_show_screenshots(self, pkg: PackageView):
self._begin_action(action_label=self.i18n['manage_window.status.screenshots'].format(bold(pkg.model.name)),
action_id=ACTION_SCREENSHOTS)
self.comp_manager.disable_visible()
self.thread_screenshots.pkg = pkg
self.thread_screenshots.start()
def _finish_show_screenshots(self, res: dict):
self._finish_action(ACTION_SCREENSHOTS)
if res.get('screenshots'):
diag = ScreenshotsDialog(pkg=res['pkg'],
http_client=self.http_client,
icon_cache=self.icon_cache,
logger=self.logger,
i18n=self.i18n,
screenshots=res['screenshots'])
diag.exec_()
else:
dialog.show_message(title=self.i18n['error'],
body=self.i18n['popup.screenshots.no_screenshot.body'].format(bold(res['pkg'].model.name)),
type_=MessageType.ERROR)
def begin_show_history(self, pkg: PackageView):
self._begin_action(self.i18n['manage_window.status.history'], action_id=ACTION_HISTORY)
self.comp_manager.disable_visible()
self.thread_show_history.pkg = pkg
self.thread_show_history.start()
def _finish_show_history(self, res: dict):
self._finish_action(ACTION_HISTORY)
if res.get('error'):
self._handle_console_option(True)
self.textarea_details.appendPlainText(res['error'])
self.check_details.setChecked(True)
elif not res['history'].history:
dialog.show_message(title=self.i18n['action.history.no_history.title'],
body=self.i18n['action.history.no_history.body'].format(bold(res['history'].pkg.name)),
type_=MessageType.WARNING)
else:
dialog_history = HistoryDialog(res['history'], self.icon_cache, self.i18n)
dialog_history.exec_()
def _begin_search(self, word, action_id: int = None):
self.filter_updates = False
self._begin_action('{} {}'.format(self.i18n['manage_window.status.searching'], word if word else ''), action_id=action_id)
def search(self):
word = self.search_bar.text().strip()
if word:
self._handle_console(False)
self._begin_search(word, action_id=ACTION_SEARCH)
self.comp_manager.set_components_visible(False)
self.thread_search.word = word
self.thread_search.start()
def _finish_search(self, res: dict):
self._finish_action()
self.search_performed = True
if not res['error']:
self.comp_manager.set_group_visible(GROUP_VIEW_SEARCH, True)
self.update_pkgs(res['pkgs_found'], as_installed=False, ignore_updates=True)
self._set_lower_buttons_visible(True)
self._update_bts_installed_and_suggestions()
self._hide_filters_no_packages()
self._reorganize()
else:
self.comp_manager.restore_state(ACTION_SEARCH)
dialog.show_message(title=self.i18n['warning'].capitalize(), body=self.i18n[res['error']], type_=MessageType.WARNING)
def _ask_root_password(self, action: SoftwareAction, pkg: PackageView) -> Tuple[Optional[str], bool]:
pwd = None
requires_root = self.manager.requires_root(action, pkg.model)
if not user.is_root() and requires_root:
valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
if not valid:
return pwd, False
return pwd, True
def install(self, pkg: PackageView):
pwd, proceed = self._ask_root_password(SoftwareAction.INSTALL, pkg)
if not proceed:
return
self._begin_action('{} {}'.format(self.i18n['manage_window.status.installing'], pkg.model.name), action_id=ACTION_INSTALL)
self.comp_manager.set_groups_visible(False, GROUP_UPPER_BAR, GROUP_LOWER_BTS)
self._handle_console_option(True)
self.thread_install.pkg = pkg
self.thread_install.root_pwd = pwd
self.thread_install.start()
def _finish_install(self, res: dict):
self._finish_action(action_id=ACTION_INSTALL)
console_output = self.textarea_details.toPlainText()
if console_output:
log_path = f"{LOGS_DIR}/install/{res['pkg'].model.get_type()}/{res['pkg'].model.name}"
try:
Path(log_path).mkdir(parents=True, exist_ok=True)
log_file = f'{log_path}/{int(time.time())}.log'
with open(log_file, 'w+') as f:
f.write(console_output)
self.textarea_details.appendPlainText(self.i18n['console.install_logs.path'].format('"{}"'.format(log_file)))
            except Exception:
self.textarea_details.appendPlainText("[warning] Could not write install log file to '{}'".format(log_path))
if res['success']:
if self._can_notify_user():
util.notify_user(msg='{} ({}) {}'.format(res['pkg'].model.name, res['pkg'].model.get_type(), self.i18n['installed']))
models_updated = []
for key in ('installed', 'removed'):
if res.get(key):
models_updated.extend(res[key])
if models_updated:
installed_available_idxs = []
for idx, available in enumerate(self.pkgs_available):
for pidx, model in enumerate(models_updated):
if available.model == model:
available.update_model(model)
if model.installed:
installed_available_idxs.append((idx, pidx, available))
                # re-indexing all installed packages so they will always be displayed when no filters are applied
if installed_available_idxs:
# removing from available
installed_available_idxs.sort(key=operator.itemgetter(0))
for decrement, data in enumerate(installed_available_idxs):
del self.pkgs_available[data[0] - decrement]
# re-inserting into the available
installed_available_idxs.sort(key=operator.itemgetter(1))
for new_idx, data in enumerate(installed_available_idxs):
self.pkgs_available.insert(new_idx, data[2])
# updating the respective table rows:
for displayed in self.pkgs:
for model in models_updated:
if displayed.model == model:
self.table_apps.update_package(displayed, change_update_col=True)
self.update_bt_upgrade()
# updating installed packages
if res['removed'] and self.pkgs_installed:
to_remove = []
for idx, installed in enumerate(self.pkgs_installed):
for removed in res['removed']:
if installed.model == removed:
to_remove.append(idx)
if to_remove:
to_remove.sort()
for decrement, idx in enumerate(to_remove):
del self.pkgs_installed[idx - decrement]
if res['installed']:
for idx, model in enumerate(res['installed']):
self.pkgs_installed.insert(idx, PackageView(model, self.i18n))
self.update_custom_actions()
self.table_apps.change_headers_policy(policy=QHeaderView.Stretch, maximized=self._maximized)
self.table_apps.change_headers_policy(policy=QHeaderView.ResizeToContents, maximized=self._maximized)
self._resize(accept_lower_width=False)
else:
self._show_console_errors()
if self._can_notify_user():
util.notify_user('{}: {}'.format(res['pkg'].model.name, self.i18n['notification.install.failed']))
def _update_progress(self, value: int):
self.progress_bar.setValue(value)
def begin_execute_custom_action(self, pkg: Optional[PackageView], action: CustomSoftwareAction):
if pkg is None and action.requires_confirmation and \
not ConfirmationDialog(title=self.i18n['confirmation'].capitalize(),
body='<p>{}</p>'.format(self.i18n['custom_action.proceed_with'].capitalize().format(bold(self.i18n[action.i18n_label_key]))),
icon=QIcon(action.icon_path) if action.icon_path else QIcon(resource.get_path('img/logo.svg')),
i18n=self.i18n).ask():
return False
pwd = None
if not user.is_root() and action.requires_root:
valid, pwd = RootDialog.ask_password(self.context, i18n=self.i18n, comp_manager=self.comp_manager)
if not valid:
return
self._begin_action(action_label='{}{}'.format(self.i18n[action.i18n_status_key], ' {}'.format(pkg.model.name) if pkg else ''),
action_id=ACTION_CUSTOM_ACTION)
self.comp_manager.set_components_visible(False)
self._handle_console_option(True)
self.thread_custom_action.pkg = pkg
self.thread_custom_action.root_pwd = pwd
self.thread_custom_action.custom_action = action
self.thread_custom_action.start()
def _finish_execute_custom_action(self, res: dict):
self._finish_action()
if res['success']:
if res['action'].refresh:
self.comp_manager.remove_saved_state(ACTION_CUSTOM_ACTION)
self.begin_refresh_packages(pkg_types={res['pkg'].model.__class__} if res['pkg'] else None)
else:
self.comp_manager.restore_state(ACTION_CUSTOM_ACTION)
self._show_console_checkbox_if_output()
else:
self.comp_manager.restore_state(ACTION_CUSTOM_ACTION)
self._show_console_errors()
if res['error']:
dialog.show_message(title=self.i18n['warning' if res['error_type'] == MessageType.WARNING else 'error'].capitalize(),
body=self.i18n[res['error']],
type_=res['error_type'])
def _show_console_checkbox_if_output(self):
if self.textarea_details.toPlainText():
self.comp_manager.set_component_visible(CHECK_DETAILS, True)
else:
self.comp_manager.set_component_visible(CHECK_DETAILS, False)
def show_settings(self):
if self.settings_window:
self.settings_window.handle_display()
else:
self.settings_window = SettingsWindow(self.manager, self.i18n, self.screen_size, self)
self.settings_window.setMinimumWidth(int(self.screen_size.width() / 4))
self.settings_window.resize(self.size())
self.settings_window.adjustSize()
qt_utils.centralize(self.settings_window)
self.settings_window.show()
def _map_custom_action(self, action: CustomSoftwareAction, parent: QWidget) -> QCustomMenuAction:
if action.icon_path:
try:
if action.icon_path.startswith('/'):
icon = QIcon(action.icon_path)
else:
icon = QIcon.fromTheme(action.icon_path)
            except Exception:
icon = None
else:
icon = None
return QCustomMenuAction(parent=parent,
label=self.i18n[action.i18n_label_key],
action=lambda: self.begin_execute_custom_action(None, action),
icon=icon)
def show_custom_actions(self):
if self.custom_actions:
menu_row = QMenu()
menu_row.setCursor(QCursor(Qt.PointingHandCursor))
actions = [self._map_custom_action(a, menu_row) for a in self.custom_actions]
menu_row.addActions(actions)
menu_row.adjustSize()
menu_row.popup(QCursor.pos())
menu_row.exec_()
def begin_ignore_updates(self, pkg: PackageView):
status_key = 'ignore_updates' if not pkg.model.is_update_ignored() else 'ignore_updates_reverse'
self._begin_action(action_label=self.i18n['manage_window.status.{}'.format(status_key)].format(pkg.model.name),
action_id=ACTION_IGNORE_UPDATES)
self.comp_manager.disable_visible()
self.thread_ignore_updates.pkg = pkg
self.thread_ignore_updates.start()
def finish_ignore_updates(self, res: dict):
self._finish_action(action_id=ACTION_IGNORE_UPDATES)
if res['success']:
hide_package = commons.is_package_hidden(res['pkg'], self._gen_filters())
if hide_package:
idx_to_remove = None
for pkg in self.pkgs:
if pkg == res['pkg']:
idx_to_remove = pkg.table_index
break
if idx_to_remove is not None:
del self.pkgs[idx_to_remove]
self.table_apps.removeRow(idx_to_remove)
self._update_table_indexes()
self.update_bt_upgrade()
else:
for pkg in self.pkgs:
if pkg == res['pkg']:
pkg.update_model(res['pkg'].model)
self.table_apps.update_package(pkg, change_update_col=not any([self.search_performed, self.suggestions_requested]))
self.update_bt_upgrade()
break
for pkg_list in (self.pkgs_available, self.pkgs_installed):
if pkg_list:
for pkg in pkg_list:
if pkg == res['pkg']:
pkg.update_model(res['pkg'].model)
break
self._add_pkg_categories(res['pkg'])
dialog.show_message(title=self.i18n['success'].capitalize(),
body=self.i18n['action.{}.success'.format(res['action'])].format(bold(res['pkg'].model.name)),
type_=MessageType.INFO)
else:
dialog.show_message(title=self.i18n['fail'].capitalize(),
body=self.i18n['action.{}.fail'.format(res['action'])].format(bold(res['pkg'].model.name)),
type_=MessageType.ERROR)
def _add_pkg_categories(self, pkg: PackageView):
if pkg.model.categories:
pkg_categories = {c.strip().lower() for c in pkg.model.categories if c and c.strip()}
if pkg_categories:
current_categories = self._get_current_categories()
if current_categories:
pkg_categories = {c.strip().lower() for c in pkg.model.categories if c}
if pkg_categories:
categories_to_add = {c for c in pkg_categories if c and c not in current_categories}
if categories_to_add:
for cat in categories_to_add:
self.__add_category(cat)
else:
self._update_categories(pkg_categories)
def _map_theme_action(self, theme: ThemeMetadata, menu: QMenu) -> QCustomMenuAction:
def _change_theme():
set_theme(theme_key=theme.key, app=QApplication.instance(), logger=self.context.logger)
self.thread_save_theme.theme_key = theme.key
self.thread_save_theme.start()
return QCustomMenuAction(label=theme.get_i18n_name(self.i18n),
action=_change_theme,
parent=menu,
tooltip=theme.get_i18n_description(self.i18n))
def show_themes(self):
menu_row = QMenu()
menu_row.setCursor(QCursor(Qt.PointingHandCursor))
menu_row.addActions(self._map_theme_actions(menu_row))
menu_row.adjustSize()
menu_row.popup(QCursor.pos())
menu_row.exec_()
def _map_theme_actions(self, menu: QMenu) -> List[QCustomMenuAction]:
core_config = CoreConfigManager().get_config()
current_theme_key, current_action = core_config['ui']['theme'], None
actions = []
for t in read_all_themes_metadata():
if not t.abstract:
action = self._map_theme_action(t, menu)
if current_action is None and current_theme_key is not None and current_theme_key == t.key:
action.button.setProperty('current', 'true')
current_action = action
else:
actions.append(action)
if not current_action:
invalid_action = QCustomMenuAction(label=self.i18n['manage_window.bt_themes.option.invalid'], parent=menu)
invalid_action.button.setProperty('current', 'true')
current_action = invalid_action
actions.sort(key=lambda a: a.get_label())
actions.insert(0, current_action)
return actions
def reload(self):
self.thread_reload.start()
def _reload(self):
self.update_custom_actions()
self.verify_warnings()
self.types_changed = True
self.begin_refresh_packages()
|
from fastapi import FastAPI
from fastapi.testclient import TestClient
app = FastAPI()
@app.get("/api/v1/healthcheck")
async def read_main():
return "OK"
@app.post("/api/v1/query")
async def query():
return [{"event_date": "20210105"}]
client = TestClient(app)
def test_read_main():
response = client.get("/api/v1/healthcheck")
assert response.status_code == 200
assert response.json() == "OK"
def test_query():
response = client.post(
"/api/v1/query",
json={
"type": "service_account",
"date": "20210105",
"projet_id": "test-project",
"private_key_id": "test",
"private_key": "testkey",
"client_email": "test@test.com",
"client_id": "test",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "ttps://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "test",
"client_x509_cert_url": "test"
}
)
assert response.status_code == 200
assert response.json() == [{"event_date": "20210105"}]
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from yandex.cloud.datasphere.v1 import app_token_service_pb2 as yandex_dot_cloud_dot_datasphere_dot_v1_dot_app__token__service__pb2
class AppTokenServiceStub(object):
"""A set of methods for managing app tokens.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Validate = channel.unary_unary(
'/yandex.cloud.datasphere.v1.AppTokenService/Validate',
request_serializer=yandex_dot_cloud_dot_datasphere_dot_v1_dot_app__token__service__pb2.AppTokenValidateRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class AppTokenServiceServicer(object):
"""A set of methods for managing app tokens.
"""
def Validate(self, request, context):
"""Validates app token.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AppTokenServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Validate': grpc.unary_unary_rpc_method_handler(
servicer.Validate,
request_deserializer=yandex_dot_cloud_dot_datasphere_dot_v1_dot_app__token__service__pb2.AppTokenValidateRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.datasphere.v1.AppTokenService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class AppTokenService(object):
"""A set of methods for managing app tokens.
"""
@staticmethod
def Validate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.datasphere.v1.AppTokenService/Validate',
yandex_dot_cloud_dot_datasphere_dot_v1_dot_app__token__service__pb2.AppTokenValidateRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
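# --- Usage sketch (added for illustration; not part of the generated file) ---
# Assuming a working gRPC channel to the Datasphere API (the endpoint below and
# the request message contents are assumptions, not taken from this file):
#
#   channel = grpc.secure_channel('datasphere.api.cloud.yandex.net:443',
#                                 grpc.ssl_channel_credentials())
#   stub = AppTokenServiceStub(channel)
#   request = yandex_dot_cloud_dot_datasphere_dot_v1_dot_app__token__service__pb2.AppTokenValidateRequest()
#   stub.Validate(request)  # raises grpc.RpcError if the token is rejected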
|
import pytest
@pytest.mark.usefixtures("smart_setup")
class TestObjectValue:
def test_get_sheet_object_value(self, smart_setup):
smart = smart_setup['smart']
sheet = smart.Sheets.get_sheet(smart_setup['sheet'].id, include='objectValue')
assert isinstance(sheet.rows[0].cells[0].object_value, smart.models.StringObjectValue)
assert isinstance(sheet, smart.models.Sheet)
def test_predecessors(self, smart_setup):
smart = smart_setup['smart']
templates = smart.Templates.list_public_templates(include_all=True)
for template in templates.data:
if template.name == 'Basic Project with Gantt & Dependencies':
break
sheet = smart.models.Sheet({
'name': 'example_project_python_sdk' + smart_setup['now'],
'fromId': template.id
})
action = smart.Home.create_sheet_from_template(sheet)
sheet = action.result
assert action.message == 'SUCCESS'
sheet = smart.Sheets.get_sheet(sheet.id)
# add 'Task1'
row = smart.models.Row()
row.to_bottom = True
for col in sheet.columns:
if col.primary:
row.cells.append({
'column_id': col.id,
'value': 'Task1'
})
break
action = smart.Sheets.add_rows(sheet.id, [row])
task1_row = action.result[0]
assert isinstance(task1_row, smart.models.row.Row)
assert action.request_response.status_code == 200
# add 'Task2' with 'Task1' predecessor
p1 = smart.models.Predecessor()
p1.type = 'FS'
p1.row_id = task1_row.id
predecessor_list = smart.models.PredecessorList()
predecessor_list.predecessors = [p1]
row = smart.models.Row()
row.to_bottom = True
for col in sheet.columns:
if col.primary:
row.cells.append({
'column_id': col.id,
'value': 'Task2'
})
if col.type == 'PREDECESSOR':
row.cells.append({
'column_id': col.id,
'object_value': predecessor_list
})
action = smart.Sheets.add_rows(sheet.id, [row])
task2_row = action.result[0]
assert isinstance(task2_row, smart.models.row.Row)
assert action.request_response.status_code == 200
# add 'Task3' with 'Task1','Task2' predecessors
p1 = smart.models.Predecessor()
p1.type = 'FS'
p1.row_id = task1_row.id
p2 = smart.models.Predecessor()
p2.type = 'FS'
p2.row_id = task2_row.id
predecessor_list = smart.models.PredecessorList()
predecessor_list.predecessors = [p1, p2]
row = smart.models.Row()
row.to_bottom = True
for col in sheet.columns:
if col.primary:
row.cells.append({
'column_id': col.id,
'value': 'Task3'
})
if col.type == 'PREDECESSOR':
row.cells.append({
'column_id': col.id,
'object_value': predecessor_list
})
action = smart.Sheets.add_rows(sheet.id, [row])
task3_row = action.result[0]
assert isinstance(task3_row, smart.models.row.Row)
assert action.request_response.status_code == 200
# clear the predecessor list from task 3
row = smart.models.Row()
row.id = task3_row.id
for col in sheet.columns:
if col.type == 'PREDECESSOR':
row.cells.append({
'column_id': col.id,
'value': smart.models.ExplicitNull()
})
break
action = smart.Sheets.update_rows(sheet.id, [row])
assert action.request_response.status_code == 200
for cell in action.data[0].cells:
if cell.column_id == col.id:
                break
assert cell.object_value is None
# clean up
action = smart.Sheets.delete_sheet(sheet.id)
assert action.message == 'SUCCESS'
|
import random
import time
class Athlete():
name = ""
health = 100
def __init__(self, newName):
self.name = newName
print("На ринге появляется новый боец, его имя - ", self.name )
print()
def punch(self, other):
time.sleep(1)
        print(self.name, "strikes fighter", other.name)
other.health -= 20
print("Уровень физического состояния бойца ", other.name, " - ", other.health)
print()
fighter1 = Athlete("Владимир")
fighter2 = Athlete("Николай")
while (fighter1.health != 0) and (fighter2.health != 0):
fighters = [fighter1, fighter2]
if fighters[random.randint(0,1)] == fighter1:
fighter1.punch(fighter2)
else:
fighter2.punch(fighter1)
print("Победу в поединке одержал " + (fighter1.name if fighter1.health > 0 else fighter2.name) + "!")
|
import bagel
import numpy as np
from sklearn.metrics import precision_recall_curve
from typing import Sequence, Tuple, Dict, Optional
def _adjust_scores(labels: np.ndarray,
scores: np.ndarray,
delay: Optional[int] = None,
inplace: bool = False) -> np.ndarray:
if np.shape(scores) != np.shape(labels):
raise ValueError('`labels` and `scores` must have same shape')
if delay is None:
delay = len(scores)
splits = np.where(labels[1:] != labels[:-1])[0] + 1
is_anomaly = labels[0] == 1
adjusted_scores = np.copy(scores) if not inplace else scores
pos = 0
for part in splits:
if is_anomaly:
ptr = min(pos + delay + 1, part)
adjusted_scores[pos: ptr] = np.max(adjusted_scores[pos: ptr])
adjusted_scores[ptr: part] = np.maximum(adjusted_scores[ptr: part], adjusted_scores[pos])
is_anomaly = not is_anomaly
pos = part
part = len(labels)
if is_anomaly:
ptr = min(pos + delay + 1, part)
adjusted_scores[pos: part] = np.max(adjusted_scores[pos: ptr])
return adjusted_scores
def _ignore_missing(series_list: Sequence, missing: np.ndarray) -> Tuple[np.ndarray, ...]:
ret = []
for series in series_list:
series = np.copy(series)
ret.append(series[missing != 1])
return tuple(ret)
def _best_f1score(labels: np.ndarray, scores: np.ndarray) -> Tuple[float, float, float, float]:
precision, recall, thresholds = precision_recall_curve(y_true=labels, probas_pred=scores)
f1score = 2 * precision * recall / np.clip(precision + recall, a_min=1e-8, a_max=None)
best_threshold = thresholds[np.argmax(f1score)]
best_precision = precision[np.argmax(f1score)]
best_recall = recall[np.argmax(f1score)]
return best_threshold, best_precision, best_recall, np.max(f1score)
def get_test_results(labels: np.ndarray,
scores: np.ndarray,
missing: np.ndarray,
window_size: int,
delay: Optional[int] = None) -> Dict:
labels = labels[window_size - 1:]
scores = scores[window_size - 1:]
missing = missing[window_size - 1:]
adjusted_scores = _adjust_scores(labels=labels, scores=scores, delay=delay)
adjusted_labels, adjusted_scores = _ignore_missing([labels, adjusted_scores], missing=missing)
threshold, precision, recall, f1score = _best_f1score(labels=adjusted_labels, scores=adjusted_scores)
return {'threshold': threshold,
'precision': precision,
'recall': recall,
'f1score': f1score}
class KPIStats:
def __init__(self, kpi: bagel.data.KPI):
self.num_points = len(kpi.values)
self.num_missing = len(kpi.missing[kpi.missing == 1])
self.num_anomaly = len(kpi.labels[kpi.labels == 1])
self.missing_rate = self.num_missing / self.num_points
self.anomaly_rate = self.num_anomaly / self.num_points
def get_kpi_stats(*kpis: bagel.data.KPI) -> Tuple[KPIStats, ...]:
ret = []
for kpi in kpis:
ret.append(KPIStats(kpi))
return tuple(ret)
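# Minimal self-contained sketch (added; not part of the original module): exercises
# `get_test_results` on a toy series with one fully observed anomaly segment.
if __name__ == '__main__':
    toy_labels = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1])
    toy_scores = np.array([.1, .2, .9, .1, .2, .1, .3, .2, .8, .7])
    toy_missing = np.zeros_like(toy_labels)
    # window_size=1 keeps every point; delay=None allows adjustment over whole segments
    print(get_test_results(labels=toy_labels, scores=toy_scores,
                           missing=toy_missing, window_size=1, delay=None))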
|
"""
This is a library for defining and using particle filters.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
from contextlib import contextmanager
from yt.fields.field_info_container import \
NullFunc, TranslationFunc
from yt.funcs import mylog
from yt.utilities.exceptions import YTIllDefinedFilter
# One to one mapping
filter_registry = {}
class DummyFieldInfo(object):
particle_type = True
dfi = DummyFieldInfo()
class ParticleFilter(object):
def __init__(self, name, function, requires, filtered_type):
self.name = name
self.function = function
self.requires = requires[:]
self.filtered_type = filtered_type
@contextmanager
def apply(self, dobj):
with dobj._chunked_read(dobj._current_chunk):
with dobj._field_type_state(self.filtered_type, dfi):
# We won't be storing the field data from the whole read, so we
# start by filtering now.
filter = self.function(self, dobj)
yield
# Retain a reference here, and we'll filter all appropriate fields
# later.
fd = dobj.field_data
for f, tr in fd.items():
if f[0] != self.filtered_type: continue
if tr.shape != filter.shape and tr.shape[0] != filter.shape[0]:
raise YTIllDefinedFilter(self, tr.shape, filter.shape)
else:
d = tr[filter]
dobj.field_data[self.name, f[1]] = d
def available(self, field_list):
# Note that this assumes that all the fields in field_list have the
# same form as the 'requires' attributes. This won't be true if the
# fields are implicitly "all" or something.
return all((self.filtered_type, field) in field_list for field in self.requires)
def missing(self, field_list):
return list((self.filtered_type, field) for field in self.requires if
(self.filtered_type, field) not in field_list)
def wrap_func(self, field_name, old_fi):
new_fi = copy.copy(old_fi)
new_fi.name = (self.name, field_name[1])
if old_fi._function == NullFunc:
new_fi._function = TranslationFunc(old_fi.name)
# Marking the field as inherited
new_fi._inherited_particle_filter = True
return new_fi
def add_particle_filter(name, function, requires=None, filtered_type="all"):
r"""Create a new particle filter in the global namespace of filters
A particle filter is a short name that corresponds to an algorithm for
filtering a set of particles into a subset. This is useful for creating new
particle types based on a cut on a particle field, such as particle mass, ID
or type. After defining a new filter, it still needs to be added to the
dataset by calling
:func:`~yt.data_objects.static_output.add_particle_filter`.
.. note::
Alternatively, you can make use of the
:func:`~yt.data_objects.particle_filters.particle_filter` decorator to
define a new particle filter.
Parameters
----------
name : string
The name of the particle filter. New particle fields with particle type
set by this name will be added to any dataset that enables this particle
filter.
function : reference to a function
The function that defines the particle filter. The function should
accept two arguments: a reference to a particle filter object and a
reference to an abstract yt data object. See the example below.
requires : a list of field names
A list of field names required by the particle filter definition.
filtered_type : string
The name of the particle type to be filtered.
Example
-------
>>> import yt
>>> def _stars(pfilter, data):
... return data[(pfilter.filtered_type, 'particle_type')] == 2
>>> yt.add_particle_filter("stars", function=_stars, filtered_type='all',
... requires=["particle_type"])
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> ds.add_particle_filter('stars')
>>> ad = ds.all_data()
>>> print (ad['stars', 'particle_mass'])
[ 1.68243760e+38 1.65690882e+38 1.65813321e+38 ..., 2.04238266e+38
2.04523901e+38 2.04770938e+38] g
"""
if requires is None:
requires = []
filter = ParticleFilter(name, function, requires, filtered_type)
if filter_registry.get(name, None) is not None:
mylog.warning('The %s particle filter already exists. Overriding.' % name)
filter_registry[name] = filter
def particle_filter(name=None, requires=None, filtered_type='all'):
r"""A decorator that adds a new particle filter
A particle filter is a short name that corresponds to an algorithm for
filtering a set of particles into a subset. This is useful for creating new
particle types based on a cut on a particle field, such as particle mass, ID
or type.
.. note::
Alternatively, you can make use of the
:func:`~yt.data_objects.particle_filters.add_particle_filter` function
to define a new particle filter using a more declarative syntax.
Parameters
----------
name : string
The name of the particle filter. New particle fields with particle type
set by this name will be added to any dataset that enables this particle
filter. If not set, the name will be inferred from the name of the
filter function.
function : reference to a function
The function that defines the particle filter. The function should
accept two arguments: a reference to a particle filter object and a
reference to an abstract yt data object. See the example below.
requires : a list of field names
A list of field names required by the particle filter definition.
filtered_type : string
The name of the particle type to be filtered.
Example
-------
>>> import yt
>>> # define a filter named "stars"
>>> @yt.particle_filter(requires=["particle_type"], filtered_type='all')
>>> def stars(pfilter, data):
... return data[(pfilter.filtered_type, 'particle_type')] == 2
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> ds.add_particle_filter('stars')
>>> ad = ds.all_data()
>>> print (ad['stars', 'particle_mass'])
[ 1.68243760e+38 1.65690882e+38 1.65813321e+38 ..., 2.04238266e+38
2.04523901e+38 2.04770938e+38] g
"""
def wrapper(function):
if name is None:
used_name = function.__name__
else:
used_name = name
return add_particle_filter(used_name, function, requires, filtered_type)
return wrapper
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import dirlistproc
def proc_xml(input_fn: str, output_fn: str, _) -> bool:
print("Converting %s to %s" % (input_fn, output_fn))
return True
def main():
dlp = dirlistproc.DirectoryListProcessor(None, "Convert XML to Text", ".xml", ".txt")
nfiles, nsuccess = dlp.run(proc_xml)
print("Total=%d Successful=%d" % (nfiles, nsuccess))
if __name__ == '__main__':
main()
|
from .connection import Connection
|
#Split one picture
import cv2
import numpy.random as random
import numpy as np
import os
import time
#borders
#mitochondria
#mitochondria borders
#PSD
#vesicles
def is_Img(name):
    img_type = ('.png', '.jpg', '.jpeg')
    return name.endswith(img_type)
file_dir_arr = ["axon", "mitochondria", "PSD", "vesicles", "boundaries","mitochondrial boundaries"]
name_list = []
mask_list = []
out_dir = "cutting data"
size_data = 256
size_step = 128
if not os.path.isdir(out_dir):
print("создаю out_dir:" + out_dir)
os.makedirs(out_dir)
dir_input_img = "original data/original/"
dir_input_mask ="original data/"
###########################################################
img_name = "training075.png"
###########################################################
if is_Img(os.path.join(dir_input_img, img_name)):
count = 0
img = cv2.imread(os.path.join(dir_input_img, img_name), 0)
h,w = img.shape[0:2]
if not os.path.isdir(out_dir+"/original"):
print("создаю out_dir:" + "original")
os.makedirs(out_dir+"/original")
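    # Added note: the loops below tile the image into size_data x size_data crops on a
    # size_step grid; windows that would overrun the right/bottom border are skipped,
    # so a border strip narrower than size_data is never exported.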
for start_y in range(0,h, size_step):
if (h - start_y < size_data):
continue
for start_x in range(0,w, size_step):
if (w - start_x < size_data):
continue
cutting_img = img[start_y:start_y+size_data, start_x:start_x+size_data]
cv2.imwrite(out_dir + "/original/" + img_name + "_" + str(size_data) +"_" + str(size_step) +"_" +str(count)+".png", cutting_img)
count+=1
for i,dir_name in enumerate(file_dir_arr):
if is_Img(os.path.join(dir_input_mask + dir_name, img_name)):
img = cv2.imread(os.path.join(dir_input_mask +dir_name, img_name), 0)
img[img < 128] = 0
img[img > 127] = 255
if name_list.count(img_name) == 0:
name_list.append(img_name)
mask_list.append(np.zeros((len(file_dir_arr),)+ img.shape, np.uint8))
index = name_list.index(img_name)
mask_list[index][i] = img
print(name_list)
for index, mask_stack in enumerate(mask_list):
count = 0
for i,dir_name in enumerate(file_dir_arr):
local_count = count
mask_write = mask_stack[i]
h,w = mask_write.shape[0:2]
if not os.path.isdir(out_dir+"/"+dir_name):
print("создаю out_dir:" + "mask")
os.makedirs(out_dir+"/"+dir_name )
for start_y in range(0,h, size_step):
if (h - start_y < size_data):
continue
for start_x in range(0,w, size_step):
if (w - start_x < size_data):
continue
cutting_mask = mask_write[start_y:start_y+size_data, start_x:start_x+size_data]
cv2.imwrite(out_dir+"/"+dir_name +"/" + name_list[index] + "_" + str(size_data) +"_" + str(size_step) +"_" +str(local_count)+".png", cutting_mask)
local_count+=1
|
"""payu.cli
========
Command line interface tools
:copyright: Copyright 2011 Marshall Ward, see AUTHORS for details.
:license: Apache License, Version 2.0, see LICENSE for details
"""
import argparse
from distutils import sysconfig
import importlib
import os
import pkgutil
import shlex
import subprocess
import sys
import payu
import payu.envmod as envmod
from payu.models import index as supported_models
import payu.subcommands
# Default configuration
DEFAULT_CONFIG = 'config.yaml'
def parse():
"""Parse the command line inputs and execute the subcommand."""
# Build the list of subcommand modules
modnames = [mod for (_, mod, _)
in pkgutil.iter_modules(payu.subcommands.__path__,
prefix=payu.subcommands.__name__ + '.')
if mod.endswith('_cmd')]
subcmds = [importlib.import_module(mod) for mod in modnames]
# Construct the subcommand parser
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version',
version='payu {0}'.format(payu.__version__))
subparsers = parser.add_subparsers()
for cmd in subcmds:
cmd_parser = subparsers.add_parser(cmd.title, **cmd.parameters)
cmd_parser.set_defaults(run_cmd=cmd.runcmd)
for arg in cmd.arguments:
cmd_parser.add_argument(*arg['flags'], **arg['parameters'])
# Display help if no arguments are provided
if len(sys.argv) == 1:
parser.print_help()
else:
args = vars(parser.parse_args())
run_cmd = args.pop('run_cmd')
run_cmd(**args)
def get_model_type(model_type, config):
"""Determine and validate the active model type."""
# If no model type is given, then check the config file
if not model_type:
model_type = config.get('model')
# If there is still no model type, try the parent directory
if not model_type:
model_type = os.path.basename(os.path.abspath(os.pardir))
print('payu: warning: Assuming model is {0} based on parent directory '
'name.'.format(model_type))
if model_type not in supported_models:
print('payu: error: Unknown model {0}'.format(model_type))
sys.exit(-1)
def set_env_vars(init_run=None, n_runs=None, lab_path=None, dir_path=None,
reproduce=None):
"""Construct the environment variables used by payu for resubmissions."""
payu_env_vars = {}
# Setup Python dynamic library link
lib_paths = sysconfig.get_config_vars('LIBDIR')
payu_env_vars['LD_LIBRARY_PATH'] = ':'.join(lib_paths)
if 'PYTHONPATH' in os.environ:
payu_env_vars['PYTHONPATH'] = os.environ['PYTHONPATH']
# Set (or import) the path to the PAYU scripts (PAYU_PATH)
# NOTE: We may be able to use sys.path[0] here.
payu_binpath = os.environ.get('PAYU_PATH')
if not payu_binpath or not os.path.isdir(payu_binpath):
payu_binpath = os.path.dirname(sys.argv[0])
payu_env_vars['PAYU_PATH'] = payu_binpath
# Set the run counters
if init_run:
init_run = int(init_run)
assert init_run >= 0
payu_env_vars['PAYU_CURRENT_RUN'] = init_run
if n_runs:
n_runs = int(n_runs)
assert n_runs > 0
payu_env_vars['PAYU_N_RUNS'] = n_runs
# Import explicit project paths
if lab_path:
payu_env_vars['PAYU_LAB_PATH'] = os.path.normpath(lab_path)
if dir_path:
payu_env_vars['PAYU_DIR_PATH'] = os.path.normpath(dir_path)
if reproduce:
payu_env_vars['PAYU_REPRODUCE'] = reproduce
return payu_env_vars
def submit_job(pbs_script, pbs_config, pbs_vars=None):
"""Submit a userscript the scheduler."""
# Initialisation
if pbs_vars is None:
pbs_vars = {}
pbs_flags = []
pbs_queue = pbs_config.get('queue', 'normal')
pbs_flags.append('-q {queue}'.format(queue=pbs_queue))
pbs_project = pbs_config.get('project', os.environ['PROJECT'])
pbs_flags.append('-P {project}'.format(project=pbs_project))
pbs_resources = ['walltime', 'ncpus', 'mem', 'jobfs']
for res_key in pbs_resources:
res_flags = []
res_val = pbs_config.get(res_key)
if res_val:
res_flags.append('{key}={val}'.format(key=res_key, val=res_val))
if res_flags:
pbs_flags.append('-l {res}'.format(res=','.join(res_flags)))
# TODO: Need to pass lab.config_path somehow...
pbs_jobname = pbs_config.get('jobname', os.path.basename(os.getcwd()))
if pbs_jobname:
# PBSPro has a 15-character jobname limit
pbs_flags.append('-N {name}'.format(name=pbs_jobname[:15]))
pbs_priority = pbs_config.get('priority')
if pbs_priority:
pbs_flags.append('-p {priority}'.format(priority=pbs_priority))
pbs_flags.append('-l wd')
pbs_join = pbs_config.get('join', 'n')
if pbs_join not in ('oe', 'eo', 'n'):
print('payu: error: unknown qsub IO stream join setting.')
sys.exit(-1)
else:
pbs_flags.append('-j {join}'.format(join=pbs_join))
# Append environment variables to qsub command
# TODO: Support full export of environment variables: `qsub -V`
pbs_vstring = ','.join('{0}={1}'.format(k, v)
for k, v in pbs_vars.items())
pbs_flags.append('-v ' + pbs_vstring)
# Append any additional qsub flags here
pbs_flags_extend = pbs_config.get('qsub_flags')
if pbs_flags_extend:
pbs_flags.append(pbs_flags_extend)
if not os.path.isabs(pbs_script):
        # NOTE: PAYU_PATH is always set if `set_env_vars` was called.
# This is currently always true, but is not explicitly enforced.
# So this conditional check is a bit redundant.
payu_bin = pbs_vars.get('PAYU_PATH', os.path.dirname(sys.argv[0]))
pbs_script = os.path.join(payu_bin, pbs_script)
assert os.path.isfile(pbs_script)
# Set up environment modules here for PBS.
envmod.setup()
envmod.module('load', 'pbs')
# Construct job submission command
cmd = 'qsub {flags} -- {python} {script}'.format(
flags=' '.join(pbs_flags),
python=sys.executable,
script=pbs_script
)
print(cmd)
subprocess.check_call(shlex.split(cmd))
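# A minimal sketch (added; not part of payu): `set_env_vars` only assembles the
# PAYU_* environment passed through to the scheduler, so it can be exercised
# directly. The paths below are illustrative values, not payu defaults.
if __name__ == '__main__':
    env = set_env_vars(n_runs=5, lab_path='/scratch/lab', reproduce=False)
    print(env['PAYU_N_RUNS'], env['PAYU_LAB_PATH'])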
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
""" Create a new user profile"""
if not email:
raise ValueError('Users must have an email address')
        email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
""" Create a new superuser profile"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name"""
return self.name
def get_short_name(self):
"""Retrieve shot name of user"""
return self.name
def __str__(self):
""" Return string representation for our users"""
return self.email
class ProfileFeedItem(models.Model):
""" Profile status update """
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
""" Return the model as string """
return self.status_text
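# Usage sketch (added; illustrative only, assumes this app is installed and migrated):
#   user = UserProfile.objects.create_user(email='jane@example.com', name='Jane', password='s3cret')
#   ProfileFeedItem.objects.create(user_profile=user, status_text='Hello!')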
|
import dash
import dash_bootstrap_components as dbc
# bootstrap theme
# https://bootswatch.com/lux/
external_stylesheets = [dbc.themes.YETI]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets,
suppress_callback_exceptions=True)
server = app.server
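# Added sketch: in a multi-page Dash project the server is usually launched from a
# separate entry point (e.g. index.py), but the app defined above can also be run
# directly for local development.
if __name__ == '__main__':
    app.run_server(debug=True)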
|
# coding: utf-8
__author__ = 'cleardusk'
import os.path as osp
import time
import numpy as np
import cv2
import torch
from torchvision.transforms import Compose
import torch.backends.cudnn as cudnn
import _3DDFA_V2.models as models
from _3DDFA_V2.bfm import BFMModel
from _3DDFA_V2.utils.io import _load
from _3DDFA_V2.utils.functions import (
crop_img, parse_roi_box_from_bbox, parse_roi_box_from_landmark,
)
from _3DDFA_V2.utils.tddfa_util import (
load_model, _parse_param, similar_transform,
ToTensorGjz, NormalizeGjz
)
make_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)
class TDDFA(object):
"""TDDFA: named Three-D Dense Face Alignment (TDDFA)"""
def __init__(self, **kvs):
torch.set_grad_enabled(False)
print(make_abs_path('configs/bfm_noneck_v3.pkl'))
# load BFM
self.bfm = BFMModel(
bfm_fp=kvs.get('bfm_fp', make_abs_path('configs/bfm_noneck_v3.pkl')),
shape_dim=kvs.get('shape_dim', 40),
exp_dim=kvs.get('exp_dim', 10)
)
self.tri = self.bfm.tri
# config
self.gpu_mode = kvs.get('gpu_mode', False)
self.gpu_id = kvs.get('gpu_id', 0)
self.size = kvs.get('size', 120)
param_mean_std_fp = kvs.get(
'param_mean_std_fp', make_abs_path(f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl')
)
# load model, default output is dimension with length 62 = 12(pose) + 40(shape) +10(expression)
model = getattr(models, kvs.get('arch'))(
num_classes=kvs.get('num_params', 62),
widen_factor=kvs.get('widen_factor', 1),
size=self.size,
mode=kvs.get('mode', 'small')
)
model = load_model(model, kvs.get('checkpoint_fp'))
if self.gpu_mode:
cudnn.benchmark = True
model = model.cuda(device=self.gpu_id)
self.model = model
self.model.eval() # eval mode, fix BN
# data normalization
transform_normalize = NormalizeGjz(mean=127.5, std=128)
transform_to_tensor = ToTensorGjz()
transform = Compose([transform_to_tensor, transform_normalize])
self.transform = transform
# params normalization config
r = _load(param_mean_std_fp)
self.param_mean = r.get('mean')
self.param_std = r.get('std')
        # print('param_mean and param_std', self.param_mean, self.param_std)
def __call__(self, img_ori, objs, **kvs):
"""The main call of TDDFA, given image and box / landmark, return 3DMM params and roi_box
:param img_ori: the input image
:param objs: the list of box or landmarks
:param kvs: options
:return: param list and roi_box list
"""
# Crop image, forward to get the param
param_lst = []
roi_box_lst = []
crop_policy = kvs.get('crop_policy', 'box')
for obj in objs:
if crop_policy == 'box':
# by face box
roi_box = parse_roi_box_from_bbox(obj)
elif crop_policy == 'landmark':
# by landmarks
roi_box = parse_roi_box_from_landmark(obj)
else:
raise ValueError(f'Unknown crop policy {crop_policy}')
roi_box_lst.append(roi_box)
img = crop_img(img_ori, roi_box)
img = cv2.resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_LINEAR)
inp = self.transform(img).unsqueeze(0)
if self.gpu_mode:
inp = inp.cuda(device=self.gpu_id)
if kvs.get('timer_flag', False):
end = time.time()
param = self.model(inp)
elapse = f'Inference: {(time.time() - end) * 1000:.1f}ms'
print(elapse)
else:
param = self.model(inp)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
param = param * self.param_std + self.param_mean # re-scale
# print('output', param)
param_lst.append(param)
return param_lst, roi_box_lst
def recon_vers(self, param_lst, roi_box_lst, **kvs):
dense_flag = kvs.get('dense_flag', False)
size = self.size
ver_lst = []
for param, roi_box in zip(param_lst, roi_box_lst):
if dense_flag:
R, offset, alpha_shp, alpha_exp = _parse_param(param)
pts3d = R @ (self.bfm.u + self.bfm.w_shp @ alpha_shp + self.bfm.w_exp @ alpha_exp). \
reshape(3, -1, order='F') + offset
pts3d = similar_transform(pts3d, roi_box, size)
else:
R, offset, alpha_shp, alpha_exp = _parse_param(param)
pts3d = R @ (self.bfm.u_base + self.bfm.w_shp_base @ alpha_shp + self.bfm.w_exp_base @ alpha_exp). \
reshape(3, -1, order='F') + offset
pts3d = similar_transform(pts3d, roi_box, size)
ver_lst.append(pts3d)
return ver_lst
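# Usage sketch (added; comments only — the config values and checkpoint path are
# assumptions based on typical 3DDFA_V2 configs, not taken from this module):
#   tddfa = TDDFA(arch='mobilenet', checkpoint_fp='weights/mb1_120x120.pth', gpu_mode=False)
#   param_lst, roi_box_lst = tddfa(img_bgr, [[x1, y1, x2, y2, score]])    # crop_policy='box'
#   ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=False)  # sparse 68-point vertices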
|
import sys
def sol():
input = sys.stdin.readline
N = int(input())
node = [[] for i in range(N)]
for i in range(N):
vector = list(map(int, input().split(" ")))
for j in range(N):
if vector[j] == 1:
node[i].append(j)
for i in range(N):
visited = ["0"] * N
dfs(node, visited, i)
print(" ".join(visited))
def bfs(N, node, i):
queue = []
visited = [False] * N
queue.append(i)
while len(queue) > 0:
v = queue.pop(0)
for w in node[v]:
if not visited[w]:
visited[w] = True
queue.append(w)
result = []
for check in visited:
if check:
result.append("1")
else:
result.append("0")
return result
def dfs(node, visited, v):
for w in node[v]:
if visited[w] == "0":
visited[w] = "1"
dfs(node, visited, w)
if __name__ == "__main__":
sol()
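# Worked example (added): for N = 3 and the adjacency matrix
#   0 1 0
#   0 0 1
#   1 0 0
# each row's DFS reaches every vertex through the cycle 0 -> 1 -> 2 -> 0,
# so the printed reachability (transitive closure) matrix is all ones:
#   1 1 1
#   1 1 1
#   1 1 1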
|
from .._sign.sphincs_sha256_128f_robust import ffi as __ffi, lib as __lib
from .common import _sign_generate_keypair_factory, _sign_sign_factory, _sign_verify_factory
PUBLIC_KEY_SIZE = __lib.CRYPTO_PUBLICKEYBYTES
SECRET_KEY_SIZE = __lib.CRYPTO_SECRETKEYBYTES
SIGNATURE_SIZE = __lib.CRYPTO_BYTES
generate_keypair = _sign_generate_keypair_factory(__ffi, __lib)
sign = _sign_sign_factory(__ffi, __lib)
verify = _sign_verify_factory(__ffi, __lib)
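# Usage sketch (added; argument order is an assumption based on the factory names
# above, i.e. sign(secret_key, message) and verify(public_key, message, signature)):
#   public_key, secret_key = generate_keypair()
#   signature = sign(secret_key, b'attack at dawn')
#   assert verify(public_key, b'attack at dawn', signature)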
|
from n0s3p4ss.domain_list import SubdomainList
from n0s3p4ss.attack_surface_discoverer import discover
from n0s3p4ss.sniffer_switcher_http_status_based import apply_flow_for
def sniff(target_domains):
subdomains = SubdomainList().list_each_domain_subdomains(target_domains)
attack_surfaces = [discover(subdomain) for subdomain in subdomains]
return [
apply_flow_for(attack_surface) for attack_surface in attack_surfaces
]
|
import DBinterface as DB
import random
import datetime as dt
def print_ranking(my_ranking,ranking_size,top_or_bottom):
Tweet=""
    if top_or_bottom:
        Tweet += ("The first " + str(ranking_size) + " cities with the highest CO2 emissions due to traffic are: \r\n ")
    else:
        Tweet += ("The first " + str(ranking_size) + " cities with the lowest CO2 emissions due to traffic are: \r\n" +
                  "Congratulations!!!!! The Earth loves you :D \r\n")
for i in range(ranking_size):
Tweet += (str((i+1)) + "º " + str(my_ranking[i][0]) + " with a CO2 value of " + str(my_ranking[i][1]) + "\r\n")
return(Tweet)
def rank(api):
interface = DB.nasaDBinterface()
ranking_size = random.randint(2,10)
top_or_bottom = random.choice([True, False])
my_ranking = interface.getranking(ranking_size, top_or_bottom)
Tweet=print_ranking(my_ranking,ranking_size,top_or_bottom)
api.update_status(status=Tweet)
def leer_hashtag(T):
L=list(T)
L.append(" ")
for a in range(len(L)):
if L[a]=="#":
a=a+1
ht=[]
while L[a]!=" ":
ht.append(L[a])
a=a+1
ht_salida= ""
for e in ht:
ht_salida += e
return ht_salida
def get_city(TEXT):
L=TEXT.split()
c=""
ciudad=""
for a in range(len(L)):
if L[a]=="#consulta":
break
if L[a]=="City:":
for i in range(len(L)-a-2):
c += L[a+i+1] + " "
x=c.split()
for i in range(len(x)-1):
ciudad += x[i]+" "
if len(x) != 1:
ciudad += x[len(x)-1]
return ciudad.lower()
|
from experiments.experiments.PubIntegBackground import PubIntegBackground
import numpy as np
if __name__ == "__main__":
for i in np.arange(0.0, 10.0, 0.1):
PubIntegBackground(correlation=False, listing=True, pub='None', intensity=i)
|
import logging
from typing import Any, Dict, List, Union
import bleach
import cssutils
import markdown
from django.conf import settings
from django.core.mail import EmailMultiAlternatives, get_connection
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from i18nfield.strings import LazyI18nString
from inlinestyler.utils import inline_css
from pretix.base.i18n import language
from pretix.base.models import Event, InvoiceAddress, Order
from pretix.celery_app import app
from pretix.multidomain.urlreverse import build_absolute_uri
logger = logging.getLogger('pretix.base.mail')
INVALID_ADDRESS = 'invalid-pretix-mail-address'
cssutils.log.setLevel(logging.CRITICAL)
class TolerantDict(dict):
def __missing__(self, key):
return key
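# Added note: TolerantDict is used with str.format_map() in render_mail() below, so an
# unknown placeholder is replaced by its own key name (e.g. '{foo}' becomes 'foo')
# instead of raising KeyError when a template references a variable not in `context`.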
class SendMailException(Exception):
pass
def mail(email: str, subject: str, template: Union[str, LazyI18nString],
context: Dict[str, Any]=None, event: Event=None, locale: str=None,
order: Order=None, headers: dict=None, sender: str=None):
"""
Sends out an email to a user. The mail will be sent synchronously or asynchronously depending on the installation.
:param email: The email address of the recipient
    :param subject: The email subject. Should be localized to the recipient's locale or a lazy object that will be
localized by being casted to a string.
:param template: The filename of a template to be used. It will be rendered with the locale given in the locale
argument and the context given in the next argument. Alternatively, you can pass a LazyI18nString and
``context`` will be used as the argument to a Python ``.format_map()`` call on the template.
:param context: The context for rendering the template (see ``template`` parameter)
:param event: The event this email is related to (optional). If set, this will be used to determine the sender,
a possible prefix for the subject and the SMTP server that should be used to send this email.
:param order: The order this email is related to (optional). If set, this will be used to include a link to the
order below the email.
:param headers: A dict of custom mail headers to add to the mail
:param locale: The locale to be used while evaluating the subject and the template
:param sender: Set the sender email address. If not set and ``event`` is set, the event's default will be used,
otherwise the system default.
    :raises SendMailException: on obvious, immediate failures. Not raising an exception does not necessarily mean
that the email has been sent, just that it has been queued by the email backend.
"""
if email == INVALID_ADDRESS:
return
headers = headers or {}
with language(locale):
if isinstance(context, dict) and order:
try:
context.update({
'invoice_name': order.invoice_address.name,
'invoice_company': order.invoice_address.company
})
except InvoiceAddress.DoesNotExist:
context.update({
'invoice_name': '',
'invoice_company': ''
})
body, body_md = render_mail(template, context)
sender = sender or (event.settings.get('mail_from') if event else settings.MAIL_FROM)
subject = str(subject)
body_plain = body
htmlctx = {
'site': settings.PRETIX_INSTANCE_NAME,
'site_url': settings.SITE_URL,
'body': body_md,
'color': '#8E44B3'
}
if event:
htmlctx['event'] = event
htmlctx['color'] = event.settings.primary_color
if event.settings.mail_from == settings.DEFAULT_FROM_EMAIL and event.settings.contact_mail:
headers['Reply-To'] = event.settings.contact_mail
prefix = event.settings.get('mail_prefix')
if prefix:
subject = "[%s] %s" % (prefix, subject)
body_plain += "\r\n\r\n-- \r\n"
signature = str(event.settings.get('mail_text_signature'))
if signature:
signature = signature.format(event=event.name)
signature_md = signature.replace('\n', '<br>\n')
signature_md = bleach.linkify(bleach.clean(markdown.markdown(signature_md), tags=bleach.ALLOWED_TAGS + ['p', 'br']))
htmlctx['signature'] = signature_md
body_plain += signature
body_plain += "\r\n\r\n-- \r\n"
if order:
body_plain += _(
"You are receiving this email because you placed an order for {event}."
).format(event=event.name)
htmlctx['order'] = order
body_plain += "\r\n"
body_plain += _(
"You can view your order details at the following URL:\n{orderurl}."
).replace("\n", "\r\n").format(
event=event.name, orderurl=build_absolute_uri(
order.event, 'presale:event.order', kwargs={
'order': order.code,
'secret': order.secret
}
)
)
body_plain += "\r\n"
tpl = get_template('pretixbase/email/plainwrapper.html')
body_html = tpl.render(htmlctx)
return mail_send([email], subject, body_plain, body_html, sender, event.id if event else None, headers)
@app.task
def mail_send_task(to: List[str], subject: str, body: str, html: str, sender: str,
event: int=None, headers: dict=None, bcc: List[str]=None) -> bool:
email = EmailMultiAlternatives(subject, body, sender, to=to, bcc=bcc, headers=headers)
if html is not None:
email.attach_alternative(inline_css(html), "text/html")
if event:
event = Event.objects.get(id=event)
backend = event.get_mail_backend()
else:
backend = get_connection(fail_silently=False)
try:
backend.send_messages([email])
except Exception:
logger.exception('Error sending email')
raise SendMailException('Failed to send an email to {}.'.format(to))
def mail_send(*args, **kwargs):
mail_send_task.apply_async(args=args, kwargs=kwargs)
def render_mail(template, context):
if isinstance(template, LazyI18nString):
body = str(template)
if context:
body = body.format_map(TolerantDict(context))
body_md = bleach.linkify(bleach.clean(markdown.markdown(body), tags=bleach.ALLOWED_TAGS + [
'p', 'pre'
]))
else:
tpl = get_template(template)
body = tpl.render(context)
body_md = bleach.linkify(markdown.markdown(body))
return body, body_md
|
# Time: O(n)
# Space: O(1)
class Solution(object):
# @param {integer[]} nums
# @return {integer[]}
def productExceptSelf(self, nums):
if not nums:
return []
left_product = [1 for _ in xrange(len(nums))]
for i in xrange(1, len(nums)):
left_product[i] = left_product[i - 1] * nums[i - 1]
right_product = 1
for i in xrange(len(nums) - 2, -1, -1):
right_product *= nums[i + 1]
left_product[i] = left_product[i] * right_product
return left_product
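# Example (added): productExceptSelf([1, 2, 3, 4]) -> [24, 12, 8, 6].
# left_product first holds the prefix products [1, 1, 2, 6]; the second pass folds a
# running suffix product in from the right, keeping extra space at O(1).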
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron_lib.api import converters
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as provider
from neutron_lib.api import extensions
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib.services.trunk import constants
from neutron._i18n import _
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import utils
# This layer is introduced for keeping business logic and
# data persistence decoupled.
def trunk_can_be_managed(context, trunk):
"""Validate that the trunk can be managed."""
if not trunk.admin_state_up:
raise trunk_exc.TrunkDisabled(trunk_id=trunk.id)
def enforce_port_deletion_rules(resource, event, trigger, payload=None):
"""Prohibit the deletion of a port that's used in a trunk."""
# NOTE: the ML2 plugin properly catches these exceptions when raised, but
# non-ML2 plugins might not. To address this we should move the callback
# registry notification emitted in the ML2 plugin's delete_port() higher
# up in the plugin hierarchy.
context = payload.context
port_id = payload.resource_id
subport_obj = trunk_objects.SubPort.get_object(context, port_id=port_id)
if subport_obj:
raise trunk_exc.PortInUseAsSubPort(port_id=port_id,
trunk_id=subport_obj.trunk_id)
trunk_obj = trunk_objects.Trunk.get_object(context, port_id=port_id)
if trunk_obj:
raise trunk_exc.PortInUseAsTrunkParent(port_id=port_id,
trunk_id=trunk_obj.id)
class TrunkPortValidator(object):
def __init__(self, port_id):
self.port_id = port_id
self._port = None
def validate(self, context, parent_port=True):
"""Validate that the port can be used in a trunk.
:param parent_port: True if the port is intended for use
as parent in a trunk.
"""
# TODO(tidwellr): there is a chance of a race between the
# time these checks are performed and the time the trunk
# creation is executed. To be revisited, if it bites.
# Validate that the given port_id is not used by a subport.
subports = trunk_objects.SubPort.get_objects(
context, port_id=self.port_id)
if subports:
raise trunk_exc.TrunkPortInUse(port_id=self.port_id)
# Validate that the given port_id is not used by a trunk.
trunks = trunk_objects.Trunk.get_objects(context, port_id=self.port_id)
if trunks:
raise trunk_exc.ParentPortInUse(port_id=self.port_id)
if parent_port:
# if the port is being used as a parent in a trunk, check if
# it can be trunked, i.e. if it is already associated to physical
# resources (namely it is bound). Bound ports may be used as
# trunk parents, but that depends on the underlying driver in
# charge.
if not self.can_be_trunked_or_untrunked(context):
raise trunk_exc.ParentPortInUse(port_id=self.port_id)
else:
# if the port is being used as subport in a trunk, check if it is a
# port that is not actively used for other purposes, e.g. a router
# port, compute port, DHCP port etc. We have no clue what the side
# effects of connecting the port to a trunk would be, and it is
# better to err on the side of caution and prevent the operation.
self.check_not_in_use(context)
return self.port_id
def is_bound(self, context):
"""Return true if the port is bound, false otherwise."""
# Validate that the given port_id does not have a port binding.
core_plugin = directory.get_plugin()
self._port = core_plugin.get_port(context, self.port_id)
return bool(self._port.get(portbindings.HOST_ID))
def can_be_trunked_or_untrunked(self, context):
""""Return true if a port can be trunked."""
if not self.is_bound(context):
# An unbound port can be trunked, always.
return True
trunk_plugin = directory.get_plugin('trunk')
vif_type = self._port.get(portbindings.VIF_TYPE)
binding_host = self._port.get(portbindings.HOST_ID)
# Determine the driver that will be in charge of the trunk: this
# can be determined based on the vif type, whether or not the
# driver is agent-based, and whether the host is running the agent
# associated to the driver itself.
host_agent_types = utils.get_agent_types_by_host(context, binding_host)
drivers = [
driver for driver in trunk_plugin.registered_drivers
if utils.is_driver_compatible(
context, driver, vif_type, host_agent_types)
]
if len(drivers) > 1:
raise trunk_exc.TrunkPluginDriverConflict()
elif len(drivers) == 1:
return drivers[0].can_trunk_bound_port
else:
return False
def check_not_in_use(self, context):
"""Raises PortInUse for ports assigned for device purposes."""
core_plugin = directory.get_plugin()
self._port = core_plugin.get_port(context, self.port_id)
# NOTE(armax): the trunk extension itself does not make use of the
# device_id field, because it has no reason to. If need be, this
# check can be altered to accommodate the change in logic.
if self._port['device_id']:
raise n_exc.PortInUse(net_id=self._port['network_id'],
port_id=self._port['id'],
device_id=self._port['device_id'])
class SubPortsValidator(object):
def __init__(self, segmentation_types, subports, trunk_port_id=None):
self._segmentation_types = segmentation_types
self.subports = subports
self.trunk_port_id = trunk_port_id
def validate(self, context,
basic_validation=False, trunk_validation=True):
"""Validate that subports can be used in a trunk."""
# Perform basic validation on subports, in case subports
# are not automatically screened by the API layer.
if basic_validation:
msg = validators.validate_subports(self.subports)
if msg:
raise n_exc.InvalidInput(error_message=msg)
if trunk_validation:
trunk_port_mtu = self._get_port_mtu(context, self.trunk_port_id)
subport_mtus = self._prepare_subports(context)
return [self._validate(context, s, trunk_port_mtu, subport_mtus)
for s in self.subports]
else:
return self.subports
def _prepare_subports(self, context):
"""Utility method to parse subports in the request
The objective of this method is two-fold:
* Update subports' segmentation details if INHERIT is requested;
* Return the MTU for each subport in the request.
This method does two things rather than one to allow us to hit the DB
once, and thus minimize the number of lookups required to learn about
the segmentation type and the MTU of the networks on which subports
are plugged.
"""
InheritIndex = (
collections.namedtuple("InheritIndex", "index has_inherit"))
port_ids = {}
any_has_inherit = False
for i, s in enumerate(self.subports):
has_inherit = (s.get('segmentation_type') ==
constants.SEGMENTATION_TYPE_INHERIT)
any_has_inherit |= has_inherit
port_ids[s['port_id']] = (
InheritIndex(index=i, has_inherit=has_inherit))
core_plugin = directory.get_plugin()
if (any_has_inherit and
not extensions.is_extension_supported(
core_plugin, provider.ALIAS)):
msg = (_("Cannot accept segmentation type %s") %
constants.SEGMENTATION_TYPE_INHERIT)
raise n_exc.InvalidInput(error_message=msg)
ports = core_plugin.get_ports(context, filters={'id': port_ids})
network_port_map = collections.defaultdict(list)
for p in ports:
network_port_map[p['network_id']].append({'port_id': p['id']})
networks = core_plugin.get_networks(
context.elevated(), filters={'id': network_port_map})
subport_mtus = {}
for net in networks:
for port in network_port_map[net['id']]:
if port_ids[port['port_id']].has_inherit:
port.update(
{'segmentation_id': net[provider.SEGMENTATION_ID],
'segmentation_type': net[provider.NETWORK_TYPE]})
self.subports[port_ids[port['port_id']].index] = port
# To speed up the request, record the network MTU for each
# subport to avoid hitting the DB more than necessary. Do
# that only if the extension is available.
if extensions.is_extension_supported(core_plugin, 'net-mtu'):
subport_mtus[port['port_id']] = net[api.MTU]
return subport_mtus
def _get_port_mtu(self, context, port_id):
"""Get port MTU
Return the MTU of the network the given port belongs to.
If the network or port cannot be obtained, or if MTU is not defined,
returns None.
"""
core_plugin = directory.get_plugin()
if not extensions.is_extension_supported(core_plugin, 'net-mtu'):
return
try:
port = core_plugin.get_port(context, port_id)
return core_plugin.get_network(
context, port['network_id'])[api.MTU]
except (n_exc.PortNotFound, n_exc.NetworkNotFound):
# A concurrent request might have made the port or network
# disappear; though during DB insertion, the subport request
# will fail on integrity constraint, it is safer to return
# a None MTU here.
return
def _raise_subport_is_parent_port(self, context, subport):
if subport['port_id'] == self.trunk_port_id:
raise trunk_exc.ParentPortInUse(port_id=subport['port_id'])
def _raise_subport_invalid_mtu(self, context, subport, trunk_port_mtu,
subport_mtus):
# Check MTU sanity - subport MTU must not exceed trunk MTU.
# If for whatever reason trunk_port_mtu is not available,
# the MTU sanity check cannot be enforced.
if trunk_port_mtu:
# a missing MTU for a subport is not an error condition: the
# subport UUID may be invalid or non-existent.
subport_mtu = subport_mtus.get(subport['port_id'])
if subport_mtu and subport_mtu > trunk_port_mtu:
raise trunk_exc.SubPortMtuGreaterThanTrunkPortMtu(
port_id=subport['port_id'],
port_mtu=subport_mtu,
trunk_id=self.trunk_port_id,
trunk_mtu=trunk_port_mtu
)
def _raise_if_segmentation_details_missing(self, subport):
try:
segmentation_type = subport["segmentation_type"]
segmentation_id = (
converters.convert_to_int(subport["segmentation_id"]))
return (segmentation_type, segmentation_id)
except KeyError:
msg = _("Invalid subport details '%s': missing segmentation "
"information. Must specify both segmentation_id and "
"segmentation_type") % subport
raise n_exc.InvalidInput(error_message=msg)
except n_exc.InvalidInput:
msg = _("Invalid subport details: segmentation_id '%s' is "
"not an integer") % subport["segmentation_id"]
raise n_exc.InvalidInput(error_message=msg)
def _raise_if_segmentation_details_invalid(self,
segmentation_type,
segmentation_id):
if segmentation_type not in self._segmentation_types:
msg = _("Unknown segmentation_type '%s'") % segmentation_type
raise n_exc.InvalidInput(error_message=msg)
if not self._segmentation_types[segmentation_type](segmentation_id):
msg = _("Segmentation ID '%s' is not in range") % segmentation_id
raise n_exc.InvalidInput(error_message=msg)
def _raise_if_subport_is_used_in_other_trunk(self, context, subport):
trunk_validator = TrunkPortValidator(subport['port_id'])
trunk_validator.validate(context, parent_port=False)
def _validate(self, context, subport, trunk_port_mtu, subport_mtus):
self._raise_subport_is_parent_port(context, subport)
self._raise_subport_invalid_mtu(
context, subport, trunk_port_mtu, subport_mtus)
segmentation_type, segmentation_id = (
self._raise_if_segmentation_details_missing(subport))
self._raise_if_segmentation_details_invalid(
segmentation_type, segmentation_id)
self._raise_if_subport_is_used_in_other_trunk(context, subport)
return subport
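# A minimal usage sketch, not part of the module above (hypothetical variable
# names; in Neutron the segmentation-type registry is populated by the loaded
# trunk drivers and a real request context is required, so this is illustrative
# only):
#
#     segmentation_types = {'vlan': lambda seg_id: 1 <= seg_id <= 4094}
#     validator = SubPortsValidator(
#         segmentation_types,
#         [{'port_id': subport_id,
#           'segmentation_type': 'vlan',
#           'segmentation_id': 100}],
#         trunk_port_id=parent_port_id)
#     subports = validator.validate(context)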
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.settings.app import Settings
class TestChangeLanguage(GaiaTestCase):
def test_change_language_settings(self):
lang_name = self.marionette.execute_script("""
var qps = window.wrappedJSObject.navigator.mozL10n.qps;
return qps['qps-ploc'].name;
""")
header = self.marionette.execute_script("""
var qps = window.wrappedJSObject.navigator.mozL10n.qps;
return qps['qps-ploc'].translate('Settings');
""")
self.data_layer.set_setting('devtools.qps.enabled', True)
settings = Settings(self.marionette)
settings.launch()
language_settings = settings.open_language_settings()
language_settings.select_language(lang_name)
self.wait_for_condition(lambda m: language_settings.current_language == 'qps-ploc')
language_settings.go_back()
# Verify that language has changed
self.wait_for_condition(lambda m: settings.header_text == header)
self.assertEqual(self.data_layer.get_setting('language.current'), "qps-ploc")
|
# else statement
# using else block after for loop
s = 0
for i in range(1, 6):
s += i
else:
print("end of for loop!")
print("sum =",s)
# using else blokc after while loop
r = n = 1
while n <= 5:
r *= n
n += 1
else:
print("end of while loop!")
print("5! = " + str(r))
if r == 3:
pass
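# The else clause runs only when the loop completes without hitting a break.
# A minimal sketch showing the difference (hypothetical search target):
target = 4
for i in range(1, 6):
    if i == target:
        print("found", i)
        break
else:
    print("not found")  # skipped here, because the loop ended via break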
|
# Generated by Django 3.0.3 on 2020-04-22 13:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0018_auto_20200422_1314'),
]
operations = [
migrations.AlterField(
model_name='user_movie',
name='insert_date',
field=models.DateTimeField(default='2020-04-22T13:20:19.335148', editable=False),
),
]
|
"""
Uses the command-line pdfinfo utility (from the poppler package) for various
small operations (e.g. getting a PDF's page count).
"""
import logging
import os
import re
import subprocess
logger = logging.getLogger(__name__)
def get_pagecount(filepath):
"""
Returns the number of pages in a PDF document as integer.
filepath - is filesystem path to a PDF document
"""
if not os.path.isfile(filepath):
raise ValueError("Filepath %s is not a file" % filepath)
if os.path.isdir(filepath):
raise ValueError("Filepath %s is a directory!" % filepath)
base, ext = os.path.splitext(filepath)
# pure images (png, jpeg) have only one page :)
if ext and ext.lower() in ('.jpeg', '.png', '.jpg'):
# whatever png/jpg image is there - it is
# considered by default one page document.
return 1
if ext and ext.lower() not in ('.pdf',):
raise ValueError(
"Only jpeg, png and pdf are handlerd by this"
" method"
)
# pdfinfo "${PDFFILE}" | grep Pages
cmd = ["/usr/bin/pdfinfo", filepath]
compl = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
if compl.returncode:
logger.error(
"get_pagecount: cmd=%s args=%s stdout=%s stderr=%s code=%s",
cmd,
compl.args,
compl.stdout,
compl.stderr,
compl.returncode,
stack_info=True
)
raise Exception("Error occured while getting document page count.")
lines = compl.stdout.decode('utf-8').split('\n')
# look for the line containing e.g. "Pages: 11"
for line in lines:
x = re.match(r"Pages:\W+(\d+)$", line.strip())
if x:
return int(x.group(1))
return 0
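# A minimal usage sketch (hypothetical invocation; assumes the poppler
# pdfinfo binary is installed at /usr/bin/pdfinfo, as get_pagecount expects):
if __name__ == "__main__":
    import sys
    # e.g. python pagecount.py /path/to/document.pdf
    print(get_pagecount(sys.argv[1]))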
|
import glob
import pyclass
from sicparse import OptionParser
import sys
def main():
if len(sys.argv) != 3:
print("not enough arguments")
return
#
if (not pyclass.gotgdict()):
pyclass.get(verbose=False)
#
sys.argv = [arg.replace("\"","") for arg in sys.argv]
string_found = sys.argv[1] in sys.argv[2]
#
Sic.comm("let kosma%is_string_found {0}".format(str(string_found)))
if string_found:
Sic.comm('let kosma%string_found {0}'.format(sys.argv[1]))
else:
Sic.comm('let kosma%string_found "{0}"'.format(" "))
#
return
if __name__ == "__main__":
main()
|
from .models import Restriction
from django.db.models.signals import post_save
from django.dispatch import receiver
@receiver(post_save, sender=Restriction)
def post_save_restriction(sender, **kwargs):
msg = "worked"
pass
|
#!/usr/bin/python
from flask import Flask, request, flash, redirect, render_template, jsonify
from flaskext.mysql import MySQL
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
import twilio.twiml
import random
import requests
import json
import omdb
from googleplaces import GooglePlaces, types, lang
from microsofttranslator import Translator
from yahoo_finance import Share
from twilio.rest import TwilioRestClient
gp_api_key = 'AIzaSyAX_75N29J--rh3Qj9gXjMBVx9IuD_Um74'
google_places = GooglePlaces(gp_api_key)
bing_api_key = 'oeToVPEyRZIASRK2n2byOU1x0EMatLIpd8kCIvwXmMw'
# Credentials owner: avikantsainidbz@gmail.com
# Find these values at https://twilio.com/user/account
twilio_account_sid = "ACab3e465e67051257d227bf49a3c9a58e"
twilio_auth_token = "ca96731e12b0442bcf5b1c8f7dedc58d"
admin_phone = "+918095138333"
# admin_phone = "+918095718111"
# Returns JSON-formatted data with an HTTP status code
def dataFormatter(code, message, data):
resp = jsonify({
'code': code,
'message': message,
'data': data
})
resp.status_code = code
return resp
def get_verify_name(id, s, e):
verify_url = "http://api.tvmaze.com/shows/" + str(id) + "/episodebynumber?season=" + str(s) + "&number=" + str(e)
resp = requests.get(verify_url)
j = json.loads(resp.text)
name = j['name']
return name
test_mode = False
app = Flask(__name__)
app.config.from_object('config')
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'b4dea37336a229'
app.config['MYSQL_DATABASE_PASSWORD'] = '423dbfab'
app.config['MYSQL_DATABASE_DB'] = 'heroku_d5dd20eac082bba'
app.config['MYSQL_DATABASE_HOST'] = 'us-cdbr-iron-east-03.cleardb.net'
mysql.init_app(app)
# Main route
class SMSForm(Form):
phone_number = StringField('phone_number', validators=[DataRequired()])
query_string = StringField('query_string', validators=[DataRequired()])
# password_field = PasswordField('password_field', validators=[DataRequired()])
@app.route("/", methods=['GET', 'POST'])
def home_page():
form = SMSForm()
if form.validate_on_submit():
query = str(form.query_string.data)
number = str(form.phone_number.data)
# password = str(form.password_field.data)
# if password == get_verify_name(2, 4, 2):
print("Sending sms to " + number + " with query \'" + query + "\'.")
# message = process_query(query)
message = ""
if query.lower().startswith('subscribe'):
print("Subscribing...")
words = query.split()
ph_no = words[1]
city = words[2]
state = ""
for w in words[3:]:
state = state + w
subscriptions(ph_no, city.lower(), state.lower())
message = "Successfully subscribed to emergency services. Thank you for using hello_friend."
else:
message = process_query(query)
send_sms_to_number(message, number)
flash("Sent SMS to " + number + ": \'" + message + "\'.")
# else:
# flash("Invalid secret code, admins are not pleased.")
return render_template('index.html', form=form, number=number, query=query, showdetails=False)
return render_template('index.html', form=form, showdetails=True)
class EmergencyForm(Form):
message_field = StringField('message_field', validators=[DataRequired()])
location_field = StringField('location_field', validators=[DataRequired()])
class EmergencyForm2(Form):
phone_field = StringField('phone_field')
city_field = StringField('city_field')
state_field = StringField('state_field')
@app.route("/emergency/", methods=['GET', 'POST'])
def emergency_page():
form = EmergencyForm()
if form.validate_on_submit():
message = str(form.message_field.data)
state = str(form.location_field.data)
print("Broadcasting SMSs to people in state " + str(state))
# Send SMS to people here...
conn = mysql.connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT ph_no FROM subscribers WHERE state = %s", (state))
data = cursor.fetchall()
for value in data:
phone_no = value[0]
print("Sending Broadcast message to " + str(phone_no));
send_sms_to_number(message, str(phone_no))
cursor.close()
conn.close()
except:
cursor.close()
conn.close()
return render_template('emergency.html', form=form, showdetails=False)
form2 = EmergencyForm2()
if form2.validate_on_submit():
phone = str(form2.phone_field.data)
city = str(form2.city_field.data)
state = str(form2.state_field.data)
print("Adding subscription")
subscriptions(phone, city, state)
flash("Successfully subscribed to emergency services. Thank you for using hello_friend.")
return render_template('emergency.html', form=form, showdetails=True)
@app.route("/emergency_list/", methods=['GET'])
def emergency_list():
conn = mysql.connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT * FROM subscribers")
values = cursor.fetchall()
data = []
for value in values:
d = [value[0], value[1], value[2]]
data.append(d)
return dataFormatter(200, "LEL", data)
except:
return dataFormatter(400, "LEL", [])
@app.route("/add_s", methods=['GET', 'POST'])
def add_subscription():
form2 = EmergencyForm2()
if form2.validate_on_submit():
phone = str(form2.phone_field.data)
city = str(form2.city_field.data)
state = str(form2.state_field.data)
print("Adding subscription")
subscriptions(phone, city, state)
flash("Successfully subscribed to emergency services. Thank you for using hello_friend.")
return render_template('add.html', form2=form2, showdetails=True)
# Test routes
def send_sms_to_number(message, number):
client = TwilioRestClient(twilio_account_sid, twilio_auth_token)
message = client.messages.create(to=number, from_="+13609001701", body=message)
def send_sms_to_admin(message):
send_sms_to_number(message, admin_phone)
# Test routing to specific phone number
@app.route("/test_phone/<phone>", methods=['POST'])
def test_method(phone):
try:
query = request.form.get('query')
msg = process_query(query)
send_sms_to_number(str(msg), phone)
return "Message \'\'\'" + str(msg) + "\'\'\' sent to " + str(phone) + ".\n"
except:
return "Failed to send message. :(\n"
# Main routes
noIntent = [
"I'm having trouble understanding you, could you rephrase your question?",
"I didn't catch that, could you rephrase your query?",
"Sorry, I didn't understand that. Try rephrasing your request."
]
examples = [
"Navigate from Lucknow to Kanpur",
"Will it rain in New York today",
"SOS Whitefield, Bangalore",
"Translate \'Have you gone crazy\'' to german",
"How do you say Madrid I'm finally here in spanish",
"imdb inception",
"stocks AAPL",
"atm near rajendra nagar hyderabad",
"Define Hitler",
"Show me sports news",
"Directions from Lucknow to Kanpur",
]
technicalIssues = [
"Looks like we are facing technical difficulties, please try again in sometime.",
"Looks like the server is taking to long to respond, please try again in sometime.",
"Looks like we have too many requests to handle at the moment, please try again in sometime.",
"Our monkeys are fixing some bugs in the server, please try again in sometime."
]
@app.route("/no_intent", methods=['POST'])
def no_intent():
message = random.choice(noIntent)
message += "\n\nDid you know you can try something like: \"" + random.choice(examples) + "\"\n\n- hello_friend."
return message
@app.route("/network_error", methods=['POST'])
def technical_issues():
message = random.choice(technicalIssues)
message += "\n\nDid you know you can try something like: \"" + random.choice(examples) + "\"\n\n- hello_friend."
return message
@app.route("/sos", methods=["POST"])
def sos(dict_response):
message = ""
# try:
query_text = dict_response["_text"].lower()
# remove sos prefix and clean location string
issos = False
if query_text.find("sos ") != -1:
query_text = query_text[4:]
issos = True
if query_text.find(" sos") != -1:
query_text = query_text[:-4]
issos = True
if query_text.find("help ") != -1:
query_text = query_text[5:]
if query_text.find(" help") != -1:
query_text = query_text[:-5]
query_result = google_places.nearby_search(location=query_text, keyword='hospital', radius=5000, types=[types.TYPE_HOSPITAL])
number_of_places = 0
message = "Nearby hospitals: \n"
for place in query_result.places:
if number_of_places < 3:
number_of_places += 1
message += place.name
place_info = place.get_details()
message += ", Ph: " + place.local_phone_number + "\n"
else:
break
if issos:
query_result = google_places.nearby_search(location=query_text, keyword='police', radius=5000, types=[types.TYPE_POLICE])
if len(query_result.places) > 0:
place = query_result.places[0]
place.get_details()
message += "\nNearest police station: " + place.name
message += ", Ph: " + place.local_phone_number + "\n"
# except:
# message = technical_issues()
return message
@app.route("/weather", methods=['POST'])
def weather(entities):
message = ""
try:
try:
location = entities['location'][0]['value'].lower()
except:
location = entities['local_search_query'][0]['value']
response = requests.get(url="http://api.openweathermap.org/data/2.5/weather?q=" + location + "&APPID=500d01a6ece6498b1cbf94ed23519119")
dict_response = json.loads(response.text)
temperature_in_celsius = round(dict_response['main']['temp'] - 273.15, 2)
humidity = dict_response['main']['humidity']
weather_description = dict_response['weather'][0]['description']
message = "The weather in " + location + ": " + weather_description + ". "
message += "Average: " + str(temperature_in_celsius) + " C, "
message += "Humidity: " + str(humidity) + "%"
try:
wind_speed = dict_response['wind']['speed']
message += ", Wind: " + str(wind_speed) + " km/h"
except:
message += "."
except:
message = technical_issues()
return message
@app.route("/navigate", methods=['POST'])
def navigate(entities):
try:
try:
destination = entities['to'][0]['value']
except:
destination = entities['search_query'][0]['value']
try:
origin = entities['from'][0]['value']
except:
try:
origin = entities['local_search_query'][0]['value']
except:
origin = entities['location'][0]['value']
print("Navigating from " + origin + " to " + destination + ".")
message = "Directions from " + origin + " to " + destination + ":\n\n"
key = "GSC5hkB0CEmUyk4nI2MY~HxNEzo1P1bHB1sX8EzDJpA~AmYeCHqvBerEI06DBSKWfo4pgB1w9Krgk7EH6lhGqqf3s5RaJArOzWJ-SL6AYVVw"
try:
try:
bingMapsResponse = requests.get(url="http://dev.virtualearth.net/REST/V1/Routes/Driving?wp.0=" + origin + "&wp.1=" + destination + "&avoid=minimizeTolls&key="+key)
bingMaps_dict = json.loads(bingMapsResponse.text)
except:
message = network_error()
return message
print(bingMaps_dict)
resources = bingMaps_dict.get('resourceSets')[0].get('resources')
routeLegs = resources[0].get('routeLegs')
distance = routeLegs[0].get('routeSubLegs')[0].get('travelDistance')
message += "Total Trip Distance: " + str(distance) + " km\n"
duration = routeLegs[0].get('routeSubLegs')[0].get('travelDuration')
message += "Total Trip Duration: " + str(duration/60) + " min \n"
itineraryItems = routeLegs[0].get('itineraryItems')
count = 1
for item in itineraryItems:
message += str(count) + ". " + item.get('instruction').get('text') + " ("
message += str(item.get('travelDistance')) + " km, "
message += str(item.get('travelDuration') / 60 ) + " min)"
message += "\n"
count +=1
except:
message = "We could not find a route from " + origin + " to " + destination + ". Please bear with us as we try to resolve this issue."
# Precaution
if len(message) > 1536:
message = message[:1533] + "..."
except:
message = technical_issues()
return message
@app.route("/translate", methods=['POST'])
def translate(entities):
message = ""
try:
text_for_translation = entities['phrase_to_translate'][0]['value']
lang = entities['language'][0]['value'].lower()
language = ""
if lang == "spanish":
language = "es"
elif lang == "french":
language = "fr"
elif lang == "german":
language = "de"
elif lang == "chinese":
language = "zh-CHS"
else:
message = "We don't support translation to " + lang + " as of now. Check back later as support is being added."
return message
message = "\"" + text_for_translation + "\" in " + lang + " is \'"
translator = Translator('SMSAssistant', 'fhV+AdYFiK0QfQ4PFys+oQ/T0xiBBVQa32kxxbP55Ks=')
message += translator.translate(text_for_translation, language) + "\'"
if test_mode:
send_sms_to_admin(message)
except:
message = technical_issues()
return message
@app.route("/news", methods=['POST'])
def getNews(entities):
message = ""
try:
try:
newstopic = entities['news_topic'][0]['value'].lower()
# default topic
if newstopic is None:
newstopic = "world"
except:
newstopic = "world"
response = requests.get(url='https://api.datamarket.azure.com/Bing/Search/News?$format=json&Query=%27' + newstopic + "%27", \
auth=(bing_api_key, bing_api_key))
news_dict = json.loads(response.text)
news = news_dict.get('d').get('results')
message = ""
if len(news) >= 5:
message = "Here are the top 5 stories about " + newstopic + ":\n"
for x in range(0, 5):
message += str(x+1) + ". " + news[x].get('Title') + ".\n"
else:
message = "Here are the top news stories about " + newstopic + ":\n"
for item in news:
message += "- " + item.get('Title') + "\n"
if test_mode:
send_sms_to_admin(message)
except:
message = technical_issues()
return message
@app.route("/imdb", methods=['POST'])
def imdb(dict_response):
message = ""
try:
query_text = dict_response['_text'].lower()
if query_text.find("imdb ") != -1:
query_text = query_text[5:]
response = omdb.request(t='' + query_text + '', r='json')
data = json.loads(response.text)
mediatype = data["Type"]
year = data["Year"]
title = data["Title"]
if mediatype == "movie":
message += "Found a Movie, \"" + title + "\" (" + year + ")\n"
elif mediatype == "series":
message += "Found a TV show, \"" + title + "\" (" + year + ")\n"
for key in data:
if key in ["Rated", "Runtime", "Genre", "Director", "Writer"]:
if data[key] != "N/A":
message += key + ": " + data[key] + "\n"
if key == "imdbRating":
message += "IMDB: " + data[key] + "\n"
if data["Plot"] != "N/A":
message += "Plot: " + data["Plot"]
except:
message = technical_issues()
return message
@app.route("/stocks", methods=['POST'])
def stocks(dict_response):
message = ""
try:
query_text = dict_response['_text'].lower()
if query_text.find("stocks ") != -1:
query_text = query_text[7:]
y = Share(query_text)
message += "Trading information for " + y.get_name() + " (" + query_text + ") :\n"
message += "Opened: " + y.get_open() + "\n"
message += "Current: " + y.get_price() + "\n"
message += "Earnings share: " + y.get_earnings_share() + "\n"
message += "Short ratio: " + y.get_short_ratio() + "\n"
message += "Previous close: " + y.get_prev_close() + "\n"
except:
message = technical_issues()
return message
@app.route("/atm", methods=['POST'])
def atm(dict_response):
message = ""
try:
query_text = dict_response['_text'].lower()
if query_text.find("atm near ") != -1:
query_text = query_text[9:]
query_result = google_places.nearby_search(location=query_text, keyword='atm', radius=5000, types=[types.TYPE_ATM])
number_of_places = 0
message = "ATM's near \'" + query_text + "\':\n"
for place in query_result.places:
if number_of_places < 5:
number_of_places += 1
message = message + place.name
place_info = place.get_details()
if place.local_phone_number is not None:
message = message + " " + place.local_phone_number
message = message + "\n"
else:
break
except:
message = technical_issues()
return message
@app.route("/define", methods=['POST'])
def define(dict_response):
message = ""
try:
query_text = dict_response['_text'].lower()
if query_text.find("define ") != -1:
topic = query_text[7:]
r = requests.get(url='http://api.duckduckgo.com/?q=' + topic + '&format=json&pretty=1')
message = ""
topic_response = json.loads(r.text)
all_definitions = topic_response['RelatedTopics']
if len(all_definitions) > 0:
top_definitions = all_definitions[0]
definition = top_definitions['Text']
message = "\"" + topic + "\": " + definition
else:
message = "Definition for " + topic + " was not found. We're working on this."
except:
message = technical_issues()
return message
def subscriptions(ph_no, city, state):
conn = mysql.connect()
try:
cursor = conn.cursor()
cursor.execute("INSERT INTO subscribers VALUES (%s, %s, %s)", (ph_no, city, state))
conn.commit()
cursor.close()
conn.close()
except:
cursor.close()
conn.close()
# Main SMS webhook
def process_query(query):
msg = ""
try:
response = requests.get(url='https://api.wit.ai/message?v=20161022&q='+query,headers={'Authorization': 'Bearer TUDKLORVVMITDT4FCJFMAARQAWB2NLJ2'})
except:
msg = technical_issues()
return msg
dict_response = json.loads(response.text)
print(dict_response)
intent = None
confidence = None
entities = None
msg = None
try:
if dict_response['entities']['intent']:
intent = dict_response['entities']['intent'][0]['value']
confidence = dict_response['entities']['intent'][0]['confidence']
entities = dict_response['entities']
print("Entities: ")
print(entities)
except:
msg = no_intent()
return msg
if intent is None or confidence < 0.2:
msg = no_intent()
elif intent == "weather":
msg = weather(entities)
elif intent == "navigate":
msg = navigate(entities)
elif intent == "sos":
msg = sos(dict_response)
elif intent == "translate":
msg = translate(entities)
elif intent == "news":
msg = getNews(entities)
elif intent == "imdb":
msg = imdb(dict_response)
elif intent == "atm":
msg = atm(dict_response)
elif intent == "stocks":
msg = stocks(dict_response)
elif intent == "define":
msg = define(dict_response)
else:
msg = "Feature not supported"
return msg
@app.route("/sms", methods=['POST'])
def sms():
query = request.values.get('Body', None)
resp = twilio.twiml.Response()
msg = ""
if query.lower().startswith('subscribe'):
print("Subscribing...")
words = query.split()
ph_no = words[1]
city = words[2]
state = ""
for w in words[3:]:
state = state + w
subscriptions(ph_no, city.lower(), state.lower())
msg = "Successfully subscribed to emergency services. Thank you for using hello_friend."
else:
msg = process_query(query)
if test_mode:
send_sms_to_admin(query)
resp.message(msg)
return str(resp)
# ------
# Update the json file.
def saveFile():
with open('data/voice.json', 'w') as outfile:
json.dump(data, outfile)
# Open the given json file in data.
try:
with open('data/voice.json') as data_file:
data = json.load(data_file)
except:
data = []
saveFile()
class VoiceForm(Form):
phone_number = StringField('phone_number', validators=[DataRequired()])
title_field = StringField('title_field', validators=[DataRequired()])
password_field = PasswordField('password_field', validators=[DataRequired()])
@app.route("/voice/", methods=['GET', 'POST'])
def voice_page():
form = VoiceForm()
if form.validate_on_submit():
title = str(form.title_field.data)
number = str(form.phone_number.data)
password = str(form.password_field.data)
if password == get_verify_name(2, 4, 2):
client = TwilioRestClient(twilio_account_sid, twilio_auth_token)
routex = "http://hello-frrriend.herokuapp.com/voice/" + str(title)
call = client.calls.create(url=routex, to=number, from_="+13609001701")
flash("Rung " + number + ".")
else:
flash("Invalid secret code, admins are not pleased.")
return render_template('voice.html', form=form, number=number, title=title, showdetails=False, data=data)
return render_template('voice.html', form=form, showdetails=True, data=data)
@app.route('/voice/list', methods=['GET'])
def voice_list():
return dataFormatter(200, "Listing data", data)
@app.route('/voice/add', methods=['POST'])
def voice_add():
d = {}
title = request.values.get('title')
if title is None:
return dataFormatter(404, "Bad request, need title.", [])
d['title'] = title
message = request.values.get('message')
if message is not None:
d['message'] = message
url = request.values.get('url')
if url is not None:
d['url'] = url
p = None
for x in data:
if x['title'] == title:
p = x
if p is not None:
data.remove(p)
data.append(d)
saveFile()
return dataFormatter(201, "Updated", data)
data.append(d)
saveFile()
return dataFormatter(201, "Appended", data)
def voice_add_util(title, message, url):
d = {}
d['title'] = title
if len(message) > 0:
d['message'] = message
if len(url) > 0:
d['url'] = url
p = None
for x in data:
if x['title'] == title:
p = x
if p is not None:
data.remove(p)
data.append(d)
saveFile()
return
data.append(d)
saveFile()
@app.route('/voice/<title>', methods=['POST', 'GET'])
def voice_title(title):
d = None
for x in data:
if x['title'] == title:
d = x
break
resp = twilio.twiml.Response()
print(d)
if d is None:
resp.say("Don't talk please")
else:
try:
message = d['message']
resp.say(d['message'], voice='Alice', language='en-IN')
except:
print("No message ofr the ")
try:
url = d['url']
resp.play(d['url'])
except:
print("No url in the query")
return str(resp)
if __name__ == "__main__":
app.run(debug=True)
|
AMOUNTS = [
99999999999999999999999999999,
0x0,
0x1,
0x1000000000000000000000000,
0x30000000000000,
1000000000000000000,
0x180000000000000,
100000000000000000,
10000000000000000,
1000000000000000,
0x2,
5000000000000000,
0x20,
0x700000000000000,
0x8,
0x3c00000000000,
0xe00000000000000,
0x400000000000000000000000,
50000000000000000,
500000000000000000,
0x18000000000000,
0x3,
0x80,
0x300000000000000,
0x1000000000000000000000001,
5000000000000000000,
0x1c00000000000000,
0x4,
10000000000000000000,
0xc000000000000,
0x2000,
20000000000000000,
0x40,
200000000000000000,
2000000000000000,
0x800000000000000000000,
0x800000000000000000000000,
0x1000000000000000000000002,
0x400,
0x80000000000000,
0x100000000000000,
0xc00000000000,
0x1800000000000000000,
0x800000000000000000,
0x70000000000000,
250000000000000,
0x380000000000000,
0x8000000000000000000,
0x8000000000000000,
0x1000,
]
|
"""
Collection of functions to assist PyDoof modules.
"""
from collections.abc import Iterable
from datetime import date
from enum import Enum
def parse_query_params(params):
"""
Parses a query-parameters dictionary into its proper parameter schema.
Each key-value pair of the dictionary represents a parameter and its value;
the function parses each pair based on the value's type.
* Parses dates into a string following the "YYYYMMDD" format.
* Parses dictionaries like `parameter: {key: value}` into parameter
`parameter[key]: value`.
* Parses lists like `parameter: [val0, val1]` into parameter
`parameter[]: [val0, val1]`.
* Excludes parameters where its value is `None`.
"""
query_params = {}
for param, value in params.items():
query_params.update(
_parse_param(param, value)
)
return query_params
def _parse_param(param, value):
query_params = {}
if isinstance(value, date):
query_params[param] = value.strftime("%Y%m%d")
elif isinstance(value, dict):
for k, v in value.items():
query_params.update(
_parse_param(f'{param}[{k}]', v)
)
elif isinstance(value, Enum):
query_params[param] = value.value
elif not isinstance(value, str) and isinstance(value, Iterable):
query_params.update(
_dicts_appends(_parse_param(f'{param}[]', v) for v in value)
)
elif value is not None:
query_params[param] = value
return query_params
def _dicts_appends(dicts):
dict_join = {}
for dict_ in dicts:
for key, value in dict_.items():
if key in dict_join:
try:
dict_join[key].append(value)
except AttributeError:
dict_join[key] = [dict_join[key], value]
else:
dict_join[key] = value
return dict_join
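# A quick self-check of the parsing rules documented above (hypothetical
# parameter names; the values exercise the date, dict, list and None branches):
if __name__ == "__main__":
    from datetime import date as _example_date

    parsed = parse_query_params({
        "indices": ["product", "page"],          # list -> indices[]
        "filter": {"brand": "acme"},             # dict -> filter[brand]
        "updated": _example_date(2020, 1, 31),   # date -> "20200131"
        "page": None,                            # None -> excluded
    })
    assert parsed == {
        "indices[]": ["product", "page"],
        "filter[brand]": "acme",
        "updated": "20200131",
    }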
|
# coding=utf-8
# flake8: noqa E302
"""
Test plugin infrastructure and hooks.
"""
import sys
import pytest
# Python 3.5 had some regressions in the unittest.mock module, so use the 3rd party mock if available
try:
import mock
except ImportError:
from unittest import mock
import cmd2
from cmd2 import plugin
class Plugin:
"""A mixin class for testing hook registration and calling"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_counters()
def reset_counters(self):
self.called_preparse = 0
self.called_postparsing = 0
self.called_precmd = 0
self.called_postcmd = 0
self.called_cmdfinalization = 0
###
#
# preloop and postloop hooks
# which share the same signature and are thus interchangeable
#
###
def prepost_hook_one(self) -> None:
"""Method used for preloop or postloop hooks"""
self.poutput("one")
def prepost_hook_two(self) -> None:
"""Another method used for preloop or postloop hooks"""
self.poutput("two")
def prepost_hook_too_many_parameters(self, param) -> None:
"""A preloop or postloop hook with too many parameters"""
pass
def prepost_hook_with_wrong_return_annotation(self) -> bool:
"""A preloop or postloop hook with incorrect return type"""
pass
###
#
# preparse hook
#
###
def preparse(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""Preparsing hook"""
self.called_preparse += 1
return data
###
#
# Postparsing hooks
#
###
def postparse_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook"""
self.called_postparsing += 1
return data
def postparse_hook_stop(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with requests application exit"""
self.called_postparsing += 1
data.stop = True
return data
def postparse_hook_emptystatement(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with raises an EmptyStatement exception"""
self.called_postparsing += 1
raise cmd2.EmptyStatement
def postparse_hook_exception(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""A postparsing hook which raises an exception"""
self.called_postparsing += 1
raise ValueError
def postparse_hook_too_many_parameters(self, data1, data2) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with too many parameters"""
pass
def postparse_hook_undeclared_parameter_annotation(self, data) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with an undeclared parameter type"""
pass
def postparse_hook_wrong_parameter_annotation(self, data: str) -> cmd2.plugin.PostparsingData:
"""A postparsing hook with the wrong parameter type"""
pass
def postparse_hook_undeclared_return_annotation(self, data: cmd2.plugin.PostparsingData):
"""A postparsing hook with an undeclared return type"""
pass
def postparse_hook_wrong_return_annotation(self, data: cmd2.plugin.PostparsingData) -> str:
"""A postparsing hook with the wrong return type"""
pass
###
#
# precommand hooks, some valid, some invalid
#
###
def precmd(self, statement: cmd2.Statement) -> cmd2.Statement:
"""Override cmd.Cmd method"""
self.called_precmd += 1
return statement
def precmd_hook(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook"""
self.called_precmd += 1
return data
def precmd_hook_emptystatement(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook which raises an EmptyStatement exception"""
self.called_precmd += 1
raise cmd2.EmptyStatement
def precmd_hook_exception(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
"""A precommand hook which raises an exception"""
self.called_precmd += 1
raise ValueError
def precmd_hook_not_enough_parameters(self) -> plugin.PrecommandData:
"""A precommand hook with no parameters"""
pass
def precmd_hook_too_many_parameters(self, one: plugin.PrecommandData, two: str) -> plugin.PrecommandData:
"""A precommand hook with too many parameters"""
return one
def precmd_hook_no_parameter_annotation(self, data) -> plugin.PrecommandData:
"""A precommand hook with no type annotation on the parameter"""
return data
def precmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PrecommandData:
"""A precommand hook with the incorrect type annotation on the parameter"""
return data
def precmd_hook_no_return_annotation(self, data: plugin.PrecommandData):
"""A precommand hook with no type annotation on the return value"""
return data
def precmd_hook_wrong_return_annotation(self, data: plugin.PrecommandData) -> cmd2.Statement:
return self.statement_parser.parse('hi there')
###
#
# postcommand hooks, some valid, some invalid
#
###
def postcmd(self, stop: bool, statement: cmd2.Statement) -> bool:
"""Override cmd.Cmd method"""
self.called_postcmd += 1
return stop
def postcmd_hook(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
"""A postcommand hook"""
self.called_postcmd += 1
return data
def postcmd_hook_exception(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
"""A postcommand hook with raises an exception"""
self.called_postcmd += 1
raise ZeroDivisionError
def postcmd_hook_not_enough_parameters(self) -> plugin.PostcommandData:
"""A precommand hook with no parameters"""
pass
def postcmd_hook_too_many_parameters(self, one: plugin.PostcommandData, two: str) -> plugin.PostcommandData:
"""A precommand hook with too many parameters"""
return one
def postcmd_hook_no_parameter_annotation(self, data) -> plugin.PostcommandData:
"""A precommand hook with no type annotation on the parameter"""
return data
def postcmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PostcommandData:
"""A precommand hook with the incorrect type annotation on the parameter"""
return data
def postcmd_hook_no_return_annotation(self, data: plugin.PostcommandData):
"""A precommand hook with no type annotation on the return value"""
return data
def postcmd_hook_wrong_return_annotation(self, data: plugin.PostcommandData) -> cmd2.Statement:
return self.statement_parser.parse('hi there')
###
#
# command finalization hooks, some valid, some invalid
#
###
def cmdfinalization_hook(self, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:
"""A command finalization hook."""
self.called_cmdfinalization += 1
return data
def cmdfinalization_hook_stop(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which requests application exit"""
self.called_cmdfinalization += 1
data.stop = True
return data
def cmdfinalization_hook_exception(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
"""A command finalization hook which raises an exception"""
self.called_cmdfinalization += 1
raise ValueError
def cmdfinalization_hook_not_enough_parameters(self) -> plugin.CommandFinalizationData:
"""A command finalization hook with no parameters."""
pass
def cmdfinalization_hook_too_many_parameters(self, one: plugin.CommandFinalizationData, two: str) -> plugin.CommandFinalizationData:
"""A command finalization hook with too many parameters."""
return one
def cmdfinalization_hook_no_parameter_annotation(self, data) -> plugin.CommandFinalizationData:
"""A command finalization hook with no type annotation on the parameter."""
return data
def cmdfinalization_hook_wrong_parameter_annotation(self, data: str) -> plugin.CommandFinalizationData:
"""A command finalization hook with the incorrect type annotation on the parameter."""
return data
def cmdfinalization_hook_no_return_annotation(self, data: plugin.CommandFinalizationData):
"""A command finalizationhook with no type annotation on the return value."""
return data
def cmdfinalization_hook_wrong_return_annotation(self, data: plugin.CommandFinalizationData) -> cmd2.Statement:
"""A command finalization hook with the wrong return type annotation."""
return self.statement_parser.parse('hi there')
class PluggedApp(Plugin, cmd2.Cmd):
"""A sample app with a plugin mixed in"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_say(self, statement):
"""Repeat back the arguments"""
self.poutput(statement)
###
#
# test pre and postloop hooks
#
###
def test_register_preloop_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_preloop_hook(app.prepost_hook_too_many_parameters)
def test_register_preloop_hook_with_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_preloop_hook(app.prepost_hook_with_wrong_return_annotation)
def test_preloop_hook(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_preloop_hook(app.prepost_hook_one)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'one\nhello\n'
assert not err
def test_preloop_hooks(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_preloop_hook(app.prepost_hook_one)
app.register_preloop_hook(app.prepost_hook_two)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'one\ntwo\nhello\n'
assert not err
def test_register_postloop_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postloop_hook(app.prepost_hook_too_many_parameters)
def test_register_postloop_hook_with_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postloop_hook(app.prepost_hook_with_wrong_return_annotation)
def test_postloop_hook(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_postloop_hook(app.prepost_hook_one)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'hello\none\n'
assert not err
def test_postloop_hooks(capsys):
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", "say hello", 'quit']
with mock.patch.object(sys, 'argv', testargs):
app = PluggedApp()
app.register_postloop_hook(app.prepost_hook_one)
app.register_postloop_hook(app.prepost_hook_two)
app.cmdloop()
out, err = capsys.readouterr()
assert out == 'hello\none\ntwo\n'
assert not err
###
#
# test preparse hook
#
###
def test_preparse(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.preparse)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_preparse == 1
###
#
# test postparsing hooks
#
###
def test_postparsing_hook_too_many_parameters():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_too_many_parameters)
def test_postparsing_hook_undeclared_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_undeclared_parameter_annotation)
def test_postparsing_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_wrong_parameter_annotation)
def test_postparsing_hook_undeclared_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_undeclared_return_annotation)
def test_postparsing_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postparsing_hook(app.postparse_hook_wrong_return_annotation)
def test_postparsing_hook(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert not app.called_postparsing
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 1
# register the function again, so it should be called twice
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 2
def test_postparsing_hook_stop_first(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert stop
# register another function but it shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert stop
def test_postparsing_hook_stop_second(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 1
assert not stop
# register another function and make sure it gets called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 2
assert stop
# register a third function which shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
assert app.called_postparsing == 2
assert stop
def test_postparsing_hook_emptystatement_first(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 1
# register another function but it shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 1
def test_postparsing_hook_emptystatement_second(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
assert app.called_postparsing == 1
# register another function and make sure it gets called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 2
# register a third function which shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
assert app.called_postparsing == 2
def test_postparsing_hook_exception(capsys):
app = PluggedApp()
app.register_postparsing_hook(app.postparse_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert err
assert app.called_postparsing == 1
# register another function, but it shouldn't be called
app.reset_counters()
app.register_postparsing_hook(app.postparse_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert err
assert app.called_postparsing == 1
###
#
# test precmd hooks
#
#####
def test_register_precmd_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_too_many_parameters)
def test_register_precmd_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_no_parameter_annotation)
def test_register_precmd_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_wrong_parameter_annotation)
def test_register_precmd_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_no_return_annotation)
def test_register_precmd_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_precmd_hook(app.precmd_hook_wrong_return_annotation)
def test_precmd_hook(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# without registering any hooks, precmd() should be called
assert app.called_precmd == 1
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with one hook registered, we should get precmd() and the hook
assert app.called_precmd == 2
# register the function again, so it should be called twice
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with two hooks registered, we should get precmd() and both hooks
assert app.called_precmd == 3
def test_precmd_hook_emptystatement_first(capsys):
app = PluggedApp()
app.register_precmd_hook(app.precmd_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# since the registered hooks are called before precmd(), if a registered
# hook throws an exception, precmd() is never called
assert app.called_precmd == 1
# register another function but it shouldn't be called
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents precmd() from being
# called
assert app.called_precmd == 1
def test_precmd_hook_emptystatement_second(capsys):
app = PluggedApp()
app.register_precmd_hook(app.precmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
# with one hook registered, we should get precmd() and the hook
assert app.called_precmd == 2
# register another function and make sure it gets called
app.reset_counters()
app.register_precmd_hook(app.precmd_hook_emptystatement)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# since the registered hooks are called before precmd(), if a registered
# hook throws an exception, precmd() is never called
assert app.called_precmd == 2
# register a third function which shouldn't be called
app.reset_counters()
app.register_precmd_hook(app.precmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert not out
assert not err
# the exception raised by the second hook should prevent the third
# hook from being called. since the registered hooks are called before precmd(),
# if a registered hook throws an exception, precmd() is never called
assert app.called_precmd == 2
###
#
# test postcmd hooks
#
####
def test_register_postcmd_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_too_many_parameters)
def test_register_postcmd_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_no_parameter_annotation)
def test_register_postcmd_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_wrong_parameter_annotation)
def test_register_postcmd_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_no_return_annotation)
def test_register_postcmd_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_postcmd_hook(app.postcmd_hook_wrong_return_annotation)
def test_postcmd(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# without registering any hooks, postcmd() should be called
assert app.called_postcmd == 1
app.reset_counters()
app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with one hook registered, we should get postcmd() and the hook
assert app.called_postcmd == 2
# register the function again, so it should be called twice
app.reset_counters()
app.register_postcmd_hook(app.postcmd_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
# with two hooks registered, we should get postcmd() and both hooks
assert app.called_postcmd == 3
def test_postcmd_exception_first(capsys):
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# since the registered hooks are called before postcmd(), if a registered
# hook throws an exception, postcmd() is never called. So we should have
# a count of one because we called the hook that raised the exception
assert app.called_postcmd == 1
# register another function but it shouldn't be called
app.reset_counters()
app.register_postcmd_hook(app.postcmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents postcmd() from being
# called
assert app.called_postcmd == 1
def test_postcmd_exception_second(capsys):
app = PluggedApp()
app.register_postcmd_hook(app.postcmd_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert not err
# with one hook registered, we should get the hook and postcmd()
assert app.called_postcmd == 2
# register another function which should be called
app.reset_counters()
    app.register_postcmd_hook(app.postcmd_hook_exception)
    stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
# the exception raised by the first hook should prevent the second
# hook from being called, and it also prevents postcmd() from being
# called. So we have the first hook, and the second hook, which raised
# the exception
assert app.called_postcmd == 2
###
#
# command finalization
#
###
def test_register_cmdfinalization_hook_parameter_count():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_not_enough_parameters)
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_too_many_parameters)
def test_register_cmdfinalization_hook_no_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_no_parameter_annotation)
def test_register_cmdfinalization_hook_wrong_parameter_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_wrong_parameter_annotation)
def test_register_cmdfinalization_hook_no_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_no_return_annotation)
def test_register_cmdfinalization_hook_wrong_return_annotation():
app = PluggedApp()
with pytest.raises(TypeError):
app.register_cmdfinalization_hook(app.cmdfinalization_hook_wrong_return_annotation)
def test_cmdfinalization(capsys):
app = PluggedApp()
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 0
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 1
# register the function again, so it should be called twice
app.reset_counters()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
def test_cmdfinalization_stop_first(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_stop)
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
assert stop
def test_cmdfinalization_stop_second(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
app.register_cmdfinalization_hook(app.cmdfinalization_hook_stop)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert out == 'hello\n'
assert not err
assert app.called_cmdfinalization == 2
assert stop
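# The two "stop" tests above rely on a finalization hook that flips the stop flag
# on its data object before returning it -- roughly (a sketch, assuming cmd2's
# cmd2.plugin.CommandFinalizationData, which carries a ``stop`` attribute):
#
#     def cmdfinalization_hook_stop(self, data: cmd2.plugin.CommandFinalizationData) \
#             -> cmd2.plugin.CommandFinalizationData:
#         self.called_cmdfinalization += 1
#         data.stop = True
#         return data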
def test_cmdfinalization_hook_exception(capsys):
app = PluggedApp()
app.register_cmdfinalization_hook(app.cmdfinalization_hook_exception)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
assert app.called_cmdfinalization == 1
# register another function, but it shouldn't be called
app.reset_counters()
app.register_cmdfinalization_hook(app.cmdfinalization_hook)
stop = app.onecmd_plus_hooks('say hello')
out, err = capsys.readouterr()
assert not stop
assert out == 'hello\n'
assert err
assert app.called_cmdfinalization == 1
|
import json
import os
from FastAutoAugment.common.common import get_logger, common_init, expdir_abspath
from FastAutoAugment.data_aug.train import train_and_eval
if __name__ == '__main__':
conf = common_init(config_filepath='confs/aug_train_cifar.yaml',
param_args=["--autoaug.loader.aug", "fa_reduced_cifar10",
"--common.experiment_name", "autoaug_train"])
logger = get_logger()
import time
t = time.time()
save_path = expdir_abspath('model.pth')
# result = train_and_eval(conf, val_ratio=conf['val_ratio'], val_fold=conf['val_fold'],
# save_path=save_path, only_eval=conf['only_eval'], metric='test')
# TODO: Will fail if val_ratio=0 since we are not using latest training infrastructure
# TODO: Move val_ratio, val_fold, metric to config file
result = train_and_eval(conf, val_ratio=0.2, val_fold=0,
save_path=save_path, only_eval=False, metric='test')
elapsed = time.time() - t
logger.info('training done.')
logger.info('model: %s' % conf['autoaug']['model'])
logger.info('augmentation: %s' % conf['autoaug']['loader']['aug'])
logger.info('\n' + json.dumps(result, indent=4))
logger.info('elapsed time: %.3f Hours' % (elapsed / 3600.))
logger.info('top1 error in testset: %.4f' % (1. - result['top1_test']))
logger.info('Save path: %s' % save_path)
|
# coding: utf-8
"""
ESPER API REFERENCE
OpenAPI spec version: 1.0.0
Contact: developer@esper.io
---------------------------------------------------------
Copyright 2019 Shoonya Enterprises Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pprint
import re
import six
from esperclient.models.app_install import AppInstall
class InlineResponse2005(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'count': 'int',
'next': 'str',
'previous': 'str',
'results': 'list[AppInstall]'
}
attribute_map = {
'count': 'count',
'next': 'next',
'previous': 'previous',
'results': 'results'
}
def __init__(self, count=None, next=None, previous=None, results=None):
"""InlineResponse2005 - a model defined in Swagger"""
self._count = None
self._next = None
self._previous = None
self._results = None
self.discriminator = None
self.count = count
if next is not None:
self.next = next
if previous is not None:
self.previous = previous
self.results = results
@property
def count(self):
"""Gets the count of this InlineResponse2005.
:return: The count of this InlineResponse2005.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this InlineResponse2005.
:param count: The count of this InlineResponse2005.
:type: int
"""
if count is None:
raise ValueError("Invalid value for `count`, must not be `None`")
self._count = count
@property
def next(self):
"""Gets the next of this InlineResponse2005.
:return: The next of this InlineResponse2005.
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this InlineResponse2005.
:param next: The next of this InlineResponse2005.
:type: str
"""
self._next = next
@property
def previous(self):
"""Gets the previous of this InlineResponse2005.
:return: The previous of this InlineResponse2005.
:rtype: str
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this InlineResponse2005.
:param previous: The previous of this InlineResponse2005.
:type: str
"""
self._previous = previous
@property
def results(self):
"""Gets the results of this InlineResponse2005.
:return: The results of this InlineResponse2005.
:rtype: list[AppInstall]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this InlineResponse2005.
:param results: The results of this InlineResponse2005.
:type: list[AppInstall]
"""
if results is None:
raise ValueError("Invalid value for `results`, must not be `None`")
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse2005, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2005):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
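if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): build an empty page of results and
    # exercise the serialization helpers defined above.
    page = InlineResponse2005(count=0, next=None, previous=None, results=[])
    print(page.to_dict())   # {'count': 0, 'next': None, 'previous': None, 'results': []}
    print(repr(page))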
|
from . import Link
def iterate_words(lines):
for line in lines:
words = line.split()
if len(words) == 0:
continue
for word in words[:-1]:
yield word, is_stop_word(word)
yield words[-1], True # EOL is considered a stop word
def is_stop_word(word):
return any(word.endswith(stopchar) for stopchar in '.;?!')
def tokenize(source, link_length):
head = []
end = []
is_start = True
words_iter = iterate_words(source)
while len(head) < link_length - 1:
word, is_end = next(words_iter)
head += [word]
end += [is_end]
    # Keep consuming the same iterator so no words are skipped or repeated.
    for word, is_end in words_iter:
yield Link(head, word, is_start, is_end)
head = head[1:] + [word]
# If the start of the current link is a stop word, the next link
# is a starting link
is_start = end[0]
end = end[1:] + [is_end]
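if __name__ == "__main__":
    # Quick illustration (a sketch): iterate_words flags a word as a stop word when
    # it ends a sentence ('.', ';', '?', '!') or ends its line, which tokenize then
    # uses to decide where a new chain may start.
    sample = ["the cat sat.", "on the mat"]
    for demo_word, demo_is_stop in iterate_words(sample):
        print(demo_word, demo_is_stop)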
|
from django.shortcuts import render
from catalog.models import Book, Author, BookInstance, Genre
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404  # used by book_detail_view below
def index(request):
"""View function for home page of site."""
# Generate counts of some of the main objects
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
# Available books (status = 'a')
num_instances_available = BookInstance.objects.filter(status__exact='a').count()
# The 'all()' is implied by default.
num_authors = Author.objects.count()
# Number of visits to this view, as counted in the session variable.
num_visits = request.session.get('num_visits', 1)
request.session['num_visits'] = num_visits + 1
context = {
'num_books': num_books,
'num_instances': num_instances,
'num_instances_available': num_instances_available,
'num_authors': num_authors,
'num_visits': num_visits,
}
# Render the HTML template index.html with the data in the context variable
return render(request, 'index.html', context=context)
from django.views import generic
class BookListView(generic.ListView):
model = Book
paginate_by = 2
class BookDetailView(generic.DetailView):
model = Book
def book_detail_view(request, primary_key):
try:
book = Book.objects.get(pk=primary_key)
except Book.DoesNotExist:
raise Http404('Book does not exist')
return render(request, 'catalog/book_detail.html', context={'book': book})
class AuthorListView(generic.ListView):
model = Author
paginate_by = 2
class AuthorDetailView(generic.DetailView):
model = Author
class LoanedBooksByUserListView(LoginRequiredMixin,generic.ListView):
"""Generic class-based view listing books on loan to current user."""
model = BookInstance
template_name ='catalog/bookinstance_list_borrowed_user.html'
paginate_by = 2
def get_queryset(self):
return BookInstance.objects.filter(borrower=self.request.user).filter(status__exact='o').order_by('due_back')
# Added as part of challenge!
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
"""Generic class-based view listing all books on loan. Only visible to users with can_mark_returned permission."""
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(status__exact='o').order_by('due_back')
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
import datetime
from django.contrib.auth.decorators import login_required, permission_required
# from .forms import RenewBookForm
from catalog.forms import RenewBookForm
@login_required
@permission_required('catalog.can_mark_returned', raise_exception=True)
def renew_book_librarian(request, pk):
"""View function for renewing a specific BookInstance by librarian."""
book_instance = get_object_or_404(BookInstance, pk=pk)
# If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request (binding):
form = RenewBookForm(request.POST)
# Check if the form is valid:
if form.is_valid():
# process the data in form.cleaned_data as required (here we just write it to the model due_back field)
book_instance.due_back = form.cleaned_data['renewal_date']
book_instance.save()
# redirect to a new URL:
return HttpResponseRedirect(reverse('all-borrowed'))
# If this is a GET (or any other method) create the default form
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
form = RenewBookForm(initial={'renewal_date': proposed_renewal_date})
context = {
'form': form,
'book_instance': book_instance,
}
return render(request, 'catalog/book_renew_librarian.html', context)
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from catalog.models import Author
class AuthorCreate(CreateView):
model = Author
fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']
initial = {'date_of_death': '11/06/2020'}
class AuthorUpdate(UpdateView):
model = Author
fields = '__all__' # Not recommended (potential security issue if more fields added)
class AuthorDelete(DeleteView):
model = Author
success_url = reverse_lazy('authors')
class BookCreate(CreateView):
model = Book
fields = ['title', 'author', 'summary', 'isbn', 'genre', 'language']
# initial = {'date_of_death': '11/06/2020'}
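# The views above resolve URL names such as 'all-borrowed' (via reverse) and
# 'authors' (via reverse_lazy), so catalog/urls.py must define patterns with those
# names. A minimal sketch of the entries they rely on (illustrative; the real
# project's urls.py may differ):
#
#     urlpatterns += [
#         path('borrowed/', views.LoanedBooksAllListView.as_view(), name='all-borrowed'),
#         path('authors/', views.AuthorListView.as_view(), name='authors'),
#     ]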
|
import enum
@enum.unique
class Flag(enum.IntEnum):
NOT_NULL = 1
PRI_KEY = 2
UNIQUE_KEY = 4
MULTIPLE_KEY = 8
BLOB = 16
UNSIGNED = 32
ZEROFILL = 64
BINARY = 128
ENUM = 256
AUTO_INCREMENT = 512
TIMESTAMP = 1024
SET = 2048
PART_KEY = 16384
GROUP = 32767
UNIQUE = 65536
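if __name__ == "__main__":
    # Example: column flags arrive as a bitmask, so membership is checked with a
    # bitwise AND rather than equality.
    flags = Flag.NOT_NULL | Flag.PRI_KEY | Flag.AUTO_INCREMENT
    print(bool(flags & Flag.PRI_KEY))   # True
    print(bool(flags & Flag.BLOB))      # False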
|
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
from distutils.version import StrictVersion
import logging as log
import os
import re
import shlex
import subprocess
import sys
# Display INFO log messages and up.
log.basicConfig(level=log.INFO, format="%(levelname)s: %(message)s")
def get_tool_requirements_path():
'''Return the path to tool_requirements.py, at the top of the repo'''
# top_src_dir is the top of the repository
top_src_dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
'..'))
return os.path.join(top_src_dir, 'tool_requirements.py')
class ReqErr(Exception):
def __init__(self, path, msg):
self.path = path
self.msg = msg
def __str__(self):
return ('Error parsing tool requirements from {!r}: {}'
.format(self.path, self.msg))
class ToolReq:
# A subclass can set this to configure the command that's run to get the
# version of a tool. If tool_cmd is None, get_version will call "self.tool
# --version".
tool_cmd = None
# Used by get_version. If not None, this is a dictionary that's added to
# the environment when running the command.
tool_env = None
# A subclass can set this to configure _parse_version_output. If set, it
# should be a Regex object with a single capturing group that captures the
# version.
version_regex = None
def __init__(self, tool, min_version):
self.tool = tool
self.min_version = min_version
self.optional = False
def _get_tool_cmd(self):
'''Return the command to run to get the installed version'''
return self.tool_cmd or [self.tool, '--version']
def _get_version(self):
'''Run the tool to get the installed version.
Raises a RuntimeError on failure. The default version uses the class
variable tool_cmd to figure out what to run.
'''
def _parse_version_output(self, stdout):
'''Parse the nonempty stdout to get a version number
Raises a ValueError on failure. The default implementation returns the
last word of the first line if version_regex is None or the first match
for version_regex if it is not None.
'''
if self.version_regex is None:
line0 = stdout.split('\n', 1)[0]
words = line0.rsplit(None, 1)
if not words:
raise ValueError('Empty first line.')
return words[-1]
for line in stdout.split('\n'):
match = self.version_regex.match(line.rstrip())
if match is not None:
return match.group(1)
raise ValueError('No line matched version regex.')
def get_version(self):
'''Run the tool to get a version.
Returns a version string on success. Raises a RuntimeError on failure.
The default version uses the class variable tool_cmd to figure out what
to run.
'''
cmd = self._get_tool_cmd()
cmd_txt = ' '.join(shlex.quote(w) for w in cmd)
env = None
if self.tool_env is not None:
env = os.environ.copy()
env.update(self.tool_env)
try:
proc = subprocess.run(cmd,
check=True,
stdout=subprocess.PIPE,
universal_newlines=True,
env=env)
except (subprocess.CalledProcessError, FileNotFoundError) as err:
env_msg = ('' if not self.tool_env else
' (with environment overrides: {})'
.format(', '.join('{}={}'.format(k, v)
for k, v in self.tool_env.items())))
raise RuntimeError('Failed to run {!r}{} to check version: {}'
.format(cmd_txt, env_msg, err))
if not proc.stdout:
raise RuntimeError('No output from running {!r} to check version.'
.format(cmd_txt))
try:
return self._parse_version_output(proc.stdout)
except ValueError as err:
raise RuntimeError('Bad output from running {!r} '
'to check version: {}'
.format(cmd_txt, err))
def to_semver(self, version, from_req):
'''Convert a tool version to semantic versioning format
If from_req is true, this version comes from the requirements file
(rather than being reported from an installed application). That might
mean stricter checking. If version is not a known format, raises a
ValueError.
'''
return version
def check(self):
'''Get the installed version and check it matches the requirements
Returns (is_good, msg). is_good is true if we matched the requirements
and false otherwise. msg describes what happened (an error message on
failure, or extra information on success).
'''
try:
min_semver = self.to_semver(self.min_version, True)
except ValueError as err:
return (False,
'Failed to convert requirement to semantic version: {}'
.format(err))
try:
min_sv = StrictVersion(min_semver)
except ValueError as err:
return (False,
'Bad semver inferred from required version ({}): {}'
.format(min_semver, err))
try:
actual_version = self.get_version()
except RuntimeError as err:
return (False, str(err))
try:
actual_semver = self.to_semver(actual_version, False)
except ValueError as err:
return (False,
'Failed to convert installed to semantic version: {}'
.format(err))
try:
actual_sv = StrictVersion(actual_semver)
except ValueError as err:
return (False,
'Bad semver inferred from installed version ({}): {}'
.format(actual_semver, err))
if actual_sv < min_sv:
return (False,
'Installed version is too old: '
'found version {}, but need at least {}'
.format(actual_version, self.min_version))
return (True,
'Sufficiently recent version (found {}; needed {})'
.format(actual_version, self.min_version))
class VerilatorToolReq(ToolReq):
def get_version(self):
try:
# Note: "verilator" needs to be called through a shell and with all
# arguments in a string, as it doesn't have a shebang, but instead
# relies on perl magic to parse command line arguments.
version_str = subprocess.run('verilator --version', shell=True,
check=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise RuntimeError('Unable to call Verilator to check version: {}'
.format(err)) from None
return version_str.stdout.split(' ')[1].strip()
class VeribleToolReq(ToolReq):
tool_cmd = ['verible-verilog-lint', '--version']
def to_semver(self, version, from_req):
# Drop the hash suffix and convert into version string that
# is compatible with StrictVersion in check_version below.
# Example: v0.0-808-g1e17daa -> 0.0.808
m = re.fullmatch(r'v([0-9]+)\.([0-9]+)-([0-9]+)-g[0-9a-f]+$', version)
if m is None:
raise ValueError("{} has invalid version string format."
.format(version))
return '.'.join(m.group(1, 2, 3))
class VivadoToolReq(ToolReq):
tool_cmd = ['vivado', '-version']
version_regex = re.compile(r'Vivado v(.*)\s')
def to_semver(self, version, from_req):
# Regular Vivado releases just have a major and minor version.
# In this case, we set the patch level to 0.
m = re.fullmatch(r'([0-9]+)\.([0-9]+)(?:\.([0-9]+))?', version)
if m is None:
raise ValueError("{} has invalid version string format."
.format(version))
return '.'.join((m.group(1), m.group(2), m.group(3) or '0'))
class VcsToolReq(ToolReq):
tool_cmd = ['vcs', '-full64', '-ID']
tool_env = {'VCS_ARCH_OVERRIDE': 'linux'}
version_regex = re.compile(r'Compiler version = VCS [A-Z]-(.*)')
def to_semver(self, version, from_req):
# VCS has a rather strange numbering scheme, where the most general
# versions look something like this:
#
# Q-2020.03-SP1-2
#
# Our version_regex strips out the "Q" part (a "platform prefix")
# already. A version always has the "2020.03" (year and month) part,
# and may also have an -SP<n> and/or -<patch> suffix.
#
# Since StrictVersion expects a 3 digit versioning scheme, we multiply
# any SP number by 100, which should work as long as the patch version
# isn't greater than 99.
#
# Some VCS builds also report other cruft (like _Full64) after this
# version number. If from_req is False, allow (and ignore) that too.
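        #
        # For example, with from_req=False, "2020.03-SP1-2" parses to major=2020,
        # minor=03, SP=1, patch=2, giving '2020.03' + str(1 * 100 + 2) and hence
        # the semver-style string "2020.03102".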
        regex = r'([0-9]+)\.([0-9]+)(?:-SP([0-9]+))?(?:-([0-9]+))?'
if from_req:
regex += '$'
match = re.match(regex, version)
if match is None:
raise ValueError("{!r} is not a recognised VCS version string."
.format(version))
major = match.group(1)
minor = match.group(2)
sp = int(match.group(3) or 0)
patch = int(match.group(4) or 0)
comb = str(sp * 100 + patch)
return '{}.{}{}'.format(major, minor, comb)
class PyModuleToolReq(ToolReq):
'''A tool in a Python module (its version can be found by running pip)'''
version_regex = re.compile(r'Version: (.*)')
def _get_tool_cmd(self):
return ['pip3', 'show', self.tool]
def dict_to_tool_req(path, tool, raw):
'''Parse a dict (as read from Python) as a ToolReq
    Required keys: min_version. Optional keys: as_needed.
'''
where = 'Dict for {} in __TOOL_REQUIREMENTS__'.format(tool)
# We operate in place on the dictionary. Take a copy to avoid an
# obnoxious API.
raw = raw.copy()
if 'min_version' not in raw:
raise ReqErr(path,
'{} is missing required key: "min_version".'
.format(where))
min_version = raw['min_version']
if not isinstance(min_version, str):
raise ReqErr(path,
'{} has min_version that is not a string.'
.format(where))
del raw['min_version']
as_needed = False
if 'as_needed' in raw:
as_needed = raw['as_needed']
if not isinstance(as_needed, bool):
raise ReqErr(path,
'{} has as_needed that is not a bool.'
.format(where))
del raw['as_needed']
if raw:
raise ReqErr(path,
'{} has unexpected keys: {}.'
.format(where, ', '.join(raw.keys())))
classes = {
'edalize': PyModuleToolReq,
'vcs': VcsToolReq,
'verible': VeribleToolReq,
'verilator': VerilatorToolReq,
'vivado': VivadoToolReq,
}
cls = classes.get(tool, ToolReq)
ret = cls(tool, min_version)
ret.as_needed = as_needed
return ret
def read_tool_requirements(path=None):
'''Read tool requirements from a Python file'''
if path is None:
path = get_tool_requirements_path()
with open(path, 'r') as pyfile:
globs = {}
exec(pyfile.read(), globs)
# We expect the exec call to have populated globs with a
# __TOOL_REQUIREMENTS__ dictionary.
raw = globs.get('__TOOL_REQUIREMENTS__')
if raw is None:
raise ReqErr(path,
                         'The Python file did not define '
                         '__TOOL_REQUIREMENTS__.')
# raw should be a dictionary (keyed by tool name)
if not isinstance(raw, dict):
raise ReqErr(path, '__TOOL_REQUIREMENTS__ is not a dict.')
reqs = {}
for tool, raw_val in raw.items():
if not isinstance(tool, str):
raise ReqErr(path,
'Invalid key in __TOOL_REQUIREMENTS__: {!r}'
.format(tool))
if isinstance(raw_val, str):
# Shorthand notation: value is just a string, which we
# interpret as a minimum version
raw_val = {'min_version': raw_val}
if not isinstance(raw_val, dict):
raise ReqErr(path,
'Value for {} in __TOOL_REQUIREMENTS__ '
'is not a string or dict.'.format(tool))
reqs[tool] = dict_to_tool_req(path, tool, raw_val)
return reqs
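# For reference, a tool_requirements.py file accepted by read_tool_requirements()
# can mix the shorthand and dict forms -- an illustrative sketch (not the
# repository's actual requirements):
#
#     __TOOL_REQUIREMENTS__ = {
#         'verilator': '4.028',                                 # shorthand: min_version only
#         'vivado': {'min_version': '2019.1'},
#         'vcs': {'min_version': '2018.09', 'as_needed': True},
#     }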
def main():
parser = argparse.ArgumentParser()
parser.add_argument('tool', nargs='*')
args = parser.parse_args()
# Get tool requirements
try:
tool_requirements = read_tool_requirements()
except ReqErr as err:
log.error(str(err))
return 1
pending_tools = set(args.tool)
missing_tools = []
for tool, req in tool_requirements.items():
if req.as_needed and tool not in pending_tools:
continue
pending_tools.discard(tool)
good, msg = req.check()
if not good:
log.error('Failed tool requirement for {}: {}'
.format(tool, msg))
missing_tools.append(tool)
else:
log.info('Tool {} present: {}'
.format(tool, msg))
all_good = True
if missing_tools:
log.error("Tool requirements not fulfilled. "
"Please update tools ({}) and retry."
.format(', '.join(missing_tools)))
all_good = False
if pending_tools:
log.error("Some tools specified on command line don't appear in "
"tool requirements file: {}"
.format(', '.join(sorted(pending_tools))))
all_good = False
return 0 if all_good else 1
if __name__ == "__main__":
sys.exit(main())
|
# -*- coding: utf-8 -*-
# @Time : 2020/10/11 10:58 AM
# @Author : TaoWang
# @Description : parameter configuration
import argparse
def ArgumentParser():
parser = argparse.ArgumentParser()
parser.add_argument('--embed_size', type=int, default=300, help="embedding size of word embedding")
parser.add_argument("--epoch",type=int,default=1,help="epoch of training")
parser.add_argument("--cuda",type=bool,default=True,help="whether use gpu")
parser.add_argument("--gpu",type=int,default=0,help="whether use gpu")
parser.add_argument("--learning_rate",type=float,default=0.001,help="learning rate during training")
parser.add_argument("--batch_size",type=int,default=32,help="batch size during training")
parser.add_argument("--min_count",type=int,default=20,help="min count of words")
parser.add_argument("--window_size",type=int,default=2,help="min count of words")
parser.add_argument("--x_max",type=int,default=100,help="x_max of glove")
parser.add_argument("--alpha",type=float,default=0.75,help="alpha of glove")
return parser.parse_args(args=[])
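if __name__ == "__main__":
    # parse_args(args=[]) ignores sys.argv and simply returns the defaults above,
    # which keeps this helper usable inside notebooks. Quick sanity check:
    config = ArgumentParser()
    print(config.embed_size, config.batch_size, config.window_size)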
|
"""
The ``mlflow.pytorch`` module provides an API for logging and loading PyTorch models. This module
exports PyTorch models with the following flavors:
PyTorch (native) format
This is the main flavor that can be loaded back into PyTorch.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
import importlib
import logging
import os
import yaml
import cloudpickle
import numpy as np
import pandas as pd
from distutils.version import LooseVersion
import posixpath
import mlflow
import shutil
import mlflow.pyfunc.utils as pyfunc_utils
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model, ModelSignature
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST
from mlflow.pytorch import pickle_module as mlflow_pytorch_pickle_module
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.annotations import experimental
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import _copy_file_or_tree, TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.utils.autologging_utils import autologging_integration, safe_patch
FLAVOR_NAME = "pytorch"
_SERIALIZED_TORCH_MODEL_FILE_NAME = "model.pth"
_PICKLE_MODULE_INFO_FILE_NAME = "pickle_module_info.txt"
_EXTRA_FILES_KEY = "extra_files"
_REQUIREMENTS_FILE_KEY = "requirements_file"
_logger = logging.getLogger(__name__)
def get_default_conda_env():
"""
:return: The default Conda environment as a dictionary for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
.. code-block:: python
:caption: Example
import mlflow.pytorch
# Log PyTorch model
with mlflow.start_run() as run:
mlflow.pytorch.log_model(model, "model")
# Fetch the associated conda environment
env = mlflow.pytorch.get_default_conda_env()
print("conda env: {}".format(env))
.. code-block:: text
:caption: Output
conda env {'name': 'mlflow-env',
'channels': ['defaults', 'conda-forge', 'pytorch'],
'dependencies': ['python=3.7.5', 'pytorch=1.5.1',
'torchvision=0.6.1',
'pip', {'pip': ['mlflow', 'cloudpickle==1.6.0']}]}
"""
import torch
import torchvision
return _mlflow_conda_env(
additional_conda_deps=[
"pytorch={}".format(torch.__version__),
"torchvision={}".format(torchvision.__version__),
],
additional_pip_deps=[
# We include CloudPickle in the default environment because
# it's required by the default pickle module used by `save_model()`
# and `log_model()`: `mlflow.pytorch.pickle_module`.
"cloudpickle=={}".format(cloudpickle.__version__)
],
additional_conda_channels=["pytorch"],
)
def log_model(
pytorch_model,
artifact_path,
conda_env=None,
code_paths=None,
pickle_module=None,
registered_model_name=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
requirements_file=None,
extra_files=None,
**kwargs
):
"""
Log a PyTorch model as an MLflow artifact for the current run.
:param pytorch_model: PyTorch model to be saved. Can be either an eager model (subclass of
``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``
or ``torch.jit.trace``.
                          The model accepts a single ``torch.FloatTensor`` as
                          input and produces a single output tensor.
If saving an eager model, any code dependencies of the
model's class, including the class definition itself, should be
included in one of the following locations:
- The package(s) listed in the model's Conda environment, specified
by the ``conda_env`` parameter.
- One or more of the files specified by the ``code_paths`` parameter.
:param artifact_path: Run-relative artifact path.
    :param conda_env: Path to a Conda environment file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the model. The
following is an *example* dictionary representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'pytorch=0.4.1',
'torchvision=0.2.1'
]
}
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param pickle_module: The module that PyTorch should use to serialize ("pickle") the specified
``pytorch_model``. This is passed as the ``pickle_module`` parameter
to ``torch.save()``. By default, this module is also used to
deserialize ("unpickle") the PyTorch model at load time.
:param registered_model_name: (Experimental) If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param await_registration_for: Number of seconds to wait for the model version to finish
being created and is in ``READY`` status. By default, the function
waits for five minutes. Specify 0 or None to skip waiting.
:param requirements_file: A string containing the path to requirements file. Remote URIs
are resolved to absolute filesystem paths.
For example, consider the following ``requirements_file`` string -
requirements_file = "s3://my-bucket/path/to/my_file"
In this case, the ``"my_file"`` requirements file is downloaded from S3.
If ``None``, no requirements file is added to the model.
:param extra_files: A list containing the paths to corresponding extra files. Remote URIs
are resolved to absolute filesystem paths.
For example, consider the following ``extra_files`` list -
extra_files = ["s3://my-bucket/path/to/my_file1",
"s3://my-bucket/path/to/my_file2"]
                        In this case, the ``my_file1`` and ``my_file2`` extra files are downloaded from S3.
If ``None``, no extra files are added to the model.
:param kwargs: kwargs to pass to ``torch.save`` method.
.. code-block:: python
:caption: Example
import numpy as np
import torch
import mlflow.pytorch
class LinearNNModel(torch.nn.Module):
def __init__(self):
super(LinearNNModel, self).__init__()
self.linear = torch.nn.Linear(1, 1) # One in and one out
def forward(self, x):
y_pred = self.linear(x)
return y_pred
def gen_data():
# Example linear model modified to use y = 2x
# from https://github.com/hunkim/PyTorchZeroToAll
# X training data, y labels
X = torch.arange(1.0, 25.0).view(-1, 1)
y = torch.from_numpy(np.array([x * 2 for x in X])).view(-1, 1)
return X, y
# Define model, loss, and optimizer
model = LinearNNModel()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
# Training loop
epochs = 250
X, y = gen_data()
for epoch in range(epochs):
# Forward pass: Compute predicted y by passing X to the model
y_pred = model(X)
# Compute the loss
loss = criterion(y_pred, y)
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Log the model
with mlflow.start_run() as run:
mlflow.pytorch.log_model(model, "model")
# convert to scripted model and log the model
scripted_pytorch_model = torch.jit.script(model)
mlflow.pytorch.log_model(scripted_pytorch_model, "scripted_model")
# Fetch the logged model artifacts
print("run_id: {}".format(run.info.run_id))
for artifact_path in ["model/data", "scripted_model/data"]:
artifacts = [f.path for f in MlflowClient().list_artifacts(run.info.run_id,
artifact_path)]
print("artifacts: {}".format(artifacts))
.. code-block:: text
:caption: Output
run_id: 1a1ec9e413ce48e9abf9aec20efd6f71
artifacts: ['model/data/model.pth',
'model/data/pickle_module_info.txt']
artifacts: ['scripted_model/data/model.pth',
'scripted_model/data/pickle_module_info.txt']
.. figure:: ../_static/images/pytorch_logged_models.png
PyTorch logged models
"""
pickle_module = pickle_module or mlflow_pytorch_pickle_module
Model.log(
artifact_path=artifact_path,
flavor=mlflow.pytorch,
pytorch_model=pytorch_model,
conda_env=conda_env,
code_paths=code_paths,
pickle_module=pickle_module,
registered_model_name=registered_model_name,
signature=signature,
input_example=input_example,
await_registration_for=await_registration_for,
requirements_file=requirements_file,
extra_files=extra_files,
**kwargs,
)
def save_model(
pytorch_model,
path,
conda_env=None,
mlflow_model=None,
code_paths=None,
pickle_module=None,
signature: ModelSignature = None,
input_example: ModelInputExample = None,
requirements_file=None,
extra_files=None,
**kwargs
):
"""
Save a PyTorch model to a path on the local file system.
:param pytorch_model: PyTorch model to be saved. Can be either an eager model (subclass of
``torch.nn.Module``) or scripted model prepared via ``torch.jit.script``
or ``torch.jit.trace``.
                          The model accepts a single ``torch.FloatTensor`` as
                          input and produces a single output tensor.
If saving an eager model, any code dependencies of the
model's class, including the class definition itself, should be
included in one of the following locations:
- The package(s) listed in the model's Conda environment, specified
by the ``conda_env`` parameter.
- One or more of the files specified by the ``code_paths`` parameter.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the model. The
following is an *example* dictionary representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'pytorch=0.4.1',
'torchvision=0.2.1'
]
}
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
:param code_paths: A list of local filesystem paths to Python file dependencies (or directories
containing file dependencies). These files are *prepended* to the system
path when the model is loaded.
:param pickle_module: The module that PyTorch should use to serialize ("pickle") the specified
``pytorch_model``. This is passed as the ``pickle_module`` parameter
to ``torch.save()``. By default, this module is also used to
deserialize ("unpickle") the PyTorch model at load time.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param requirements_file: A string containing the path to requirements file. Remote URIs
are resolved to absolute filesystem paths.
For example, consider the following ``requirements_file`` string -
requirements_file = "s3://my-bucket/path/to/my_file"
In this case, the ``"my_file"`` requirements file is downloaded from S3.
If ``None``, no requirements file is added to the model.
:param extra_files: A list containing the paths to corresponding extra files. Remote URIs
are resolved to absolute filesystem paths.
For example, consider the following ``extra_files`` list -
extra_files = ["s3://my-bucket/path/to/my_file1",
"s3://my-bucket/path/to/my_file2"]
                        In this case, the ``my_file1`` and ``my_file2`` extra files are downloaded from S3.
If ``None``, no extra files are added to the model.
:param kwargs: kwargs to pass to ``torch.save`` method.
.. code-block:: python
:caption: Example
import os
import torch
import mlflow.pytorch
# Class defined here
class LinearNNModel(torch.nn.Module):
...
# Initialize our model, criterion and optimizer
...
# Training loop
...
# Save PyTorch models to current working directory
with mlflow.start_run() as run:
mlflow.pytorch.save_model(model, "model")
# Convert to a scripted model and save it
scripted_pytorch_model = torch.jit.script(model)
mlflow.pytorch.save_model(scripted_pytorch_model, "scripted_model")
# Load each saved model for inference
for model_path in ["model", "scripted_model"]:
model_uri = "{}/{}".format(os.getcwd(), model_path)
loaded_model = mlflow.pytorch.load_model(model_uri)
print("Loaded {}:".format(model_path))
for x in [6.0, 8.0, 12.0, 30.0]:
X = torch.Tensor([[x]])
y_pred = loaded_model(X)
print("predict X: {}, y_pred: {:.2f}".format(x, y_pred.data.item()))
print("--")
.. code-block:: text
:caption: Output
Loaded model:
predict X: 6.0, y_pred: 11.90
predict X: 8.0, y_pred: 15.92
predict X: 12.0, y_pred: 23.96
predict X: 30.0, y_pred: 60.13
--
Loaded scripted_model:
predict X: 6.0, y_pred: 11.90
predict X: 8.0, y_pred: 15.92
predict X: 12.0, y_pred: 23.96
predict X: 30.0, y_pred: 60.13
"""
import torch
pickle_module = pickle_module or mlflow_pytorch_pickle_module
if not isinstance(pytorch_model, torch.nn.Module):
raise TypeError("Argument 'pytorch_model' should be a torch.nn.Module")
if code_paths is not None:
if not isinstance(code_paths, list):
raise TypeError("Argument code_paths should be a list, not {}".format(type(code_paths)))
path = os.path.abspath(path)
if os.path.exists(path):
raise RuntimeError("Path '{}' already exists".format(path))
if mlflow_model is None:
mlflow_model = Model()
os.makedirs(path)
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
model_data_subpath = "data"
model_data_path = os.path.join(path, model_data_subpath)
os.makedirs(model_data_path)
# Persist the pickle module name as a file in the model's `data` directory. This is necessary
# because the `data` directory is the only available parameter to `_load_pyfunc`, and it
# does not contain the MLmodel configuration; therefore, it is not sufficient to place
# the module name in the MLmodel
#
# TODO: Stop persisting this information to the filesystem once we have a mechanism for
# supplying the MLmodel configuration to `mlflow.pytorch._load_pyfunc`
pickle_module_path = os.path.join(model_data_path, _PICKLE_MODULE_INFO_FILE_NAME)
with open(pickle_module_path, "w") as f:
f.write(pickle_module.__name__)
# Save pytorch model
model_path = os.path.join(model_data_path, _SERIALIZED_TORCH_MODEL_FILE_NAME)
if isinstance(pytorch_model, torch.jit.ScriptModule):
torch.jit.ScriptModule.save(pytorch_model, model_path)
else:
torch.save(pytorch_model, model_path, pickle_module=pickle_module, **kwargs)
torchserve_artifacts_config = {}
if requirements_file:
if not isinstance(requirements_file, str):
raise TypeError("Path to requirements file should be a string")
with TempDir() as tmp_requirements_dir:
_download_artifact_from_uri(
artifact_uri=requirements_file, output_path=tmp_requirements_dir.path()
)
rel_path = os.path.basename(requirements_file)
torchserve_artifacts_config[_REQUIREMENTS_FILE_KEY] = {"path": rel_path}
shutil.move(tmp_requirements_dir.path(rel_path), path)
if extra_files:
torchserve_artifacts_config[_EXTRA_FILES_KEY] = []
if not isinstance(extra_files, list):
raise TypeError("Extra files argument should be a list")
with TempDir() as tmp_extra_files_dir:
for extra_file in extra_files:
_download_artifact_from_uri(
artifact_uri=extra_file, output_path=tmp_extra_files_dir.path()
)
rel_path = posixpath.join(_EXTRA_FILES_KEY, os.path.basename(extra_file),)
torchserve_artifacts_config[_EXTRA_FILES_KEY].append({"path": rel_path})
shutil.move(
tmp_extra_files_dir.path(), posixpath.join(path, _EXTRA_FILES_KEY),
)
conda_env_subpath = "conda.yaml"
if conda_env is None:
conda_env = get_default_conda_env()
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, conda_env_subpath), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
if code_paths is not None:
code_dir_subpath = "code"
for code_path in code_paths:
_copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)
else:
code_dir_subpath = None
mlflow_model.add_flavor(
FLAVOR_NAME,
model_data=model_data_subpath,
pytorch_version=torch.__version__,
**torchserve_artifacts_config,
)
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.pytorch",
data=model_data_subpath,
pickle_module_name=pickle_module.__name__,
code=code_dir_subpath,
env=conda_env_subpath,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
def _load_model(path, **kwargs):
"""
:param path: The path to a serialized PyTorch model.
:param kwargs: Additional kwargs to pass to the PyTorch ``torch.load`` function.
"""
import torch
if os.path.isdir(path):
# `path` is a directory containing a serialized PyTorch model and a text file containing
# information about the pickle module that should be used by PyTorch to load it
model_path = os.path.join(path, "model.pth")
pickle_module_path = os.path.join(path, _PICKLE_MODULE_INFO_FILE_NAME)
with open(pickle_module_path, "r") as f:
pickle_module_name = f.read()
if "pickle_module" in kwargs and kwargs["pickle_module"].__name__ != pickle_module_name:
_logger.warning(
"Attempting to load the PyTorch model with a pickle module, '%s', that does not"
" match the pickle module that was used to save the model: '%s'.",
kwargs["pickle_module"].__name__,
pickle_module_name,
)
else:
try:
kwargs["pickle_module"] = importlib.import_module(pickle_module_name)
except ImportError as exc:
raise MlflowException(
message=(
"Failed to import the pickle module that was used to save the PyTorch"
" model. Pickle module name: `{pickle_module_name}`".format(
pickle_module_name=pickle_module_name
)
),
error_code=RESOURCE_DOES_NOT_EXIST,
) from exc
else:
model_path = path
if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"):
return torch.load(model_path, **kwargs)
else:
try:
# load the model as an eager model.
return torch.load(model_path, **kwargs)
except Exception: # pylint: disable=broad-except
# If fails, assume the model as a scripted model
return torch.jit.load(model_path)
def load_model(model_uri, **kwargs):
"""
Load a PyTorch model from a local file or a run.
:param model_uri: The location, in URI format, of the MLflow model, for example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:param kwargs: kwargs to pass to ``torch.load`` method.
:return: A PyTorch model.
.. code-block:: python
:caption: Example
import torch
import mlflow.pytorch
# Class defined here
class LinearNNModel(torch.nn.Module):
...
# Initialize our model, criterion and optimizer
...
# Training loop
...
# Log the model
with mlflow.start_run() as run:
mlflow.pytorch.log_model(model, "model")
# Inference after loading the logged model
model_uri = "runs:/{}/model".format(run.info.run_id)
loaded_model = mlflow.pytorch.load_model(model_uri)
for x in [4.0, 6.0, 30.0]:
X = torch.Tensor([[x]])
y_pred = loaded_model(X)
print("predict X: {}, y_pred: {:.2f}".format(x, y_pred.data.item()))
.. code-block:: text
:caption: Output
predict X: 4.0, y_pred: 7.57
predict X: 6.0, y_pred: 11.64
predict X: 30.0, y_pred: 60.48
"""
import torch
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
try:
pyfunc_conf = _get_flavor_configuration(
model_path=local_model_path, flavor_name=pyfunc.FLAVOR_NAME
)
except MlflowException:
pyfunc_conf = {}
code_subpath = pyfunc_conf.get(pyfunc.CODE)
if code_subpath is not None:
pyfunc_utils._add_code_to_system_path(
code_path=os.path.join(local_model_path, code_subpath)
)
pytorch_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
if torch.__version__ != pytorch_conf["pytorch_version"]:
_logger.warning(
"Stored model version '%s' does not match installed PyTorch version '%s'",
pytorch_conf["pytorch_version"],
torch.__version__,
)
torch_model_artifacts_path = os.path.join(local_model_path, pytorch_conf["model_data"])
return _load_model(path=torch_model_artifacts_path, **kwargs)
def _load_pyfunc(path, **kwargs):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``pytorch`` flavor.
"""
return _PyTorchWrapper(_load_model(path, **kwargs))
class _PyTorchWrapper(object):
"""
Wrapper class that creates a predict function such that
predict(data: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)
"""
def __init__(self, pytorch_model):
self.pytorch_model = pytorch_model
def predict(self, data, device="cpu"):
import torch
if not isinstance(data, pd.DataFrame):
raise TypeError("Input data should be pandas.DataFrame")
self.pytorch_model.to(device)
self.pytorch_model.eval()
with torch.no_grad():
input_tensor = torch.from_numpy(data.values.astype(np.float32)).to(device)
preds = self.pytorch_model(input_tensor)
if not isinstance(preds, torch.Tensor):
raise TypeError(
"Expected PyTorch model to output a single output tensor, "
"but got output of type '{}'".format(type(preds))
)
predicted = pd.DataFrame(preds.numpy())
predicted.index = data.index
return predicted
@experimental
@autologging_integration(FLAVOR_NAME)
def autolog(log_every_n_epoch=1, log_models=True, disable=False): # pylint: disable=unused-argument
"""
Enables (or disables) and configures autologging from `PyTorch Lightning
<https://pytorch-lightning.readthedocs.io/en/latest>`_ to MLflow.
Autologging is performed when you call the `fit` method of
`pytorch_lightning.Trainer() \
<https://pytorch-lightning.readthedocs.io/en/latest/trainer.html#>`_.
Explore the complete `PyTorch MNIST \
<https://github.com/mlflow/mlflow/tree/master/examples/pytorch/MNIST/example1>`_ for
    an expansive example with implementation of additional Lightning steps.
**Note**: Autologging is only supported for PyTorch Lightning models,
i.e., models that subclass
`pytorch_lightning.LightningModule \
<https://pytorch-lightning.readthedocs.io/en/latest/lightning_module.html>`_.
In particular, autologging support for vanilla PyTorch models that only subclass
`torch.nn.Module <https://pytorch.org/docs/stable/generated/torch.nn.Module.html>`_
is not yet available.
:param log_every_n_epoch: If specified, logs metrics once every `n` epochs. By default, metrics
are logged after every epoch.
:param log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
:param disable: If ``True``, disables all supported autologging integrations. If ``False``,
enables all supported autologging integrations.
.. code-block:: python
:caption: Example
import os
import pytorch_lightning as pl
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from pytorch_lightning.metrics.functional import accuracy
import mlflow.pytorch
from mlflow.tracking import MlflowClient
# For brevity, here is the simplest most minimal example with just a training
# loop step, (no validation, no testing). It illustrates how you can use MLflow
# to auto log parameters, metrics, and models.
class MNISTModel(pl.LightningModule):
def __init__(self):
super(MNISTModel, self).__init__()
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return torch.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_nb):
                x, y = batch
                logits = self(x)
                loss = F.cross_entropy(logits, y)
                acc = accuracy(torch.argmax(logits, dim=1), y)
                # Use the built-in PyTorch Lightning logger
self.log("train_loss", loss, on_epoch=True)
self.log("acc", acc, on_epoch=True)
return loss
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02)
def print_auto_logged_info(r):
tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")}
artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, "model")]
print("run_id: {}".format(r.info.run_id))
print("artifacts: {}".format(artifacts))
print("params: {}".format(r.data.params))
print("metrics: {}".format(r.data.metrics))
print("tags: {}".format(tags))
# Initialize our model
mnist_model = MNISTModel()
# Initialize DataLoader from MNIST Dataset
train_ds = MNIST(os.getcwd(), train=True,
download=True, transform=transforms.ToTensor())
train_loader = DataLoader(train_ds, batch_size=32)
# Initialize a trainer
trainer = pl.Trainer(max_epochs=20, progress_bar_refresh_rate=20)
# Auto log all MLflow entities
mlflow.pytorch.autolog()
# Train the model
with mlflow.start_run() as run:
trainer.fit(mnist_model, train_loader)
# fetch the auto logged parameters and metrics
print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))
.. code-block:: text
:caption: Output
run_id: 42caa17b60cb489c8083900fb52506a7
artifacts: ['model/MLmodel', 'model/conda.yaml', 'model/data']
params: {'betas': '(0.9, 0.999)',
'weight_decay': '0',
'epochs': '20',
'eps': '1e-08',
'lr': '0.02',
                 'optimizer_name': 'Adam',
                 'amsgrad': 'False'}
metrics: {'acc_step': 0.0,
'train_loss_epoch': 1.0917967557907104,
'train_loss_step': 1.0794280767440796,
'train_loss': 1.0794280767440796,
'acc_epoch': 0.0033333334140479565,
'acc': 0.0}
tags: {'Mode': 'training'}
.. figure:: ../_static/images/pytorch_lightening_autolog.png
PyTorch autologged MLflow entities
"""
import pytorch_lightning as pl
from mlflow.pytorch._pytorch_autolog import _create_patch_fit
fit = _create_patch_fit(log_every_n_epoch=log_every_n_epoch, log_models=log_models)
safe_patch(FLAVOR_NAME, pl.Trainer, "fit", fit, manage_run=True)
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Placeholder Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from feature_segwit import send_to_witness
from test_framework.test_framework import PlacehTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
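# (0xfffffffd is below 0xfffffffe, so BIP 125 treats spends using it as opt-in
# replaceable, and its top bit is set, so BIP 68 relative lock-time is disabled.)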
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(PlacehTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
["-prematurewitness", "-walletprematurewitness", "-mempoolreplacement", "-walletrbf={}".format(i)] for i in
range(self.num_nodes)]
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
self.start_node(1)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 outputs of 0.001 btc (100,000 satoshis each)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransaction(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
# the bumped tx sets fee=49,900, but it converts to 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 49900})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["size"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then call abandon to make sure the wallet doesn't attempt to resubmit the
# bump tx, then invalidate the block so the rbf tx will be put back in the
# mempool. this makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
rbf_node.abandontransaction(bumpid)
rbf_node.invalidateblock(block.hash)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.00050000"),
node.getrawchangeaddress(): Decimal("0.00049000")})
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
|
"""The Amazon Redshift dialect.
This is based on the postgres dialect, since Redshift was initially based on Postgres 8.
We should monitor in future and see whether it should be rebased on the ANSI dialect.
"""
from sqlfluff.core.parser import (
OneOf,
AnyNumberOf,
AnySetOf,
Anything,
Ref,
Sequence,
Bracketed,
BaseSegment,
Delimited,
Nothing,
OptionallyBracketed,
Matchable,
)
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.dialects.dialect_redshift_keywords import (
redshift_reserved_keywords,
redshift_unreserved_keywords,
)
postgres_dialect = load_raw_dialect("postgres")
ansi_dialect = load_raw_dialect("ansi")
redshift_dialect = postgres_dialect.copy_as("redshift")
# Set Keywords
redshift_dialect.sets("unreserved_keywords").clear()
redshift_dialect.sets("unreserved_keywords").update(
[n.strip().upper() for n in redshift_unreserved_keywords.split("\n")]
)
redshift_dialect.sets("reserved_keywords").clear()
redshift_dialect.sets("reserved_keywords").update(
[n.strip().upper() for n in redshift_reserved_keywords.split("\n")]
)
redshift_dialect.sets("bare_functions").clear()
redshift_dialect.sets("bare_functions").update(["current_date", "sysdate"])
redshift_dialect.sets("date_part_function_name").update(
["DATEADD", "DATEDIFF", "EXTRACT", "DATE_PART"]
)
# Add datetime units
# https://docs.aws.amazon.com/redshift/latest/dg/r_Dateparts_for_datetime_functions.html
redshift_dialect.sets("datetime_units").update(
[
# millennium
"MILLENNIUM",
"MILLENNIA",
"MIL",
"MILS",
# century
"CENTURY",
"CENTURIES",
"C",
"CENT",
"CENTS",
# decade
"DECADE",
"DECADES",
"DEC",
"DECS",
# epoch
"EPOCH",
# year
"YEAR",
"YEARS",
"Y",
"YR",
"YRS",
# quarter
"QUARTER",
"QUARTERS",
"QTR",
"QTRS",
# month
"MONTH",
"MONTHS",
"MON",
"MONS",
# week
"WEEK",
"WEEKS",
"W",
# day of week
"DAYOFWEEK",
"DOW",
"DW",
"WEEKDAY",
# day of year
"DAYOFYEAR",
"DOY",
"DY",
"YEARDAY",
# day
"DAY",
"DAYS",
"D",
# hour
"HOUR",
"HOURS",
"H",
"HR",
"HRS",
# minute
"MINUTE",
"MINUTES",
"M",
"MIN",
"MINS",
# second
"SECOND",
"SECONDS",
"S",
"SEC",
"SECS",
# millisec
"MILLISECOND",
"MILLISECONDS",
"MS",
"MSEC",
"MSECS",
"MSECOND",
"MSECONDS",
"MILLISEC",
"MILLISECS",
"MILLISECON",
# microsec
"MICROSECOND",
"MICROSECONDS",
"MICROSEC",
"MICROSECS",
"MICROSECOND",
"USECOND",
"USECONDS",
"US",
"USEC",
"USECS",
# timezone
"TIMEZONE",
"TIMEZONE_HOUR",
"TIMEZONE_MINUTE",
]
)
redshift_dialect.replace(
WellKnownTextGeometrySegment=Nothing(),
JoinLikeClauseGrammar=Sequence(
AnySetOf(
Ref("FromPivotExpressionSegment"),
Ref("FromUnpivotExpressionSegment"),
min_times=1,
),
Ref("AliasExpressionSegment", optional=True),
),
)
ObjectReferenceSegment = redshift_dialect.get_segment("ObjectReferenceSegment")
redshift_dialect.add(
CompressionTypeGrammar=OneOf(
"BZIP2",
"GZIP",
"LZOP",
"ZSTD",
),
ArgModeGrammar=OneOf(
"IN",
"OUT",
"INOUT",
),
ColumnEncodingGrammar=OneOf(
"RAW",
"AZ64",
"BYTEDICT",
"DELTA",
"DELTA32K",
"LZO",
"MOSTLY8",
"MOSTLY16",
"MOSTLY32",
"RUNLENGTH",
"TEXT255",
"TEXT32K",
"ZSTD",
),
)
# need to ignore type due to mypy rules on type variables
# see https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases
# for details
@redshift_dialect.segment(replace=True)
class ColumnReferenceSegment(ObjectReferenceSegment): # type: ignore
"""A reference to column, field or alias.
Adjusted to support column references for Redshift's SUPER data type
(https://docs.aws.amazon.com/redshift/latest/dg/super-overview.html), which
uses a subset of the PartiQL language (https://partiql.org/) to reference
columns.
"""
type = "column_reference"
match_grammar: Matchable = Delimited(
Sequence(
Ref("SingleIdentifierGrammar"),
AnyNumberOf(Ref("ArrayAccessorSegment")),
Ref("TimeZoneGrammar", optional=True),
),
delimiter=OneOf(
Ref("DotSegment"), Sequence(Ref("DotSegment"), Ref("DotSegment"))
),
terminator=OneOf(
"ON",
"AS",
"USING",
Ref("CommaSegment"),
Ref("CastOperatorSegment"),
Ref("BinaryOperatorGrammar"),
Ref("ColonSegment"),
Ref("DelimiterSegment"),
Ref("JoinLikeClauseGrammar"),
),
allow_gaps=False,
)
@redshift_dialect.segment()
class FromUnpivotExpressionSegment(BaseSegment):
"""An UNPIVOT expression.
See
https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html
for details.
"""
type = "from_unpivot_expression"
match_grammar = Sequence(
"UNPIVOT",
Sequence(
OneOf("INCLUDE", "EXCLUDE"),
"NULLS",
optional=True,
),
Bracketed(
Sequence(
Ref("ColumnReferenceSegment"),
"FOR",
Ref("ColumnReferenceSegment"),
"IN",
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("AliasExpressionSegment", optional=True),
)
),
),
),
),
)
@redshift_dialect.segment()
class FromPivotExpressionSegment(BaseSegment):
"""A PIVOT expression.
See
https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html
for details.
"""
type = "from_pivot_expression"
match_grammar = Sequence(
"PIVOT",
Bracketed(
Sequence(
OptionallyBracketed(Ref("FunctionSegment")),
Ref("AliasExpressionSegment", optional=True),
"FOR",
Ref("ColumnReferenceSegment"),
"IN",
Bracketed(
Delimited(
Sequence(
Ref("ExpressionSegment"),
Ref("AliasExpressionSegment", optional=True),
),
),
),
),
),
)
@redshift_dialect.segment(replace=True)
class DateTimeTypeIdentifier(BaseSegment):
"""A Date Time type."""
type = "datetime_type_identifier"
match_grammar = OneOf(
"DATE",
"DATETIME",
Sequence(
OneOf("TIME", "TIMESTAMP"),
Sequence(OneOf("WITH", "WITHOUT"), "TIME", "ZONE", optional=True),
),
OneOf("TIMETZ", "TIMESTAMPTZ"),
# INTERVAL types are not Datetime types under Redshift:
# https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html
)
@redshift_dialect.segment(replace=True)
class DatatypeSegment(BaseSegment):
"""A data type segment.
Indicates a data type.
https://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html
"""
type = "data_type"
match_grammar = OneOf(
# numeric types
"SMALLINT",
"INT2",
"INTEGER",
"INT",
"INT4",
"BIGINT",
"INT8",
"REAL",
"FLOAT4",
Sequence("DOUBLE", "PRECISION"),
"FLOAT8",
"FLOAT",
# numeric types [precision ["," scale])]
Sequence(
OneOf("DECIMAL", "NUMERIC"),
Bracketed(
Delimited(Ref("NumericLiteralSegment")),
optional=True,
),
),
# character types
OneOf(
Sequence(
OneOf(
"CHAR",
"CHARACTER",
"NCHAR",
"VARCHAR",
Sequence("CHARACTER", "VARYING"),
"NVARCHAR",
),
Bracketed(
OneOf(
Ref("NumericLiteralSegment"),
"MAX",
),
optional=True,
),
),
"BPCHAR",
"TEXT",
),
Sequence(
Ref("DateTimeTypeIdentifier"),
Ref("TimeZoneGrammar", optional=True),
),
# INTERVAL is a data type *only* for conversion operations
"INTERVAL",
# boolean types
OneOf("BOOLEAN", "BOOL"),
# hllsketch type
"HLLSKETCH",
# super type
"SUPER",
# spatial data
"GEOMETRY",
"GEOGRAPHY",
# binary type
Sequence(
OneOf(
"VARBYTE",
"VARBINARY",
Sequence("BINARY", "VARYING"),
),
Bracketed(
Ref("NumericLiteralSegment"),
optional=True,
),
),
)
@redshift_dialect.segment()
class DataFormatSegment(BaseSegment):
"""DataFormat segment.
Indicates data format available for COPY commands.
https://docs.aws.amazon.com/redshift/latest/dg/c_Compression_encodings.html
"""
type = "data_format_segment"
match_grammar = Sequence(
Sequence(
"FORMAT",
Ref.keyword("AS", optional=True),
optional=True,
),
OneOf(
Sequence(
"CSV",
Sequence(
"QUOTE",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
),
Sequence(
"SHAPEFILE",
Sequence(
"SIMPLIFY",
Ref.keyword("AUTO", optional=True),
Ref("NumericLiteralSegment", optional=True),
optional=True,
),
),
Sequence(
OneOf("AVRO", "JSON"),
Sequence(
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
),
"PARQUET",
"ORC",
"RCFILE",
"SEQUENCEFILE",
),
)
@redshift_dialect.segment()
class AuthorizationSegment(BaseSegment):
"""Authorization segment.
Specifies authorization to access data in another AWS resource.
https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html
"""
type = "authorization_segment"
match_grammar = AnySetOf(
OneOf(
Sequence(
"IAM_ROLE",
OneOf(
"DEFAULT",
Ref("QuotedLiteralSegment"),
),
),
Sequence(
Ref.keyword("WITH", optional=True),
"CREDENTIALS",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
Sequence(
"ACCESS_KEY_ID",
Ref("QuotedLiteralSegment"),
"SECRET_ACCESS_KEY",
Ref("QuotedLiteralSegment"),
Sequence(
"SESSION_TOKEN",
Ref("QuotedLiteralSegment"),
optional=True,
),
),
optional=False,
),
Sequence(
"KMS_KEY_ID",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"MASTER_SYMMETRIC_KEY",
Ref("QuotedLiteralSegment"),
optional=True,
),
)
@redshift_dialect.segment()
class ColumnAttributeSegment(BaseSegment):
"""Redshift specific column attributes.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "column_attribute_segment"
match_grammar = AnySetOf(
Sequence("DEFAULT", Ref("ExpressionSegment")),
Sequence(
"IDENTITY",
Bracketed(Delimited(Ref("NumericLiteralSegment"))),
),
Sequence(
"GENERATED",
"BY",
"DEFAULT",
"AS",
"IDENTITY",
Bracketed(Delimited(Ref("NumericLiteralSegment"))),
),
Sequence("ENCODE", Ref("ColumnEncodingGrammar")),
"DISTKEY",
"SORTKEY",
Sequence("COLLATE", OneOf("CASE_SENSITIVE", "CASE_INSENSITIVE")),
)
@redshift_dialect.segment(replace=True)
class ColumnConstraintSegment(BaseSegment):
"""Redshift specific column constraints.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "column_constraint_segment"
match_grammar = AnySetOf(
OneOf(Sequence("NOT", "NULL"), "NULL"),
OneOf("UNIQUE", Sequence("PRIMARY", "KEY")),
Sequence(
"REFERENCES",
Ref("TableReferenceSegment"),
Bracketed(Ref("ColumnReferenceSegment"), optional=True),
),
)
@redshift_dialect.segment()
class TableAttributeSegment(BaseSegment):
"""Redshift specific table attributes.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "table_constraint"
match_grammar = AnySetOf(
Sequence("DISTSTYLE", OneOf("AUTO", "EVEN", "KEY", "ALL"), optional=True),
Sequence("DISTKEY", Bracketed(Ref("ColumnReferenceSegment")), optional=True),
OneOf(
Sequence(
OneOf("COMPOUND", "INTERLEAVED", optional=True),
"SORTKEY",
Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
),
Sequence("SORTKEY", "AUTO"),
optional=True,
),
Sequence("ENCODE", "AUTO", optional=True),
)
@redshift_dialect.segment(replace=True)
class TableConstraintSegment(BaseSegment):
"""Redshift specific table constraints.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "table_constraint"
match_grammar = AnySetOf(
Sequence("UNIQUE", Bracketed(Delimited(Ref("ColumnReferenceSegment")))),
Sequence(
"PRIMARY",
"KEY",
Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
),
Sequence(
"FOREIGN",
"KEY",
Bracketed(Delimited(Ref("ColumnReferenceSegment"))),
"REFERENCES",
Ref("TableReferenceSegment"),
Sequence(Bracketed(Ref("ColumnReferenceSegment"))),
),
)
@redshift_dialect.segment(replace=True)
class LikeOptionSegment(BaseSegment):
"""Like Option Segment.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "like_option_segment"
match_grammar = Sequence(OneOf("INCLUDING", "EXCLUDING"), "DEFAULTS")
@redshift_dialect.segment(replace=True)
class CreateTableStatementSegment(BaseSegment):
"""A `CREATE TABLE` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
"""
type = "create_table_statement"
match_grammar = Sequence(
"CREATE",
Ref.keyword("LOCAL", optional=True),
Ref("TemporaryGrammar", optional=True),
"TABLE",
Ref("IfNotExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
Bracketed(
OneOf(
# Columns and comment syntax:
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("DatatypeSegment"),
AnyNumberOf(Ref("ColumnAttributeSegment"), optional=True),
AnyNumberOf(Ref("ColumnConstraintSegment"), optional=True),
),
Ref("TableConstraintSegment", optional=True),
),
Sequence(
"LIKE",
Ref("TableReferenceSegment"),
AnyNumberOf(Ref("LikeOptionSegment"), optional=True),
),
)
),
Sequence("BACKUP", OneOf("YES", "NO", optional=True), optional=True),
AnyNumberOf(Ref("TableAttributeSegment"), optional=True),
)
@redshift_dialect.segment(replace=True)
class CreateTableAsStatementSegment(BaseSegment):
"""A `CREATE TABLE AS` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_AS.html
"""
type = "create_table_as_statement"
match_grammar = Sequence(
"CREATE",
Sequence(
Ref.keyword("LOCAL", optional=True),
OneOf("TEMPORARY", "TEMP"),
optional=True,
),
"TABLE",
Ref("ObjectReferenceSegment"),
Bracketed(
Delimited(
Ref("ColumnReferenceSegment"),
),
optional=True,
),
Sequence("BACKUP", OneOf("YES", "NO"), optional=True),
Ref("TableAttributeSegment", optional=True),
"AS",
OptionallyBracketed(Ref("SelectableGrammar")),
)
@redshift_dialect.segment(replace=True)
class CreateModelStatementSegment(BaseSegment):
"""A `CREATE MODEL` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_MODEL.html
NB: the order of keywords matters
"""
type = "create_model_statement"
match_grammar = Sequence(
"CREATE",
"MODEL",
Ref("ObjectReferenceSegment"),
Sequence(
"FROM",
OneOf(
Ref("QuotedLiteralSegment"),
Bracketed(Ref("SelectableGrammar")),
Ref("ObjectReferenceSegment"),
),
optional=True,
),
Sequence(
"TARGET",
Ref("ColumnReferenceSegment"),
optional=True,
),
Sequence(
"FUNCTION",
Ref("ObjectReferenceSegment"),
Bracketed(
Delimited(Ref("DatatypeSegment")),
optional=True,
),
),
Sequence(
"RETURNS",
Ref("DatatypeSegment"),
optional=True,
),
Sequence(
"SAGEMAKER",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"IAM_ROLE",
OneOf(
"DEFAULT",
Ref("QuotedLiteralSegment"),
),
),
Sequence(
"AUTO",
OneOf(
"ON",
"OFF",
),
optional=True,
),
Sequence(
"MODEL_TYPE",
OneOf(
"XGBOOST",
"MLP",
"KMEANS",
),
optional=True,
),
Sequence(
"PROBLEM_TYPE",
OneOf(
"REGRESSION",
"BINARY_CLASSIFICATION",
"MULTICLASS_CLASSIFICATION",
),
optional=True,
),
Sequence(
"OBJECTIVE",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"PREPROCESSORS",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"HYPERPARAMETERS",
"DEFAULT",
Sequence(
"EXCEPT",
Bracketed(
Delimited(
Anything(),
),
),
optional=True,
),
optional=True,
),
Sequence(
"SETTINGS",
Bracketed(
Sequence(
"S3_BUCKET",
Ref("QuotedLiteralSegment"),
Sequence(
"KMS_KEY_ID",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"S3_GARBAGE_COLLECT",
OneOf(
"ON",
"OFF",
),
optional=True,
),
Sequence(
"MAX_CELLS",
Ref("NumericLiteralSegment"),
optional=True,
),
Sequence(
"MAX_RUNTIME",
Ref("NumericLiteralSegment"),
optional=True,
),
),
),
optional=True,
),
)
@redshift_dialect.segment()
class ShowModelStatementSegment(BaseSegment):
"""A `SHOW MODEL` statement.
As specified in: https://docs.aws.amazon.com/redshift/latest/dg/r_SHOW_MODEL.html
"""
type = "show_model_statement"
match_grammar = Sequence(
"SHOW",
"MODEL",
OneOf(
"ALL",
Ref("ObjectReferenceSegment"),
),
)
@redshift_dialect.segment()
class CreateExternalTableStatementSegment(BaseSegment):
"""A `CREATE EXTERNAL TABLE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html
"""
type = "create_external_table_statement"
match_grammar = Sequence(
"CREATE",
"EXTERNAL",
"TABLE",
Ref("TableReferenceSegment"),
Bracketed(
# Columns and comment syntax:
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("DatatypeSegment"),
),
),
),
Ref("PartitionedBySegment", optional=True),
Sequence(
"ROW",
"FORMAT",
OneOf(
Sequence(
"DELIMITED",
Ref("RowFormatDelimitedSegment"),
),
Sequence(
"SERDE",
Ref("QuotedLiteralSegment"),
Sequence(
"WITH",
"SERDEPROPERTIES",
Bracketed(
Delimited(
Sequence(
Ref("QuotedLiteralSegment"),
Ref("EqualsSegment"),
Ref("QuotedLiteralSegment"),
),
),
),
optional=True,
),
),
),
optional=True,
),
"STORED",
"AS",
OneOf(
"PARQUET",
"RCFILE",
"SEQUENCEFILE",
"TEXTFILE",
"ORC",
"AVRO",
Sequence(
"INPUTFORMAT",
Ref("QuotedLiteralSegment"),
"OUTPUTFORMAT",
Ref("QuotedLiteralSegment"),
),
),
"LOCATION",
Ref("QuotedLiteralSegment"),
Sequence(
"TABLE",
"PROPERTIES",
Bracketed(
Delimited(
Sequence(
Ref("QuotedLiteralSegment"),
Ref("EqualsSegment"),
Ref("QuotedLiteralSegment"),
),
),
),
optional=True,
),
)
@redshift_dialect.segment()
class CreateExternalTableAsStatementSegment(BaseSegment):
"""A `CREATE EXTERNAL TABLE AS` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html
"""
type = "create_external_table_statement"
match_grammar = Sequence(
"CREATE",
"EXTERNAL",
"TABLE",
Ref("TableReferenceSegment"),
Ref("PartitionedBySegment", optional=True),
Sequence(
"ROW",
"FORMAT",
"DELIMITED",
Ref("RowFormatDelimitedSegment"),
optional=True,
),
"STORED",
"AS",
OneOf(
"PARQUET",
"TEXTFILE",
),
"LOCATION",
Ref("QuotedLiteralSegment"),
Sequence(
"TABLE",
"PROPERTIES",
Bracketed(
Delimited(
Sequence(
Ref("QuotedLiteralSegment"),
Ref("EqualsSegment"),
Ref("QuotedLiteralSegment"),
),
),
),
optional=True,
),
"AS",
OptionallyBracketed(Ref("SelectableGrammar")),
)
@redshift_dialect.segment()
class CreateLibraryStatementSegment(BaseSegment):
"""A `CREATE LIBRARY` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_LIBRARY.html
"""
type = "create_library_statement"
match_grammar = Sequence(
"CREATE",
Sequence(
"OR",
"REPLACE",
optional=True,
),
"LIBRARY",
Ref("ObjectReferenceSegment"),
"LANGUAGE",
"PLPYTHONU",
"FROM",
Ref("QuotedLiteralSegment"),
AnySetOf(
Ref("AuthorizationSegment", optional=False),
Sequence(
"REGION",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
),
)
@redshift_dialect.segment()
class UnloadStatementSegment(BaseSegment):
"""A `UNLOAD` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html
"""
type = "unload_statement"
match_grammar = Sequence(
"UNLOAD",
Bracketed(Ref("QuotedLiteralSegment")),
"TO",
Ref("QuotedLiteralSegment"),
AnySetOf(
Ref("AuthorizationSegment", optional=False),
Sequence(
"REGION",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
Ref("CompressionTypeGrammar", optional=True),
Sequence(
Sequence(
"FORMAT",
Ref.keyword("AS", optional=True),
optional=True,
),
OneOf(
"CSV",
"JSON",
"PARQUET",
),
optional=True,
),
Sequence(
"PARTITION",
"BY",
Ref("BracketedColumnReferenceListGrammar"),
Ref.keyword("INCLUDE", optional=True),
),
Sequence(
"PARALLEL",
OneOf(
"PRESET",
"ON",
"OFF",
"TRUE",
"FALSE",
optional=True,
),
optional=True,
),
OneOf(
Sequence(
"DELIMITER",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
Sequence(
"FIXEDWIDTH",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
optional=True,
),
Sequence(
"MANIFEST",
Ref.keyword("VERBOSE", optional=True),
optional=True,
),
Sequence(
"NULL",
"AS",
Ref("QuotedLiteralSegment"),
optional=True,
),
AnySetOf(
OneOf(
"MAXFILESIZE",
"ROWGROUPSIZE",
),
Ref.keyword("AS", optional=True),
Ref("NumericLiteralSegment"),
OneOf(
"MB",
"GB",
),
optional=True,
),
Sequence(
"ENCRYPTED",
Ref.keyword("AUTO", optional=True),
optional=True,
),
Ref.keyword("ALLOWOVERWRITE", optional=True),
Ref.keyword("CLEANPATH", optional=True),
Ref.keyword("ESCAPE", optional=True),
Ref.keyword("ADDQUOTES", optional=True),
Ref.keyword("HEADER", optional=True),
),
)
@redshift_dialect.segment(replace=True)
class CopyStatementSegment(
postgres_dialect.get_segment("CopyStatementSegment") # type: ignore
):
"""A `COPY` statement.
As specified in:
- https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html
- https://docs.aws.amazon.com/redshift/latest/dg/r_COPY-parameters.html
"""
type = "copy_statement"
match_grammar = Sequence(
"COPY",
Ref("TableReferenceSegment"),
Ref("BracketedColumnReferenceListGrammar", optional=True),
"FROM",
Ref("QuotedLiteralSegment"),
AnySetOf(
Ref("AuthorizationSegment", optional=False),
Sequence(
"REGION",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
Ref("CompressionTypeGrammar", optional=True),
Ref("DataFormatSegment", optional=True),
OneOf(
Sequence(
"DELIMITER",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
Sequence(
"FIXEDWIDTH",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
),
optional=True,
),
Sequence(
"ENCRYPTED",
Ref.keyword("AUTO", optional=True),
optional=True,
),
Ref.keyword("MANIFEST", optional=True),
Sequence(
"COMPROWS",
Ref("NumericLiteralSegment"),
optional=True,
),
Sequence(
"MAXERROR",
Ref.keyword("AS", optional=True),
Ref("NumericLiteralSegment"),
optional=True,
),
Sequence(
"COMPUPDATE",
OneOf(
"PRESET",
"ON",
"OFF",
"TRUE",
"FALSE",
optional=True,
),
optional=True,
),
Sequence(
"STATUPDATE",
OneOf(
"ON",
"OFF",
"TRUE",
"FALSE",
optional=True,
),
optional=True,
),
Ref.keyword("NOLOAD", optional=True),
Ref.keyword("ACCEPTANYDATE", optional=True),
Sequence(
"ACCEPTINVCHARS",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
Ref.keyword("BLANKSASNULL", optional=True),
Sequence(
"DATEFORMAT",
Ref.keyword("AS", optional=True),
OneOf(
"AUTO",
Ref("QuotedLiteralSegment"),
),
optional=True,
),
Ref.keyword("EMPTYASNULL", optional=True),
Sequence(
"ENCODING",
Ref.keyword("AS", optional=True),
OneOf(
"UTF8",
"UTF16",
"UTF16BE",
"UTF16LE",
),
optional=True,
),
Ref.keyword("ESCAPE", optional=True),
Ref.keyword("EXPLICIT_IDS", optional=True),
Ref.keyword("FILLRECORD", optional=True),
Ref.keyword("IGNOREBLANKLINES", optional=True),
Sequence(
"IGNOREHEADER",
Ref.keyword("AS", optional=True),
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"NULL",
"AS",
Ref("QuotedLiteralSegment"),
optional=True,
),
Sequence(
"READRATIO",
Ref("NumericLiteralSegment"),
optional=True,
),
Ref.keyword("REMOVEQUOTES", optional=True),
Ref.keyword("ROUNDEC", optional=True),
Sequence(
"TIMEFORMAT",
Ref.keyword("AS", optional=True),
OneOf(
"AUTO",
"EPOCHSECS",
"EPOCHMILLISECS",
Ref("QuotedLiteralSegment"),
),
optional=True,
),
Ref.keyword("TRIMBLANKS", optional=True),
Ref.keyword("TRUNCATECOLUMNS", optional=True),
),
)
@redshift_dialect.segment(replace=True)
class InsertStatementSegment(BaseSegment):
"""An`INSERT` statement.
Redshift has two versions of insert statements:
- https://docs.aws.amazon.com/redshift/latest/dg/r_INSERT_30.html
- https://docs.aws.amazon.com/redshift/latest/dg/r_INSERT_external_table.html
"""
# TODO: This logic can be streamlined. However, there are some odd parsing issues.
# See https://github.com/sqlfluff/sqlfluff/pull/1896
type = "insert_statement"
match_grammar = Sequence(
"INSERT",
"INTO",
Ref("TableReferenceSegment"),
OneOf(
OptionallyBracketed(Ref("SelectableGrammar")),
Sequence("DEFAULT", "VALUES"),
Sequence(
Ref("BracketedColumnReferenceListGrammar", optional=True),
OneOf(
Ref("ValuesClauseSegment"),
OptionallyBracketed(Ref("SelectableGrammar")),
),
),
),
)
@redshift_dialect.segment(replace=True)
class CreateSchemaStatementSegment(BaseSegment):
"""A `CREATE SCHEMA` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_SCHEMA.html
TODO: support optional SCHEMA_ELEMENT
"""
type = "create_schema_statement"
match_grammar = Sequence(
"CREATE",
"SCHEMA",
OneOf(
Sequence(
Ref("IfNotExistsGrammar", optional=True),
Ref("SchemaReferenceSegment"),
Sequence(
"AUTHORIZATION",
Ref("ObjectReferenceSegment"),
optional=True,
),
),
Sequence(
"AUTHORIZATION",
Ref("ObjectReferenceSegment"),
),
),
Sequence(
"QUOTA",
OneOf(
Sequence(
Ref("NumericLiteralSegment"),
OneOf(
"MB",
"GB",
"TB",
),
),
"UNLIMITED",
),
optional=True,
),
)
@redshift_dialect.segment()
class ProcedureParameterListSegment(BaseSegment):
"""The parameters for a procedure.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_PROCEDURE.html
"""
type = "procedure_parameter_list"
# Odd syntax, but prevents eager parameters being confused for data types
_param_type = OneOf("REFCURSOR", Ref("DatatypeSegment"))
match_grammar = Bracketed(
Sequence(
AnyNumberOf(
OneOf(
Ref("ParameterNameSegment"),
exclude=OneOf(_param_type, Ref("ArgModeGrammar")),
optional=True,
),
Ref("ArgModeGrammar", optional=True),
max_times_per_element=1,
),
_param_type,
AnyNumberOf(
Sequence(
Ref("CommaSegment"),
AnyNumberOf(
OneOf(
Ref("ParameterNameSegment"),
exclude=OneOf(_param_type, Ref("ArgModeGrammar")),
optional=True,
),
Ref("ArgModeGrammar", optional=True),
max_times_per_element=1,
),
_param_type,
),
),
optional=True,
),
)
@redshift_dialect.segment(replace=True)
class CreateProcedureStatementSegment(BaseSegment):
"""A `CREATE PROCEDURE` statement.
https://www.postgresql.org/docs/14/sql-createprocedure.html
TODO: Just a basic statement for now, without full syntax.
Based on CreateFunctionStatementSegment, without a return type.
"""
type = "create_procedure_statement"
match_grammar = Sequence(
"CREATE",
Sequence("OR", "REPLACE", optional=True),
"PROCEDURE",
Ref("FunctionNameSegment"),
Ref("ProcedureParameterListSegment"),
Ref("FunctionDefinitionGrammar"),
)
@redshift_dialect.segment()
class AlterProcedureStatementSegment(BaseSegment):
"""An `ALTER PROCEDURE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_PROCEDURE.html
"""
type = "alter_procedure_statement"
match_grammar = Sequence(
"ALTER",
"PROCEDURE",
Ref("FunctionNameSegment"),
Ref("ProcedureParameterListSegment", optional=True),
OneOf(
Sequence("RENAME", "TO", Ref("FunctionNameSegment")),
Sequence(
"OWNER",
"TO",
OneOf(
OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")),
"CURRENT_USER",
"SESSION_USER",
),
),
),
)
@redshift_dialect.segment(replace=True)
class DropProcedureStatementSegment(BaseSegment):
"""An `DROP PROCEDURE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_PROCEDURE.html
"""
type = "drop_procedure_statement"
match_grammar = Sequence(
"DROP",
"PROCEDURE",
Ref("IfExistsGrammar", optional=True),
Delimited(
Sequence(
Ref("FunctionNameSegment"),
Ref("ProcedureParameterListSegment", optional=True),
),
),
)
@redshift_dialect.segment()
class DeclareStatementSegment(BaseSegment):
"""A `DECLARE` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/declare.html
"""
type = "declare_statement"
match_grammar = Sequence(
"DECLARE",
Ref("ObjectReferenceSegment"),
"CURSOR",
"FOR",
Ref("SelectableGrammar"),
)
@redshift_dialect.segment()
class FetchStatementSegment(BaseSegment):
"""A `FETCH` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/fetch.html
"""
type = "fetch_statement"
match_grammar = Sequence(
"fetch",
OneOf(
"NEXT",
"ALL",
Sequence(
"FORWARD",
OneOf(
"ALL",
Ref("NumericLiteralSegment"),
),
),
),
"FROM",
Ref("ObjectReferenceSegment"),
)
@redshift_dialect.segment()
class CloseStatementSegment(BaseSegment):
"""A `CLOSE` statement.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/close.html
"""
type = "close_statement"
match_grammar = Sequence(
"CLOSE",
Ref("ObjectReferenceSegment"),
)
@redshift_dialect.segment()
class AlterDatashareStatementSegment(BaseSegment):
"""An `ALTER DATASHARE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_DATASHARE.html
"""
type = "create_datashare_statement"
match_grammar = Sequence(
"ALTER",
"DATASHARE",
Ref("ObjectReferenceSegment"),
OneOf(
# add or remove objects to the datashare
Sequence(
OneOf(
"ADD",
"REMOVE",
),
OneOf(
Sequence(
"TABLE",
Delimited(Ref("TableReferenceSegment")),
),
Sequence(
"SCHEMA",
Delimited(Ref("SchemaReferenceSegment")),
),
Sequence(
"FUNCTION",
Delimited(Ref("FunctionNameSegment")),
),
Sequence(
"ALL",
OneOf("TABLES", "FUNCTIONS"),
"IN",
"SCHEMA",
Delimited(Ref("SchemaReferenceSegment")),
),
),
),
# configure the properties of the datashare
Sequence(
"SET",
OneOf(
Sequence(
"PUBLICACCESSIBLE",
Ref("EqualsSegment", optional=True),
Ref("BooleanLiteralGrammar"),
),
Sequence(
"INCLUDENEW",
Ref("EqualsSegment", optional=True),
Ref("BooleanLiteralGrammar"),
"FOR",
"SCHEMA",
Ref("SchemaReferenceSegment"),
),
),
),
),
)
@redshift_dialect.segment()
class CreateDatashareStatementSegment(BaseSegment):
"""A `CREATE DATASHARE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_DATASHARE.html
"""
type = "create_datashare_statement"
match_grammar = Sequence(
"CREATE",
"DATASHARE",
Ref("ObjectReferenceSegment"),
Sequence(
Ref.keyword("SET", optional=True),
"PUBLICACCESSIBLE",
Ref("EqualsSegment", optional=True),
OneOf(
"TRUE",
"FALSE",
),
optional=True,
),
)
@redshift_dialect.segment()
class DescDatashareStatementSegment(BaseSegment):
"""A `DESC DATASHARE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_DESC_DATASHARE.html
"""
type = "desc_datashare_statement"
match_grammar = Sequence(
"DESC",
"DATASHARE",
Ref("ObjectReferenceSegment"),
Sequence(
"OF",
Sequence(
"ACCOUNT",
Ref("QuotedLiteralSegment"),
optional=True,
),
"NAMESPACE",
Ref("QuotedLiteralSegment"),
optional=True,
),
)
@redshift_dialect.segment()
class DropDatashareStatementSegment(BaseSegment):
"""A `DROP DATASHARE` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_DATASHARE.html
"""
type = "drop_datashare_statement"
match_grammar = Sequence(
"DROP",
"DATASHARE",
Ref("ObjectReferenceSegment"),
)
@redshift_dialect.segment()
class ShowDatasharesStatementSegment(BaseSegment):
"""A `SHOW DATASHARES` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_SHOW_DATASHARES.html
"""
type = "show_datashares_statement"
match_grammar = Sequence(
"SHOW",
"DATASHARES",
Sequence(
"LIKE",
Ref("QuotedLiteralSegment"),
optional=True,
),
)
@redshift_dialect.segment()
class AnalyzeCompressionStatementSegment(BaseSegment):
"""An `ANALYZE COMPRESSION` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ANALYZE_COMPRESSION.html
"""
type = "analyze_compression_statement"
match_grammar = Sequence(
OneOf("ANALYZE", "ANALYSE"),
"COMPRESSION",
Sequence(
Ref("TableReferenceSegment"),
Bracketed(
Delimited(
Ref("ColumnReferenceSegment"),
),
optional=True,
),
Sequence(
"COMPROWS",
Ref("NumericLiteralSegment"),
optional=True,
),
optional=True,
),
)
@redshift_dialect.segment()
class VacuumStatementSegment(BaseSegment):
"""A `VACUUM` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_VACUUM_command.html
"""
type = "vacuum_statement"
match_grammar = Sequence(
"VACUUM",
OneOf(
"FULL",
"REINDEX",
"RECLUSTER",
Sequence(
OneOf(
"SORT",
"DELETE",
),
"ONLY",
),
optional=True,
),
Ref("TableReferenceSegment", optional=True),
Sequence(
"TO",
Ref("NumericLiteralSegment"),
"PERCENT",
optional=True,
),
Ref.keyword("BOOST", optional=True),
)
# Adding Redshift specific statements
@redshift_dialect.segment(replace=True)
class StatementSegment(
postgres_dialect.get_segment("StatementSegment") # type: ignore
):
"""A generic segment, to any of its child subsegments."""
type = "statement"
parse_grammar = redshift_dialect.get_segment("StatementSegment").parse_grammar.copy(
insert=[
Ref("CreateLibraryStatementSegment"),
Ref("CreateUserStatementSegment"),
Ref("CreateGroupStatementSegment"),
Ref("AlterUserStatementSegment"),
Ref("AlterGroupStatementSegment"),
Ref("CreateExternalTableAsStatementSegment"),
Ref("CreateExternalTableStatementSegment"),
Ref("DataFormatSegment"),
Ref("UnloadStatementSegment"),
Ref("CopyStatementSegment"),
Ref("ShowModelStatementSegment"),
Ref("CreateDatashareStatementSegment"),
Ref("DescDatashareStatementSegment"),
Ref("DropDatashareStatementSegment"),
Ref("ShowDatasharesStatementSegment"),
Ref("AltereDatashareStatementSegment"),
Ref("DeclareStatementSegment"),
Ref("FetchStatementSegment"),
Ref("CloseStatementSegment"),
Ref("AnalyzeCompressionStatementSegment"),
Ref("VacuumStatementSegment"),
Ref("AlterProcedureStatementSegment"),
],
)
match_grammar = redshift_dialect.get_segment(
"StatementSegment"
).match_grammar.copy()
@redshift_dialect.segment()
class PartitionedBySegment(BaseSegment):
"""Partitioned By Segment.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html
"""
type = "partitioned_by_segment"
match_grammar = Sequence(
Ref.keyword("PARTITIONED"),
"BY",
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("DatatypeSegment"),
),
),
),
)
@redshift_dialect.segment()
class RowFormatDelimitedSegment(BaseSegment):
"""Row Format Delimited Segment.
As specified in
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html
"""
type = "row_format_deimited_segment"
match_grammar = AnySetOf(
Sequence(
"FIELDS",
"TERMINATED",
"BY",
Ref("QuotedLiteralSegment"),
),
Sequence(
"LINES",
"TERMINATED",
"BY",
Ref("QuotedLiteralSegment"),
),
optional=True,
)
@redshift_dialect.segment()
class CreateUserStatementSegment(BaseSegment):
"""`CREATE USER` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html
"""
type = "create_user"
match_grammar = Sequence(
"CREATE",
"USER",
Ref("ObjectReferenceSegment"),
Ref.keyword("WITH", optional=True),
"PASSWORD",
OneOf(Ref("QuotedLiteralSegment"), "DISABLE"),
AnySetOf(
OneOf(
"CREATEDB",
"NOCREATEDB",
),
OneOf(
"CREATEUSER",
"NOCREATEUSER",
),
Sequence(
"SYSLOG",
"ACCESS",
OneOf(
"RESTRICTED",
"UNRESTRICTED",
),
),
Sequence("IN", "GROUP", Delimited(Ref("ObjectReferenceSegment"))),
Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment")),
Sequence(
"CONNECTION",
"LIMIT",
OneOf(
Ref("NumericLiteralSegment"),
"UNLIMITED",
),
),
Sequence(
"SESSION",
"TIMEOUT",
Ref("NumericLiteralSegment"),
),
),
)
@redshift_dialect.segment()
class CreateGroupStatementSegment(BaseSegment):
"""`CREATE GROUP` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_GROUP.html
"""
type = "create_group"
match_grammar = Sequence(
"CREATE",
"GROUP",
Ref("ObjectReferenceSegment"),
Sequence(
Ref.keyword("WITH", optional=True),
"USER",
Delimited(
Ref("ObjectReferenceSegment"),
),
optional=True,
),
)
@redshift_dialect.segment()
class AlterUserStatementSegment(BaseSegment):
"""`ALTER USER` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_USER.html
"""
type = "alter_user"
match_grammar = Sequence(
"ALTER",
"USER",
Ref("ObjectReferenceSegment"),
Ref.keyword("WITH", optional=True),
AnySetOf(
OneOf(
"CREATEDB",
"NOCREATEDB",
),
OneOf(
"CREATEUSER",
"NOCREATEUSER",
),
Sequence(
"SYSLOG",
"ACCESS",
OneOf(
"RESTRICTED",
"UNRESTRICTED",
),
),
Sequence(
"PASSWORD",
OneOf(
Ref("QuotedLiteralSegment"),
"DISABLE",
),
Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment"), optional=True),
),
Sequence(
"RENAME",
"TO",
Ref("ObjectReferenceSegment"),
),
Sequence(
"CONNECTION",
"LIMIT",
OneOf(
Ref("NumericLiteralSegment"),
"UNLIMITED",
),
),
OneOf(
Sequence(
"SESSION",
"TIMEOUT",
Ref("NumericLiteralSegment"),
),
Sequence(
"RESET",
"SESSION",
"TIMEOUT",
),
),
OneOf(
Sequence(
"SET",
Ref("ObjectReferenceSegment"),
OneOf(
"TO",
Ref("EqualsSegment"),
),
OneOf(
"DEFAULT",
Ref("LiteralGrammar"),
),
),
Sequence(
"RESET",
Ref("ObjectReferenceSegment"),
),
),
min_times=1,
),
)
@redshift_dialect.segment()
class AlterGroupStatementSegment(BaseSegment):
"""`ALTER GROUP` statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_GROUP.html
"""
type = "alter_group"
match_grammar = Sequence(
"ALTER",
"GROUP",
Ref("ObjectReferenceSegment"),
OneOf(
Sequence(
OneOf("ADD", "DROP"),
"USER",
Delimited(
Ref("ObjectReferenceSegment"),
),
),
Sequence(
"RENAME",
"TO",
Ref("ObjectReferenceSegment"),
),
),
)
@redshift_dialect.segment(replace=True)
class TransactionStatementSegment(BaseSegment):
"""A `BEGIN|START`, `COMMIT|END` or `ROLLBACK|ABORT` transaction statement.
https://docs.aws.amazon.com/redshift/latest/dg/r_BEGIN.html
"""
type = "transaction_statement"
match_grammar = Sequence(
OneOf("BEGIN", "START", "COMMIT", "END", "ROLLBACK", "ABORT"),
OneOf("TRANSACTION", "WORK", optional=True),
Sequence(
"ISOLATION",
"LEVEL",
OneOf(
"SERIALIZABLE",
Sequence("READ", "COMMITTED"),
Sequence("READ", "UNCOMMITTED"),
Sequence("REPEATABLE", "READ"),
),
optional=True,
),
OneOf(
Sequence("READ", "ONLY"),
Sequence("READ", "WRITE"),
optional=True,
),
)
|
import os
import cv2
import imutils
import numpy as np
from imutils import contours
from imutils import perspective
from scipy.spatial import distance as dist
def detect_shape(filepath, min_width=15, debug=False):
image = cv2.imread(filepath, 0)
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])
'''
blurred = cv2.GaussianBlur(resized, (5, 5), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
'''
gray = cv2.bilateralFilter(resized, 1, 10, 120)
edges = cv2.Canny(gray, 10, 250)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
'''
cnts = cv2.findContours( closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )
gray = cv2.GaussianBlur(resized, (7, 7), 0)
edged = cv2.Canny(gray, 10, 250)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)
'''
cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
shapes = dict()
print(len(cnts))
for idx, c in enumerate(cnts):
try:
perimeter = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.1 * perimeter, True)
if len(approx) == 4:
(x, y, w, h) = cv2.boundingRect(approx)
shapes["rect_{}".format(idx)] = (x, y, w, h)
if debug:
M = cv2.moments(c)
cX = int((M["m10"] / M["m00"]) * ratio)
cY = int((M["m01"] / M["m00"]) * ratio)
c = c.astype("float")
c *= ratio
c = c.astype("int")
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
cv2.putText(image, "square", (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 300,300)
cv2.imshow("image", image)
cv2.waitKey(0)
except Exception:  # skip malformed contours (e.g. zero-area moments)
pass
return shapes
def midpoint(ptA, ptB):
return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
def min_dif(list1, list2):
min_d, ind = 1000000, -1
for i in range(0, len(list1)):
for j in range(0, len(list2)):
if list1[i] - list2[j] < min_d:
ind = j
min_d = list1[i]-list2[j]
return ind
def object_size(filepath, left_width=15):
image = cv2.imread(filepath, 0)
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(image, (7, 7), 0)
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)
# NOTE : Contour - Outlines
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
(cnts, _) = contours.sort_contours(cnts)
pixelsPerMetric = None
dimensions = list()
for c in cnts:
if cv2.contourArea(c) < 100:
continue
orig = image.copy()
box = cv2.minAreaRect(c)
box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
box = np.array(box, dtype="int")
box = perspective.order_points(box)
(tl, tr, br, bl) = box
(tltrX, tltrY) = midpoint(tl, tr)
(blbrX, blbrY) = midpoint(bl, br)
(tlblX, tlblY) = midpoint(tl, bl)
(trbrX, trbrY) = midpoint(tr, br)
cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
# draw lines between the midpoints
cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), (255, 0, 255), 2)
cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 2)
dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
if pixelsPerMetric is None:
pixelsPerMetric = dB / left_width
dimA = dA / pixelsPerMetric
dimB = dB / pixelsPerMetric
cv2.putText(orig, "{:.1f}in".format(dimA), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
cv2.putText(orig, "{:.1f}in".format(dimB), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 300,300)
cv2.imshow("image", orig)
cv2.waitKey(0)
dimensions.append((dimA, dimB))
max_dim = [-1, -1]
for dims in dimensions:
if dims[0] * dims[1] > max_dim[0] * max_dim[1] and left_width not in dims:
max_dim[0] = dims[0]
max_dim[1] = dims[1]
return max_dim
def weight(file1, file2, left_width=21, const_div=6000.0): # left_width = A4 Size
size1 = object_size(file1, left_width)
size2 = object_size(file2, left_width)
rem_ind = min_dif(size1, size2)
weight = (size1[0] * size1[1] * size2[1-rem_ind]) / const_div
return weight
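# Illustrative usage (file paths hypothetical): estimate weight from two photos that each
# include an A4 sheet (21 cm wide) as the size reference:
#   approx_weight = weight("side_view.jpg", "top_view.jpg")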
if __name__ == '__main__':
print(detect_shape("img.jpg", debug=True))
|
import numpy as np
def euclidean_distance(p1,p2):
"""
returns euclidean distance between matrices
@params:
p1, p2: np.ndarray
matrices to perform operation to.
"""
return np.sqrt(np.sum((p1-p2)**2, axis=1))
def entropy(p):
"""
Will be our measurement for uncertainty in our construction
of a decision tree
@params:
p: float
"""
if p == 0:
return 0
elif p == 1:
return 0
else:
return -(p * np.log2(p) + (1 - p) * np.log2(1 - p))
def information_gain(left_child, right_child):
"""
measurement of how much info we gained when splitting a node
using our entropy method.
@def:
takes in a list of classes from left and right child to return
the information gain of our curr split
@params:
left_child: list
class labels in the current left child
right_child: list
class labels in the current right child
"""
parent = left_child + right_child
p_par = parent.count(1) / len(parent) if len(parent) > 0 else 0
p_left = left_child.count(1) / len(left_child) if len(left_child) \
> 0 else 0
p_right = right_child.count(1) / len(right_child) if len(right_child) \
> 0 else 0
infogain_p = entropy(p_par)
infogain_l = entropy(p_left)
infogain_r = entropy(p_right)
return infogain_p - len(left_child) / len(parent) * infogain_l - \
len(right_child) / len(parent) * infogain_r
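# Minimal sanity check (illustrative; assumes binary 0/1 class labels passed as plain lists):
#   information_gain([1, 0, 1], [0, 0])
# parent entropy is ~0.971, the children are ~0.918 and 0.0, so the gain is roughly 0.42.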
|
from django.contrib import admin
from friends.models import FriendRequest
# Register your models here.
admin.site.register(FriendRequest)
|
import _plotly_utils.basevalidators
class ConnectgapsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="connectgaps", parent_name="scattermapbox", **kwargs
):
super(ConnectgapsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
#!/bin/python3
__author__ = "Adam Karl"
"""Find the sum of all primes less than or equal to N"""
#https://projecteuler.net/problem=10
from math import sqrt
isPrime = []
def sieve(n):
"""fills isPrime array with booleans for whether the number at isPrime[i] is prime or not"""
"""uses a process known as the sieve of eratosthenes"""
global isPrime
isPrime = [True for i in range(n+1)] #for numbers from 0 to n inclusive
isPrime[0] = False
isPrime[1] = False
index = 2
while index <= n:
if isPrime[index]: #found a prime number
multiplier = 2
while index * multiplier <= n:
isPrime[index * multiplier] = False #all multiples of the prime are not prime
multiplier += 1
index += 1
return isPrime
def sumPrimes(n):
"""given a list of n booleans on whether an index is prime or not,
return the sum of all primes <= index"""
s = 0
for index in range(1, n+1):
if isPrime[index]:
s += index
return s
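# Quick sanity check: after sieve(10), sumPrimes(10) == 17  (2 + 3 + 5 + 7)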
def main():
print("Find the sum of all primes below: ", end="")
n = int(input().strip())
sieve(n)  # populate the global isPrime list
print("Sum = %d" % sumPrimes(n))
if __name__ == "__main__":
main()
|
import torch
from syft.generic import object_storage
def test_clear_objects():
obj_storage = object_storage.ObjectStorage()
x = torch.tensor(1)
obj_storage.set_obj(x)
objs = obj_storage.current_objects()
assert len(objs) == 1
assert objs[x.id] == x
ret_val = obj_storage.clear_objects()
objs = obj_storage.current_objects()
assert len(objs) == 0
assert ret_val == obj_storage
def test_clear_objects_return_None():
obj_storage = object_storage.ObjectStorage()
x = torch.tensor(1)
obj_storage.set_obj(x)
objs = obj_storage.current_objects()
assert len(objs) == 1
assert objs[x.id] == x
ret_val = obj_storage.clear_objects(return_self=False)
objs = obj_storage.current_objects()
assert len(objs) == 0
assert ret_val is None
|
from ApiManager.utils.operation import add_project_data, add_module_data, add_case_data, add_config_data, \
add_register_data, bulk_import_data
from ApiManager.models import ModuleInfo
import yaml
'''Convert front-end test info (key/value/type form fields) into a dict'''
def key_value_dict(mode=3, **kwargs):
if not kwargs:
return None
sorted_kwargs = sorted(kwargs.items())
kwargs.clear()
if mode == 3:
half_index = len(sorted_kwargs) // 3
for value in range(half_index):
key = sorted_kwargs[value][1]
data_type = sorted_kwargs[value + 2 * half_index][1]
value = sorted_kwargs[half_index + value][1]
if key != '' and value != '':
try:
if data_type == 'string':
value = str(value)
elif data_type == 'float':
value = float(value)
elif data_type == 'int':
value = int(value)
else:
value = bool(value)
except ValueError:  # if the type conversion fails, keep the value as a string
pass
if key != '' and value != '':
kwargs.setdefault(key, value)
else:
half_index = len(sorted_kwargs) // 2
for value in range(half_index):
key = sorted_kwargs[value][1]
value = sorted_kwargs[half_index + value][1]
if key != '' and value != '':
kwargs.setdefault(key, value)
return kwargs
'''Convert front-end test info into a list'''
def key_value_list(mode=4, **kwargs):
if not kwargs:
return None
sorted_kwargs = sorted(kwargs.items())
lists = []
if mode == 4:
half_index = len(sorted_kwargs) // 4
for value in range(half_index):
check = sorted_kwargs[value][1]
expected = sorted_kwargs[value + half_index][1]
comparator = sorted_kwargs[value + 2 * half_index][1]
data_type = sorted_kwargs[value + 3 * half_index][1]
if check != '' and expected != '':
try:
if data_type == 'string':
expected = str(expected)
elif data_type == 'float':
expected = float(expected)
elif data_type == 'int':
expected = int(expected)
else:
expected = bool(expected)
except ValueError:  # if the type conversion fails, keep the value as a string
pass
lists.append({'check': check, 'comparator': comparator, 'expected': expected})
elif mode == 3:
half_index = len(sorted_kwargs) // 3
for value in range(half_index):
key = sorted_kwargs[value][1]
data_type = sorted_kwargs[value + 2 * half_index][1]
value = sorted_kwargs[half_index + value][1]
if key != '' and value != '':
try:
if data_type == 'string':
value = str(value)
elif data_type == 'float':
value = float(value)
elif data_type == 'int':
value = int(value)
else:
value = bool(value)
except ValueError:  # if the type conversion fails, keep the value as a string
pass
lists.append({key: value})
else:
half_index = len(sorted_kwargs) // 2
for value in range(half_index):
key = sorted_kwargs[value][1]
value = sorted_kwargs[half_index + value][1]
if key != '' and value != '':
lists.append({key: value})
if not lists:
return None
return lists
'''Dynamically load the module names belonging to a project'''
def load_modules(**kwargs):
belong_project = kwargs.get('name').get('project')
module_info = list(ModuleInfo.objects.get_module_info(belong_project))
string = ''
for value in module_info:
string = string + value + 'replaceFlag'
return string[:len(string) - 11]
'''Module info validation and persistence'''
def module_info_logic(type=True, **kwargs):
if kwargs.get('module_name') == '':
return '模块名称不能为空'
if kwargs.get('belong_project') == '':
return '请先添加项目'
if kwargs.get('test_user') == '':
return '测试人员不能为空'
if kwargs.get('lifting_time') == '':
return '提测时间不能为空'
return add_module_data(type, **kwargs)
'''Project info validation and persistence'''
def project_info_logic(type=True, **kwargs):
    if kwargs.get('project_name') == '':
        return '项目名称不能为空'
    if kwargs.get('responsible_name') == '':
        return '负责人不能为空'
    if kwargs.get('test_user') == '':
        return '测试人员不能为空'
    if kwargs.get('dev_user') == '':
        return '开发人员不能为空'
    if kwargs.get('publish_app') == '':
        return '发布应用不能为空'
return add_project_data(type, **kwargs)
'''Test case info validation and persistence'''
def case_info_logic(type=True, **kwargs):
test = kwargs.pop('test')
    '''
    Dynamically display modules
    '''
if 'request' not in test.keys():
return load_modules(**test)
else:
            if test.get('name').get('case_name') == '':
                return '用例名称不可为空'
            if test.get('name').get('project') is None or test.get('name').get('project') == '':
                return '请先添加项目'
            if test.get('name').get('module') is None or test.get('name').get('module') == '':
                return '请先添加模块'
            if test.get('name').get('author') == '':
                return '创建者不能为空'
            if test.get('request').get('url') == '':
                return '接口地址不能为空'
if not test.get('validate'):
return '至少需要一个结果校验!'
name = test.pop('name')
test.setdefault('name', name.pop('case_name'))
test.setdefault('case_info', name)
validate = test.pop('validate')
test.setdefault('validate', key_value_list(**validate))
extract = test.pop('extract')
if extract:
test.setdefault('extract', key_value_list(mode=2, **extract))
request_data = test.get('request').pop('request_data')
date_type = test.get('request').pop('type')
if request_data and date_type:
test.get('request').setdefault(date_type, key_value_dict(**request_data))
headers = test.get('request').pop('headers')
if headers:
test.get('request').setdefault('headers', key_value_dict(mode=2, **headers))
variables = test.pop('variables')
if variables:
test.setdefault('variables', key_value_list(mode=3, **variables))
setup = test.pop('setup')
if setup:
test.setdefault('setup', key_value_list(mode=2, **setup))
teardown = test.pop('teardown')
if teardown:
test.setdefault('teardown', key_value_list(mode=2, **teardown))
kwargs.setdefault('test', test)
return add_case_data(type, **kwargs)
'''Config info validation and persistence'''
def config_info_logic(type=True, **kwargs):
config = kwargs.pop('config')
    '''
    Dynamically display modules
    '''
if 'request' not in config.keys():
return load_modules(**config)
else:
        if config.get('name').get('config_name') == '':
            return '配置名称不可为空'
        if config.get('name').get('project') is None or config.get('name').get('project') == '':
            return '请先添加项目'
        if config.get('name').get('config_module') is None or config.get('name').get('config_module') == '':
            return '请先添加模块'
        if config.get('name').get('config_author') == '':
            return '创建者不能为空'
name = config.pop('name')
config.setdefault('name', name.pop('config_name'))
config.setdefault('config_info', name)
request_data = config.get('request').pop('request_data')
data_type = config.get('request').pop('type')
if request_data and data_type:
config.get('request').setdefault(data_type, key_value_dict(**request_data))
headers = config.get('request').pop('headers')
if headers:
config.get('request').setdefault('headers', key_value_dict(mode=2, **headers))
variables = config.pop('variables')
if variables:
config.setdefault('variables', key_value_list(mode=3, **variables))
kwargs.setdefault('config', config)
return add_config_data(type, **kwargs)
'''Store and read the filter query in the session'''
def set_filter_session(request):
filter_query = {'filter': '1', 'user': '', 'name': ''}
if request.method == 'POST':
request.session['filter'] = request.POST.get('filter')
request.session['user'] = request.POST.get('user')
request.session['name'] = request.POST.get('name')
try:
filter_query = {'filter': request.session['filter'], 'user': request.session['user'],
'name': request.session['name']}
except KeyError:
pass
return filter_query
'''Ajax asynchronous message helper'''
def get_ajax_msg(msg, success):
    if msg == 'ok':
return success
else:
return msg
'''Registration info validation and persistence'''
def register_info_logic(**kwargs):
return add_register_data(**kwargs)
'''Convert uploaded YAML file content into a list'''
def yml_parser(file_path):
with open(file_path, 'r') as f:
s = yaml.load(f)
data = {'case_info': s}
bulk_import_data(**data)
return s
|
import tensorflow as tf
#from tensorflow.python.ops.rnn_cell import *
#from tensorflow.python.ops.rnn_cell_impl import _Linear
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import *
#from tensorflow import keras
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
#from keras import backend as K
def din_attention(query, facts, attention_size, mask=None, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
print ("query_size mismatch")
query = tf.concat(values = [
query,
query,
], axis=1)
if time_major:
# (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
if mask is not None:
mask = tf.equal(mask, tf.ones_like(mask))
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
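# Shape sketch (illustrative, assuming query has shape [B, H] and facts has
# shape [B, T, H]): the attention logits `scores` end up as [B, 1, T]; with
# mode='SUM' the result is the [B, 1, H] weighted sum of `facts`, otherwise
# `facts` is returned scaled per time step, keeping its [B, T, H] shape.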
class VecAttGRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(VecAttGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._gate_linear = None
self._candidate_linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, new_h
def prelu(_x, scope=''):
"""parametric ReLU activation"""
with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
_alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
def calc_auc(raw_arr):
"""Summary
Args:
raw_arr (TYPE): Description
Returns:
TYPE: Description
"""
arr = sorted(raw_arr, key=lambda d:d[0], reverse=True)
pos, neg = 0., 0.
for record in arr:
if record[1] == 1.:
pos += 1
else:
neg += 1
fp, tp = 0., 0.
xy_arr = []
for record in arr:
if record[1] == 1.:
tp += 1
else:
fp += 1
xy_arr.append([fp/neg, tp/pos])
auc = 0.
prev_x = 0.
prev_y = 0.
for x, y in xy_arr:
if x != prev_x:
auc += ((x - prev_x) * (y + prev_y) / 2.)
prev_x = x
prev_y = y
return auc
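# Worked example (illustrative): each entry of raw_arr is a (score, label) pair.
#
#     calc_auc([(0.9, 1.), (0.8, 0.), (0.7, 1.), (0.3, 0.)])  # -> 0.75
#
# Sorting by score descending and sweeping the threshold gives the ROC points
# (0, 0.5), (0.5, 0.5), (0.5, 1.0), (1.0, 1.0); the trapezoidal area under
# that curve is 0.25 + 0.5 = 0.75.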
def calc_gauc(raw_arr, nick_index):
"""Summary
Args:
raw_arr (TYPE): Description
Returns:
TYPE: Description
"""
last_index = 0
gauc = 0.
pv_sum = 0
    for idx in range(len(nick_index)):
if nick_index[idx] != nick_index[last_index]:
input_arr = raw_arr[last_index:idx]
auc_val=calc_auc(input_arr)
if auc_val >= 0.0:
gauc += auc_val * len(input_arr)
pv_sum += len(input_arr)
else:
pv_sum += len(input_arr)
last_index = idx
return gauc / pv_sum
def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
input_size = query.get_shape().as_list()[-1]
# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
#output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
output = facts * tf.expand_dims(alphas, -1)
output = tf.reshape(output, tf.shape(facts))
# output = output / (facts.get_shape().as_list()[-1] ** 0.5)
if not return_alphas:
return output
else:
return output, alphas
def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
if time_major:
# (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])
# Trainable parameters
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
if mask is not None:
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
        facts = tf.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
|
import sys
# Alternatively just load env variables via your env/bin/activate script
if sys.platform.startswith('darwin') or sys.platform.startswith('win'):
import json
path = "Gigger/utilities/env_local.json"
with open(path) as json_file:
global CONFIG
CONFIG = json.load(json_file)
else:
import os
global CONFIG
CONFIG = {
"DEPLOYMENT": os.environ['DEPLOYMENT'],
"DB": {
"HOST": os.environ['DB_HOST'],
"USER": os.environ['DB_USER'],
"PW": os.environ['DB_PW'],
"SCHEMA": os.environ['DB_SCHEMA'],
},
"AWS": True,
"FB_APP_ID": os.environ['FB_APP_ID']
}
|
import logging
import six
import ddtrace
from ddtrace.compat import StringIO
from ddtrace.constants import ENV_KEY
from ddtrace.constants import VERSION_KEY
from ddtrace.contrib.logging import patch
from ddtrace.contrib.logging import unpatch
from ddtrace.contrib.logging.patch import RECORD_ATTR_SPAN_ID
from ddtrace.contrib.logging.patch import RECORD_ATTR_TRACE_ID
from ddtrace.vendor import wrapt
from tests.utils import TracerTestCase
logger = logging.getLogger()
logger.level = logging.INFO
DEFAULT_FORMAT = (
"%(message)s - dd.service=%(dd.service)s dd.version=%(dd.version)s dd.env=%(dd.env)s"
" dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s"
)
def current_span(tracer=None):
if not tracer:
tracer = ddtrace.tracer
return tracer.current_span()
class AssertFilter(logging.Filter):
def filter(self, record):
trace_id = getattr(record, RECORD_ATTR_TRACE_ID)
assert isinstance(trace_id, six.string_types)
span_id = getattr(record, RECORD_ATTR_SPAN_ID)
assert isinstance(span_id, six.string_types)
return True
def capture_function_log(func, fmt=DEFAULT_FORMAT, logger_override=None):
if logger_override is not None:
logger_to_capture = logger_override
else:
logger_to_capture = logger
# add stream handler to capture output
out = StringIO()
sh = logging.StreamHandler(out)
try:
formatter = logging.Formatter(fmt)
sh.setFormatter(formatter)
logger_to_capture.addHandler(sh)
assert_filter = AssertFilter()
logger_to_capture.addFilter(assert_filter)
result = func()
finally:
logger_to_capture.removeHandler(sh)
logger_to_capture.removeFilter(assert_filter)
return out.getvalue().strip(), result
class LoggingTestCase(TracerTestCase):
def setUp(self):
patch()
super(LoggingTestCase, self).setUp()
def tearDown(self):
unpatch()
super(LoggingTestCase, self).tearDown()
def test_patch(self):
"""
Confirm patching was successful
"""
log = logging.getLogger()
self.assertTrue(isinstance(log.makeRecord, wrapt.BoundFunctionWrapper))
unpatch()
log = logging.getLogger()
self.assertFalse(isinstance(log.makeRecord, wrapt.BoundFunctionWrapper))
def _test_logging(self, create_span, service="", version="", env=""):
def func():
span = create_span()
logger.info("Hello!")
if span:
span.finish()
return span
with self.override_config("logging", dict(tracer=self.tracer)):
# with format string for trace info
output, span = capture_function_log(func)
trace_id = 0
span_id = 0
if span:
trace_id = span.trace_id
span_id = span.span_id
assert output == "Hello! - dd.service={} dd.version={} dd.env={} dd.trace_id={} dd.span_id={}".format(
service, version, env, trace_id, span_id
)
# without format string
output, _ = capture_function_log(func, fmt="%(message)s")
assert output == "Hello!"
def test_log_trace(self):
"""
Check logging patched and formatter including trace info
"""
def create_span():
return self.tracer.trace("test.logging")
self._test_logging(create_span=create_span)
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
def test_log_trace_service(self):
def create_span():
return self.tracer.trace("test.logging", service="logging")
self._test_logging(create_span=create_span)
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TAGS="service:ddtagservice,env:ddenv,version:ddversion"))
def test_log_DD_TAGS(self):
def create_span():
return self.tracer.trace("test.logging")
self._test_logging(create_span=create_span, service="ddtagservice", version="ddversion", env="ddenv")
def test_log_trace_version(self):
def create_span():
span = self.tracer.trace("test.logging")
span.set_tag(VERSION_KEY, "manual.version")
return span
self._test_logging(create_span=create_span, version="")
# Setting global config version and overriding with span specific value
# We always want the globals in the logs
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
def test_log_trace_env(self):
"""
Check logging patched and formatter including trace info
"""
def create_span():
span = self.tracer.trace("test.logging")
span.set_tag(ENV_KEY, "manual.env")
return span
self._test_logging(create_span=create_span, env="")
# Setting global config env and overriding with span specific value
# We always want the globals in the logs
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
def test_log_no_trace(self):
"""
        Check traced function logging patched and formatter not including trace info
"""
def create_span():
return None
self._test_logging(create_span=create_span)
with self.override_global_config(dict(version="global.version", env="global.env")):
self._test_logging(create_span=create_span, version="global.version", env="global.env")
|
# -*- coding: utf-8 -*-
'''
Helpful decorators for module writing
'''
# Import python libs
from __future__ import absolute_import
import inspect
import logging
import time
from functools import wraps
from collections import defaultdict
# Import salt libs
import salt.utils
import salt.utils.args
from salt.exceptions import CommandNotFoundError, CommandExecutionError, SaltConfigurationError
from salt.version import SaltStackVersion, __saltstack_version__
from salt.log import LOG_LEVELS
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
class Depends(object):
'''
This decorator will check the module when it is loaded and check that the
dependencies passed in are in the globals of the module. If not, it will
cause the function to be unloaded (or replaced)
'''
# kind -> Dependency -> list of things that depend on it
dependency_dict = defaultdict(lambda: defaultdict(dict))
def __init__(self, *dependencies, **kwargs):
'''
The decorator is instantiated with a list of dependencies (string of
global name)
An example use of this would be:
@depends('modulename')
def test():
return 'foo'
OR
@depends('modulename', fallback_function=function)
def test():
return 'foo'
'''
log.trace(
'Depends decorator instantiated with dep list of {0}'.format(
dependencies
)
)
self.dependencies = dependencies
self.fallback_function = kwargs.get('fallback_function')
def __call__(self, function):
'''
The decorator is "__call__"d with the function, we take that function
and determine which module and function name it is to store in the
class wide depandancy_dict
'''
try:
# This inspect call may fail under certain conditions in the loader. Possibly related to
# a Python bug here:
# http://bugs.python.org/issue17735
frame = inspect.stack()[1][0]
# due to missing *.py files under esky we cannot use inspect.getmodule
# module name is something like salt.loaded.int.modules.test
_, kind, mod_name = frame.f_globals['__name__'].rsplit('.', 2)
fun_name = function.__name__
for dep in self.dependencies:
self.dependency_dict[kind][dep][(mod_name, fun_name)] = \
(frame, self.fallback_function)
except Exception as exc:
log.error('Exception encountered when attempting to inspect frame in '
'dependency decorator: {0}'.format(exc))
return function
@classmethod
def enforce_dependencies(cls, functions, kind):
'''
This is a class global method to enforce the dependencies that you
currently know about.
It will modify the "functions" dict and remove/replace modules that
are missing dependencies.
'''
for dependency, dependent_dict in six.iteritems(cls.dependency_dict[kind]):
for (mod_name, func_name), (frame, fallback_function) in six.iteritems(dependent_dict):
# check if dependency is loaded
if dependency is True:
log.trace(
'Dependency for {0}.{1} exists, not unloading'.format(
mod_name,
func_name
)
)
continue
# check if you have the dependency
if dependency in frame.f_globals \
or dependency in frame.f_locals:
log.trace(
'Dependency ({0}) already loaded inside {1}, '
'skipping'.format(
dependency,
mod_name
)
)
continue
log.trace(
'Unloading {0}.{1} because dependency ({2}) is not '
'imported'.format(
mod_name,
func_name,
dependency
)
)
# if not, unload the function
if frame:
try:
func_name = frame.f_globals['__func_alias__'][func_name]
except (AttributeError, KeyError):
pass
mod_key = '{0}.{1}'.format(mod_name, func_name)
# if we don't have this module loaded, skip it!
if mod_key not in functions:
continue
try:
if fallback_function is not None:
functions[mod_key] = fallback_function
else:
del functions[mod_key]
except AttributeError:
# we already did???
log.trace('{0} already removed, skipping'.format(mod_key))
continue
depends = Depends
def timing(function):
'''
Decorator wrapper to log execution time, for profiling purposes
'''
@wraps(function)
def wrapped(*args, **kwargs):
start_time = time.time()
ret = function(*args, **salt.utils.clean_kwargs(**kwargs))
end_time = time.time()
if function.__module__.startswith('salt.loaded.int.'):
mod_name = function.__module__[16:]
else:
mod_name = function.__module__
log.profile(
'Function {0}.{1} took {2:.20f} seconds to execute'.format(
mod_name,
function.__name__,
end_time - start_time
)
)
return ret
return wrapped
def which(exe):
'''
Decorator wrapper for salt.utils.which
'''
def wrapper(function):
def wrapped(*args, **kwargs):
if salt.utils.which(exe) is None:
raise CommandNotFoundError(
'The \'{0}\' binary was not found in $PATH.'.format(exe)
)
return function(*args, **kwargs)
return identical_signature_wrapper(function, wrapped)
return wrapper
def which_bin(exes):
'''
Decorator wrapper for salt.utils.which_bin
'''
def wrapper(function):
def wrapped(*args, **kwargs):
if salt.utils.which_bin(exes) is None:
raise CommandNotFoundError(
                    'None of the provided binaries ({0}) were found '
                    'in $PATH.'.format(
['\'{0}\''.format(exe) for exe in exes]
)
)
return function(*args, **kwargs)
return identical_signature_wrapper(function, wrapped)
return wrapper
def identical_signature_wrapper(original_function, wrapped_function):
'''
Return a function with identical signature as ``original_function``'s which
will call the ``wrapped_function``.
'''
context = {'__wrapped__': wrapped_function}
function_def = compile(
'def {0}({1}):\n'
' return __wrapped__({2})'.format(
# Keep the original function name
original_function.__name__,
# The function signature including defaults, i.e., 'timeout=1'
inspect.formatargspec(
*salt.utils.args.get_function_argspec(original_function)
)[1:-1],
# The function signature without the defaults
inspect.formatargspec(
formatvalue=lambda val: '',
*salt.utils.args.get_function_argspec(original_function)
)[1:-1]
),
'<string>',
'exec'
)
six.exec_(function_def, context)
return wraps(original_function)(context[original_function.__name__])
def memoize(func):
'''
Memoize aka cache the return output of a function
given a specific set of arguments
    .. versionchanged:: 2016.3.4
Added **kwargs support.
'''
cache = {}
@wraps(func)
def _memoize(*args, **kwargs):
str_args = []
for arg in args:
if not isinstance(arg, six.string_types):
str_args.append(str(arg))
else:
str_args.append(arg)
args_ = ','.join(list(str_args) + ['{0}={1}'.format(k, kwargs[k]) for k in sorted(kwargs)])
if args_ not in cache:
cache[args_] = func(*args, **kwargs)
return cache[args_]
return _memoize
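# Illustrative usage (the functions below are hypothetical, not part of salt):
#
#     @memoize
#     def resolve_host(name, family='ipv4'):
#         return expensive_dns_lookup(name, family)
#
# The cache key is built from the stringified positional args plus the sorted
# kwargs, so resolve_host('a') and resolve_host('a', family='ipv6') are cached
# under separate keys, while repeated identical calls reuse the stored result.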
class _DeprecationDecorator(object):
'''
Base mix-in class for the deprecation decorator.
Takes care of a common functionality, used in its derivatives.
'''
OPT_IN = 1
OPT_OUT = 2
def __init__(self, globals, version):
'''
Constructor.
:param globals: Module globals. Important for finding out replacement functions
:param version: Expiration version
:return:
'''
self._globals = globals
self._exp_version_name = version
self._exp_version = SaltStackVersion.from_name(self._exp_version_name)
self._curr_version = __saltstack_version__.info
self._raise_later = None
self._function = None
self._orig_f_name = None
def _get_args(self, kwargs):
'''
Extract function-specific keywords from all of the kwargs.
:param kwargs:
:return:
'''
_args = list()
_kwargs = dict()
if '__pub_arg' in kwargs: # For modules
for arg_item in kwargs.get('__pub_arg', list()):
if type(arg_item) == dict:
_kwargs.update(arg_item.copy())
else:
_args.append(arg_item)
else:
_kwargs = kwargs.copy() # For states
return _args, _kwargs
def _call_function(self, kwargs):
'''
Call target function that has been decorated.
:return:
'''
if self._raise_later:
raise self._raise_later # pylint: disable=E0702
if self._function:
args, kwargs = self._get_args(kwargs)
try:
return self._function(*args, **kwargs)
except TypeError as error:
                error = str(error).replace(self._function.__name__, self._orig_f_name)  # Hide hidden functions
log.error('Function "{f_name}" was not properly called: {error}'.format(f_name=self._orig_f_name,
error=error))
return self._function.__doc__
except Exception as error:
log.error('Unhandled exception occurred in '
'function "{f_name}: {error}'.format(f_name=self._function.__name__,
error=error))
raise error
else:
raise CommandExecutionError("Function is deprecated, but the successor function was not found.")
def __call__(self, function):
'''
        Callable method of the decorator object, invoked when
        the decorated function gets called.
:param function:
:return:
'''
self._function = function
self._orig_f_name = self._function.__name__
class _IsDeprecated(_DeprecationDecorator):
'''
    This decorator should be used only with deprecated functions
    to mark them as deprecated and alter their behavior in a corresponding way.
    This usage is only suitable if the deprecation process renames
    the function from one name to another. In case the function name or even the
    function signature stays the same, please use the 'with_deprecated' decorator instead.
It has the following functionality:
1. Put a warning level message to the log, informing that
the deprecated function has been in use.
2. Raise an exception, if deprecated function is being called,
but the lifetime of it already expired.
3. Point to the successor of the deprecated function in the
log messages as well during the blocking it, once expired.
    Usage of this decorator is as follows. In this example no successor
    is mentioned, hence the function "foo()" will be logged with a
    warning each time it is called, and blocked completely once its
    EOL is reached:
from salt.util.decorators import is_deprecated
@is_deprecated(globals(), "Beryllium")
def foo():
pass
    In the following example a successor function is mentioned, hence
    every time the function "bar()" is called, the message will suggest
    using function "baz()" instead. Once the EOL of the function
    "bar()" is reached, an exception will ask to use function "baz()" in order
    to continue:
from salt.util.decorators import is_deprecated
@is_deprecated(globals(), "Beryllium", with_successor="baz")
def bar():
pass
def baz():
pass
'''
def __init__(self, globals, version, with_successor=None):
'''
Constructor of the decorator 'is_deprecated'.
:param globals: Module globals
:param version: Version to be deprecated
:param with_successor: Successor function (optional)
:return:
'''
_DeprecationDecorator.__init__(self, globals, version)
self._successor = with_successor
def __call__(self, function):
'''
        Callable method of the decorator object, invoked when
        the decorated function gets called.
:param function:
:return:
'''
_DeprecationDecorator.__call__(self, function)
def _decorate(*args, **kwargs):
'''
Decorator function.
:param args:
:param kwargs:
:return:
'''
if self._curr_version < self._exp_version:
msg = ['The function "{f_name}" is deprecated and will '
'expire in version "{version_name}".'.format(f_name=self._function.__name__,
version_name=self._exp_version_name)]
if self._successor:
msg.append('Use successor "{successor}" instead.'.format(successor=self._successor))
log.warning(' '.join(msg))
else:
msg = ['The lifetime of the function "{f_name}" expired.'.format(f_name=self._function.__name__)]
if self._successor:
msg.append('Please use its successor "{successor}" instead.'.format(successor=self._successor))
log.warning(' '.join(msg))
raise CommandExecutionError(' '.join(msg))
return self._call_function(kwargs)
return _decorate
is_deprecated = _IsDeprecated
class _WithDeprecated(_DeprecationDecorator):
'''
This decorator should be used with the successor functions
    to mark them as new and alter their behavior in a corresponding way.
It is used alone if a function content or function signature
needs to be replaced, leaving the name of the function same.
In case function needs to be renamed or just dropped, it has
to be used in pair with 'is_deprecated' decorator.
It has the following functionality:
1. Put a warning level message to the log, in case a component
is using its deprecated version.
2. Switch between old and new function in case an older version
is configured for the desired use.
3. Raise an exception, if deprecated version reached EOL and
point out for the new version.
Usage of this decorator as follows. If 'with_name' is not specified,
then the name of the deprecated function is assumed with the "_" prefix.
In this case, in order to deprecate a function, it is required:
- Add a prefix "_" to an existing function. E.g.: "foo()" to "_foo()".
- Implement a new function with exactly the same name, just without
the prefix "_".
Example:
from salt.util.decorators import with_deprecated
@with_deprecated(globals(), "Beryllium")
def foo():
"This is a new function"
def _foo():
"This is a deprecated function"
In case there is a need to deprecate a function and rename it,
the decorator should be used with the 'with_name' parameter. This
parameter is pointing to the existing deprecated function. In this
case deprecation process as follows:
- Leave a deprecated function without changes, as is.
- Implement a new function and decorate it with this decorator.
- Set a parameter 'with_name' to the deprecated function.
- If a new function has a different name than a deprecated,
decorate a deprecated function with the 'is_deprecated' decorator
in order to let the function have a deprecated behavior.
Example:
from salt.util.decorators import with_deprecated
@with_deprecated(globals(), "Beryllium", with_name="an_old_function")
def a_new_function():
"This is a new function"
@is_deprecated(globals(), "Beryllium", with_successor="a_new_function")
def an_old_function():
"This is a deprecated function"
'''
MODULE_NAME = '__virtualname__'
CFG_USE_DEPRECATED = 'use_deprecated'
CFG_USE_SUPERSEDED = 'use_superseded'
def __init__(self, globals, version, with_name=None, policy=_DeprecationDecorator.OPT_OUT):
'''
Constructor of the decorator 'with_deprecated'
:param globals:
:param version:
:param with_name:
:param policy:
:return:
'''
_DeprecationDecorator.__init__(self, globals, version)
self._with_name = with_name
self._policy = policy
def _set_function(self, function):
'''
Based on the configuration, set to execute an old or a new function.
:return:
'''
full_name = "{m_name}.{f_name}".format(
m_name=self._globals.get(self.MODULE_NAME, '') or self._globals['__name__'].split('.')[-1],
f_name=function.__name__)
if full_name.startswith("."):
self._raise_later = CommandExecutionError('Module not found for function "{f_name}"'.format(
f_name=function.__name__))
        opts = self._globals.get('__opts__', {})
        pillar = self._globals.get('__pillar__', {})
use_deprecated = (full_name in opts.get(self.CFG_USE_DEPRECATED, list()) or
full_name in pillar.get(self.CFG_USE_DEPRECATED, list()))
use_superseded = (full_name in opts.get(self.CFG_USE_SUPERSEDED, list()) or
full_name in pillar.get(self.CFG_USE_SUPERSEDED, list()))
if use_deprecated and use_superseded:
raise SaltConfigurationError("Function '{0}' is mentioned both in deprecated "
"and superseded sections. Please remove any of that.".format(full_name))
old_function = self._globals.get(self._with_name or "_{0}".format(function.__name__))
if self._policy == self.OPT_IN:
self._function = function if use_superseded else old_function
else:
self._function = old_function if use_deprecated else function
def _is_used_deprecated(self):
'''
        Returns a (flag, func_path) tuple; the flag is True if a component
        configuration explicitly asks to use the old, deprecated version of the function.
:return:
'''
func_path = "{m_name}.{f_name}".format(
m_name=self._globals.get(self.MODULE_NAME, '') or self._globals['__name__'].split('.')[-1],
f_name=self._orig_f_name)
return func_path in self._globals.get('__opts__').get(
self.CFG_USE_DEPRECATED, list()) or func_path in self._globals.get('__pillar__').get(
self.CFG_USE_DEPRECATED, list()) or (self._policy == self.OPT_IN
and not (func_path in self._globals.get('__opts__', {}).get(
self.CFG_USE_SUPERSEDED, list()))
and not (func_path in self._globals.get('__pillar__', {}).get(
self.CFG_USE_SUPERSEDED, list()))), func_path
def __call__(self, function):
'''
        Callable method of the decorator object, invoked when
        the decorated function gets called.
:param function:
:return:
'''
_DeprecationDecorator.__call__(self, function)
def _decorate(*args, **kwargs):
'''
Decorator function.
:param args:
:param kwargs:
:return:
'''
self._set_function(function)
is_deprecated, func_path = self._is_used_deprecated()
if is_deprecated:
if self._curr_version < self._exp_version:
msg = list()
if self._with_name:
msg.append('The function "{f_name}" is deprecated and will '
'expire in version "{version_name}".'.format(
f_name=self._with_name.startswith("_") and self._orig_f_name or self._with_name,
version_name=self._exp_version_name))
msg.append('Use its successor "{successor}" instead.'.format(successor=self._orig_f_name))
else:
msg.append('The function "{f_name}" is using its deprecated version and will '
'expire in version "{version_name}".'.format(f_name=func_path,
version_name=self._exp_version_name))
log.warning(' '.join(msg))
else:
msg_patt = 'The lifetime of the function "{f_name}" expired.'
if '_' + self._orig_f_name == self._function.__name__:
msg = [msg_patt.format(f_name=self._orig_f_name),
'Please turn off its deprecated version in the configuration']
else:
msg = ['Although function "{f_name}" is called, an alias "{f_alias}" '
'is configured as its deprecated version.'.format(
f_name=self._orig_f_name, f_alias=self._with_name or self._orig_f_name),
msg_patt.format(f_name=self._with_name or self._orig_f_name),
'Please use its successor "{successor}" instead.'.format(successor=self._orig_f_name)]
log.error(' '.join(msg))
raise CommandExecutionError(' '.join(msg))
return self._call_function(kwargs)
_decorate.__doc__ = self._function.__doc__
return _decorate
with_deprecated = _WithDeprecated
def ignores_kwargs(*kwarg_names):
'''
Decorator to filter out unexpected keyword arguments from the call
kwarg_names:
List of argument names to ignore
'''
def _ignores_kwargs(fn):
def __ignores_kwargs(*args, **kwargs):
kwargs_filtered = kwargs.copy()
for name in kwarg_names:
if name in kwargs_filtered:
del kwargs_filtered[name]
return fn(*args, **kwargs_filtered)
return __ignores_kwargs
return _ignores_kwargs
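# Illustrative usage (the function below is hypothetical; the ignored names
# mirror the kind of extra keyword arguments salt injects into calls):
#
#     @ignores_kwargs('__pub_fun', '__pub_jid')
#     def deploy(target, timeout=60, **kwargs):
#         ...
#
# Any '__pub_fun'/'__pub_jid' keyword arguments passed by the caller are
# stripped from the call before ``deploy`` runs; everything else is
# forwarded unchanged.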
|
#!/usr/bin/env python3
import argparse
import boutvecma
import easyvvuq as uq
import chaospy
import os
import numpy as np
import time
import matplotlib.pyplot as plt
CAMPAIGN_NAME = "Conduction."
def refine_sampling_plan(campaign, analysis, number_of_refinements):
"""
Refine the sampling plan.
    Parameters
    ----------
    campaign
        The EasyVVUQ campaign whose active sampler is refined.
    analysis
        The analysis object holding the accepted multi-indices.
    number_of_refinements (int)
        The number of refinement iterations that must be performed.
Returns
-------
None. The new accepted indices are stored in analysis.l_norm and the admissible indices
in sampler.admissible_idx.
"""
sampler = campaign.get_active_sampler()
for _ in range(number_of_refinements):
# compute the admissible indices
sampler.look_ahead(analysis.l_norm)
print(f"Code will be evaluated {sampler.n_new_points[-1]} times")
# run the ensemble
campaign.execute().collate(progress_bar=True)
# accept one of the multi indices of the new admissible set
data_frame = campaign.get_collation_result()
analysis.adapt_dimension("T", data_frame)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
def plot_grid_2D(campaign, analysis, i, filename="out.pdf"):
fig = plt.figure(figsize=[12, 4])
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
accepted_grid = campaign.get_active_sampler().generate_grid(analysis.l_norm)
ax1.plot(accepted_grid[:, 0], accepted_grid[:, 1], "o")
ax2.plot(accepted_grid[:, 2], accepted_grid[:, 3], "o")
ax1.set_title(f"iteration {i}")
fig.tight_layout()
fig.savefig(filename)
def custom_moments_plot(results, filename, i):
fig, ax = plt.subplots()
xvalues = np.arange(len(results.describe("T", "mean")))
ax.fill_between(
xvalues,
results.describe("T", "mean") - results.describe("T", "std"),
results.describe("T", "mean") + results.describe("T", "std"),
label="std",
alpha=0.2,
)
ax.plot(xvalues, results.describe("T", "mean"), label="mean")
try:
ax.plot(xvalues, results.describe("T", "1%"), "--", label="1%", color="black")
ax.plot(xvalues, results.describe("T", "99%"), "--", label="99%", color="black")
except RuntimeError:
pass
ax.grid(True)
ax.set_ylabel("T")
ax.set_xlabel(r"$\rho$")
ax.set_title("iteration " + str(i))
ax.legend()
fig.savefig(filename)
def first_time_setup():
encoder = boutvecma.BOUTEncoder(
template_input="../../models/conduction/data/BOUT.inp"
)
# decoder = boutvecma.LogDataBOUTDecoder(variables=["T"])
decoder = boutvecma.SimpleBOUTDecoder(variables=["T"])
params = {
"conduction:chi": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:scale": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:gauss_width": {"type": "float", "min": 0.0, "max": 1e3, "default": 0.2},
"T:gauss_centre": {
"type": "float",
"min": 0.0,
"max": 2 * np.pi,
"default": np.pi,
},
}
actions = uq.actions.local_execute(
encoder,
os.path.abspath(
"../../build/models/conduction/conduction -q -q -q -q -d . |& tee run.log"
),
decoder,
root=".",
)
campaign = uq.Campaign(name=CAMPAIGN_NAME, actions=actions, params=params)
vary = {
"conduction:chi": chaospy.Uniform(0.2, 4.0),
"T:scale": chaospy.Uniform(0.5, 1.5),
"T:gauss_width": chaospy.Uniform(0.5, 1.5),
"T:gauss_centre": chaospy.Uniform(0.5 * np.pi, 1.5 * np.pi),
}
sampler = uq.sampling.SCSampler(
vary=vary,
polynomial_order=1,
quadrature_rule="C",
sparse=True,
growth=True,
midpoint_level1=True,
dimension_adaptive=True,
)
campaign.set_sampler(sampler)
print(f"Output will be in {campaign.campaign_dir}")
sampler = campaign.get_active_sampler()
print(f"Computing {sampler.n_samples} samples")
time_start = time.time()
campaign.execute().collate(progress_bar=True)
# Create an analysis class and run the analysis.
analysis = create_analysis(campaign)
campaign.apply_analysis(analysis)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
plot_grid_2D(campaign, analysis, 0, f"{campaign.campaign_dir}/grid0.png")
for i in np.arange(1, 10):
refine_once(campaign, analysis, i)
time_end = time.time()
print(f"Finished, took {time_end - time_start}")
return campaign
def create_analysis(campaign):
return uq.analysis.SCAnalysis(sampler=campaign.get_active_sampler(), qoi_cols=["T"])
def refine_once(campaign, analysis, iteration):
refine_sampling_plan(campaign, analysis, 1)
campaign.apply_analysis(analysis)
analysis.save_state(f"{campaign.campaign_dir}/analysis.state")
results = campaign.last_analysis
plot_grid_2D(
campaign,
analysis,
iteration,
f"{campaign.campaign_dir}/grid{iteration:02}.png",
)
moment_plot_filename = os.path.join(
f"{campaign.campaign_dir}", f"moments{iteration:02}.png"
)
sobols_plot_filename = os.path.join(
f"{campaign.campaign_dir}", f"sobols_first{iteration:02}.png"
)
results.plot_sobols_first(
"T",
ylabel=f"iteration{iteration}",
xlabel=r"$\rho$",
filename=sobols_plot_filename,
)
plt.ylim(0, 1)
plt.savefig(f"{campaign.campaign_dir}/sobols{iteration:02}.png")
custom_moments_plot(results, moment_plot_filename, iteration)
with open(f"{campaign.campaign_dir}/last_iteration", "w") as f:
f.write(f"{iteration}")
def plot_results(campaign, moment_plot_filename, sobols_plot_filename):
results = campaign.get_last_analysis()
results.plot_sobols_first("T", xlabel=r"$\rho$", filename=sobols_plot_filename)
fig, ax = plt.subplots()
xvalues = np.arange(len(results.describe("T", "mean")))
ax.fill_between(
xvalues,
results.describe("T", "mean") - results.describe("T", "std"),
results.describe("T", "mean") + results.describe("T", "std"),
label="std",
alpha=0.2,
)
ax.plot(xvalues, results.describe("T", "mean"), label="mean")
try:
ax.plot(xvalues, results.describe("T", "1%"), "--", label="1%", color="black")
ax.plot(xvalues, results.describe("T", "99%"), "--", label="99%", color="black")
except RuntimeError:
pass
ax.grid(True)
ax.set_ylabel("T")
ax.set_xlabel(r"$\rho$")
ax.legend()
fig.savefig(moment_plot_filename)
print(f"Results are in:\n\t{moment_plot_filename}\n\t{sobols_plot_filename}")
def reload_campaign(directory):
"""Reload a campaign from a directory
Returns the campaign, analysis, and last iteration number
"""
campaign = uq.Campaign(
name=CAMPAIGN_NAME,
db_location=f"sqlite:///{os.path.abspath(directory)}/campaign.db",
)
analysis = create_analysis(campaign)
analysis.load_state(f"{campaign.campaign_dir}/analysis.state")
with open(f"{campaign.campaign_dir}/last_iteration", "r") as f:
iteration = int(f.read())
return campaign, analysis, iteration
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"conduction_sc",
description="Adaptive dimension refinement for 1D conduction model",
)
parser.add_argument(
"--restart", type=str, help="Restart previous campaign", default=None
)
parser.add_argument(
"-n", "--refinement-num", type=int, default=1, help="Number of refinements"
)
args = parser.parse_args()
if args.restart is None:
first_time_setup()
else:
campaign, analysis, last_iteration = reload_campaign(args.restart)
for iteration in range(
last_iteration + 1, last_iteration + args.refinement_num + 1
):
refine_once(campaign, analysis, iteration)
|
import machine, time
from machine import Pin
__version__ = '0.2.0'
__author__ = 'Roberto Sánchez'
__license__ = "Apache License 2.0. https://www.apache.org/licenses/LICENSE-2.0"
class HCSR04:
"""
    Driver to use the ultrasonic sensor HC-SR04.
The sensor range is between 2cm and 4m.
The timeouts received listening to echo pin are converted to OSError('Out of range')
"""
# echo_timeout_us is based in chip range limit (400cm)
def __init__(self, trigger_pin, echo_pin, echo_timeout_us=500*2*30):
"""
trigger_pin: Output pin to send pulses
echo_pin: Readonly pin to measure the distance. The pin should be protected with 1k resistor
echo_timeout_us: Timeout in microseconds to listen to echo pin.
By default is based in sensor limit range (4m)
"""
self.echo_timeout_us = echo_timeout_us
# Init trigger pin (out)
self.trigger = Pin(trigger_pin, mode=Pin.OUT, pull=None)
self.trigger.value(0)
# Init echo pin (in)
self.echo = Pin(echo_pin, mode=Pin.IN, pull=None)
def _send_pulse_and_wait(self):
"""
Send the pulse to trigger and listen on echo pin.
We use the method `machine.time_pulse_us()` to get the microseconds until the echo is received.
"""
self.trigger.value(0) # Stabilize the sensor
time.sleep_us(5)
self.trigger.value(1)
# Send a 10us pulse.
time.sleep_us(10)
self.trigger.value(0)
try:
pulse_time = machine.time_pulse_us(self.echo, 1, self.echo_timeout_us)
return pulse_time
except OSError as ex:
if ex.args[0] == 110: # 110 = ETIMEDOUT
raise OSError('Out of range')
raise ex
def distance_mm(self):
"""
        Get the distance in millimeters without floating point operations.
"""
pulse_time = self._send_pulse_and_wait()
        # To calculate the distance we take the pulse_time and divide it by 2
        # (the pulse travels the distance twice) and by 2.91, because
        # the speed of sound in air (343.2 m/s) is equivalent to
        # 0.3432 mm/us, i.e. 1 mm every 2.91 us
        # pulse_time // 2 // 2.91 -> pulse_time // 5.82 -> pulse_time * 100 // 582
mm = pulse_time * 100 // 582
return mm
def distance_cm(self):
"""
Get the distance in centimeters with floating point operations.
It returns a float
"""
pulse_time = self._send_pulse_and_wait()
        # To calculate the distance we take the pulse_time and divide it by 2
        # (the pulse travels the distance twice) and by 29.1, because
        # the speed of sound in air (343.2 m/s) is equivalent to
        # 0.03432 cm/us, i.e. 1 cm every 29.1 us
cms = (pulse_time / 2) / 29.1
return cms
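# Example usage (illustrative; the module name and pin numbers are assumptions
# that depend on how this file is saved and how the sensor is wired):
#
#     from hcsr04 import HCSR04
#     sensor = HCSR04(trigger_pin=5, echo_pin=4)
#     print(sensor.distance_cm())   # float, e.g. 27.3
#     print(sensor.distance_mm())   # int, e.g. 273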
|
#!/usr/bin/env python
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import multiprocessing
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import time
from distutils import sysconfig
import setuptools
# Flush output on newlines
sys.stdout.reconfigure(line_buffering=True)
os_name = platform.system()
# Work around breaking change in setuptools 60
setup_py_flags = []
if int(setuptools.__version__.split(".")[0]) >= 60:
setup_py_flags = ["--single-version-externally-managed", "--root=/"]
class BooleanFlag(argparse.Action):
def __init__(
self,
option_strings,
dest,
default,
required=False,
help="",
metavar=None,
):
assert all(not opt.startswith("--no") for opt in option_strings)
def flatten(list):
return [item for sublist in list for item in sublist]
option_strings = flatten(
[
[opt, "--no-" + opt[2:], "--no" + opt[2:]]
if opt.startswith("--")
else [opt]
for opt in option_strings
]
)
super().__init__(
option_strings,
dest,
nargs=0,
const=None,
default=default,
type=bool,
choices=None,
required=required,
help=help,
metavar=metavar,
)
def __call__(self, parser, namespace, values, option_string):
setattr(namespace, self.dest, not option_string.startswith("--no"))
required_thrust_version = "cuda-11.2"
# Global variable for verbose installation
verbose_global = False
def verbose_check_call(*args, **kwargs):
if verbose_global:
print('Executing: "', " ".join(*args), '" with ', kwargs)
subprocess.check_call(*args, **kwargs)
def verbose_check_output(*args, **kwargs):
if verbose_global:
print('Executing: "', " ".join(*args), '" with ', kwargs)
return subprocess.check_output(*args, **kwargs)
def find_active_python_version_and_path():
# Launching a sub-process to do this in a general way seems hard
version = (
str(sys.version_info.major)
+ "."
+ str(sys.version_info.minor)
+ "."
+ str(sys.version_info.micro)
)
cv = sysconfig.get_config_vars()
paths = [os.path.join(cv[p], cv["LDLIBRARY"]) for p in ("LIBDIR", "LIBPL")]
# ensure that static libraries are replaced with the dynamic version
paths = [
os.path.splitext(p)[0] + (".dylib" if os_name == "Darwin" else ".so")
for p in paths
]
paths = [p for p in paths if os.path.isfile(p)]
e = "Error: could not auto-locate python library."
assert paths, e
return version, paths[0]
def git_clone(repo_dir, url, branch=None, tag=None, commit=None):
assert branch is not None or tag is not None or commit is not None
if branch is not None:
verbose_check_call(
["git", "clone", "--recursive", "-b", branch, url, repo_dir]
)
elif commit is not None:
verbose_check_call(["git", "clone", "--recursive", url, repo_dir])
verbose_check_call(["git", "checkout", commit], cwd=repo_dir)
verbose_check_call(
["git", "submodule", "update", "--init"], cwd=repo_dir
)
git_reset(repo_dir, commit)
else:
verbose_check_call(
[
"git",
"clone",
"--recursive",
"--single-branch",
"-b",
tag,
url,
repo_dir,
]
)
verbose_check_call(["git", "checkout", "-b", "master"], cwd=repo_dir)
def git_reset(repo_dir, refspec):
verbose_check_call(["git", "reset", "--hard", refspec], cwd=repo_dir)
def git_update(repo_dir, branch=None, tag=None, commit=None):
if branch is not None:
verbose_check_call(["git", "fetch"], cwd=repo_dir)
verbose_check_call(["git", "checkout", branch], cwd=repo_dir)
verbose_check_call(["git", "pull", "--ff-only"], cwd=repo_dir)
else:
verbose_check_call(["git", "fetch"], cwd=repo_dir)
verbose_check_call(["git", "checkout", commit or tag], cwd=repo_dir)
def load_json_config(filename):
try:
with open(filename, "r") as f:
return json.load(f)
except IOError:
return None
def dump_json_config(filename, value):
with open(filename, "w") as f:
return json.dump(value, f)
def symlink(from_path, to_path):
if not os.path.lexists(to_path):
os.symlink(from_path, to_path)
def install_gasnet(gasnet_dir, conduit, thread_count):
print("Legate is installing GASNet into a local directory...")
temp_dir = tempfile.mkdtemp()
git_clone(
temp_dir,
url="https://github.com/StanfordLegion/gasnet.git",
branch="master",
)
# Update the configuration file with the prefix for our output
# Then we can invoke make
verbose_check_call(
[
"make",
"-j",
str(thread_count),
"CONDUIT=" + str(conduit),
"GASNET_INSTALL_DIR=" + str(gasnet_dir),
],
cwd=temp_dir,
)
shutil.rmtree(temp_dir)
def install_legion(legion_src_dir, branch, commit=None):
print("Legate is installing Legion into a local directory...")
# For now all we have to do is clone legion since we build it with Legate
git_clone(
legion_src_dir,
url="https://gitlab.com/StanfordLegion/legion.git",
branch=branch,
commit=commit,
)
def install_thrust(thrust_dir):
print("Legate is installing Thrust into a local directory...")
git_clone(
thrust_dir,
url="https://github.com/thrust/thrust.git",
tag=required_thrust_version,
)
def update_legion(legion_src_dir, branch, commit=None):
# Make sure we are on the right branch for single/multi-node
git_update(legion_src_dir, branch=branch, commit=commit)
def build_legion(
legion_src_dir,
install_dir,
cmake,
cmake_exe,
cuda_dir,
debug,
debug_release,
check_bounds,
cuda,
arch,
openmp,
march,
llvm,
hdf,
spy,
gasnet,
gasnet_dir,
conduit,
pyversion,
pylib_name,
maxdim,
maxfields,
clean_first,
extra_flags,
thread_count,
verbose,
):
no_hijack = True
if cuda and os.environ.get("USE_CUDART_HIJACK", "0") == "1":
print(
"""
#####################################################################
Warning: Realm's CUDA runtime hijack is incompatible with NCCL.
Please note that your code will crash catastrophically as soon as it
calls into NCCL either directly or through some other Legate library.
#####################################################################
"""
)
time.sleep(10)
no_hijack = False
if cmake:
build_dir = os.path.join(legion_src_dir, "build")
try:
shutil.rmtree(build_dir)
except FileNotFoundError:
pass
if not os.path.exists(build_dir):
os.mkdir(build_dir)
flags = (
[
"-DCMAKE_BUILD_TYPE=%s"
% (
"Debug"
if debug
else "RelWithDebInfo"
if debug_release
else "Release"
),
"-DLegion_MAX_DIM=%s" % (str(maxdim)),
"-DLegion_MAX_FIELDS=%s" % (str(maxfields)),
"-DLegion_USE_CUDA=%s" % ("ON" if cuda else "OFF"),
"-DLegion_GPU_ARCH=%s" % arch,
"-DLegion_USE_OpenMP=%s" % ("ON" if openmp else "OFF"),
"-DBUILD_MARCH=%s" % march,
"-DLegion_USE_LLVM=%s" % ("ON" if llvm else "OFF"),
"-DLegion_USE_GASNet=%s" % ("ON" if gasnet else "OFF"),
"-DLegion_USE_HDF5=%s" % ("ON" if hdf else "OFF"),
"-DCMAKE_INSTALL_PREFIX=%s" % (os.path.realpath(install_dir)),
"-DLegion_USE_Python=On",
"-DLegion_Python_Version=%s" % pyversion,
"-DLegion_REDOP_COMPLEX=On",
"-DLegion_REDOP_HALF=On",
"-DBUILD_SHARED_LIBS=ON",
"-DLegion_BUILD_BINDINGS=On",
]
+ extra_flags
+ (["-DLegion_BOUNDS_CHECKS=On"] if check_bounds else [])
+ (["-DLegion_HIJACK_CUDART=Off"] if no_hijack else [])
+ (
["-DGASNet_ROOT_DIR=%s" % gasnet_dir]
if gasnet_dir is not None
else []
)
+ (
["-DGASNet_CONDUIT=%s" % conduit]
if conduit is not None
else []
)
+ (
["-DCUDA_TOOLKIT_ROOT_DIR=%s" % cuda_dir]
if cuda_dir is not None
else []
)
+ (
["-DCMAKE_CXX_COMPILER=%s" % os.environ["CXX"]]
if "CXX" in os.environ
else []
)
+ (
["-DCMAKE_CXX_FLAGS=%s" % os.environ["CC_FLAGS"]]
if "CC_FLAGS" in os.environ
else []
)
)
make_flags = ["VERBOSE=1"] if verbose else []
make_flags += ["-C", os.path.realpath(build_dir)]
if spy:
raise NotImplementedError("Need support for Legion Spy with cmake")
try:
subprocess.check_output([cmake_exe, "--version"])
except OSError:
print(
"Error: CMake is not installed or otherwise not executable. "
"Please check"
)
print(
"your CMake installation and try again. You can use the "
"--with-cmake flag"
)
print("to specify the CMake executable if it is not on PATH.")
print()
print("Attempted to execute: %s" % cmake_exe)
sys.exit(1)
verbose_check_call(
[cmake_exe] + flags + [legion_src_dir], cwd=build_dir
)
verbose_check_call(
["make"] + make_flags + ["-j", str(thread_count), "install"],
cwd=build_dir,
)
# TODO: install legion spy and legion prof
else:
version = pyversion.split(".")
flags = (
[
"LG_RT_DIR=%s" % (os.path.join(legion_src_dir, "runtime")),
"DEBUG=%s" % (1 if debug else 0),
"DEBUG_RELEASE=%s" % (1 if debug_release else 0),
"MAX_DIM=%s" % (str(maxdim)),
"MAX_FIELDS=%s" % (str(maxfields)),
"USE_CUDA=%s" % (1 if cuda else 0),
"GPU_ARCH=%s" % arch,
"USE_OPENMP=%s" % (1 if openmp else 0),
"MARCH=%s" % march,
"USE_LLVM=%s" % (1 if llvm else 0),
"USE_GASNET=%s" % (1 if gasnet else 0),
"USE_HDF=%s" % (1 if hdf else 0),
"PREFIX=%s" % (os.path.realpath(install_dir)),
"PYTHON_VERSION_MAJOR=%s" % version[0],
"PYTHON_VERSION_MINOR=%s" % version[1],
"PYTHON_LIB=%s" % pylib_name,
"FORCE_PYTHON=1",
"USE_COMPLEX=1",
"USE_HALF=1",
"USE_SPY=%s" % (1 if spy else 0),
"REALM_USE_CUDART_HIJACK=%s" % (1 if not no_hijack else 0),
]
+ extra_flags
+ (["BOUNDS_CHECKS=1"] if check_bounds else [])
+ (["GASNET=%s" % gasnet_dir] if gasnet_dir is not None else [])
+ (["CONDUIT=%s" % conduit] if conduit is not None else [])
+ (["CUDA=%s" % cuda_dir] if cuda_dir is not None else [])
)
legion_python_dir = os.path.join(legion_src_dir, "bindings", "python")
if clean_first:
verbose_check_call(
["make"] + flags + ["clean"], cwd=legion_python_dir
)
# Explicitly ask for C++17, otherwise the Legion build will use C++11.
env = dict(os.environ.items())
env["CXXFLAGS"] = "-std=c++17 " + env.get("CXXFLAGS", "")
verbose_check_call(
["make"] + flags + ["-j", str(thread_count), "install"],
cwd=legion_python_dir,
env=env,
)
verbose_check_call(
[
sys.executable,
"setup.py",
"install",
"--prefix",
str(os.path.realpath(install_dir)),
]
+ setup_py_flags,
cwd=legion_python_dir,
)
verbose_check_call(
[
"cp",
"legion_spy.py",
os.path.join(install_dir, "share", "legate", "legion_spy.py"),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
verbose_check_call(
[
"cp",
"legion_prof.py",
os.path.join(install_dir, "share", "legate", "legion_prof.py"),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
verbose_check_call(
[
"cp",
"legion_serializer.py",
os.path.join(
install_dir, "share", "legate", "legion_serializer.py"
),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
verbose_check_call(
[
"cp",
"legion_prof_copy.html.template",
os.path.join(
install_dir,
"share",
"legate",
"legion_prof_copy.html.template",
),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
verbose_check_call(
[
"cp",
"-r",
"legion_prof_files",
os.path.join(install_dir, "share", "legate", "legion_prof_files"),
],
cwd=os.path.join(legion_src_dir, "tools"),
)
def build_legate_core(
install_dir,
legate_core_dir,
cmake,
cmake_exe,
cuda_dir,
nccl_dir,
debug,
debug_release,
cuda,
arch,
openmp,
march,
spy,
gasnet,
clean_first,
thread_count,
verbose,
unknown,
):
src_dir = os.path.join(legate_core_dir, "src")
if cmake:
print("Warning: CMake is currently not supported for Legate build.")
print("Using GNU Make for now.")
make_flags = [
"LEGATE_DIR=%s" % install_dir,
"DEBUG=%s" % (1 if debug else 0),
"DEBUG_RELEASE=%s" % (1 if debug_release else 0),
"USE_CUDA=%s" % (1 if cuda else 0),
"USE_OPENMP=%s" % (1 if openmp else 0),
"MARCH=%s" % march,
"GPU_ARCH=%s" % arch,
"PREFIX=%s" % str(install_dir),
"USE_GASNET=%s" % (1 if gasnet else 0),
"NCCL_DIR=%s" % nccl_dir,
] + (["CUDA=%s" % cuda_dir] if cuda_dir is not None else [])
if clean_first:
verbose_check_call(["make"] + make_flags + ["clean"], cwd=src_dir)
verbose_check_call(
["make"] + make_flags + ["-j", str(thread_count), "install"],
cwd=src_dir,
)
# Fill in config.mk.in and copy it to the target destination
with open(os.path.join(src_dir, "config.mk.in")) as f:
content = f.read()
content = content.format(
debug=repr(1 if debug else 0),
debug_release=repr(1 if debug_release else 0),
cuda=repr(1 if cuda else 0),
arch=(arch if arch is not None else ""),
cuda_dir=(cuda_dir if cuda_dir is not None else ""),
openmp=repr(1 if openmp else 0),
march=march,
gasnet=repr(1 if gasnet else 0),
)
with open(os.path.join(src_dir, "config.mk"), "wb") as f:
f.write(content.encode("utf-8"))
cmd = ["cp", "config.mk", os.path.join(install_dir, "share", "legate")]
verbose_check_call(cmd, cwd=src_dir)
# Then run setup.py
cmd = [
sys.executable,
"setup.py",
"install",
"--recurse",
] + setup_py_flags
if unknown is not None:
try:
prefix_loc = unknown.index("--prefix")
cmd.extend(unknown[prefix_loc : prefix_loc + 2])
except ValueError:
cmd += ["--prefix", str(install_dir)]
else:
cmd += ["--prefix", str(install_dir)]
verbose_check_call(cmd, cwd=legate_core_dir)
def install(
gasnet,
cuda,
arch,
openmp,
march,
hdf,
llvm,
spy,
conduit,
nccl_dir,
cmake,
cmake_exe,
install_dir,
gasnet_dir,
pylib_name,
cuda_dir,
maxdim,
maxfields,
debug,
debug_release,
check_bounds,
clean_first,
extra_flags,
thread_count,
verbose,
thrust_dir,
legion_branch,
unknown,
):
global verbose_global
verbose_global = verbose
legate_core_dir = os.path.dirname(os.path.realpath(__file__))
cmake_config = os.path.join(legate_core_dir, ".cmake.json")
dump_json_config(cmake_config, cmake)
if pylib_name is None:
pyversion, pylib_name = find_active_python_version_and_path()
else:
f_name = os.path.split(pylib_name)[-1]
match = re.match(r"^libpython(\d\d?\.\d\d?)", f_name)
e = "Unable to get version from library name {}".format(pylib_name)
assert match, e
pyversion = match.group(1)
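# Illustrative note (not in the original script): a pylib_name such as
# "libpython3.8.so" matches the pattern above and yields pyversion == "3.8".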
print("Using python lib and version: {}, {}".format(pylib_name, pyversion))
install_dir_config = os.path.join(legate_core_dir, ".install-dir.json")
if install_dir is None:
install_dir = load_json_config(install_dir_config)
if install_dir is None:
install_dir = os.path.join(legate_core_dir, "install")
install_dir = os.path.realpath(install_dir)
dump_json_config(install_dir_config, install_dir)
os.makedirs(os.path.join(install_dir, "share", "legate"), exist_ok=True)
if thread_count is None:
thread_count = multiprocessing.cpu_count()
# Save the maxdim config
maxdim_config = os.path.join(legate_core_dir, ".maxdim.json")
# Check the max dimensions
if maxdim < 1 or maxdim > 9:
raise Exception(
"The maximum number of Legate dimensions must be between 1 and 9 "
"inclusive"
)
dump_json_config(maxdim_config, str(maxdim))
# Save the maxfields config
maxfields_config = os.path.join(legate_core_dir, ".maxfields.json")
# Check that max fields is between 32 and 4096 and is a power of 2
if maxfields not in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
raise Exception(
"The maximum number of Legate fields must be a power of 2 between "
"32 and 4096 inclusive"
)
dump_json_config(maxfields_config, str(maxfields))
# If the user asked for GASNet, determine the conduit and install GASNet locally if we don't already have it
if gasnet:
conduit_config = os.path.join(legate_core_dir, ".conduit.json")
if conduit is None:
conduit = load_json_config(conduit_config)
if conduit is None:
raise Exception(
"The first time you use GASNet you need to tell us "
'which conduit to use with the "--conduit" flag'
)
dump_json_config(conduit_config, conduit)
gasnet_config = os.path.join(
legate_core_dir, ".gasnet" + str(conduit) + ".json"
)
if gasnet_dir is None:
gasnet_dir = load_json_config(gasnet_config)
if gasnet_dir is None:
gasnet_dir = os.path.join(install_dir, "gasnet")
if not os.path.exists(gasnet_dir):
install_gasnet(gasnet_dir, conduit, thread_count)
dump_json_config(gasnet_config, gasnet_dir)
# If the user asked for CUDA, make sure we know where the install
# directory is
if cuda:
cuda_config = os.path.join(legate_core_dir, ".cuda.json")
if cuda_dir is None:
cuda_dir = load_json_config(cuda_config)
if cuda_dir is None:
raise Exception(
"The first time you use CUDA you need to tell Legate "
'where CUDA is installed with the "--with-cuda" flag.'
)
dump_json_config(cuda_config, cuda_dir)
arch_config = os.path.join(legate_core_dir, ".arch.json")
if arch is None:
arch = load_json_config(arch_config)
if arch is None:
try:
import pynvml
pynvml.nvmlInit()
major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(
pynvml.nvmlDeviceGetHandleByIndex(0)
)
arch = f"{major}{minor}"
pynvml.nvmlShutdown()
except Exception as exc:
raise Exception(
"Could not auto-detect CUDA GPU architecture, please "
"specify the target architecture using --arch"
) from exc
dump_json_config(arch_config, arch)
nccl_config = os.path.join(legate_core_dir, ".nccl.json")
if nccl_dir is None:
nccl_dir = load_json_config(nccl_config)
if nccl_dir is None:
raise Exception(
"The first time you use CUDA you need to tell Legate "
'where NCCL is installed with the "--with-nccl" flag.'
)
dump_json_config(nccl_config, nccl_dir)
# install a stable version of Thrust
thrust_config = os.path.join(legate_core_dir, ".thrust.json")
if thrust_dir is None:
thrust_dir = load_json_config(thrust_config)
if thrust_dir is None:
thrust_dir = os.path.join(install_dir, "thrust")
thrust_dir = os.path.realpath(thrust_dir)
if not os.path.exists(thrust_dir):
install_thrust(thrust_dir)
# Simply put Thrust into the environment.
os.environ["CXXFLAGS"] = (
"-I" + thrust_dir + " " + os.environ.get("CXXFLAGS", "")
)
dump_json_config(thrust_config, thrust_dir)
# Build Legion from scratch.
legion_src_dir = os.path.join(legate_core_dir, "legion")
if not os.path.exists(legion_src_dir):
install_legion(legion_src_dir, branch=legion_branch)
elif clean_first:
update_legion(legion_src_dir, branch=legion_branch)
build_legion(
legion_src_dir,
install_dir,
cmake,
cmake_exe,
cuda_dir,
debug,
debug_release,
check_bounds,
cuda,
arch,
openmp,
march,
llvm,
hdf,
spy,
gasnet,
gasnet_dir,
conduit,
pyversion,
pylib_name,
maxdim,
maxfields,
clean_first,
extra_flags,
thread_count,
verbose,
)
build_legate_core(
install_dir,
legate_core_dir,
cmake,
cmake_exe,
cuda_dir,
nccl_dir,
debug,
debug_release,
cuda,
arch,
openmp,
march,
spy,
gasnet,
clean_first,
thread_count,
verbose,
unknown,
)
# Copy any executables that we need for legate functionality
verbose_check_call(
["cp", "legate.py", os.path.join(install_dir, "bin", "legate")],
cwd=legate_core_dir,
)
verbose_check_call(
[
"cp",
"scripts/lgpatch.py",
os.path.join(install_dir, "bin", "lgpatch"),
],
cwd=legate_core_dir,
)
verbose_check_call(
["cp", "bind.sh", os.path.join(install_dir, "bin", "bind.sh")],
cwd=legate_core_dir,
)
if cuda:
# Copy CUDA configuration that the launcher needs to find CUDA path
verbose_check_call(
[
"cp",
".cuda.json",
os.path.join(install_dir, "share", "legate", ".cuda.json"),
],
cwd=legate_core_dir,
)
# Record the path to NCCL that was used in this build
libs_path = os.path.join(install_dir, "share", ".legate-libs.json")
try:
with open(libs_path, "r") as f:
libs_config = json.load(f)
except (FileNotFoundError, IOError, json.JSONDecodeError):
libs_config = {}
libs_config["nccl"] = nccl_dir
with open(libs_path, "w") as f:
json.dump(libs_config, f)
# Copy thrust configuration
verbose_check_call(
[
"cp",
thrust_config,
os.path.join(install_dir, "share", "legate"),
],
cwd=legate_core_dir,
)
def driver():
parser = argparse.ArgumentParser(description="Install Legate front end.")
parser.add_argument(
"--install-dir",
dest="install_dir",
metavar="DIR",
required=False,
help="Path to install all Legate-related software",
)
parser.add_argument(
"--debug",
dest="debug",
action="store_true",
required=False,
default=os.environ.get("DEBUG", "0") == "1",
help="Build Legate and Legion with no optimizations, and full "
"debugging checks.",
)
parser.add_argument(
"--debug-release",
dest="debug_release",
action="store_true",
required=False,
default=os.environ.get("DEBUG_RELEASE", "0") == "1",
help="Build Legate and Legion with optimizations enabled, but include "
"debugging symbols.",
)
parser.add_argument(
"--check-bounds",
dest="check_bounds",
action="store_true",
required=False,
default=os.environ.get("CHECK_BOUNDS", "0") == "1",
help="Build Legion with bounds checking enabled (warning: expensive).",
)
parser.add_argument(
"--max-dim",
dest="maxdim",
type=int,
default=int(os.environ.get("LEGION_MAX_DIM", 4)),
help="Maximum number of dimensions that Legate will support",
)
parser.add_argument(
"--max-fields",
dest="maxfields",
type=int,
default=int(os.environ.get("LEGION_MAX_FIELDS", 256)),
help="Maximum number of fields that Legate will support",
)
parser.add_argument(
"--gasnet",
dest="gasnet",
action="store_true",
required=False,
default=os.environ.get("USE_GASNET", "0") == "1",
help="Build Legate with GASNet.",
)
parser.add_argument(
"--with-gasnet",
dest="gasnet_dir",
metavar="DIR",
required=False,
default=os.environ.get("GASNET"),
help="Path to GASNet installation directory.",
)
parser.add_argument(
"--cuda",
action=BooleanFlag,
default=os.environ.get("USE_CUDA", "0") == "1",
help="Build Legate with CUDA support.",
)
parser.add_argument(
"--with-cuda",
dest="cuda_dir",
metavar="DIR",
required=False,
default=os.environ.get("CUDA"),
help="Path to CUDA installation directory.",
)
parser.add_argument(
"--arch",
dest="arch",
action="store",
required=False,
default=None,
help="Specify the target GPU architecture.",
)
parser.add_argument(
"--openmp",
action=BooleanFlag,
default=os.environ.get("USE_OPENMP", "0") == "1",
help="Build Legate with OpenMP support.",
)
parser.add_argument(
"--march",
dest="march",
required=False,
default="native",
help="Specify the target CPU architecture.",
)
parser.add_argument(
"--llvm",
dest="llvm",
action="store_true",
required=False,
default=os.environ.get("USE_LLVM", "0") == "1",
help="Build Legate with LLVM support.",
)
parser.add_argument(
"--hdf5",
"--hdf",
dest="hdf",
action="store_true",
required=False,
default=os.environ.get("USE_HDF", "0") == "1",
help="Build Legate with HDF support.",
)
parser.add_argument(
"--spy",
dest="spy",
action="store_true",
required=False,
default=os.environ.get("USE_SPY", "0") == "1",
help="Build Legate with detailed Legion Spy enabled.",
)
parser.add_argument(
"--conduit",
dest="conduit",
action="store",
required=False,
choices=["ibv", "ucx", "aries", "mpi", "udp"],
default=os.environ.get("CONDUIT"),
help="Build Legate with specified GASNet conduit.",
)
parser.add_argument(
"--with-nccl",
dest="nccl_dir",
metavar="DIR",
required=False,
default=os.environ.get("NCCL_PATH"),
help="Path to NCCL installation directory.",
)
parser.add_argument(
"--python-lib",
dest="pylib_name",
action="store",
required=False,
default=None,
help=(
"Build Legate against the specified Python shared library. "
"Default is to use the Python library currently executing this "
"install script."
),
)
parser.add_argument(
"--cmake",
action=BooleanFlag,
default=os.environ.get("USE_CMAKE", "0") == "1",
help="Build Legate with CMake instead of GNU Make.",
)
parser.add_argument(
"--with-cmake",
dest="cmake_exe",
metavar="EXE",
required=False,
default="cmake",
help="Path to CMake executable (if not on PATH).",
)
parser.add_argument(
"--clean",
dest="clean_first",
action=BooleanFlag,
default=True,
help="Clean before build, and pull latest Legion.",
)
parser.add_argument(
"--extra",
dest="extra_flags",
action="append",
required=False,
default=[],
help="Extra flags for make command.",
)
parser.add_argument(
"-j",
dest="thread_count",
nargs="?",
type=int,
help="Number of threads used to compile.",
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
required=False,
help="Enable verbose build output.",
)
parser.add_argument(
"--with-thrust",
dest="thrust_dir",
metavar="DIR",
required=False,
default=os.environ.get("THRUST_PATH"),
help="Path to Thrust installation directory. The required version of "
"Thrust is " + required_thrust_version + " or compatible. If not "
"provided, Thrust will be installed automatically.",
)
parser.add_argument(
"--legion-branch",
dest="legion_branch",
required=False,
default="control_replication",
help="Legion branch to build Legate with.",
)
args, unknown = parser.parse_known_args()
install(unknown=unknown, **vars(args))
if __name__ == "__main__":
driver()
|
import pytest
import os
import RaveEngine.projectManager as projectManager
import RaveEngine.botManager as botManager
import RaveEngine.configManager as configManager
import Utils.commandManager as commandManager
from flaky import flaky
import Utils.sad as sad
import Utils.utils as utils
@pytest.fixture(autouse=True)
def setup():
projectManager.createInitProject(createBasicModules=True)
yield
commandManager.runRmDirCommand(sad._CONFIG_DIR_NAME_)
commandManager.runRmDirCommand(sad._LOG_DIR_NAME_)
commandManager.runRmDirCommand(sad._MODULES_DIR_)
commandManager.runRmDirCommand(sad._OUTPUT_BOT_DIR_)
def data_generateHeaders():
return [sad._HEADER_TOKEN_FLAG]
def data_generateBot():
data = [(False, sad._HOSTING_HEROKU_OPTION_), (True, sad._HOSTING_HEROKU_OPTION_)]
data += [(False, sad._HOSTING_GAE_OPTION_), (True, sad._HOSTING_GAE_OPTION_)]
return data
@flaky(3,1)
@pytest.mark.parametrize('testFlag, hosting', data_generateBot())
def test_generateBot(testFlag, hosting):
projectManager.createInitProject(createBasicModules=True, hostingOption=hosting)
if not testFlag:
with pytest.raises(SystemExit) as pytest_wrapped_e:
botManager.generateBot(testFlag=testFlag)
assert pytest_wrapped_e.type == SystemExit
config = configManager.getConfig()
configManager.set(config, sad._CONFIG_RAVEGEN_SECTION_, sad._CONFIG_DEPLOY_URL_OPTION, "www.test.com")
botManager.generateBot(testFlag=testFlag)
assert os.path.exists(sad._OUTPUT_BOT_DIR_)
assert os.path.exists(sad.OUTPUT_BOT_PATH)
headers = utils._getHeaders()
if testFlag:
assert headers[sad._HEADER_TOKEN_FLAG] == sad._STR_TRUE_
else:
assert headers[sad._HEADER_TOKEN_FLAG] == sad._STR_FALSE_
@flaky(3,1)
@pytest.mark.parametrize('header', data_generateHeaders())
def test_generateHeaders(header):
botManager.generateBot()
headers = utils._getHeaders()
assert header in headers
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from conftest import Mock
import responses
class TestIP(object):
@responses.activate
def test_get_ip(self, manager):
data = Mock.mock_get('ip_address/10.1.0.101')
ip_addr = manager.get_ip('10.1.0.101')
assert type(ip_addr).__name__ == 'IPAddress'
assert ip_addr.address == '10.1.0.101'
assert ip_addr.ptr_record == 'a.ptr.record'
@responses.activate
def test_get_ips(self, manager):
data = Mock.mock_get('ip_address')
ip_addrs = manager.get_ips()
for ip_addr in ip_addrs:
assert type(ip_addr).__name__ == 'IPAddress'
@responses.activate
def test_modify_ip_oop(self, manager):
# get ip
data = Mock.mock_get('ip_address/10.1.0.101')
ip_addr = manager.get_ip('10.1.0.101')
# put ip
data = Mock.mock_put('ip_address/10.1.0.101')
ip_addr.ptr_record = 'my.ptr.record'
ip_addr.save()
assert ip_addr.ptr_record == 'my.ptr.record'
@responses.activate
def test_modify_ip(self, manager):
data = Mock.mock_put('ip_address/10.1.0.101')
ip_addr = manager.modify_ip('10.1.0.101', ptr_record='my.ptr.record')
assert ip_addr.ptr_record == 'my.ptr.record'
@responses.activate
def test_ip_delete(self, manager):
Mock.mock_delete('ip_address/10.1.0.101')
res = manager.release_ip('10.1.0.101')
assert res == {}
|
from rpython.rlib.rarithmetic import LONG_BIT, intmask, longlongmask, r_uint, r_ulonglong
from rpython.rlib.rarithmetic import ovfcheck, r_longlong, widen
from rpython.rlib.rarithmetic import most_neg_value_of_same_type
from rpython.rlib.rfloat import isinf, isnan
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.debug import make_sure_not_resized, check_regular_int
from rpython.rlib.objectmodel import we_are_translated, specialize
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper import extregistry
import math, sys
SUPPORT_INT128 = hasattr(rffi, '__INT128_T')
BYTEORDER = sys.byteorder
# note about digit sizes:
# In division, the native integer type must be able to hold
# a sign bit plus two digits plus 1 overflow bit.
#SHIFT = (LONG_BIT // 2) - 1
if SUPPORT_INT128:
SHIFT = 63
UDIGIT_TYPE = r_ulonglong
if LONG_BIT >= 64:
UDIGIT_MASK = intmask
else:
UDIGIT_MASK = longlongmask
LONG_TYPE = rffi.__INT128_T
if LONG_BIT > SHIFT:
STORE_TYPE = lltype.Signed
UNSIGNED_TYPE = lltype.Unsigned
else:
STORE_TYPE = rffi.LONGLONG
UNSIGNED_TYPE = rffi.ULONGLONG
else:
SHIFT = 31
UDIGIT_TYPE = r_uint
UDIGIT_MASK = intmask
STORE_TYPE = lltype.Signed
UNSIGNED_TYPE = lltype.Unsigned
LONG_TYPE = rffi.LONGLONG
MASK = int((1 << SHIFT) - 1)
FLOAT_MULTIPLIER = float(1 << SHIFT)
# For BIGINT and INT mix.
#
# The VALID range of an int is different than a valid range of a bigint of length one.
# -1 << LONG_BIT is actually TWO digits, because they are stored without the sign.
if SHIFT == LONG_BIT - 1:
MIN_INT_VALUE = -1 << SHIFT
def int_in_valid_range(x):
if x == MIN_INT_VALUE:
return False
return True
else:
# Means we don't have INT128 on 64bit.
def int_in_valid_range(x):
if x > MASK or x < -MASK:
return False
return True
int_in_valid_range._always_inline_ = True
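# Illustrative example (assuming the SHIFT == 31 branch above, i.e. no INT128):
# MASK == 2**31 - 1, so int_in_valid_range(2**31) is False because that value
# needs two digits, while int_in_valid_range(2**31 - 1) is True.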
# Debugging digit array access.
#
# False == no checking at all
# True == check 0 <= value <= MASK
# For long multiplication, use the O(N**2) school algorithm unless
# both operands contain more than KARATSUBA_CUTOFF digits (this
# being an internal Python long digit, in base BASE).
# Karatsuba is O(N**1.585)
USE_KARATSUBA = True # set to False for comparison
if SHIFT > 31:
KARATSUBA_CUTOFF = 19
else:
KARATSUBA_CUTOFF = 38
KARATSUBA_SQUARE_CUTOFF = 2 * KARATSUBA_CUTOFF
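# Rough illustration (derived from the constants above): with SHIFT == 63 the
# cutoff of 19 digits corresponds to operands of about 19 * 63 = 1197 bits,
# and with SHIFT == 31 the cutoff of 38 digits is about 38 * 31 = 1178 bits.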
# For exponentiation, use the binary left-to-right algorithm
# unless the exponent contains more than FIVEARY_CUTOFF digits.
# In that case, do 5 bits at a time. The potential drawback is that
# a table of 2**5 intermediate results is computed.
FIVEARY_CUTOFF = 8
def _mask_digit(x):
return UDIGIT_MASK(x & MASK)
_mask_digit._annspecialcase_ = 'specialize:argtype(0)'
def _widen_digit(x):
return rffi.cast(LONG_TYPE, x)
def _store_digit(x):
return rffi.cast(STORE_TYPE, x)
_store_digit._annspecialcase_ = 'specialize:argtype(0)'
def _load_unsigned_digit(x):
return rffi.cast(UNSIGNED_TYPE, x)
_load_unsigned_digit._always_inline_ = True
NULLDIGIT = _store_digit(0)
ONEDIGIT = _store_digit(1)
def _check_digits(l):
for x in l:
assert type(x) is type(NULLDIGIT)
assert UDIGIT_MASK(x) & MASK == UDIGIT_MASK(x)
class InvalidEndiannessError(Exception):
pass
class InvalidSignednessError(Exception):
pass
class Entry(extregistry.ExtRegistryEntry):
_about_ = _check_digits
def compute_result_annotation(self, s_list):
from rpython.annotator import model as annmodel
assert isinstance(s_list, annmodel.SomeList)
s_DIGIT = self.bookkeeper.valueoftype(type(NULLDIGIT))
assert s_DIGIT.contains(s_list.listdef.listitem.s_value)
def specialize_call(self, hop):
hop.exception_cannot_occur()
class rbigint(object):
"""This is a reimplementation of longs using a list of digits."""
_immutable_ = True
_immutable_fields_ = ["_digits"]
def __init__(self, digits=[NULLDIGIT], sign=0, size=0):
if not we_are_translated():
_check_digits(digits)
make_sure_not_resized(digits)
self._digits = digits
assert size >= 0
self.size = size or len(digits)
self.sign = sign
# The __eq__ and __ne__ methods exist for testing only; they are not RPython!
def __eq__(self, other):
# NOT_RPYTHON
if not isinstance(other, rbigint):
return NotImplemented
return self.eq(other)
def __ne__(self, other):
# NOT_RPYTHON
return not (self == other)
def digit(self, x):
"""Return the x'th digit, as an int."""
return self._digits[x]
digit._always_inline_ = True
def widedigit(self, x):
"""Return the x'th digit, as a long long int if needed
to have enough room to contain two digits."""
return _widen_digit(self._digits[x])
widedigit._always_inline_ = True
def udigit(self, x):
"""Return the x'th digit, as an unsigned int."""
return _load_unsigned_digit(self._digits[x])
udigit._always_inline_ = True
def setdigit(self, x, val):
val = _mask_digit(val)
assert val >= 0
self._digits[x] = _store_digit(val)
setdigit._annspecialcase_ = 'specialize:argtype(2)'
setdigit._always_inline_ = True
def numdigits(self):
return self.size
numdigits._always_inline_ = True
@staticmethod
@jit.elidable
def fromint(intval):
# This function is marked as pure, so you must not call it and
# then modify the result.
check_regular_int(intval)
if intval < 0:
sign = -1
ival = -r_uint(intval)
elif intval > 0:
sign = 1
ival = r_uint(intval)
else:
return NULLRBIGINT
carry = ival >> SHIFT
if carry:
return rbigint([_store_digit(ival & MASK),
_store_digit(carry)], sign, 2)
else:
return rbigint([_store_digit(ival & MASK)], sign, 1)
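# Illustrative example (assuming SHIFT == 31): rbigint.fromint(2**31 + 5) gets
# ival & MASK == 5 and carry == 1, so it is stored as digits [5, 1] with
# sign == 1, i.e. 5 + (1 << SHIFT).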
@staticmethod
@jit.elidable
def frombool(b):
# You must not call this function and then modify the result.
if b:
return ONERBIGINT
return NULLRBIGINT
@staticmethod
def fromlong(l):
"NOT_RPYTHON"
return rbigint(*args_from_long(l))
@staticmethod
@jit.elidable
def fromfloat(dval):
""" Create a new bigint object from a float """
# This function is not marked as pure because it can raise
if isinf(dval):
raise OverflowError("cannot convert float infinity to integer")
if isnan(dval):
raise ValueError("cannot convert float NaN to integer")
return rbigint._fromfloat_finite(dval)
@staticmethod
@jit.elidable
def _fromfloat_finite(dval):
sign = 1
if dval < 0.0:
sign = -1
dval = -dval
frac, expo = math.frexp(dval) # dval = frac*2**expo; 0.0 <= frac < 1.0
if expo <= 0:
return NULLRBIGINT
ndig = (expo-1) // SHIFT + 1 # Number of 'digits' in result
v = rbigint([NULLDIGIT] * ndig, sign, ndig)
frac = math.ldexp(frac, (expo-1) % SHIFT + 1)
for i in range(ndig-1, -1, -1):
# use int(int(frac)) as a workaround for a CPython bug:
# with frac == 2147483647.0, int(frac) == 2147483647L
bits = int(int(frac))
v.setdigit(i, bits)
frac -= float(bits)
frac = math.ldexp(frac, SHIFT)
return v
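# Illustrative sketch (not from the original source): for dval == 3.75,
# math.frexp gives frac == 0.9375 and expo == 2, so ndig == 1 and the loop
# stores the single digit 3, i.e. fromfloat(3.75) truncates towards zero to 3.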
@staticmethod
@jit.elidable
@specialize.argtype(0)
def fromrarith_int(i):
# This function is marked as pure, so you must not call it and
# then modify the result.
return rbigint(*args_from_rarith_int(i))
@staticmethod
@jit.elidable
def fromdecimalstr(s):
# This function is marked as elidable, so you must not call it and
# then modify the result.
return _decimalstr_to_bigint(s)
@staticmethod
@jit.elidable
def fromstr(s, base=0):
"""As string_to_int(), but ignores an optional 'l' or 'L' suffix
and returns an rbigint."""
from rpython.rlib.rstring import NumberStringParser, \
strip_spaces
s = literal = strip_spaces(s)
if (s.endswith('l') or s.endswith('L')) and base < 22:
# in base 22 and above, 'L' is a valid digit! try: long('L',22)
s = s[:-1]
parser = NumberStringParser(s, literal, base, 'long')
return rbigint._from_numberstring_parser(parser)
@staticmethod
def _from_numberstring_parser(parser):
return parse_digit_string(parser)
@staticmethod
@jit.elidable
def frombytes(s, byteorder, signed):
if byteorder not in ('big', 'little'):
raise InvalidEndiannessError()
if not s:
return NULLRBIGINT
if byteorder != BYTEORDER:
msb = ord(s[0])
itr = range(len(s)-1, -1, -1)
else:
msb = ord(s[-1])
itr = range(0, len(s))
sign = -1 if msb >= 0x80 and signed else 1
accum = _widen_digit(0)
accumbits = 0
digits = []
carry = 1
for i in itr:
c = _widen_digit(ord(s[i]))
if sign == -1:
c = (0xFF ^ c) + carry
carry = c >> 8
c &= 0xFF
accum |= c << accumbits
accumbits += 8
if accumbits >= SHIFT:
digits.append(_store_digit(intmask(accum & MASK)))
accum >>= SHIFT
accumbits -= SHIFT
if accumbits:
digits.append(_store_digit(intmask(accum)))
result = rbigint(digits[:], sign)
result._normalize()
return result
@jit.elidable
def tobytes(self, nbytes, byteorder, signed):
if byteorder not in ('big', 'little'):
raise InvalidEndiannessError()
if not signed and self.sign == -1:
raise InvalidSignednessError()
bswap = byteorder != BYTEORDER
d = _widen_digit(0)
j = 0
imax = self.numdigits()
accum = _widen_digit(0)
accumbits = 0
result = StringBuilder(nbytes)
carry = 1
for i in range(0, imax):
d = self.widedigit(i)
if self.sign == -1:
d = (d ^ MASK) + carry
carry = d >> SHIFT
d &= MASK
accum |= d << accumbits
if i == imax - 1:
# Avoid bogus 0's
s = d ^ MASK if self.sign == -1 else d
while s:
s >>= 1
accumbits += 1
else:
accumbits += SHIFT
while accumbits >= 8:
if j >= nbytes:
raise OverflowError()
j += 1
result.append(chr(accum & 0xFF))
accum >>= 8
accumbits -= 8
if accumbits:
if j >= nbytes:
raise OverflowError()
j += 1
if self.sign == -1:
# Add a sign bit
accum |= (~_widen_digit(0)) << accumbits
result.append(chr(accum & 0xFF))
if j < nbytes:
signbyte = 0xFF if self.sign == -1 else 0
result.append_multiple_char(chr(signbyte), nbytes - j)
digits = result.build()
if j == nbytes and nbytes > 0 and signed:
# We used exactly nbytes; for a signed result the top bit of the last byte must match the sign, otherwise the value does not fit
msb = digits[-1]
if (self.sign == -1) != (ord(msb) >= 0x80):
raise OverflowError()
if bswap:
# Bah, this is very inefficient. At least it's not
# quadratic.
length = len(digits)
if length >= 0:
digits = ''.join([digits[i] for i in range(length-1, -1, -1)])
return digits
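# Illustrative doctest-style example (assumes a little-endian host so that
# BYTEORDER == 'little'): rbigint.fromint(258).tobytes(2, 'little', False)
# returns '\x02\x01', i.e. 0x0102 laid out least-significant byte first.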
def toint(self):
"""
Get an integer from a bigint object.
Raises OverflowError if overflow occurs.
"""
if self.numdigits() > MAX_DIGITS_THAT_CAN_FIT_IN_INT:
raise OverflowError
return self._toint_helper()
@jit.elidable
def _toint_helper(self):
x = self._touint_helper()
# Haven't lost any bits, but if the sign bit is set we're in
# trouble *unless* this is the min negative number. So,
# trouble iff sign bit set && (positive || some bit set other
# than the sign bit).
sign = self.sign
if intmask(x) < 0 and (sign > 0 or (x << 1) != 0):
raise OverflowError
return intmask(intmask(x) * sign)
@jit.elidable
def tolonglong(self):
return _AsLongLong(self)
def tobool(self):
return self.sign != 0
@jit.elidable
def touint(self):
if self.sign == -1:
raise ValueError("cannot convert negative integer to unsigned int")
return self._touint_helper()
@jit.elidable
def _touint_helper(self):
x = r_uint(0)
i = self.numdigits() - 1
while i >= 0:
prev = x
x = (x << SHIFT) + self.udigit(i)
if (x >> SHIFT) != prev:
raise OverflowError("long int too large to convert to unsigned int")
i -= 1
return x
@jit.elidable
def toulonglong(self):
if self.sign == -1:
raise ValueError("cannot convert negative integer to unsigned int")
return _AsULonglong_ignore_sign(self)
@jit.elidable
def uintmask(self):
return _AsUInt_mask(self)
@jit.elidable
def ulonglongmask(self):
"""Return r_ulonglong(self), truncating."""
return _AsULonglong_mask(self)
@jit.elidable
def tofloat(self):
return _AsDouble(self)
@jit.elidable
def format(self, digits, prefix='', suffix=''):
# 'digits' is a string whose length is the base to use,
# and where each character is the corresponding digit.
return _format(self, digits, prefix, suffix)
@jit.elidable
def repr(self):
try:
x = self.toint()
except OverflowError:
return self.format(BASE10, suffix="L")
return str(x) + "L"
@jit.elidable
def str(self):
try:
x = self.toint()
except OverflowError:
return self.format(BASE10)
return str(x)
@jit.elidable
def eq(self, other):
if (self.sign != other.sign or
self.numdigits() != other.numdigits()):
return False
i = 0
ld = self.numdigits()
while i < ld:
if self.digit(i) != other.digit(i):
return False
i += 1
return True
@jit.elidable
def int_eq(self, other):
""" eq with int """
if not int_in_valid_range(other):
# Fallback to Long.
return self.eq(rbigint.fromint(other))
if self.numdigits() > 1:
return False
return (self.sign * self.digit(0)) == other
def ne(self, other):
return not self.eq(other)
def int_ne(self, other):
return not self.int_eq(other)
@jit.elidable
def lt(self, other):
if self.sign > other.sign:
return False
if self.sign < other.sign:
return True
ld1 = self.numdigits()
ld2 = other.numdigits()
if ld1 > ld2:
if other.sign > 0:
return False
else:
return True
elif ld1 < ld2:
if other.sign > 0:
return True
else:
return False
i = ld1 - 1
while i >= 0:
d1 = self.digit(i)
d2 = other.digit(i)
if d1 < d2:
if other.sign > 0:
return True
else:
return False
elif d1 > d2:
if other.sign > 0:
return False
else:
return True
i -= 1
return False
@jit.elidable
def int_lt(self, other):
""" lt where other is an int """
if not int_in_valid_range(other):
# Fallback to Long.
return self.lt(rbigint.fromint(other))
osign = 1
if other == 0:
osign = 0
elif other < 0:
osign = -1
if self.sign > osign:
return False
elif self.sign < osign:
return True
digits = self.numdigits()
if digits > 1:
if osign == 1:
return False
else:
return True
d1 = self.sign * self.digit(0)
if d1 < other:
return True
return False
def le(self, other):
return not other.lt(self)
def int_le(self, other):
# A potentially faster alternative: reimplement this as a check against
# other + 1, but then we would have to guard against overflow or shrink the
# valid range.
if self.int_eq(other):
return True
return self.int_lt(other)
def gt(self, other):
return other.lt(self)
def int_gt(self, other):
return not self.int_le(other)
def ge(self, other):
return not self.lt(other)
def int_ge(self, other):
return not self.int_lt(other)
@jit.elidable
def hash(self):
return _hash(self)
@jit.elidable
def add(self, other):
if self.sign == 0:
return other
if other.sign == 0:
return self
if self.sign == other.sign:
result = _x_add(self, other)
else:
result = _x_sub(other, self)
result.sign *= other.sign
return result
@jit.elidable
def int_add(self, other):
if not int_in_valid_range(other):
# Fallback to long.
return self.add(rbigint.fromint(other))
elif self.sign == 0:
return rbigint.fromint(other)
elif other == 0:
return self
sign = -1 if other < 0 else 1
if self.sign == sign:
result = _x_int_add(self, other)
else:
result = _x_int_sub(self, other)
result.sign *= -1
result.sign *= sign
return result
@jit.elidable
def sub(self, other):
if other.sign == 0:
return self
elif self.sign == 0:
return rbigint(other._digits[:other.size], -other.sign, other.size)
elif self.sign == other.sign:
result = _x_sub(self, other)
else:
result = _x_add(self, other)
result.sign *= self.sign
return result
@jit.elidable
def int_sub(self, other):
if not int_in_valid_range(other):
# Fallback to long.
return self.sub(rbigint.fromint(other))
elif other == 0:
return self
elif self.sign == 0:
return rbigint.fromint(-other)
elif self.sign == (-1 if other < 0 else 1):
result = _x_int_sub(self, other)
else:
result = _x_int_add(self, other)
result.sign *= self.sign
return result
@jit.elidable
def mul(self, b):
asize = self.numdigits()
bsize = b.numdigits()
a = self
if asize > bsize:
a, b, asize, bsize = b, a, bsize, asize
if a.sign == 0 or b.sign == 0:
return NULLRBIGINT
if asize == 1:
if a._digits[0] == NULLDIGIT:
return NULLRBIGINT
elif a._digits[0] == ONEDIGIT:
return rbigint(b._digits[:b.size], a.sign * b.sign, b.size)
elif bsize == 1:
res = b.widedigit(0) * a.widedigit(0)
carry = res >> SHIFT
if carry:
return rbigint([_store_digit(res & MASK), _store_digit(carry)], a.sign * b.sign, 2)
else:
return rbigint([_store_digit(res & MASK)], a.sign * b.sign, 1)
result = _x_mul(a, b, a.digit(0))
elif USE_KARATSUBA:
if a is b:
i = KARATSUBA_SQUARE_CUTOFF
else:
i = KARATSUBA_CUTOFF
if asize <= i:
result = _x_mul(a, b)
"""elif 2 * asize <= bsize:
result = _k_lopsided_mul(a, b)"""
else:
result = _k_mul(a, b)
else:
result = _x_mul(a, b)
result.sign = a.sign * b.sign
return result
@jit.elidable
def int_mul(self, b):
if not int_in_valid_range(b):
# Fallback to long.
return self.mul(rbigint.fromint(b))
if self.sign == 0 or b == 0:
return NULLRBIGINT
asize = self.numdigits()
digit = abs(b)
bsign = -1 if b < 0 else 1
if digit == 1:
return rbigint(self._digits[:self.size], self.sign * bsign, asize)
elif asize == 1:
res = self.widedigit(0) * digit
carry = res >> SHIFT
if carry:
return rbigint([_store_digit(res & MASK), _store_digit(carry)], self.sign * bsign, 2)
else:
return rbigint([_store_digit(res & MASK)], self.sign * bsign, 1)
elif digit & (digit - 1) == 0:
result = self.lqshift(ptwotable[digit])
else:
result = _muladd1(self, digit)
result.sign = self.sign * bsign
return result
@jit.elidable
def truediv(self, other):
div = _bigint_true_divide(self, other)
return div
@jit.elidable
def floordiv(self, other):
if self.sign == 1 and other.numdigits() == 1 and other.sign == 1:
digit = other.digit(0)
if digit == 1:
return rbigint(self._digits[:self.size], 1, self.size)
elif digit and digit & (digit - 1) == 0:
return self.rshift(ptwotable[digit])
div, mod = _divrem(self, other)
if mod.sign * other.sign == -1:
if div.sign == 0:
return ONENEGATIVERBIGINT
div = div.int_sub(1)
return div
def div(self, other):
return self.floordiv(other)
@jit.elidable
def mod(self, other):
if self.sign == 0:
return NULLRBIGINT
if other.sign != 0 and other.numdigits() == 1:
digit = other.digit(0)
if digit == 1:
return NULLRBIGINT
elif digit == 2:
modm = self.digit(0) & 1
if modm:
return ONENEGATIVERBIGINT if other.sign == -1 else ONERBIGINT
return NULLRBIGINT
elif digit & (digit - 1) == 0:
mod = self.int_and_(digit - 1)
else:
# Perform
size = self.numdigits() - 1
if size > 0:
rem = self.widedigit(size)
size -= 1
while size >= 0:
rem = ((rem << SHIFT) + self.widedigit(size)) % digit
size -= 1
else:
rem = self.digit(0) % digit
if rem == 0:
return NULLRBIGINT
mod = rbigint([_store_digit(rem)], -1 if self.sign < 0 else 1, 1)
else:
div, mod = _divrem(self, other)
if mod.sign * other.sign == -1:
mod = mod.add(other)
return mod
@jit.elidable
def int_mod(self, other):
if self.sign == 0:
return NULLRBIGINT
elif not int_in_valid_range(other):
# Fallback to long.
return self.mod(rbigint.fromint(other))
elif other != 0:
digit = abs(other)
if digit == 1:
return NULLRBIGINT
elif digit == 2:
modm = self.digit(0) & 1
if modm:
return ONENEGATIVERBIGINT if other < 0 else ONERBIGINT
return NULLRBIGINT
elif digit & (digit - 1) == 0:
mod = self.int_and_(digit - 1)
else:
# Perform
size = self.numdigits() - 1
if size > 0:
rem = self.widedigit(size)
size -= 1
while size >= 0:
rem = ((rem << SHIFT) + self.widedigit(size)) % digit
size -= 1
else:
rem = self.digit(0) % digit
if rem == 0:
return NULLRBIGINT
mod = rbigint([_store_digit(rem)], -1 if self.sign < 0 else 1, 1)
else:
raise ZeroDivisionError("long division or modulo by zero")
if mod.sign * (-1 if other < 0 else 1) == -1:
mod = mod.int_add(other)
return mod
@jit.elidable
def divmod(v, w):
"""
The / and % operators are now defined in terms of divmod().
The expression a mod b has the value a - b*floor(a/b).
The _divrem function gives the remainder after division of
|a| by |b|, with the sign of a. This is also expressed
as a - b*trunc(a/b), if trunc truncates towards zero.
Some examples:
a b a rem b a mod b
13 10 3 3
-13 10 -3 7
13 -10 3 -7
-13 -10 -3 -3
So, to get from rem to mod, we have to add b if a and b
have different signs. We then subtract one from the 'div'
part of the outcome to keep the invariant intact.
"""
div, mod = _divrem(v, w)
if mod.sign * w.sign == -1:
mod = mod.add(w)
if div.sign == 0:
return ONENEGATIVERBIGINT, mod
div = div.int_sub(1)
return div, mod
@jit.elidable
def pow(a, b, c=None):
negativeOutput = False # if x<0 return negative output
# 5-ary values. If the exponent is large enough, table is
# precomputed so that table[i] == a**i % c for i in range(32).
# python translation: the table is computed when needed.
if b.sign < 0: # if exponent is negative
if c is not None:
raise TypeError(
"pow() 2nd argument "
"cannot be negative when 3rd argument specified")
# XXX failed to implement
raise ValueError("bigint pow() too negative")
size_b = b.numdigits()
if c is not None:
if c.sign == 0:
raise ValueError("pow() 3rd argument cannot be 0")
# if modulus < 0:
# negativeOutput = True
# modulus = -modulus
if c.sign < 0:
negativeOutput = True
c = c.neg()
# if modulus == 1:
# return 0
if c.numdigits() == 1 and c._digits[0] == ONEDIGIT:
return NULLRBIGINT
# Reduce base by modulus in some cases:
# 1. If base < 0. Forcing the base non-neg makes things easier.
# 2. If base is obviously larger than the modulus. The "small
# exponent" case later can multiply directly by base repeatedly,
# while the "large exponent" case multiplies directly by base 31
# times. It can be unboundedly faster to multiply by
# base % modulus instead.
# We could _always_ do this reduction, but mod() isn't cheap,
# so we only do it when it buys something.
if a.sign < 0 or a.numdigits() > c.numdigits():
a = a.mod(c)
elif b.sign == 0:
return ONERBIGINT
elif a.sign == 0:
return NULLRBIGINT
elif size_b == 1:
if b._digits[0] == NULLDIGIT:
return ONERBIGINT if a.sign == 1 else ONENEGATIVERBIGINT
elif b._digits[0] == ONEDIGIT:
return a
elif a.numdigits() == 1:
adigit = a.digit(0)
digit = b.digit(0)
if adigit == 1:
if a.sign == -1 and digit % 2:
return ONENEGATIVERBIGINT
return ONERBIGINT
elif adigit & (adigit - 1) == 0:
ret = a.lshift(((digit-1)*(ptwotable[adigit]-1)) + digit-1)
if a.sign == -1 and not digit % 2:
ret.sign = 1
return ret
# At this point a, b, and c are guaranteed non-negative UNLESS
# c is None, in which case a may be negative.
z = rbigint([ONEDIGIT], 1, 1)
# python adaptation: moved macros REDUCE(X) and MULT(X, Y, result)
# into helper function result = _help_mult(x, y, c)
if size_b <= FIVEARY_CUTOFF:
# Left-to-right binary exponentiation (HAC Algorithm 14.79)
# http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
size_b -= 1
while size_b >= 0:
bi = b.digit(size_b)
j = 1 << (SHIFT-1)
while j != 0:
z = _help_mult(z, z, c)
if bi & j:
z = _help_mult(z, a, c)
j >>= 1
size_b -= 1
else:
# Left-to-right 5-ary exponentiation (HAC Algorithm 14.82)
# This is only useful in the case where c != None.
# z still holds 1L
table = [z] * 32
table[0] = z
for i in range(1, 32):
table[i] = _help_mult(table[i-1], a, c)
# Note that here SHIFT is not a multiple of 5. The difficulty
# is to extract 5 bits at a time from 'b', starting from the
# most significant digits, so that at the end of the algorithm
# it falls exactly to zero.
# m = max number of bits = i * SHIFT
# m+ = m rounded up to the next multiple of 5
# j = (m+) % SHIFT = (m+) - (i * SHIFT)
# (computed without doing "i * SHIFT", which might overflow)
j = size_b % 5
j = _jmapping[j]
if not we_are_translated():
assert j == (size_b*SHIFT+4)//5*5 - size_b*SHIFT
#
accum = r_uint(0)
while True:
j -= 5
if j >= 0:
index = (accum >> j) & 0x1f
else:
# 'accum' does not have enough bits left;
# we must pull the next digit from 'b' in order to complete the 5-bit index
if size_b == 0:
break # Done
size_b -= 1
assert size_b >= 0
bi = b.udigit(size_b)
index = ((accum << (-j)) | (bi >> (j+SHIFT))) & 0x1f
accum = bi
j += SHIFT
#
for k in range(5):
z = _help_mult(z, z, c)
if index:
z = _help_mult(z, table[index], c)
#
assert j == -5
if negativeOutput and z.sign != 0:
z = z.sub(c)
return z
@jit.elidable
def neg(self):
return rbigint(self._digits, -self.sign, self.size)
@jit.elidable
def abs(self):
if self.sign != -1:
return self
return rbigint(self._digits, 1, self.size)
@jit.elidable
def invert(self): #Implement ~x as -(x + 1)
if self.sign == 0:
return ONENEGATIVERBIGINT
ret = self.int_add(1)
ret.sign = -ret.sign
return ret
@jit.elidable
def lshift(self, int_other):
if int_other < 0:
raise ValueError("negative shift count")
elif int_other == 0:
return self
# wordshift, remshift = divmod(int_other, SHIFT)
wordshift = int_other // SHIFT
remshift = int_other - wordshift * SHIFT
if not remshift:
# So we can avoid problems with eq, AND avoid the need for normalize.
if self.sign == 0:
return self
return rbigint([NULLDIGIT] * wordshift + self._digits, self.sign, self.size + wordshift)
oldsize = self.numdigits()
newsize = oldsize + wordshift + 1
z = rbigint([NULLDIGIT] * newsize, self.sign, newsize)
accum = _widen_digit(0)
j = 0
while j < oldsize:
accum += self.widedigit(j) << remshift
z.setdigit(wordshift, accum)
accum >>= SHIFT
wordshift += 1
j += 1
newsize -= 1
assert newsize >= 0
z.setdigit(newsize, accum)
z._normalize()
return z
lshift._always_inline_ = True # It's so fast that it's always beneficial.
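# Worked example for the decomposition above (assuming SHIFT == 63):
# lshift(130) yields wordshift == 130 // 63 == 2 and remshift == 4, so the two
# lowest result digits stay zero and the remaining bits move up by 4 bits
# within the higher digits.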
@jit.elidable
def lqshift(self, int_other):
" A quicker one with much less checks, int_other is valid and for the most part constant."
assert int_other > 0
oldsize = self.numdigits()
z = rbigint([NULLDIGIT] * (oldsize + 1), self.sign, (oldsize + 1))
accum = _widen_digit(0)
i = 0
while i < oldsize:
accum += self.widedigit(i) << int_other
z.setdigit(i, accum)
accum >>= SHIFT
i += 1
z.setdigit(oldsize, accum)
z._normalize()
return z
lqshift._always_inline_ = True # It's so fast that it's always beneficial.
@jit.elidable
def rshift(self, int_other, dont_invert=False):
if int_other < 0:
raise ValueError("negative shift count")
elif int_other == 0:
return self
if self.sign == -1 and not dont_invert:
a = self.invert().rshift(int_other)
return a.invert()
wordshift = int_other / SHIFT
newsize = self.numdigits() - wordshift
if newsize <= 0:
return NULLRBIGINT
loshift = int_other % SHIFT
hishift = SHIFT - loshift
z = rbigint([NULLDIGIT] * newsize, self.sign, newsize)
i = 0
while i < newsize:
newdigit = (self.digit(wordshift) >> loshift)
if i+1 < newsize:
newdigit |= (self.digit(wordshift+1) << hishift)
z.setdigit(i, newdigit)
i += 1
wordshift += 1
z._normalize()
return z
rshift._always_inline_ = 'try' # It's so fast that it's always beneficial.
@jit.elidable
def abs_rshift_and_mask(self, bigshiftcount, mask):
assert isinstance(bigshiftcount, r_ulonglong)
assert mask >= 0
wordshift = bigshiftcount / SHIFT
numdigits = self.numdigits()
if wordshift >= numdigits:
return 0
wordshift = intmask(wordshift)
loshift = intmask(intmask(bigshiftcount) - intmask(wordshift * SHIFT))
lastdigit = self.digit(wordshift) >> loshift
if mask > (MASK >> loshift) and wordshift + 1 < numdigits:
hishift = SHIFT - loshift
lastdigit |= self.digit(wordshift+1) << hishift
return lastdigit & mask
@staticmethod
def from_list_n_bits(list, nbits):
if len(list) == 0:
return NULLRBIGINT
if nbits == SHIFT:
z = rbigint(list, 1)
else:
if not (1 <= nbits < SHIFT):
raise ValueError
lllength = (r_ulonglong(len(list)) * nbits) // SHIFT
length = intmask(lllength) + 1
z = rbigint([NULLDIGIT] * length, 1)
out = 0
i = 0
accum = 0
for input in list:
accum |= (input << i)
original_i = i
i += nbits
if i > SHIFT:
z.setdigit(out, accum)
out += 1
accum = input >> (SHIFT - original_i)
i -= SHIFT
assert out < length
z.setdigit(out, accum)
z._normalize()
return z
@jit.elidable
def and_(self, other):
return _bitwise(self, '&', other)
@jit.elidable
def int_and_(self, other):
return _int_bitwise(self, '&', other)
@jit.elidable
def xor(self, other):
return _bitwise(self, '^', other)
@jit.elidable
def int_xor(self, other):
return _int_bitwise(self, '^', other)
@jit.elidable
def or_(self, other):
return _bitwise(self, '|', other)
@jit.elidable
def int_or_(self, other):
return _int_bitwise(self, '|', other)
@jit.elidable
def oct(self):
if self.sign == 0:
return '0L'
else:
return _format(self, BASE8, '0', 'L')
@jit.elidable
def hex(self):
return _format(self, BASE16, '0x', 'L')
@jit.elidable
def log(self, base):
# base is supposed to be positive or 0.0, which means we use e
if base == 10.0:
return _loghelper(math.log10, self)
ret = _loghelper(math.log, self)
if base != 0.0:
ret /= math.log(base)
return ret
def tolong(self):
"NOT_RPYTHON"
l = 0L
digits = list(self._digits)
digits.reverse()
for d in digits:
l = l << SHIFT
l += intmask(d)
return l * self.sign
def _normalize(self):
i = self.numdigits()
while i > 1 and self._digits[i - 1] == NULLDIGIT:
i -= 1
assert i > 0
if i != self.numdigits():
self.size = i
if self.numdigits() == 1 and self._digits[0] == NULLDIGIT:
self.sign = 0
self._digits = [NULLDIGIT]
_normalize._always_inline_ = True
@jit.elidable
def bit_length(self):
i = self.numdigits()
if i == 1 and self._digits[0] == NULLDIGIT:
return 0
msd = self.digit(i - 1)
msd_bits = 0
while msd >= 32:
msd_bits += 6
msd >>= 6
msd_bits += [
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
][msd]
# yes, this can overflow: a huge number which fits 3 gigabytes of
# memory has around 24 gigabits!
bits = ovfcheck((i-1) * SHIFT) + msd_bits
return bits
def __repr__(self):
return "<rbigint digits=%s, sign=%s, size=%d, len=%d, %s>" % (self._digits,
self.sign, self.size, len(self._digits),
self.str())
ONERBIGINT = rbigint([ONEDIGIT], 1, 1)
ONENEGATIVERBIGINT = rbigint([ONEDIGIT], -1, 1)
NULLRBIGINT = rbigint()
_jmapping = [(5 * SHIFT) % 5,
(4 * SHIFT) % 5,
(3 * SHIFT) % 5,
(2 * SHIFT) % 5,
(1 * SHIFT) % 5]
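# For reference (derived from the definition above): with SHIFT == 63 this
# table is [0, 2, 4, 1, 3], and with SHIFT == 31 it is [0, 4, 3, 2, 1].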
# if the bigint has more digits than this, it cannot fit into an int
MAX_DIGITS_THAT_CAN_FIT_IN_INT = rbigint.fromint(-sys.maxint - 1).numdigits()
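# For example (assuming a 64-bit build with SHIFT == 63): -sys.maxint - 1 is
# -2**63, which takes two digits here, so any bigint with more than two digits
# is certain to overflow toint().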
#_________________________________________________________________
# Helper Functions
def _help_mult(x, y, c):
"""
Multiply two values, then reduce the result:
result = X*Y % c. If c is None, skip the mod.
"""
res = x.mul(y)
# Perform a modular reduction, X = X % c, but leave X alone if c
# is NULL.
if c is not None:
res = res.mod(c)
return res
def digits_from_nonneg_long(l):
digits = []
while True:
digits.append(_store_digit(_mask_digit(l & MASK)))
l = l >> SHIFT
if not l:
return digits[:] # to make it non-resizable
digits_from_nonneg_long._annspecialcase_ = "specialize:argtype(0)"
def digits_for_most_neg_long(l):
# This helper only works if 'l' is the most negative integer of its
# type, which in base 2 looks like: 1000000..0000
digits = []
while _mask_digit(l) == 0:
digits.append(NULLDIGIT)
l = l >> SHIFT
# now 'l' looks like: ...111100000
# turn it into: ...000100000
# to drop the extra unwanted 1's introduced by the signed right shift
l = -intmask(l)
assert l & MASK == l
digits.append(_store_digit(l))
return digits[:] # to make it non-resizable
digits_for_most_neg_long._annspecialcase_ = "specialize:argtype(0)"
def args_from_rarith_int1(x):
if x > 0:
return digits_from_nonneg_long(x), 1
elif x == 0:
return [NULLDIGIT], 0
elif x != most_neg_value_of_same_type(x):
# normal case
return digits_from_nonneg_long(-x), -1
else:
# the most negative integer! hacks needed...
return digits_for_most_neg_long(x), -1
args_from_rarith_int1._annspecialcase_ = "specialize:argtype(0)"
def args_from_rarith_int(x):
return args_from_rarith_int1(widen(x))
args_from_rarith_int._annspecialcase_ = "specialize:argtype(0)"
# ^^^ specialized by the precise type of 'x', which is typically a r_xxx
# instance from rlib.rarithmetic
def args_from_long(x):
"NOT_RPYTHON"
if x >= 0:
if x == 0:
return [NULLDIGIT], 0
else:
return digits_from_nonneg_long(x), 1
else:
return digits_from_nonneg_long(-x), -1
def _x_add(a, b):
""" Add the absolute values of two bigint integers. """
size_a = a.numdigits()
size_b = b.numdigits()
# Ensure a is the larger of the two:
if size_a < size_b:
a, b = b, a
size_a, size_b = size_b, size_a
z = rbigint([NULLDIGIT] * (size_a + 1), 1)
i = UDIGIT_TYPE(0)
carry = UDIGIT_TYPE(0)
while i < size_b:
carry += a.udigit(i) + b.udigit(i)
z.setdigit(i, carry)
carry >>= SHIFT
i += 1
while i < size_a:
carry += a.udigit(i)
z.setdigit(i, carry)
carry >>= SHIFT
i += 1
z.setdigit(i, carry)
z._normalize()
return z
def _x_int_add(a, b):
""" Add the absolute values of one bigint and one integer. """
size_a = a.numdigits()
z = rbigint([NULLDIGIT] * (size_a + 1), 1)
i = UDIGIT_TYPE(1)
carry = a.udigit(0) + abs(b)
z.setdigit(0, carry)
carry >>= SHIFT
while i < size_a:
carry += a.udigit(i)
z.setdigit(i, carry)
carry >>= SHIFT
i += 1
z.setdigit(i, carry)
z._normalize()
return z
def _x_sub(a, b):
""" Subtract the absolute values of two integers. """
size_a = a.numdigits()
size_b = b.numdigits()
sign = 1
# Ensure a is the larger of the two:
if size_a < size_b:
sign = -1
a, b = b, a
size_a, size_b = size_b, size_a
elif size_a == size_b:
# Find highest digit where a and b differ:
i = size_a - 1
while i >= 0 and a.digit(i) == b.digit(i):
i -= 1
if i < 0:
return NULLRBIGINT
if a.digit(i) < b.digit(i):
sign = -1
a, b = b, a
size_a = size_b = i+1
z = rbigint([NULLDIGIT] * size_a, sign, size_a)
borrow = UDIGIT_TYPE(0)
i = _load_unsigned_digit(0)
while i < size_b:
# The following assumes unsigned arithmetic
# works modulo 2**N for some N>SHIFT.
borrow = a.udigit(i) - b.udigit(i) - borrow
z.setdigit(i, borrow)
borrow >>= SHIFT
#borrow &= 1 # Keep only one sign bit
i += 1
while i < size_a:
borrow = a.udigit(i) - borrow
z.setdigit(i, borrow)
borrow >>= SHIFT
#borrow &= 1
i += 1
assert borrow == 0
z._normalize()
return z
def _x_int_sub(a, b):
""" Subtract the absolute values of two integers. """
size_a = a.numdigits()
bdigit = abs(b)
if size_a == 1:
# Find highest digit where a and b differ:
adigit = a.digit(0)
if adigit == bdigit:
return NULLRBIGINT
return rbigint.fromint(adigit - bdigit)
z = rbigint([NULLDIGIT] * size_a, 1, size_a)
i = _load_unsigned_digit(1)
# The following assumes unsigned arithmetic
# works modulo 2**N for some N>SHIFT.
borrow = a.udigit(0) - bdigit
z.setdigit(0, borrow)
borrow >>= SHIFT
#borrow &= 1 # Keep only one sign bit
while i < size_a:
borrow = a.udigit(i) - borrow
z.setdigit(i, borrow)
borrow >>= SHIFT
#borrow &= 1
i += 1
assert borrow == 0
z._normalize()
return z
# A neat little table of power of twos.
ptwotable = {}
for x in range(SHIFT-1):
ptwotable[r_longlong(2 << x)] = x+1
ptwotable[r_longlong(-2 << x)] = x+1
def _x_mul(a, b, digit=0):
"""
Grade school multiplication, ignoring the signs.
Returns the absolute value of the product.
"""
size_a = a.numdigits()
size_b = b.numdigits()
if a is b:
# Efficient squaring per HAC, Algorithm 14.16:
# http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
# Gives slightly less than a 2x speedup when a == b,
# via exploiting that each entry in the multiplication
# pyramid appears twice (except for the size_a squares).
z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
i = UDIGIT_TYPE(0)
while i < size_a:
f = a.widedigit(i)
pz = i << 1
pa = i + 1
carry = z.widedigit(pz) + f * f
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
assert carry <= MASK
# Now f is added in twice in each column of the
# pyramid it appears. Same as adding f<<1 once.
f <<= 1
while pa < size_a:
carry += z.widedigit(pz) + a.widedigit(pa) * f
pa += 1
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
if carry:
carry += z.widedigit(pz)
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
if carry:
z.setdigit(pz, z.widedigit(pz) + carry)
assert (carry >> SHIFT) == 0
i += 1
z._normalize()
return z
elif digit:
if digit & (digit - 1) == 0:
return b.lqshift(ptwotable[digit])
# Even if it's not a power of two, it can still be useful.
return _muladd1(b, digit)
# a is not b
# use the following identity to reduce the number of operations
# a * b = a_0*b_0 + sum_{i=1}^n(a_0*b_i + a_1*b_{i-1}) + a_1*b_n
z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
i = UDIGIT_TYPE(0)
size_a1 = UDIGIT_TYPE(size_a - 1)
size_b1 = UDIGIT_TYPE(size_b - 1)
while i < size_a1:
f0 = a.widedigit(i)
f1 = a.widedigit(i + 1)
pz = i
carry = z.widedigit(pz) + b.widedigit(0) * f0
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
j = UDIGIT_TYPE(0)
while j < size_b1:
# this operation does not overflow using
# SHIFT = (LONG_BIT // 2) - 1 = B - 1; in fact before it
# carry and z.widedigit(pz) are less than 2**(B - 1);
# b.widedigit(j + 1) * f0 < (2**(B-1) - 1)**2; so
# carry + z.widedigit(pz) + b.widedigit(j + 1) * f0 +
# b.widedigit(j) * f1 < 2**(2*B - 1) - 2**B < 2**LONG_BIT - 1
carry += z.widedigit(pz) + b.widedigit(j + 1) * f0 + \
b.widedigit(j) * f1
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
j += 1
# carry < 2**(B + 1) - 2
carry += z.widedigit(pz) + b.widedigit(size_b1) * f1
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
# carry < 4
if carry:
z.setdigit(pz, carry)
assert (carry >> SHIFT) == 0
i += 2
if size_a & 1:
pz = size_a1
f = a.widedigit(pz)
pb = 0
carry = _widen_digit(0)
while pb < size_b:
carry += z.widedigit(pz) + b.widedigit(pb) * f
pb += 1
z.setdigit(pz, carry)
pz += 1
carry >>= SHIFT
if carry:
z.setdigit(pz, z.widedigit(pz) + carry)
z._normalize()
return z
def _kmul_split(n, size):
"""
A helper for Karatsuba multiplication (k_mul).
Takes a bigint "n" and an integer "size" representing the place to
split, and sets low and high such that abs(n) == (high << size) + low,
viewing the shift as being by digits. The sign bit is ignored, and
the return values are >= 0.
"""
size_n = n.numdigits()
size_lo = min(size_n, size)
# We use "or" her to avoid having a check where list can be empty in _normalize.
lo = rbigint(n._digits[:size_lo] or [NULLDIGIT], 1)
hi = rbigint(n._digits[size_lo:n.size] or [NULLDIGIT], 1)
lo._normalize()
hi._normalize()
return hi, lo
def _k_mul(a, b):
"""
Karatsuba multiplication. Ignores the input signs, and returns the
absolute value of the product (or raises if error).
See Knuth Vol. 2 Chapter 4.3.3 (Pp. 294-295).
"""
asize = a.numdigits()
bsize = b.numdigits()
# (ah*X+al)(bh*X+bl) = ah*bh*X*X + (ah*bl + al*bh)*X + al*bl
# Let k = (ah+al)*(bh+bl) = ah*bl + al*bh + ah*bh + al*bl
# Then the original product is
# ah*bh*X*X + (k - ah*bh - al*bl)*X + al*bl
# By picking X to be a power of 2, "*X" is just shifting, and it's
# been reduced to 3 multiplies on numbers half the size.
# Split a & b into hi & lo pieces.
shift = bsize >> 1
ah, al = _kmul_split(a, shift)
if ah.sign == 0:
# This may happen now that _k_lopsided_mul ain't catching it.
return _x_mul(a, b)
#assert ah.sign == 1 # the split isn't degenerate
if a is b:
bh = ah
bl = al
else:
bh, bl = _kmul_split(b, shift)
# The plan:
# 1. Allocate result space (asize + bsize digits: that's always
# enough).
# 2. Compute ah*bh, and copy into result at 2*shift.
# 3. Compute al*bl, and copy into result at 0. Note that this
# can't overlap with #2.
# 4. Subtract al*bl from the result, starting at shift. This may
# underflow (borrow out of the high digit), but we don't care:
# we're effectively doing unsigned arithmetic mod
# BASE**(sizea + sizeb), and so long as the *final* result fits,
# borrows and carries out of the high digit can be ignored.
# 5. Subtract ah*bh from the result, starting at shift.
# 6. Compute (ah+al)*(bh+bl), and add it into the result starting
# at shift.
# 1. Allocate result space.
ret = rbigint([NULLDIGIT] * (asize + bsize), 1)
# 2. t1 <- ah*bh, and copy into high digits of result.
t1 = ah.mul(bh)
assert t1.sign >= 0
assert 2*shift + t1.numdigits() <= ret.numdigits()
for i in range(t1.numdigits()):
ret._digits[2*shift + i] = t1._digits[i]
# Zero-out the digits higher than the ah*bh copy. */
## ignored, assuming that we initialize to zero
##i = ret->ob_size - 2*shift - t1->ob_size;
##if (i)
## memset(ret->ob_digit + 2*shift + t1->ob_size, 0,
## i * sizeof(digit));
# 3. t2 <- al*bl, and copy into the low digits.
t2 = al.mul(bl)
assert t2.sign >= 0
assert t2.numdigits() <= 2*shift # no overlap with high digits
for i in range(t2.numdigits()):
ret._digits[i] = t2._digits[i]
# Zero out remaining digits.
## ignored, assuming that we initialize to zero
##i = 2*shift - t2->ob_size; /* number of uninitialized digits */
##if (i)
## memset(ret->ob_digit + t2->ob_size, 0, i * sizeof(digit));
# 4 & 5. Subtract ah*bh (t1) and al*bl (t2). We do al*bl first
# because it's fresher in cache.
i = ret.numdigits() - shift # # digits after shift
_v_isub(ret, shift, i, t2, t2.numdigits())
_v_isub(ret, shift, i, t1, t1.numdigits())
# 6. t3 <- (ah+al)(bh+bl), and add into result.
t1 = _x_add(ah, al)
if a is b:
t2 = t1
else:
t2 = _x_add(bh, bl)
t3 = t1.mul(t2)
assert t3.sign >= 0
# Add t3. It's not obvious why we can't run out of room here.
# See the (*) comment after this function.
_v_iadd(ret, shift, i, t3, t3.numdigits())
ret._normalize()
return ret
""" (*) Why adding t3 can't "run out of room" above.
Let f(x) mean the floor of x and c(x) mean the ceiling of x. Some facts
to start with:
1. For any integer i, i = c(i/2) + f(i/2). In particular,
bsize = c(bsize/2) + f(bsize/2).
2. shift = f(bsize/2)
3. asize <= bsize
4. Since we call k_lopsided_mul if asize*2 <= bsize, asize*2 > bsize in this
routine, so asize > bsize/2 >= f(bsize/2) in this routine.
We allocated asize + bsize result digits, and add t3 into them at an offset
of shift. This leaves asize+bsize-shift allocated digit positions for t3
to fit into, = (by #1 and #2) asize + f(bsize/2) + c(bsize/2) - f(bsize/2) =
asize + c(bsize/2) available digit positions.
bh has c(bsize/2) digits, and bl at most f(bsize/2) digits. So bh+bl has
at most c(bsize/2) digits + 1 bit.
If asize == bsize, ah has c(bsize/2) digits, else ah has at most f(bsize/2)
digits, and al has at most f(bsize/2) digits in any case. So ah+al has at
most (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 1 bit.
The product (ah+al)*(bh+bl) therefore has at most
c(bsize/2) + (asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits
and we have asize + c(bsize/2) available digit positions. We need to show
this is always enough. An instance of c(bsize/2) cancels out in both, so
the question reduces to whether asize digits is enough to hold
(asize == bsize ? c(bsize/2) : f(bsize/2)) digits + 2 bits. If asize < bsize,
then we're asking whether asize digits >= f(bsize/2) digits + 2 bits. By #4,
asize is at least f(bsize/2)+1 digits, so this in turn reduces to whether 1
digit is enough to hold 2 bits. This is so since SHIFT=15 >= 2. If
asize == bsize, then we're asking whether bsize digits is enough to hold
c(bsize/2) digits + 2 bits, or equivalently (by #1) whether f(bsize/2) digits
is enough to hold 2 bits. This is so if bsize >= 2, which holds because
bsize >= KARATSUBA_CUTOFF >= 2.
Note that since there's always enough room for (ah+al)*(bh+bl), and that's
clearly >= each of ah*bh and al*bl, there's always enough room to subtract
ah*bh and al*bl too.
"""
def _k_lopsided_mul(a, b):
# Not in use anymore; it only accounted for about 1% of performance. Perhaps
# if we got rid of the extra list allocation this would be more effective.
"""
b has at least twice the digits of a, and a is big enough that Karatsuba
would pay off *if* the inputs had balanced sizes. View b as a sequence
of slices, each with a->ob_size digits, and multiply the slices by a,
one at a time. This gives k_mul balanced inputs to work with, and is
also cache-friendly (we compute one double-width slice of the result
at a time, then move on, never backtracking except for the helpful
single-width slice overlap between successive partial sums).
"""
asize = a.numdigits()
bsize = b.numdigits()
# nbdone is # of b digits already multiplied
assert asize > KARATSUBA_CUTOFF
assert 2 * asize <= bsize
# Allocate result space, and zero it out.
ret = rbigint([NULLDIGIT] * (asize + bsize), 1)
# Successive slices of b are copied into bslice.
#bslice = rbigint([0] * asize, 1)
# XXX we cannot pre-allocate, see comments below!
# XXX prevent one list from being created.
bslice = rbigint(sign=1)
nbdone = 0
while bsize > 0:
nbtouse = min(bsize, asize)
# Multiply the next slice of b by a.
#bslice.digits[:nbtouse] = b.digits[nbdone : nbdone + nbtouse]
# XXX: this would be more efficient if we adopted CPython's
# way to store the size, instead of resizing the list!
# XXX change the implementation, encoding length via the sign.
bslice._digits = b._digits[nbdone : nbdone + nbtouse]
bslice.size = nbtouse
product = _k_mul(a, bslice)
# Add into result.
_v_iadd(ret, nbdone, ret.numdigits() - nbdone,
product, product.numdigits())
bsize -= nbtouse
nbdone += nbtouse
ret._normalize()
return ret
def _inplace_divrem1(pout, pin, n, size=0):
"""
Divide bigint pin by non-zero digit n, storing quotient
in pout, and returning the remainder. It's OK for pin == pout on entry.
"""
rem = _widen_digit(0)
assert n > 0 and n <= MASK
if not size:
size = pin.numdigits()
size -= 1
while size >= 0:
rem = (rem << SHIFT) | pin.widedigit(size)
hi = rem // n
pout.setdigit(size, hi)
rem -= hi * n
size -= 1
return rffi.cast(lltype.Signed, rem)
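# Hedged pure-Python sketch of the loop above (not the original code): the
# digits are little-endian base-2**shift_bits limbs, and the running remainder
# is extended by one limb per step, exactly like long division by hand.
def _divrem1_sketch(digits_le, n, shift_bits=15):
    assert 0 < n < (1 << shift_bits)
    rem = 0
    quot = [0] * len(digits_le)
    for i in reversed(range(len(digits_le))):
        rem = (rem << shift_bits) | digits_le[i]
        quot[i] = rem // n
        rem -= quot[i] * n
    return quot, rem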
def _divrem1(a, n):
"""
Divide a bigint integer by a digit, returning both the quotient
and the remainder as a tuple.
The sign of a is ignored; n should not be zero.
"""
assert n > 0 and n <= MASK
size = a.numdigits()
z = rbigint([NULLDIGIT] * size, 1, size)
rem = _inplace_divrem1(z, a, n)
z._normalize()
return z, rem
def _v_iadd(x, xofs, m, y, n):
"""
x and y are rbigints, m >= n required. x.digits[0:m] is modified in place,
by adding y.digits[0:n] to it. Carries are propagated as far as
x[m-1], and the remaining carry (0 or 1) is returned.
Python adaptation: x is addressed relative to xofs!
"""
carry = UDIGIT_TYPE(0)
assert m >= n
i = _load_unsigned_digit(xofs)
iend = xofs + n
while i < iend:
carry += x.udigit(i) + y.udigit(i-xofs)
x.setdigit(i, carry)
carry >>= SHIFT
i += 1
iend = xofs + m
while carry and i < iend:
carry += x.udigit(i)
x.setdigit(i, carry)
carry >>= SHIFT
i += 1
return carry
def _v_isub(x, xofs, m, y, n):
"""
x and y are rbigints, m >= n required. x.digits[0:m] is modified in place,
by subtracting y.digits[0:n] from it. Borrows are propagated as
far as x[m-1], and the remaining borrow (0 or 1) is returned.
Python adaptation: x is addressed relative to xofs!
"""
borrow = UDIGIT_TYPE(0)
assert m >= n
i = _load_unsigned_digit(xofs)
iend = xofs + n
while i < iend:
borrow = x.udigit(i) - y.udigit(i-xofs) - borrow
x.setdigit(i, borrow)
borrow >>= SHIFT
borrow &= 1 # keep only 1 sign bit
i += 1
iend = xofs + m
while borrow and i < iend:
borrow = x.udigit(i) - borrow
x.setdigit(i, borrow)
borrow >>= SHIFT
borrow &= 1
i += 1
return borrow
def _muladd1(a, n, extra=0):
"""Multiply by a single digit and add a single digit, ignoring the sign.
"""
size_a = a.numdigits()
z = rbigint([NULLDIGIT] * (size_a+1), 1)
assert extra & MASK == extra
carry = _widen_digit(extra)
i = 0
while i < size_a:
carry += a.widedigit(i) * n
z.setdigit(i, carry)
carry >>= SHIFT
i += 1
z.setdigit(i, carry)
z._normalize()
return z
_muladd1._annspecialcase_ = "specialize:argtype(2)"
def _v_lshift(z, a, m, d):
""" Shift digit vector a[0:m] d bits left, with 0 <= d < SHIFT. Put
* result in z[0:m], and return the d bits shifted out of the top.
"""
carry = 0
assert 0 <= d and d < SHIFT
i = 0
while i < m:
acc = a.widedigit(i) << d | carry
z.setdigit(i, acc)
carry = acc >> SHIFT
i += 1
return carry
def _v_rshift(z, a, m, d):
""" Shift digit vector a[0:m] d bits right, with 0 <= d < PyLong_SHIFT. Put
* result in z[0:m], and return the d bits shifted out of the bottom.
"""
carry = _widen_digit(0)
acc = _widen_digit(0)
mask = (1 << d) - 1
assert 0 <= d and d < SHIFT
i = m-1
while i >= 0:
acc = (carry << SHIFT) | a.widedigit(i)
carry = acc & mask
z.setdigit(i, acc >> d)
i -= 1
return carry
def _x_divrem(v1, w1):
""" Unsigned bigint division with remainder -- the algorithm """
size_v = v1.numdigits()
size_w = w1.numdigits()
assert size_v >= size_w and size_w > 1
v = rbigint([NULLDIGIT] * (size_v + 1), 1, size_v + 1)
w = rbigint([NULLDIGIT] * size_w, 1, size_w)
""" normalize: shift w1 left so that its top digit is >= PyLong_BASE/2.
shift v1 left by the same amount. Results go into w and v. """
d = SHIFT - bits_in_digit(w1.digit(abs(size_w-1)))
carry = _v_lshift(w, w1, size_w, d)
assert carry == 0
carry = _v_lshift(v, v1, size_v, d)
if carry != 0 or v.digit(abs(size_v-1)) >= w.digit(abs(size_w-1)):
v.setdigit(size_v, carry)
size_v += 1
""" Now v->ob_digit[size_v-1] < w->ob_digit[size_w-1], so quotient has
at most (and usually exactly) k = size_v - size_w digits. """
k = size_v - size_w
if k == 0:
# We can't use v1 nor NULLRBIGINT here, as some functions modify the result.
assert _v_rshift(w, v, size_w, d) == 0
w._normalize()
return rbigint([NULLDIGIT]), w
assert k > 0
a = rbigint([NULLDIGIT] * k, 1, k)
wm1 = w.widedigit(abs(size_w-1))
wm2 = w.widedigit(abs(size_w-2))
j = size_v - 1
k -= 1
while k >= 0:
assert j >= 0
""" inner loop: divide vk[0:size_w+1] by w0[0:size_w], giving
single-digit quotient q, remainder in vk[0:size_w]. """
# estimate quotient digit q; may overestimate by 1 (rare)
if j >= size_v:
vtop = 0
else:
vtop = v.widedigit(j)
assert vtop <= wm1
vv = (vtop << SHIFT) | v.widedigit(abs(j-1))
q = vv / wm1
r = vv - wm1 * q
while wm2 * q > ((r << SHIFT) | v.widedigit(abs(j-2))):
q -= 1
r += wm1
#assert q <= MASK+1 -- we would need to compare q against BASE, but that triggers a built-in long error, so the check is skipped.
# subtract q*w0[0:size_w] from vk[0:size_w+1]
zhi = 0
i = 0
while i < size_w:
z = v.widedigit(k+i) + zhi - q * w.widedigit(i)
v.setdigit(k+i, z)
zhi = z >> SHIFT
i += 1
# add w back if q was too large (this branch taken rarely)
if vtop + zhi < 0:
carry = UDIGIT_TYPE(0)
i = 0
while i < size_w:
carry += v.udigit(k+i) + w.udigit(i)
v.setdigit(k+i, carry)
carry >>= SHIFT
i += 1
q -= 1
# store quotient digit
a.setdigit(k, q)
k -= 1
j -= 1
carry = _v_rshift(w, v, size_w, d)
assert carry == 0
a._normalize()
w._normalize()
return a, w
def _divrem(a, b):
""" Long division with remainder, top-level routine """
size_a = a.numdigits()
size_b = b.numdigits()
if b.sign == 0:
raise ZeroDivisionError("long division or modulo by zero")
if (size_a < size_b or
(size_a == size_b and
a.digit(abs(size_a-1)) < b.digit(abs(size_b-1)))):
# |a| < |b|
return NULLRBIGINT, a# result is 0
if size_b == 1:
z, urem = _divrem1(a, b.digit(0))
rem = rbigint([_store_digit(urem)], int(urem != 0), 1)
else:
z, rem = _x_divrem(a, b)
# Set the signs.
# The quotient z has the sign of a*b;
# the remainder r has the sign of a,
# so a = b*z + r.
if a.sign != b.sign:
z.sign = - z.sign
if a.sign < 0 and rem.sign != 0:
rem.sign = - rem.sign
return z, rem
# ______________ conversions to double _______________
def _AsScaledDouble(v):
"""
NBITS_WANTED should be > the number of bits in a double's precision,
but small enough so that 2**NBITS_WANTED is within the normal double
range. nbitsneeded is set to 1 less than that because the most-significant
Python digit contains at least 1 significant bit, but we don't want to
bother counting them (catering to the worst case cheaply).
57 is one more than VAX-D double precision; I (Tim) don't know of a double
format with more precision than that; it's 1 larger so that we add in at
least one round bit to stand in for the ignored least-significant bits.
"""
NBITS_WANTED = 57
if v.sign == 0:
return 0.0, 0
i = v.numdigits() - 1
sign = v.sign
x = float(v.digit(i))
nbitsneeded = NBITS_WANTED - 1
# Invariant: i Python digits remain unaccounted for.
while i > 0 and nbitsneeded > 0:
i -= 1
x = x * FLOAT_MULTIPLIER + float(v.digit(i))
nbitsneeded -= SHIFT
# There are i digits we didn't shift in. Pretending they're all
# zeroes, the true value is x * 2**(i*SHIFT).
exponent = i
assert x > 0.0
return x * sign, exponent
##def ldexp(x, exp):
## assert type(x) is float
## lb1 = LONG_BIT - 1
## multiplier = float(1 << lb1)
## while exp >= lb1:
## x *= multiplier
## exp -= lb1
## if exp:
## x *= float(1 << exp)
## return x
# note that math.ldexp checks for overflows,
# while the C ldexp is not guaranteed to do so.
# XXX make sure that we don't ignore this!
# YYY no, we decided to ignore this!
@jit.dont_look_inside
def _AsDouble(n):
""" Get a C double from a bigint object. """
# This is a "correctly-rounded" version from Python 2.7.
#
from rpython.rlib import rfloat
DBL_MANT_DIG = rfloat.DBL_MANT_DIG # 53 for IEEE 754 binary64
DBL_MAX_EXP = rfloat.DBL_MAX_EXP # 1024 for IEEE 754 binary64
assert DBL_MANT_DIG < r_ulonglong.BITS
# Reduce to case n positive.
sign = n.sign
if sign == 0:
return 0.0
elif sign < 0:
n = n.neg()
# Find exponent: 2**(exp - 1) <= n < 2**exp
exp = n.bit_length()
# Get top DBL_MANT_DIG + 2 significant bits of n, with a 'sticky'
# last bit: that is, the least significant bit of the result is 1
# iff any of the shifted-out bits is set.
shift = DBL_MANT_DIG + 2 - exp
if shift >= 0:
q = _AsULonglong_mask(n) << shift
if not we_are_translated():
assert q == n.tolong() << shift # no masking actually done
else:
shift = -shift
n2 = n.rshift(shift)
q = _AsULonglong_mask(n2)
if not we_are_translated():
assert q == n2.tolong() # no masking actually done
if not n.eq(n2.lshift(shift)):
q |= 1
# Now remove the excess 2 bits, rounding to nearest integer (with
# ties rounded to even).
q = (q >> 2) + r_uint((bool(q & 2) and bool(q & 5)))
if exp > DBL_MAX_EXP or (exp == DBL_MAX_EXP and
q == r_ulonglong(1) << DBL_MANT_DIG):
raise OverflowError("integer too large to convert to float")
ad = math.ldexp(float(q), exp - DBL_MANT_DIG)
if sign < 0:
ad = -ad
return ad
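# Hedged restatement of the rounding step above with plain ints: the two
# lowest bits of q are dropped, rounding to nearest; an exact halfway case
# (bit 1 set, bit 0 clear) only rounds up when the lowest kept bit (bit 2)
# is set, i.e. ties go to even.
def _round_away_two_bits(q):
    round_up = bool(q & 2) and bool(q & 5)
    return (q >> 2) + (1 if round_up else 0)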
def _loghelper(func, arg):
"""
A decent logarithm is easy to compute even for huge bigints, but libm can't
do that by itself -- loghelper can. func is log or log10.
Note that overflow isn't possible: a bigint can contain
no more than INT_MAX * SHIFT bits, so has value certainly less than
2**(2**64 * 2**16) == 2**2**80, and log2 of that is 2**80, which is
small enough to fit in an IEEE single. log and log10 are even smaller.
"""
x, e = _AsScaledDouble(arg)
if x <= 0.0:
raise ValueError
# Value is ~= x * 2**(e*SHIFT), so the log ~=
# log(x) + log(2) * e * SHIFT.
# CAUTION: e*SHIFT may overflow using int arithmetic,
# so force use of double. */
return func(x) + (e * float(SHIFT) * func(2.0))
_loghelper._annspecialcase_ = 'specialize:arg(0)'
# ____________________________________________________________
BASE_AS_FLOAT = float(1 << SHIFT) # note that it may not fit an int
BitLengthTable = ''.join(map(chr, [
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]))
def bits_in_digit(d):
# returns the unique integer k such that 2**(k-1) <= d <
# 2**k if d is nonzero, else 0.
d_bits = 0
while d >= 32:
d_bits += 6
d >>= 6
d_bits += ord(BitLengthTable[d])
return d_bits
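# Illustrative self-check (not in the original source): bits_in_digit agrees
# with Python's int.bit_length() for every value a digit can take; only the
# table and the loop above are exercised.
def _check_bits_in_digit(limit=1 << 15):
    for d in range(limit):
        assert bits_in_digit(d) == d.bit_length()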
def _truediv_result(result, negate):
if negate:
result = -result
return result
def _truediv_overflow():
raise OverflowError("integer division result too large for a float")
def _bigint_true_divide(a, b):
# A longish method to obtain the floating-point result with as much
# precision as theoretically possible. The code is almost directly
# copied from CPython. See there (Objects/longobject.c,
# long_true_divide) for detailed comments. Method in a nutshell:
#
# 0. reduce to case a, b > 0; filter out obvious underflow/overflow
# 1. choose a suitable integer 'shift'
# 2. use integer arithmetic to compute x = floor(2**-shift*a/b)
# 3. adjust x for correct rounding
# 4. convert x to a double dx with the same value
# 5. return ldexp(dx, shift).
from rpython.rlib import rfloat
DBL_MANT_DIG = rfloat.DBL_MANT_DIG # 53 for IEEE 754 binary64
DBL_MAX_EXP = rfloat.DBL_MAX_EXP # 1024 for IEEE 754 binary64
DBL_MIN_EXP = rfloat.DBL_MIN_EXP
MANT_DIG_DIGITS = DBL_MANT_DIG // SHIFT
MANT_DIG_BITS = DBL_MANT_DIG % SHIFT
# Reduce to case where a and b are both positive.
negate = (a.sign < 0) ^ (b.sign < 0)
if not b.tobool():
raise ZeroDivisionError("long division or modulo by zero")
if not a.tobool():
return _truediv_result(0.0, negate)
a_size = a.numdigits()
b_size = b.numdigits()
# Fast path for a and b small (exactly representable in a double).
# Relies on floating-point division being correctly rounded; results
# may be subject to double rounding on x86 machines that operate with
# the x87 FPU set to 64-bit precision.
a_is_small = (a_size <= MANT_DIG_DIGITS or
(a_size == MANT_DIG_DIGITS+1 and
a.digit(MANT_DIG_DIGITS) >> MANT_DIG_BITS == 0))
b_is_small = (b_size <= MANT_DIG_DIGITS or
(b_size == MANT_DIG_DIGITS+1 and
b.digit(MANT_DIG_DIGITS) >> MANT_DIG_BITS == 0))
if a_is_small and b_is_small:
a_size -= 1
da = float(a.digit(a_size))
while True:
a_size -= 1
if a_size < 0:
break
da = da * BASE_AS_FLOAT + a.digit(a_size)
b_size -= 1
db = float(b.digit(b_size))
while True:
b_size -= 1
if b_size < 0:
break
db = db * BASE_AS_FLOAT + b.digit(b_size)
return _truediv_result(da / db, negate)
# Catch obvious cases of underflow and overflow
diff = a_size - b_size
if diff > sys.maxint/SHIFT - 1:
return _truediv_overflow() # Extreme overflow
elif diff < 1 - sys.maxint/SHIFT:
return _truediv_result(0.0, negate) # Extreme underflow
# Next line is now safe from overflowing integers
diff = (diff * SHIFT + bits_in_digit(a.digit(a_size - 1)) -
bits_in_digit(b.digit(b_size - 1)))
# Now diff = a_bits - b_bits.
if diff > DBL_MAX_EXP:
return _truediv_overflow()
elif diff < DBL_MIN_EXP - DBL_MANT_DIG - 1:
return _truediv_result(0.0, negate)
# Choose value for shift; see comments for step 1 in CPython.
shift = max(diff, DBL_MIN_EXP) - DBL_MANT_DIG - 2
inexact = False
# x = abs(a * 2**-shift)
if shift <= 0:
x = a.lshift(-shift)
else:
x = a.rshift(shift, dont_invert=True)
# set inexact if any of the bits shifted out is nonzero
if not a.eq(x.lshift(shift)):
inexact = True
# x //= b. If the remainder is nonzero, set inexact.
x, rem = _divrem(x, b)
if rem.tobool():
inexact = True
assert x.tobool() # result of division is never zero
x_size = x.numdigits()
x_bits = (x_size-1)*SHIFT + bits_in_digit(x.digit(x_size-1))
# The number of extra bits that have to be rounded away.
extra_bits = max(x_bits, DBL_MIN_EXP - shift) - DBL_MANT_DIG
assert extra_bits == 2 or extra_bits == 3
# Round by remembering a modified copy of the low digit of x
mask = r_uint(1 << (extra_bits - 1))
low = x.udigit(0) | r_uint(inexact)
if (low & mask) != 0 and (low & (3*mask-1)) != 0:
low += mask
x_digit_0 = low & ~(mask-1)
# Convert x to a double dx; the conversion is exact.
x_size -= 1
dx = 0.0
while x_size > 0:
dx += x.digit(x_size)
dx *= BASE_AS_FLOAT
x_size -= 1
dx += x_digit_0
# Check whether ldexp result will overflow a double.
if (shift + x_bits >= DBL_MAX_EXP and
(shift + x_bits > DBL_MAX_EXP or dx == math.ldexp(1.0, x_bits))):
return _truediv_overflow()
return _truediv_result(math.ldexp(dx, shift), negate)
# ____________________________________________________________
BASE8 = '01234567'
BASE10 = '0123456789'
BASE16 = '0123456789abcdef'
def _format_base2_notzero(a, digits, prefix='', suffix=''):
base = len(digits)
# JRH: special case for power-of-2 bases
accum = 0
accumbits = 0 # # of bits in accum
basebits = 0
i = base
while i > 1:
basebits += 1
i >>= 1
# Compute a rough upper bound for the length of the string
size_a = a.numdigits()
i = 5 + len(prefix) + len(suffix) + (size_a*SHIFT + basebits-1) // basebits
result = [chr(0)] * i
next_char_index = i
j = len(suffix)
while j > 0:
next_char_index -= 1
j -= 1
result[next_char_index] = suffix[j]
i = 0
while i < size_a:
accum |= a.widedigit(i) << accumbits
accumbits += SHIFT
assert accumbits >= basebits
while 1:
cdigit = intmask(accum & (base - 1))
next_char_index -= 1
assert next_char_index >= 0
result[next_char_index] = digits[cdigit]
accumbits -= basebits
accum >>= basebits
if i < size_a - 1:
if accumbits < basebits:
break
else:
if accum <= 0:
break
i += 1
j = len(prefix)
while j > 0:
next_char_index -= 1
j -= 1
result[next_char_index] = prefix[j]
if a.sign < 0:
next_char_index -= 1
result[next_char_index] = '-'
assert next_char_index >= 0 # otherwise, buffer overflow (this is also a
# hint for the annotator for the slice below)
return ''.join(result[next_char_index:])
class _PartsCache(object):
def __init__(self):
# 34 entries, one per base in range(3, 37): bases 0 and 1 make no sense,
# and base 2 (a power of two) is handled differently
self.parts_cache = [None] * 34
self.mindigits = [0] * 34
for i in range(34):
base = i + 3
mindigits = 1
while base ** mindigits < sys.maxint:
mindigits += 1
mindigits -= 1
self.mindigits[i] = mindigits
def get_cached_parts(self, base):
index = base - 3
res = self.parts_cache[index]
if res is None:
rbase = rbigint.fromint(base)
part = rbase.pow(rbigint.fromint(self.mindigits[index]))
res = [part]
self.parts_cache[base - 3] = res
return res
def get_mindigits(self, base):
return self.mindigits[base - 3]
_parts_cache = _PartsCache()
def _format_int_general(val, digits):
base = len(digits)
out = []
while val:
out.append(digits[val % base])
val //= base
out.reverse()
return "".join(out)
def _format_int10(val, digits):
return str(val)
@specialize.arg(7)
def _format_recursive(x, i, output, pts, digits, size_prefix, mindigits, _format_int):
# bottomed out with min_digit sized pieces
# use str of ints
if i < 0:
# this checks whether any digit has been appended yet
if output.getlength() == size_prefix:
if x.sign != 0:
s = _format_int(x.toint(), digits)
output.append(s)
else:
s = _format_int(x.toint(), digits)
output.append_multiple_char(digits[0], mindigits - len(s))
output.append(s)
else:
top, bot = x.divmod(pts[i]) # split the number
_format_recursive(top, i-1, output, pts, digits, size_prefix, mindigits, _format_int)
_format_recursive(bot, i-1, output, pts, digits, size_prefix, mindigits, _format_int)
def _format(x, digits, prefix='', suffix=''):
if x.sign == 0:
return prefix + "0" + suffix
base = len(digits)
assert base >= 2 and base <= 36
if (base & (base - 1)) == 0:
return _format_base2_notzero(x, digits, prefix, suffix)
negative = x.sign < 0
if negative:
x = x.neg()
rbase = rbigint.fromint(base)
two = rbigint.fromint(2)
pts = _parts_cache.get_cached_parts(base)
mindigits = _parts_cache.get_mindigits(base)
stringsize = mindigits
startindex = 0
for startindex, part in enumerate(pts):
if not part.lt(x):
break
stringsize *= 2 # XXX can this overflow on 32 bit?
else:
# not enough parts computed yet
while pts[-1].lt(x):
pts.append(pts[-1].pow(two))
stringsize *= 2
startindex = len(pts) - 1
# remove first base**2**i greater than x
startindex -= 1
output = StringBuilder(stringsize)
if negative:
output.append('-')
output.append(prefix)
if digits == BASE10:
_format_recursive(
x, startindex, output, pts, digits, output.getlength(), mindigits,
_format_int10)
else:
_format_recursive(
x, startindex, output, pts, digits, output.getlength(), mindigits,
_format_int_general)
output.append(suffix)
return output.build()
def _bitwise(a, op, b): # '&', '|', '^'
""" Bitwise and/or/xor operations """
if a.sign < 0:
a = a.invert()
maska = MASK
else:
maska = 0
if b.sign < 0:
b = b.invert()
maskb = MASK
else:
maskb = 0
negz = 0
if op == '^':
if maska != maskb:
maska ^= MASK
negz = -1
elif op == '&':
if maska and maskb:
op = '|'
maska ^= MASK
maskb ^= MASK
negz = -1
elif op == '|':
if maska or maskb:
op = '&'
maska ^= MASK
maskb ^= MASK
negz = -1
# JRH: The original logic here was to allocate the result value (z)
# as the longer of the two operands. However, there are some cases
# where the result is guaranteed to be shorter than that: AND of two
# positives, OR of two negatives: use the shorter number. AND with
# mixed signs: use the positive number. OR with mixed signs: use the
# negative number. After the transformations above, op will be '&'
# iff one of these cases applies, and mask will be non-0 for operands
# whose length should be ignored.
size_a = a.numdigits()
size_b = b.numdigits()
if op == '&':
if maska:
size_z = size_b
else:
if maskb:
size_z = size_a
else:
size_z = min(size_a, size_b)
else:
size_z = max(size_a, size_b)
z = rbigint([NULLDIGIT] * size_z, 1, size_z)
i = 0
while i < size_z:
if i < size_a:
diga = a.digit(i) ^ maska
else:
diga = maska
if i < size_b:
digb = b.digit(i) ^ maskb
else:
digb = maskb
if op == '&':
z.setdigit(i, diga & digb)
elif op == '|':
z.setdigit(i, diga | digb)
elif op == '^':
z.setdigit(i, diga ^ digb)
i += 1
z._normalize()
if negz == 0:
return z
return z.invert()
_bitwise._annspecialcase_ = "specialize:arg(1)"
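# Hedged note on the sign handling above, using plain Python ints: negative
# operands are complemented up front, combined via De Morgan-style identities,
# and the result is complemented back when negz is set.  The identities relied
# on are:
def _bitwise_identities(a=-12, b=10):
    assert (a & b) == ~((~a) | (~b))
    assert (a | b) == ~((~a) & (~b))
    assert (a ^ b) == ~((~a) ^ b)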
def _int_bitwise(a, op, b): # '&', '|', '^'
""" Bitwise and/or/xor operations """
if not int_in_valid_range(b):
# Fallback to long.
return _bitwise(a, op, rbigint.fromint(b))
if a.sign < 0:
a = a.invert()
maska = MASK
else:
maska = 0
if b < 0:
b = ~b
maskb = MASK
else:
maskb = 0
negz = 0
if op == '^':
if maska != maskb:
maska ^= MASK
negz = -1
elif op == '&':
if maska and maskb:
op = '|'
maska ^= MASK
maskb ^= MASK
negz = -1
elif op == '|':
if maska or maskb:
op = '&'
maska ^= MASK
maskb ^= MASK
negz = -1
# JRH: The original logic here was to allocate the result value (z)
# as the longer of the two operands. However, there are some cases
# where the result is guaranteed to be shorter than that: AND of two
# positives, OR of two negatives: use the shorter number. AND with
# mixed signs: use the positive number. OR with mixed signs: use the
# negative number. After the transformations above, op will be '&'
# iff one of these cases applies, and mask will be non-0 for operands
# whose length should be ignored.
size_a = a.numdigits()
if op == '&':
if maska:
size_z = 1
else:
if maskb:
size_z = size_a
else:
size_z = 1
else:
size_z = size_a
z = rbigint([NULLDIGIT] * size_z, 1, size_z)
i = 0
while i < size_z:
if i < size_a:
diga = a.digit(i) ^ maska
else:
diga = maska
if i == 0:
digb = b ^ maskb
else:
digb = maskb
if op == '&':
z.setdigit(i, diga & digb)
elif op == '|':
z.setdigit(i, diga | digb)
elif op == '^':
z.setdigit(i, diga ^ digb)
i += 1
z._normalize()
if negz == 0:
return z
return z.invert()
_int_bitwise._annspecialcase_ = "specialize:arg(1)"
ULONGLONG_BOUND = r_ulonglong(1L << (r_longlong.BITS-1))
LONGLONG_MIN = r_longlong(-(1L << (r_longlong.BITS-1)))
def _AsLongLong(v):
"""
Get a r_longlong integer from a bigint object.
Raises OverflowError if overflow occurs.
"""
x = _AsULonglong_ignore_sign(v)
# grr grr grr
if x >= ULONGLONG_BOUND:
if x == ULONGLONG_BOUND and v.sign < 0:
x = LONGLONG_MIN
else:
raise OverflowError
else:
x = r_longlong(x)
if v.sign < 0:
x = -x
return x
def _AsULonglong_ignore_sign(v):
x = r_ulonglong(0)
i = v.numdigits() - 1
while i >= 0:
prev = x
x = (x << SHIFT) + r_ulonglong(v.widedigit(i))
if (x >> SHIFT) != prev:
raise OverflowError(
"long int too large to convert to unsigned long long int")
i -= 1
return x
def make_unsigned_mask_conversion(T):
def _As_unsigned_mask(v):
x = T(0)
i = v.numdigits() - 1
while i >= 0:
x = (x << SHIFT) + T(v.digit(i))
i -= 1
if v.sign < 0:
x = -x
return x
return _As_unsigned_mask
_AsULonglong_mask = make_unsigned_mask_conversion(r_ulonglong)
_AsUInt_mask = make_unsigned_mask_conversion(r_uint)
def _hash(v):
# This is designed so that Python ints and longs with the
# same value hash to the same value, otherwise comparisons
# of mapping keys will turn out weird. Moreover, purely
# to please decimal.py, we return a hash that satisfies
# hash(x) == hash(x % ULONG_MAX). In particular, this
# implies that hash(x) == hash(x % (2**64-1)).
i = v.numdigits() - 1
sign = v.sign
x = r_uint(0)
LONG_BIT_SHIFT = LONG_BIT - SHIFT
while i >= 0:
# Force a native long #-bits (32 or 64) circular shift
x = (x << SHIFT) | (x >> LONG_BIT_SHIFT)
x += v.udigit(i)
# If the addition above overflowed we compensate by
# incrementing. This preserves the value modulo
# ULONG_MAX.
if x < v.udigit(i):
x += 1
i -= 1
res = intmask(intmask(x) * sign)
return res
#_________________________________________________________________
# a few internal helpers
def digits_max_for_base(base):
dec_per_digit = 1
while base ** dec_per_digit < MASK:
dec_per_digit += 1
dec_per_digit -= 1
return base ** dec_per_digit
BASE_MAX = [0, 0] + [digits_max_for_base(_base) for _base in range(2, 37)]
DEC_MAX = digits_max_for_base(10)
assert DEC_MAX == BASE_MAX[10]
def _decimalstr_to_bigint(s):
# a string that has been already parsed to be decimal and valid,
# is turned into a bigint
p = 0
lim = len(s)
sign = False
if s[p] == '-':
sign = True
p += 1
elif s[p] == '+':
p += 1
a = rbigint()
tens = 1
dig = 0
ord0 = ord('0')
while p < lim:
dig = dig * 10 + ord(s[p]) - ord0
p += 1
tens *= 10
if tens == DEC_MAX or p == lim:
a = _muladd1(a, tens, dig)
tens = 1
dig = 0
if sign and a.sign == 1:
a.sign = -1
return a
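# Hedged pure-int sketch of the chunking trick above (not the original code,
# and s is assumed to hold decimal digits only): characters are folded into
# dig until tens reaches the chunk limit (DEC_MAX in the real code), so the
# expensive multiply-add runs once per chunk instead of once per character.
def _decimal_chunks_to_int(s, chunk_max=10 ** 9):
    acc, tens, dig = 0, 1, 0
    for ch in s:
        dig = dig * 10 + (ord(ch) - ord('0'))
        tens *= 10
        if tens == chunk_max:
            acc = acc * tens + dig
            tens, dig = 1, 0
    return acc * tens + dig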
def parse_digit_string(parser):
# helper for fromstr
a = rbigint()
base = parser.base
digitmax = BASE_MAX[base]
tens, dig = 1, 0
while True:
digit = parser.next_digit()
if tens == digitmax or digit < 0:
a = _muladd1(a, tens, dig)
if digit < 0:
break
dig = digit
tens = base
else:
dig = dig * base + digit
tens *= base
a.sign *= parser.sign
return a
|
import tvm
import tvm._ffi
import numpy as np
from functools import reduce
from tvm.tensor_graph.core.utils import to_int, to_tuple, flatten_tir_graph, op_feature
def make_tir_graph(fwd_graph, loss=None, optimizer=None, inference=True, need_output=True, need_grad=True):
if inference:
finputs, foutputs, fweights = fwd_graph()
inputs = [x.tvm_tensor for x in finputs]
weights = [x.tvm_tensor for x in fweights]
outputs = [x.tvm_tensor for x in foutputs]
labels = []
loss = None
gradients = []
lr = None
updates = []
tir_graph = tvm.tg.make_tir_graph_inference(inputs, outputs, weights)
else:
assert loss is not None and optimizer is not None
bwd_graph = fwd_graph.make_backward(loss, optimizer)
inputs = [x.tvm_tensor for x in bwd_graph.inputs]
weights = [x.tvm_tensor for x in bwd_graph.weights]
outputs = [x.tvm_tensor for x in bwd_graph.outputs] if need_output else []
labels = [x.tvm_tensor for x in bwd_graph.labels]
loss = bwd_graph.loss.tvm_tensor
gradients = [x.tvm_tensor for x in bwd_graph.gradients] if need_grad else []
lr = optimizer.lr_tensor
updates = [x.tvm_tensor for x in bwd_graph.updates]
tir_graph = tvm.tg.make_tir_graph_training(inputs, labels, outputs, weights, loss, gradients, lr, updates)
return tir_graph
@tvm._ffi.register_func("tg.graph.partition_policy")
def partition_policy(graph, pre, post, number):
pre_stat = graph.operation_stat_dict[pre]
post_stat = graph.operation_stat_dict[post]
# root op must be separated
if pre_stat.must_compute_root:
return True
if pre_stat.num_consumers > 1:
# do not fuse multi-output
return True
# if pre_stat.injective:
# return False
# if number > 10:
# return True
if pre_stat.reductive and post_stat.reductive:
# do not fuse reductive nodes
return True
if pre_stat.injective and post_stat.injective:
return False
if pre_stat.injective and post_stat.reductive:
return False
if pre_stat.reductive and post_stat.injective:
return True
# if pre_stat.injective and post_stat.injective:
# return ((not pre_stat.merge_backward) and post_stat.merge_backward)
# if pre_stat.injective and post_stat.reductive:
# return not pre_stat.merge_backward
# if pre_stat.reductive and post_stat.injective:
# return post_stat.merge_backward
return True
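# Hedged restatement of the cut rules above on plain booleans (illustrative
# only; the must_compute_root and multi-consumer checks are assumed to have
# been handled first).  True means "cut between pre and post", False means
# "fuse".
def _boundary_rule(pre_injective, pre_reductive, post_injective, post_reductive):
    if pre_reductive and post_reductive:
        return True    # never fuse two reductions
    if pre_injective and post_injective:
        return False   # elementwise chains fuse freely
    if pre_injective and post_reductive:
        return False   # producers are fused into the reduction that consumes them
    if pre_reductive and post_injective:
        return True    # a reduction is not fused into its consumers
    return True        # anything else stays separate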
def set_partition_policy(policy):
tvm._ffi.register_func("tg.graph.partition_policy", policy, True)
"""
Below are deprecated Python implementations
They'll be removed in the future
"""
def is_injective(op):
is_compute = isinstance(op, tvm.te.tensor.ComputeOp)
has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis
return is_compute and (not has_reduce)
def is_reductive(op):
has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis
return has_reduce
def remain_shape(op):
is_compute = isinstance(op, tvm.te.tensor.ComputeOp)
if not is_compute:
return False
ret = True
output_shape = to_tuple(op.output(0).shape)
for t in op.input_tensors:
if to_tuple(t.shape) != output_shape:
ret = False
break
return ret
def able_inline(op, down_graph):
is_compute = isinstance(op, tvm.te.tensor.ComputeOp)
has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis
is_output = False
for i in range(op.num_outputs):
if op.output(i) not in down_graph:
is_output = True
break
return is_compute and (not has_reduce) and (not is_output)
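# Hedged example (assumes a standard TVM installation): an elementwise compute
# op is classified as injective, while a summation over a reduce axis is
# classified as reductive by the helpers above.
def _classification_example():
    A = tvm.te.placeholder((16, 16), name="A")
    B = tvm.te.compute((16, 16), lambda i, j: A[i, j] + 1.0, name="B")
    k = tvm.te.reduce_axis((0, 16), name="k")
    C = tvm.te.compute((16,), lambda i: tvm.te.sum(A[i, k], axis=k), name="C")
    assert is_injective(B.op) and not is_reductive(B.op)
    assert is_reductive(C.op) and not is_injective(C.op)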
class PyOpState(object):
def __init__(self):
self.injective = False
self.elementwise = False
self.reductive = False
self.num_inputs = 0
self.num_consumers = 0
self.head = True
# self.tail = False
self.reductions = []
self.output_shape = []
self.num_add = 0
self.num_mul = 0
self.num_div = 0
self.num_branch = 0
self.num_logic = 0
self.num_special = 0
self.gflop = 0
self.input_occur_count = []
# is output
self.must_root = False
def set_states(self, op, down_graph, root_ops):
assert isinstance(op, tvm.te.tensor.ComputeOp)
self.injective = is_injective(op)
# the output shapes of a multi-output op are all the same
self.output_shape = list(to_tuple(op.output(0).shape))
self.reductive = is_reductive(op)
self.elementwise = self.injective and remain_shape(op)
self.num_inputs = len(op.input_tensors)
for i in range(op.num_outputs):
if op.output(i) in down_graph:
self.num_consumers += len(down_graph[op.output(i)])
if self.reductive:
for iv in op.reduce_axis:
self.reductions.append(to_int(iv.dom.extent))
operation_count = tvm.tg.count_operation(op)
for (k, v) in operation_count.items():
setattr(self, k.value, v.value)
input_occur = tvm.tg.count_input_occur(op.input_tensors, op)
self.input_occur_count = [x.value for x in input_occur]
if op in root_ops:
self.must_root = True
self.gflop = reduce(lambda x, y: x * y, self.reductions, 1) * \
reduce(lambda x, y: x * y, self.output_shape, 1) * \
(self.num_add + self.num_mul + self.num_div) / 1e9
class PyTIRSubGraph(object):
def __init__(self):
self.inputs = {}
self.outputs = {}
self.labels = {}
self.weights = {}
self.loss = {}
self.gradients = {}
self.lr = {}
self.updates = {}
self.index = {}
self.connected_sets = {}
self.op_stat_dict = {}
self.op_list = []
self.ops = []
self.tensors = []
self.down_graph = {}
self.c_list = []
def __repr__(self):
ret = "PyTIRSubGraph\n"
ret += "inputs=" + str(self.inputs) + "\n"
ret += "outputs=" + str(self.outputs) + "\n"
ret += "labels=" + str(self.labels) + "\n"
ret += "weights=" + str(self.weights) + "\n"
ret += "loss=" + str(self.loss) + "\n"
ret += "gradients=" + str(self.gradients) + "\n"
ret += "lr=" + str(self.lr) + "\n"
ret += "updates=" + str(self.updates) + "\n"
return ret
def __str__(self):
return self.__repr__()
class PyTIRGraph(object):
"""PyTIRGraph
inputs : (list of) tvm Tensor
graph inputs
outputs : (list of) tvm Tensor
graph outputs
wire :
"""
def __init__(self, inputs, labels, outputs, weights, loss, gradients, lr, updates, wire=None):
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
if not isinstance(labels, (list, tuple)):
labels = [labels]
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
if not isinstance(weights, (list, tuple)):
weights = [weights]
if not isinstance(gradients, (list, tuple)):
gradients = [gradients]
if not isinstance(updates, (list, tuple)):
updates = [updates]
self.inputs = inputs
self.labels = labels
self.outputs = outputs
self.weights = weights
self.loss = loss
self.gradients = gradients
self.lr = lr
self.updates = updates
if self.loss is None:
self.root_ops = [x.op for x in outputs + gradients + updates]
else:
self.root_ops = [x.op for x in outputs + [loss] + gradients + updates]
if len(updates) > 0:
assert len(weights) == len(updates)
op_list, down_graph = flatten_tir_graph(self.root_ops)
# a list of compute op after topological sorting
self.op_list = op_list
self.num_ops = len(op_list)
self.op_feature_dict = {}
# down_graph maps each tensor to the list of ops that consume it
self.down_graph = down_graph
# these are runtime properties
self.ctx = None
self.tvm_array_dict = {}
# these are properties that can be modified by user
self.np_array_dict = {}
# these are properties that can be modified by scheduler
self.op_stat_dict = {}
self.subgraphs = {}
self.subgraph_features = {}
self.op_map = {}
self.call_order = []
self.schedules = {}
self.scheduled_subgraphs = set()
self.bufs = {}
self.functions = {}
self.shared_functions = {}
# initialize some of them
for op in op_list:
self.op_stat_dict[op] = PyOpState()
# get the states of each op
self._analyze()
def _analyze(self):
look_up = set(self.root_ops)
def func(op):
self.op_stat_dict[op].set_states(op, self.down_graph, look_up)
feature = op_feature(op)
self.op_feature_dict[op] = feature
return None
_ = list(map(func, self.op_list))
def partition_graph(self):
partition = PyTIRSubGraphPartition()
(subgraphs, op_map), order = partition.partion_graph(self)
self.subgraphs = subgraphs
self.op_map = op_map
self.call_order = order
def func(kv):
mark, subgraph = kv
tensors = list(set(list(subgraph.outputs.keys()) + list(subgraph.loss.keys())
+ list(subgraph.gradients.keys()) + list(subgraph.updates.keys())))
subgraph.tensors = tensors
ops = [x.op for x in tensors]
op_list, down_graph = flatten_tir_graph(ops, output_first=True)
op_stat_dict = {}
for op in op_list:
v = self.op_map[op]
if v in self.op_stat_dict:
op_stat_dict[op] = self.op_stat_dict[v]
subgraph.op_stat_dict = op_stat_dict
subgraph.ops = ops
subgraph.op_list = op_list
subgraph.down_graph = down_graph
self.subgraph_features[mark] = ";".join(map(lambda x: self.op_feature_dict[self.op_map[x]], op_list))
return None
_ = list(map(func, subgraphs.items()))
def set_inputs(self, inputs):
for tvm_tensor, np_array in inputs.items():
self.np_array_dict[tvm_tensor] = np_array
def set_lr(self, lr):
if self.lr is None:
raise RuntimeError("TIR Graph has no learning rate.")
self.np_array_dict[self.lr] = lr
def set_labels(self, labels):
for tvm_tensor, np_array in labels.items():
self.np_array_dict[tvm_tensor] = np_array
def set_weights(self, weights):
for tvm_tensor, np_array in weights.items():
self.np_array_dict[tvm_tensor] = np_array
def get_tvm_array(self, tvm_tensor):
return self.tvm_array_dict[tvm_tensor]
def get_outputs(self):
return [self.tvm_array_dict[x] for x in self.outputs]
def get_loss(self, tvm_tensor):
assert self.loss is not None
return self.tvm_array_dict[self.loss]
def get_gradients(self):
return [self.tvm_array_dict[x] for x in self.gradients]
def get_updates(self):
return [self.tvm_array_dict[x] for x in self.updates]
def clear_schedule(self):
self.op_stat_dict = {}
self.subgraphs = {}
self.subgraph_features = {}
self.op_map = {}
self.call_order = []
self.schedules = {}
self.scheduled_subgraphs = set()
self.bufs = {}
self.functions = {}
self.shared_functions = {}
# initialize some of them
for op in self.op_list:
self.op_stat_dict[op] = PyOpState()
# get the states of each op
self._analyze()
def clear_runtime(self):
self.ctx = None
self.tvm_array_dict = {}
def create_schedule_for(self, mark=0, force=False):
subgraphs = self.subgraphs
feature = self.subgraph_features[mark]
if force:
self.scheduled_subgraphs.remove(feature)
elif feature in self.scheduled_subgraphs:
return False
subgraph = subgraphs[mark]
inputs = list(subgraph.inputs.keys())
outputs = list(subgraph.outputs.keys())
weights = list(subgraph.weights.keys())
labels = list(subgraph.labels.keys())
loss = list(subgraph.loss.keys())
gradients = list(subgraph.gradients.keys())
lr = list(subgraph.lr.keys())
updates = list(subgraph.updates.keys())
sub_bufs = list(set(inputs + labels + outputs + weights + loss + gradients + lr + updates))
self.bufs[mark] = sub_bufs
ops = [x.op for x in outputs + loss + gradients + updates]
s = tvm.te.create_schedule(ops)
self.schedules[mark] = s
self.scheduled_subgraphs.add(feature)
return True
def create_schedule(self, force=False):
subgraphs = self.subgraphs
if force:
self.scheduled_subgraphs = set()
for mark, subgraph in subgraphs.items():
feature = self.subgraph_features[mark]
if feature in self.scheduled_subgraphs:
continue
inputs = list(subgraph.inputs.keys())
outputs = list(subgraph.outputs.keys())
weights = list(subgraph.weights.keys())
labels = list(subgraph.labels.keys())
loss = list(subgraph.loss.keys())
gradients = list(subgraph.gradients.keys())
lr = list(subgraph.lr.keys())
updates = list(subgraph.updates.keys())
sub_bufs = list(set(inputs + labels + outputs + weights + loss + gradients + lr + updates))
self.bufs[mark] = sub_bufs
ops = [x.op for x in outputs + loss + gradients + updates]
s = tvm.te.create_schedule(ops)
self.schedules[mark] = s
self.scheduled_subgraphs.add(feature)
def build_for(self, target, mark=0, force=False):
feature = self.subgraph_features[mark]
if force:
self.shared_functions.pop(feature)
elif feature in self.shared_functions:
self.functions[mark] = self.shared_functions[feature]
return True
bufs = self.bufs[mark]
sch = self.schedules[mark]
try:
func = tvm.build(sch, bufs, target=target)
self.functions[mark] = func
self.shared_functions[feature] = func
# print("build success for subgraph", mark)
return True
except Exception as e:
print("build error in subgraph", mark)
print(e)
# print(bufs)
# print(tvm.lower(sch, bufs, simple_mode=True))
return False
def build(self, target, force=False):
fail = 0
if force:
self.shared_functions = {}
for mark, sch in self.schedules.items():
feature = self.subgraph_features[mark]
if feature in self.shared_functions:
self.functions[mark] = self.shared_functions[feature]
continue
bufs = self.bufs[mark]
try:
func = tvm.build(sch, bufs, target=target)
self.functions[mark] = func
self.shared_functions[feature] = func
# print("build success for subgraph", mark)
except Exception as e:
fail += 1
print("build error in subgraph", mark)
print(e)
print(bufs)
print(tvm.lower(sch, bufs, simple_mode=True))
return fail == 0
def allocate_buffer(self, target, dev, force=False):
if not force and self.ctx is not None:
return
self.ctx = tvm.context(target, dev)
# inputs
for inp in self.inputs:
if inp in self.np_array_dict:
np_array = self.np_array_dict[inp].astype(inp.dtype)
else:
raise RuntimeError("Should provide input tensor for %s" % (str(inp)))
self.tvm_array_dict[inp] = tvm.nd.array(np_array, self.ctx)
# outputs
for out in self.outputs:
self.tvm_array_dict[out] = tvm.nd.empty(to_tuple(out.shape), out.dtype, ctx=self.ctx)
# labels
for label in self.labels:
if label in self.np_array_dict:
np_array = self.np_array_dict[label].astype(label.dtype)
else:
raise RuntimeError("Should provide input tensor for %s" % (str(label)))
self.tvm_array_dict[label] = tvm.nd.array(np_array, self.ctx)
# loss
if self.loss is not None:
self.tvm_array_dict[self.loss] = tvm.nd.empty(to_tuple(self.loss.shape), self.loss.dtype, ctx=self.ctx)
# weights
for weight in self.weights:
if weight in self.np_array_dict:
np_array = self.np_array_dict[weight].astype(weight.dtype)
else:
# TODO: add initializer
np_array = np.random.uniform(-1, 1, to_tuple(weight.shape)).astype(weight.dtype)
self.tvm_array_dict[weight] = tvm.nd.array(np_array, self.ctx)
# gradients
for grad in self.gradients:
self.tvm_array_dict[grad] = tvm.nd.empty(to_tuple(grad.shape), grad.dtype, ctx=self.ctx)
# lr
if self.lr is not None:
if self.lr in self.np_array_dict:
np_array = self.np_array_dict[self.lr].astype(self.lr.dtype)
else:
raise RuntimeError("Should provide learning rate.")
self.tvm_array_dict[self.lr] = tvm.nd.array(np_array, self.ctx)
# updates
for i, update in enumerate(self.updates):
self.tvm_array_dict[update] = self.tvm_array_dict[self.weights[i]]
# intermediate buffer
for subgraph in self.subgraphs.values():
for out, old_tensor in subgraph.outputs.items():
if old_tensor not in self.outputs:
# it's a new intermediate output
self.tvm_array_dict[old_tensor] = tvm.nd.empty(to_tuple(old_tensor.shape), old_tensor.dtype, ctx=self.ctx)
def run(self, scheduler, target, dev):
"""
This is not enabled
"""
raise NotImplementedError()
# generate specific space
# scheduler has a cache, so multiple calls have the same effect
scheduler.add_task(self, target)
config = scheduler.propose(self, target)
scheduler.apply_config(self, target, config)
# apply config
# 1. modify op stat list -> head, tail
# 2. make subgraphs
# 3. create schedule
# 4. modify schedule
self.build(target)
# allocate buffer
# only the first call has effect
self.allocate_buffer(target, dev)
for mark in self.call_order:
func = self.functions[mark]
bufs = self.bufs[mark]
real_bufs = [self.tvm_array_dict[self.subgraphs[mark].index[x]] for x in bufs]
func(*real_bufs)
class PyTIRSubGraphPartition(object):
def __init__(self):
pass
def __call__(self, graph):
"""
graph: PyTIRGraph
"""
pass
def is_boundary(self, pre, post, graph):
pre_stat = graph.op_stat_dict[pre]
post_stat = graph.op_stat_dict[post]
# root op must be separated
if pre_stat.must_root:
return True
if pre_stat.num_consumers > 1:
# do not fuse multi-output
return True
if pre_stat.reductive and post_stat.reductive:
# do not fuse reductive nodes
return True
if pre_stat.injective and post_stat.injective:
return ((not pre_stat.head) and post_stat.head)
if pre_stat.injective and post_stat.reductive:
return not pre_stat.head
if pre_stat.reductive and post_stat.injective:
return post_stat.head
return True
def partion_graph(self, graph):
"""
graph: PyTIRGraph
returns:
list of list of tvm ComputeOp
dict from tvm ComputeOp to list of DataPort
"""
# -1 for not visited
graph_mark = {x: -1 for x in graph.op_list}
# setup initial nodes, all compute ops are included
# this guarantees no node is left
visit_stack = list(reversed(graph.op_list))
visited = set()
global_mark = -1
while len(visit_stack) > 0:
cur = visit_stack.pop()
if cur in visited:
continue
if graph_mark[cur] < 0:
# not marked
# new subgraph
global_mark += 1
graph_mark[cur] = global_mark
graph_mark[cur] = global_mark
# all the outputs
for i in range(cur.num_outputs):
t = cur.output(i)
if t in graph.down_graph:
for op in graph.down_graph[t]:
if not self.is_boundary(cur, op, graph):
if graph_mark[op] < 0:
# mark it as the same subgraph
graph_mark[op] = global_mark
# only add node within the same subgraph
visit_stack.append(op)
# all the inputs
for t in cur.input_tensors:
if isinstance(t.op, tvm.te.tensor.ComputeOp):
if not self.is_boundary(t.op, cur, graph):
if graph_mark[t.op] < 0:
# mark it as the same subgraph
graph_mark[t.op] = global_mark
# only add node within the same subgraph
visit_stack.append(t.op)
# add visit
visited.add(cur)
order = self.validate_partition(graph_mark)
return self.subgraph_rewrite(graph_mark, graph), order
def subgraph_rewrite(self, graph_mark, tgraph):
ret = tvm.tg.subgraph_partition(graph_mark, tgraph.root_ops)
op_map = {}
inputs_set = set(tgraph.inputs)
outputs_set = set(tgraph.outputs)
labels_set = set(tgraph.labels)
weights_set = set(tgraph.weights)
gradients_set = set(tgraph.gradients)
updates_set = set(tgraph.updates)
subgraphs = {}
for (old_op, mark) in graph_mark.items():
new_op = ret[old_op]
op_map[new_op] = old_op
if mark not in subgraphs:
subgraphs[mark] = PyTIRSubGraph()
for i, t in enumerate(old_op.input_tensors):
if t in inputs_set:
# new -> old
subgraphs[mark].inputs[new_op.input_tensors[i]] = t
if t in labels_set:
subgraphs[mark].labels[new_op.input_tensors[i]] = t
if t == tgraph.lr:
subgraphs[mark].lr[new_op.input_tensors[i]] = t
if t in weights_set:
subgraphs[mark].weights[new_op.input_tensors[i]] = t
# this case is special:
# ret contains the new placeholder op because
# this tensor is an intermediate input
if new_op.input_tensors[i].op in ret:
subgraphs[mark].inputs[new_op.input_tensors[i]] = \
ret[new_op.input_tensors[i].op].output(t.value_index)
another_mark = graph_mark[ret[new_op.input_tensors[i].op]]
if another_mark not in subgraphs:
subgraphs[another_mark] = PyTIRSubGraph()
subgraphs[another_mark].outputs[ret[ret[new_op.input_tensors[i].op]].output(t.value_index)] = \
ret[new_op.input_tensors[i].op].output(t.value_index)
for i in range(old_op.num_outputs):
t = old_op.output(i)
if t in outputs_set:
subgraphs[mark].outputs[new_op.output(i)] = t
if t in gradients_set:
subgraphs[mark].gradients[new_op.output(i)] = t
if t in updates_set:
subgraphs[mark].updates[new_op.output(i)] = t
if t == tgraph.loss:
subgraphs[mark].loss[new_op.output(i)] = t
for mark, subgraph in subgraphs.items():
subgraph.index = {
**subgraph.inputs, **subgraph.outputs, **subgraph.labels, **subgraph.loss, \
**subgraph.weights, **subgraph.gradients, **subgraph.lr, **subgraph.updates}
return subgraphs, op_map
def validate_partition(self, graph_mark):
# dst -> src
order = []
ref = {}
max_mark = 0
for (op, mark) in graph_mark.items():
max_mark = max(mark, max_mark)
for inp in op.input_tensors:
if inp.op in graph_mark:
src_mark = graph_mark[inp.op]
if src_mark != mark:
if mark not in ref:
ref[mark] = set()
ref[mark].add(src_mark)
visited = set()
visiting = set()
def func(val):
if val in visited:
return
if val in visiting:
raise RuntimeError(
"The subgraph relation has a circular reference.")
visiting.add(val)
if val not in ref:
order.append(val)
visiting.remove(val)
visited.add(val)
return
for inp in ref[val]:
func(inp)
order.append(val)
visiting.remove(val)
visited.add(val)
return
for mark in range(max_mark+1):
func(mark)
return order
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume-related Utilities and helpers."""
import ast
import functools
import math
import operator
import re
import time
import uuid
from Crypto.Random import random
import eventlet
from eventlet import tpool
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder.volume import group_types
from cinder.volume import throttling
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def null_safe_str(s):
return str(s) if s else ''
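# Small illustrative check (not part of the original module): falsy values,
# including None and the empty string, collapse to '' while anything else is
# stringified.
def _null_safe_str_example():
    assert null_safe_str(None) == ''
    assert null_safe_str('') == ''
    assert null_safe_str('in-use') == 'in-use'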
def _usage_from_volume(context, volume_ref, **kw):
now = timeutils.utcnow()
launched_at = volume_ref['launched_at'] or now
created_at = volume_ref['created_at'] or now
volume_status = volume_ref['status']
if volume_status == 'error_managing_deleting':
volume_status = 'deleting'
usage_info = dict(
tenant_id=volume_ref['project_id'],
host=volume_ref['host'],
user_id=volume_ref['user_id'],
availability_zone=volume_ref['availability_zone'],
volume_id=volume_ref['id'],
volume_type=volume_ref['volume_type_id'],
display_name=volume_ref['display_name'],
launched_at=launched_at.isoformat(),
created_at=created_at.isoformat(),
status=volume_status,
snapshot_id=volume_ref['snapshot_id'],
size=volume_ref['size'],
replication_status=volume_ref['replication_status'],
replication_extended_status=volume_ref['replication_extended_status'],
replication_driver_data=volume_ref['replication_driver_data'],
metadata=volume_ref.get('volume_metadata'),)
usage_info.update(kw)
try:
attachments = db.volume_attachment_get_all_by_volume_id(
context, volume_ref['id'])
usage_info['volume_attachment'] = attachments
glance_meta = db.volume_glance_metadata_get(context, volume_ref['id'])
if glance_meta:
usage_info['glance_metadata'] = glance_meta
except exception.GlanceMetadataNotFound:
pass
except exception.VolumeNotFound:
LOG.debug("Can not find volume %s at notify usage", volume_ref['id'])
return usage_info
def _usage_from_backup(backup, **kw):
num_dependent_backups = backup.num_dependent_backups
usage_info = dict(tenant_id=backup.project_id,
user_id=backup.user_id,
availability_zone=backup.availability_zone,
backup_id=backup.id,
host=backup.host,
display_name=backup.display_name,
created_at=str(backup.created_at),
status=backup.status,
volume_id=backup.volume_id,
size=backup.size,
service_metadata=backup.service_metadata,
service=backup.service,
fail_reason=backup.fail_reason,
parent_id=backup.parent_id,
num_dependent_backups=num_dependent_backups,
snapshot_id=backup.snapshot_id,
)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_volume_usage(context, volume, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume, **extra_usage_info)
rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_backup_usage(context, backup, event_suffix,
extra_usage_info=None,
host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_backup(backup, **extra_usage_info)
rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix,
usage_info)
def _usage_from_snapshot(snapshot, context, **extra_usage_info):
# (niedbalski) a snapshot might be related to a deleted
# volume; if that's the case, the volume information is still
# required for filling the usage_info, so we force reading
# the volume data even if the volume has been deleted.
context.read_deleted = "yes"
volume = db.volume_get(context, snapshot.volume_id)
usage_info = {
'tenant_id': snapshot.project_id,
'user_id': snapshot.user_id,
'availability_zone': volume['availability_zone'],
'volume_id': snapshot.volume_id,
'volume_size': snapshot.volume_size,
'snapshot_id': snapshot.id,
'display_name': snapshot.display_name,
'created_at': str(snapshot.created_at),
'status': snapshot.status,
'deleted': null_safe_str(snapshot.deleted),
'metadata': null_safe_str(snapshot.metadata),
}
usage_info.update(extra_usage_info)
return usage_info
@utils.if_notifications_enabled
def notify_about_snapshot_usage(context, snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_snapshot(snapshot, context, **extra_usage_info)
rpc.get_notifier('snapshot', host).info(context,
'snapshot.%s' % event_suffix,
usage_info)
def _usage_from_capacity(capacity, **extra_usage_info):
capacity_info = {
'name_to_id': capacity['name_to_id'],
'total': capacity['total'],
'free': capacity['free'],
'allocated': capacity['allocated'],
'provisioned': capacity['provisioned'],
'virtual_free': capacity['virtual_free'],
'reported_at': capacity['reported_at']
}
capacity_info.update(extra_usage_info)
return capacity_info
@utils.if_notifications_enabled
def notify_about_capacity_usage(context, capacity, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_capacity(capacity, **extra_usage_info)
rpc.get_notifier('capacity', host).info(context,
'capacity.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_usage(context, volume, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_usage_info)
rpc.get_notifier('replication', host).info(context,
'replication.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_error(context, volume, suffix,
extra_error_info=None, host=None):
if not host:
host = CONF.host
if not extra_error_info:
extra_error_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_error_info)
rpc.get_notifier('replication', host).error(context,
'replication.%s' % suffix,
usage_info)
def _usage_from_consistencygroup(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
consistencygroup_id=group_ref.id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_consistencygroup_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_consistencygroup(group,
**extra_usage_info)
rpc.get_notifier("consistencygroup", host).info(
context,
'consistencygroup.%s' % event_suffix,
usage_info)
def _usage_from_group(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
group_id=group_ref.id,
group_type=group_ref.group_type_id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_group_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group(group,
**extra_usage_info)
rpc.get_notifier("group", host).info(
context,
'group.%s' % event_suffix,
usage_info)
def _usage_from_cgsnapshot(cgsnapshot, **kw):
usage_info = dict(
tenant_id=cgsnapshot.project_id,
user_id=cgsnapshot.user_id,
cgsnapshot_id=cgsnapshot.id,
name=cgsnapshot.name,
consistencygroup_id=cgsnapshot.consistencygroup_id,
created_at=cgsnapshot.created_at.isoformat(),
status=cgsnapshot.status)
usage_info.update(kw)
return usage_info
def _usage_from_group_snapshot(group_snapshot, **kw):
usage_info = dict(
tenant_id=group_snapshot.project_id,
user_id=group_snapshot.user_id,
group_snapshot_id=group_snapshot.id,
name=group_snapshot.name,
group_id=group_snapshot.group_id,
group_type=group_snapshot.group_type_id,
created_at=group_snapshot.created_at.isoformat(),
status=group_snapshot.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_cgsnapshot(cgsnapshot,
**extra_usage_info)
rpc.get_notifier("cgsnapshot", host).info(
context,
'cgsnapshot.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_group_snapshot_usage(context, group_snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group_snapshot(group_snapshot,
**extra_usage_info)
rpc.get_notifier("group_snapshot", host).info(
context,
'group_snapshot.%s' % event_suffix,
usage_info)
def _check_blocksize(blocksize):
# Check if volume_dd_blocksize is valid
try:
# Rule out zero-sized/negative/float dd blocksize which
# cannot be caught by strutils
if blocksize.startswith(('-', '0')) or '.' in blocksize:
raise ValueError
strutils.string_to_bytes('%sB' % blocksize)
except ValueError:
LOG.warning(_LW("Incorrect value error: %(blocksize)s, "
"it may indicate that \'volume_dd_blocksize\' "
"was configured incorrectly. Fall back to default."),
{'blocksize': blocksize})
# Fall back to default blocksize
CONF.clear_override('volume_dd_blocksize')
blocksize = CONF.volume_dd_blocksize
return blocksize
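# Worked example (editor addition), derived directly from the checks above:
#
#     _check_blocksize('1M')    # passes strutils validation -> returned as-is
#     _check_blocksize('0.5M')  # contains '.' -> warning logged, falls back to
#                               # the configured volume_dd_blocksize default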
def check_for_odirect_support(src, dest, flag='oflag=direct'):
# Check whether O_DIRECT is supported
try:
# iflag=direct and if=/dev/zero combination does not work
# error: dd: failed to open '/dev/zero': Invalid argument
if (src == '/dev/zero' and flag == 'iflag=direct'):
return False
else:
utils.execute('dd', 'count=0', 'if=%s' % src,
'of=%s' % dest,
flag, run_as_root=True)
return True
except processutils.ProcessExecutionError:
return False
def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize,
sync=False, execute=utils.execute, ionice=None,
sparse=False):
cmd = prefix[:]
if ionice:
cmd.extend(('ionice', ionice))
blocksize = _check_blocksize(blocksize)
size_in_bytes = size_in_m * units.Mi
cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
'count=%d' % size_in_bytes, 'bs=%s' % blocksize))
# Use O_DIRECT to avoid thrashing the system buffer cache
odirect = check_for_odirect_support(srcstr, deststr, 'iflag=direct')
cmd.append('iflag=count_bytes,direct' if odirect else 'iflag=count_bytes')
if check_for_odirect_support(srcstr, deststr, 'oflag=direct'):
cmd.append('oflag=direct')
odirect = True
# If the volume is being unprovisioned then
# request the data is persisted before returning,
# so that it's not discarded from the cache.
conv = []
if sync and not odirect:
conv.append('fdatasync')
if sparse:
conv.append('sparse')
if conv:
conv_options = 'conv=' + ",".join(conv)
cmd.append(conv_options)
# Perform the copy
start_time = timeutils.utcnow()
execute(*cmd, run_as_root=True)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
mbps = (size_in_m / duration)
LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, "
"size %(sz).2f MB, duration %(duration).2f sec",
{"src": srcstr,
"dest": deststr,
"sz": size_in_m,
"duration": duration})
LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"),
{'size_in_m': size_in_m, 'mbps': mbps})
def _open_volume_with_path(path, mode):
try:
with utils.temporary_chown(path):
handle = open(path, mode)
return handle
except Exception:
LOG.error(_LE("Failed to open volume from %(path)s."), {'path': path})
def _transfer_data(src, dest, length, chunk_size):
"""Transfer data between files (Python IO objects)."""
chunks = int(math.ceil(length / chunk_size))
remaining_length = length
LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.",
{'chunks': chunks, 'bytes': chunk_size})
for chunk in range(0, chunks):
before = time.time()
data = tpool.execute(src.read, min(chunk_size, remaining_length))
# If we have reached end of source, discard any extraneous bytes from
# destination volume if trim is enabled and stop writing.
if data == b'':
break
tpool.execute(dest.write, data)
remaining_length -= len(data)
delta = (time.time() - before)
rate = (chunk_size / delta) / units.Ki
LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).",
{'chunk': chunk + 1, 'chunks': chunks, 'rate': rate})
# yield to any other pending operations
eventlet.sleep(0)
tpool.execute(dest.flush)
def _copy_volume_with_file(src, dest, size_in_m):
src_handle = src
if isinstance(src, six.string_types):
src_handle = _open_volume_with_path(src, 'rb')
dest_handle = dest
if isinstance(dest, six.string_types):
dest_handle = _open_volume_with_path(dest, 'wb')
if not src_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, source device unavailable."))
if not dest_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, destination device unavailable."))
start_time = timeutils.utcnow()
_transfer_data(src_handle, dest_handle, size_in_m * units.Mi, units.Mi * 4)
duration = max(1, timeutils.delta_seconds(start_time, timeutils.utcnow()))
if isinstance(src, six.string_types):
src_handle.close()
if isinstance(dest, six.string_types):
dest_handle.close()
mbps = (size_in_m / duration)
LOG.info(_LI("Volume copy completed (%(size_in_m).2f MB at "
"%(mbps).2f MB/s)."),
{'size_in_m': size_in_m, 'mbps': mbps})
def copy_volume(src, dest, size_in_m, blocksize, sync=False,
execute=utils.execute, ionice=None, throttle=None,
sparse=False):
"""Copy data from the source volume to the destination volume.
The parameters 'src' and 'dest' are both typically of type str, which
represents the path to each volume on the filesystem. Connectors can
optionally return a volume handle of type RawIOBase for volumes that are
not available on the local filesystem for open/close operations.
If either 'src' or 'dest' are not of type str, then they are assumed to be
of type RawIOBase or any derivative that supports file operations such as
read and write. In this case, the handles are treated as file handles
instead of file paths and, at present moment, throttling is unavailable.
"""
if (isinstance(src, six.string_types) and
isinstance(dest, six.string_types)):
if not throttle:
throttle = throttling.Throttle.get_default()
with throttle.subcommand(src, dest) as throttle_cmd:
_copy_volume_with_path(throttle_cmd['prefix'], src, dest,
size_in_m, blocksize, sync=sync,
execute=execute, ionice=ionice,
sparse=sparse)
else:
_copy_volume_with_file(src, dest, size_in_m)
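# Illustrative usage sketch (editor addition): the two invocation styles
# described in the docstring. The device paths and handles are hypothetical.
#
#     # path-based copy (throttled dd under the hood)
#     copy_volume('/dev/mapper/src-vol', '/dev/mapper/dst-vol',
#                 size_in_m=1024, blocksize=CONF.volume_dd_blocksize)
#
#     # handle-based copy, e.g. RawIOBase objects returned by a connector
#     copy_volume(src_handle, dest_handle, size_in_m=1024,
#                 blocksize=CONF.volume_dd_blocksize)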
def clear_volume(volume_size, volume_path, volume_clear=None,
volume_clear_size=None, volume_clear_ionice=None,
throttle=None):
"""Unprovision old volumes to prevent data leaking between users."""
if volume_clear is None:
volume_clear = CONF.volume_clear
if volume_clear_size is None:
volume_clear_size = CONF.volume_clear_size
if volume_clear_size == 0:
volume_clear_size = volume_size
if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice
LOG.info(_LI("Performing secure delete on volume: %s"), volume_path)
# We pass sparse=False explicitly here so that zero blocks are not
# skipped in order to clear the volume.
if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size,
CONF.volume_dd_blocksize,
sync=True, execute=utils.execute,
ionice=volume_clear_ionice,
throttle=throttle, sparse=False)
else:
raise exception.InvalidConfigurationValue(
option='volume_clear',
value=volume_clear)
def supports_thin_provisioning():
return brick_lvm.LVM.supports_thin_provisioning(
utils.get_root_helper())
def get_all_physical_volumes(vg_name=None):
return brick_lvm.LVM.get_all_physical_volumes(
utils.get_root_helper(),
vg_name)
def get_all_volume_groups(vg_name=None):
return brick_lvm.LVM.get_all_volume_groups(
utils.get_root_helper(),
vg_name)
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [random.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
random.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([random.choice(symbols) for _i in range(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
random.shuffle(password)
return ''.join(password)
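def _example_generate_password():
    # Illustrative sketch (editor addition): with the default symbol groups,
    # any password long enough to cover all three groups contains at least one
    # digit, one upper-case and one lower-case character.
    pw = generate_password(length=16)
    assert len(pw) == 16
    assert any(c.isdigit() for c in pw)
    assert any(c.isupper() for c in pw)
    assert any(c.islower() for c in pw)
    return pw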
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# Use the same implementation as the password generation.
return generate_password(length, symbolgroups)
DEFAULT_POOL_NAME = '_pool0'
def extract_host(host, level='backend', default_pool_name=False):
"""Extract Host, Backend or Pool information from host string.
:param host: String for host, which could include host@backend#pool info
:param level: Indicate which level of information should be extracted
from host string. Level can be 'host', 'backend' or 'pool',
default value is 'backend'
:param default_pool_name: this flag specify what to do if level == 'pool'
and there is no 'pool' info encoded in host
string. default_pool_name=True will return
DEFAULT_POOL_NAME, otherwise we return None.
Default value of this parameter is False.
:return: expected information, string or None
:raises: exception.InvalidVolume
For example:
host = 'HostA@BackendB#PoolC'
ret = extract_host(host, 'host')
# ret is 'HostA'
ret = extract_host(host, 'backend')
# ret is 'HostA@BackendB'
ret = extract_host(host, 'pool')
# ret is 'PoolC'
host = 'HostX@BackendY'
ret = extract_host(host, 'pool')
# ret is None
ret = extract_host(host, 'pool', True)
# ret is '_pool0'
"""
if host is None:
msg = _("volume is not assigned to a host")
raise exception.InvalidVolume(reason=msg)
if level == 'host':
# make sure pool is not included
hst = host.split('#')[0]
return hst.split('@')[0]
elif level == 'backend':
return host.split('#')[0]
elif level == 'pool':
lst = host.split('#')
if len(lst) == 2:
return lst[1]
elif default_pool_name is True:
return DEFAULT_POOL_NAME
else:
return None
def append_host(host, pool):
"""Encode pool into host info."""
if not host or not pool:
return host
new_host = "#".join([host, pool])
return new_host
def matching_backend_name(src_volume_type, volume_type):
if src_volume_type.get('volume_backend_name') and \
volume_type.get('volume_backend_name'):
return src_volume_type.get('volume_backend_name') == \
volume_type.get('volume_backend_name')
else:
return False
def hosts_are_equivalent(host_1, host_2):
# In case host_1 or host_2 are None
if not (host_1 and host_2):
return host_1 == host_2
return extract_host(host_1) == extract_host(host_2)
def read_proc_mounts():
"""Read the /proc/mounts file.
It's a dummy function but it eases the writing of unit tests as mocking
__builtin__open() for a specific file only is not trivial.
"""
with open('/proc/mounts') as mounts:
return mounts.readlines()
def extract_id_from_volume_name(vol_name):
regex = re.compile(
CONF.volume_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(vol_name)
return match.group('uuid') if match else None
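# Worked example (editor addition), assuming the default volume_name_template
# of 'volume-%s':
#
#     extract_id_from_volume_name('volume-<uuid>')   # -> '<uuid>'
#     extract_id_from_volume_name('something-else')  # -> None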
def check_already_managed_volume(vol_id):
"""Check cinder db for already managed volume.
:param vol_id: volume id parameter
:returns: bool -- return True, if db entry with specified
volume id exists, otherwise return False
"""
try:
return (vol_id and isinstance(vol_id, six.string_types) and
uuid.UUID(vol_id, version=4) and
objects.Volume.exists(context.get_admin_context(), vol_id))
except ValueError:
return False
def extract_id_from_snapshot_name(snap_name):
"""Return a snapshot's ID from its name on the backend."""
regex = re.compile(
CONF.snapshot_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(snap_name)
return match.group('uuid') if match else None
def paginate_entries_list(entries, marker, limit, offset, sort_keys,
sort_dirs):
"""Paginate a list of entries.
:param entries: list of dictionaries
:marker: The last element previously returned
:limit: The maximum number of items to return
:offset: The number of items to skip from the marker or from the first
element.
:sort_keys: A list of keys in the dictionaries to sort by
:sort_dirs: A list of sort directions, where each is either 'asc' or 'desc'
"""
comparers = [(operator.itemgetter(key.strip()), multiplier)
for (key, multiplier) in zip(sort_keys, sort_dirs)]
def comparer(left, right):
for fn, d in comparers:
left_val = fn(left)
right_val = fn(right)
if isinstance(left_val, dict):
left_val = sorted(left_val.values())[0]
if isinstance(right_val, dict):
right_val = sorted(right_val.values())[0]
if left_val == right_val:
continue
if d == 'asc':
return -1 if left_val < right_val else 1
else:
return -1 if left_val > right_val else 1
else:
return 0
sorted_entries = sorted(entries, key=functools.cmp_to_key(comparer))
start_index = 0
if offset is None:
offset = 0
if marker:
start_index = -1
for i, entry in enumerate(sorted_entries):
if entry['reference'] == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker not found: %s') % marker
raise exception.InvalidInput(reason=msg)
range_end = start_index + limit
return sorted_entries[start_index + offset:range_end + offset]
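def _example_paginate_entries_list():
    # Illustrative sketch (editor addition): paginate a tiny manageable-volumes
    # style listing by size, ascending, two entries per page.
    entries = [
        {'reference': {'source-name': 'vol-a'}, 'size': 3},
        {'reference': {'source-name': 'vol-b'}, 'size': 1},
        {'reference': {'source-name': 'vol-c'}, 'size': 2},
    ]
    page = paginate_entries_list(entries, marker=None, limit=2, offset=0,
                                 sort_keys=['size'], sort_dirs=['asc'])
    # page == [{'reference': {'source-name': 'vol-b'}, 'size': 1},
    #          {'reference': {'source-name': 'vol-c'}, 'size': 2}]
    return page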
def convert_config_string_to_dict(config_string):
"""Convert config file replication string to a dict.
The only supported form is as follows:
"{'key-1'='val-1' 'key-2'='val-2'...}"
:param config_string: Properly formatted string to convert to dict.
:response: dict of string values
"""
resultant_dict = {}
try:
st = config_string.replace("=", ":")
st = st.replace(" ", ", ")
resultant_dict = ast.literal_eval(st)
except Exception:
LOG.warning(_LW("Error encountered translating config_string: "
"%(config_string)s to dict"),
{'config_string': config_string})
return resultant_dict
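# Worked example (editor addition): the supported replication-device string
# form and its resulting dict.
#
#     convert_config_string_to_dict("{'key-1'='val-1' 'key-2'='val-2'}")
#     # -> {'key-1': 'val-1', 'key-2': 'val-2'}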
def create_encryption_key(context, key_manager, volume_type_id):
encryption_key_id = None
if volume_types.is_encrypted(context, volume_type_id):
volume_type_encryption = (
volume_types.get_volume_type_encryption(context,
volume_type_id))
cipher = volume_type_encryption.cipher
length = volume_type_encryption.key_size
algorithm = cipher.split('-')[0] if cipher else None
encryption_key_id = key_manager.create_key(
context,
algorithm=algorithm,
length=length)
return encryption_key_id
def is_replicated_str(str):
spec = (str or '').split()
return (len(spec) == 2 and
spec[0] == '<is>' and strutils.bool_from_string(spec[1]))
def is_replicated_spec(extra_specs):
return (extra_specs and
is_replicated_str(extra_specs.get('replication_enabled')))
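# Worked example (editor addition): replication is only considered enabled for
# the '<is> <bool>' scheduler syntax.
#
#     is_replicated_str('<is> True')                            # True
#     is_replicated_str('True')                                 # False
#     is_replicated_spec({'replication_enabled': '<is> True'})  # True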
def group_get_by_id(group_id):
ctxt = context.get_admin_context()
group = db.group_get(ctxt, group_id)
return group
def is_group_a_cg_snapshot_type(group_or_snap):
LOG.debug("Checking if %s is a consistent snapshot group",
group_or_snap)
if group_or_snap["group_type_id"] is not None:
spec = group_types.get_group_type_specs(
group_or_snap["group_type_id"],
key="consistent_group_snapshot_enabled"
)
return spec == "<is> True"
return False
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Persistent identifier fetchers.
A proper fetcher is defined as a function that returns a
:data:`invenio_pidstore.fetchers.FetchedPID` instance.
E.g.
.. code-block:: python
def my_fetcher(record_uuid, data):
return FetchedPID(
provider=MyRecordIdProvider,
pid_type=MyRecordIdProvider.pid_type,
pid_value=extract_pid_value(data),
)
To see more about providers see :mod:`invenio_pidstore.providers`.
"""
from __future__ import absolute_import, print_function
from invenio_pidstore.fetchers import FetchedPID
from oarepo_communities.converters import CommunityPIDValue
from oarepo_communities.proxies import current_oarepo_communities
from .providers import NRNresultsIdProvider
def nr_nresults_id_fetcher(record_uuid, data):
"""Fetch a record's identifiers.
:param record_uuid: The record UUID.
:param data: The record metadata.
:returns: A :data:`invenio_pidstore.fetchers.FetchedPID` instance.
"""
id_field = "control_number"
return FetchedPID( # FetchedPID is just a plain namedtuple
provider=NRNresultsIdProvider,
pid_type=NRNresultsIdProvider.pid_type,
pid_value=CommunityPIDValue(
str(data[id_field]),
current_oarepo_communities.get_primary_community_field(data))
)
|
import logging,os
from rest import Restclient
LOCAL_DATA_FOLDER = '/DATA'
GENOTYPE_FOLDER = '/GENOTYPE'
REST_HOST = os.environ['REST_HOST']
REST_USERNAME = os.environ['REST_USERNAME']
REST_PASSWORD = os.environ['REST_PASSWORD']
restclient = Restclient(REST_HOST,REST_USERNAME,REST_PASSWORD)
class CeleryProgressLogHandler(logging.StreamHandler):
def __init__(self,task):
logging.StreamHandler.__init__(self)
self.task = task
def emit(self,record):
if 'progress' in record.__dict__:
progress = record.__dict__['progress']
msg = self.format(record)
if 'task' in record.__dict__:
msg = record.__dict__['task']
body = {'progress':progress,'task':msg}
self.task.update_state(state='PROGRESS',meta=body)
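# Illustrative usage sketch (editor addition), assuming a bound Celery task:
# attach the handler to a logger and pass 'progress' (and optionally 'task')
# via the `extra` argument so the task state is updated as work proceeds.
#
#     logger = logging.getLogger('imputation')
#     logger.addHandler(CeleryProgressLogHandler(self))  # `self` is the task
#     logger.info('imputing genotypes',
#                 extra={'progress': 42, 'task': 'imputation'})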
|
import os
from collections import OrderedDict
import matplotlib.pyplot as plt
import pandas
_ramachandran_densities = pandas.read_csv(
'data/rama500-general.data',
skiprows=6,
delimiter=' ',
names=['phi', 'psi', 'value']
)
"""
DSSP output:
H = α-helix
B = residue in isolated β-bridge
E = extended strand, participates in β ladder
G = 3-helix (3₁₀ helix)
I = 5 helix (π-helix)
T = hydrogen bonded turn
S = bend
Colors extracted from rcsb.org.
"""
DSSP_to_color = {
'H': '#ED6161',
'B': '#CCA200',
'E': '#FFFB00',
'G': '#FFC2C2',
'I': '#900000',
'T': '#990099',
'S': '#0000FF',
'-': 'black',
}
def ramachandran_surface():
"""
Plot density surface for generic ramachandran
"""
fontsize = 18
ticks = [-180, -90, 0, 90, 180]
plt.contourf(
list(OrderedDict.fromkeys(_ramachandran_densities['phi'])),
list(OrderedDict.fromkeys(_ramachandran_densities['psi'])),
_ramachandran_densities['value'].values.reshape(180, 180).T,
levels=[0, 0.0005, 0.02, 1],
colors=['#FFFFFF', '#B3E8FF', '#7FD9FF']
)
plt.xlabel(r'$\phi$', fontsize=fontsize)
plt.ylabel(r'$\psi$', fontsize=fontsize)
plt.xticks(ticks)
plt.yticks(ticks)
plt.tick_params(direction="out")
plt.margins(0.05)
ax = plt.axes()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def ramachandran(torsion_angles, fragment, target_pdb=None,
output_writer=None, output_dir=None):
"""
Plot ramachandran of a set of torsion angles for a given fragment
:param torsion_angles: Dictionary with torsion angles phi and psi
:param fragment: Fragment identifier, used for displaying purposes
"""
plt.figure()
ramachandran_surface()
plt.title('Ramachandran plot for ' + fragment)
plt.scatter(
x=torsion_angles['phi'],
y=torsion_angles['psi'],
s=[1.05 ** x for x in torsion_angles['identity']],
c=[DSSP_to_color[ss] for ss in torsion_angles['central_ss']],
marker='o',
alpha=0.5,
)
if target_pdb and (target_pdb in list(torsion_angles['pdb'])):
i = list(torsion_angles['pdb']).index(target_pdb)
plt.scatter(
x=torsion_angles['phi'][i],
y=torsion_angles['psi'][i],
marker='D',
c='red',
s=50
)
if output_writer:
output_writer.savefig(dpi=150)
if output_dir:
plt.savefig(
os.path.join(output_dir, 'ramachandran', fragment + '.svg'),
format='svg', dpi=300
)
plt.close()
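# Illustrative usage sketch (editor addition): torsion_angles is expected to
# hold parallel sequences for 'phi', 'psi', 'identity', 'central_ss' and
# 'pdb'; the values below are made up for demonstration only.
#
#     angles = {'phi': [-60.0, -135.0], 'psi': [-45.0, 135.0],
#               'identity': [80, 60], 'central_ss': ['H', 'E'],
#               'pdb': ['1abc', '2xyz']}
#     ramachandran(angles, fragment='frag_001', output_dir='plots')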
|
""" This is a dummy file used only to avoid errors in ReadTheDocs. The real BF.py is created during the setup once swig is run. """
def CP():
pass
def LeP():
pass
def LaP():
pass
def HoPpro():
pass
def HoPphy():
pass
def FS():
pass
def ELMReLU():
pass
def ELMSigmoid():
pass
def ELMTanh():
pass
def ELMSin():
pass
def ELMSwish():
pass
def nCP():
pass
def nLeP():
pass
def nFS():
pass
def nELMReLU():
pass
def nELMSigmoid():
pass
def nELMTanh():
pass
def nELMSin():
pass
def nELMSwish():
pass
|
seq = 'CTTCTCACGTACAACAAAATC'
symbol2number = {"A":0,"C":1,"G":2,"T":3}
def PatternToNumber(Pattern):
if not Pattern:
return 0
symbol = Pattern[-1]
prefix = Pattern[:-1]
return ((4*PatternToNumber(prefix))+symbol2number[symbol])
def NumberToPattern(index, k):
bases = ['A', 'C', 'G', 'T']
pattern = ''
for i in range(k):
pattern += bases[index % 4]
index = index // 4
return pattern[::-1]
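# Worked example (editor addition): each k-mer maps to its base-4 value using
# A=0, C=1, G=2, T=3, and NumberToPattern inverts the mapping.
#
#     PatternToNumber('AC')   # 4*0 + 1 == 1
#     PatternToNumber('GT')   # 4*2 + 3 == 11
#     NumberToPattern(11, 2)  # 'GT'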
def ComputingFrequencies(text,k):
FrequencyArray =[]
for i in range(0,((4**k))):
FrequencyArray.append(0)
for i in range(0,(len(text)-k+1)):
pattern = text[i:(i+k)]
j = PatternToNumber(pattern)
FrequencyArray[j] = FrequencyArray[j]+1
return FrequencyArray
def FasterFrequentWords(text,k):
FrequentPatterns = []
FrequencyArray = ComputingFrequencies(text,k)
maxCount = max(FrequencyArray)
for i in range(0,(4**k)):
if FrequencyArray[i] == maxCount:
pattern = NumberToPattern(i,k)
FrequentPatterns.append(pattern)
return FrequentPatterns
print(FasterFrequentWords("ACGCGGCTCTGAAA",2))
|
import os
from dotenv import load_dotenv, find_dotenv
# This will load all the environment variables from a .env file located in the project root (api)
load_dotenv(find_dotenv())
CONFIGURATION = {
"development": "config.DevConfig",
"testing": "config.TestConfig",
"production": "config.Config",
"default": "config.Config"
}
class Config(object):
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = 'a secret'
SQLALCHEMY_TRACK_MODIFICATIONS = False
NRO_SERVICE_ACCOUNT = os.getenv('NRO_SERVICE_ACCOUNT', 'nro_service_account')
SOLR_BASE_URL = os.getenv('SOLR_BASE_URL', None)
SOLR_SYNONYMS_API_URL = os.getenv('SOLR_SYNONYMS_API_URL', None)
NRO_EXTRACTOR_URI = os.getenv('NRO_EXTRACTOR_URI', None)
ALEMBIC_INI='migrations/alembic.ini'
# POSTGRESQL
DB_USER = os.getenv('DATABASE_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_PASSWORD','')
DB_NAME = os.getenv('DATABASE_NAME','')
DB_HOST = os.getenv('DATABASE_HOST','')
DB_PORT = os.getenv('DATABASE_PORT','5432')
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME,
)
## ORACLE - LEGACY NRO NAMESDB
NRO_USER = os.getenv('NRO_USER', '')
NRO_SCHEMA = os.getenv('NRO_SCHEMA', None)
NRO_PASSWORD = os.getenv('NRO_PASSWORD', '')
NRO_DB_NAME = os.getenv('NRO_DB_NAME', '')
NRO_HOST = os.getenv('NRO_HOST', '')
NRO_PORT = int(os.getenv('NRO_PORT', '1521'))
# JWT_OIDC Settings
JWT_OIDC_WELL_KNOWN_CONFIG = os.getenv('JWT_OIDC_WELL_KNOWN_CONFIG')
JWT_OIDC_ALGORITHMS = os.getenv('JWT_OIDC_ALGORITHMS')
JWT_OIDC_JWKS_URI = os.getenv('JWT_OIDC_JWKS_URI')
JWT_OIDC_ISSUER = os.getenv('JWT_OIDC_ISSUER')
JWT_OIDC_AUDIENCE = os.getenv('JWT_OIDC_AUDIENCE')
JWT_OIDC_CLIENT_SECRET = os.getenv('JWT_OIDC_CLIENT_SECRET')
JWT_OIDC_CACHING_ENABLED = os.getenv('JWT_OIDC_CACHING_ENABLED')
try:
JWT_OIDC_JWKS_CACHE_TIMEOUT = int(os.getenv('JWT_OIDC_JWKS_CACHE_TIMEOUT'))
except (TypeError, ValueError):
JWT_OIDC_JWKS_CACHE_TIMEOUT = 300
TESTING = False
DEBUG = False
class DevConfig(Config):
TESTING = False
DEBUG = True
class TestConfig(Config):
DEBUG = True
TESTING = True
# POSTGRESQL
DB_USER = os.getenv('DATABASE_TEST_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_TEST_PASSWORD','')
DB_NAME = os.getenv('DATABASE_TEST_NAME','')
DB_HOST = os.getenv('DATABASE_TEST_HOST','')
DB_PORT = os.getenv('DATABASE_TEST_PORT','5432')
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=int(DB_PORT),
name=DB_NAME,
)
# JWT OIDC settings
## JWT_OIDC_TEST_MODE will set jwt_manager to use the test keys defined below
JWT_OIDC_TEST_MODE = True
JWT_OIDC_TEST_AUDIENCE = os.getenv('JWT_OIDC_AUDIENCE')
JWT_OIDC_TEST_CLIENT_SECRET = os.getenv('JWT_OIDC_CLIENT_SECRET')
JWT_OIDC_TEST_ISSUER = 'https://sso-dev.pathfinder.gov.bc.ca/auth/realms/sbc'
JWT_OIDC_TEST_KEYS = {
"keys": [
{
"kid": "flask-jwt-oidc-test-client",
"kty": "RSA",
"alg": "RS256",
"use": "sig",
"n": "AN-fWcpCyE5KPzHDjigLaSUVZI0uYrcGcc40InVtl-rQRDmAh-C2W8H4_Hxhr5VLc6crsJ2LiJTV_E72S03pzpOOaaYV6-TzAjCou2GYJIXev7f6Hh512PuG5wyxda_TlBSsI-gvphRTPsKCnPutrbiukCYrnPuWxX5_cES9eStR",
"e": "AQAB"
}
]
}
JWT_OIDC_TEST_PRIVATE_KEY_JWKS = {
"keys": [
{
"kid": "flask-jwt-oidc-test-client",
"kty": "RSA",
"alg": "RS256",
"use": "sig",
"kty": "RSA",
"n": "AN-fWcpCyE5KPzHDjigLaSUVZI0uYrcGcc40InVtl-rQRDmAh-C2W8H4_Hxhr5VLc6crsJ2LiJTV_E72S03pzpOOaaYV6-TzAjCou2GYJIXev7f6Hh512PuG5wyxda_TlBSsI-gvphRTPsKCnPutrbiukCYrnPuWxX5_cES9eStR",
"e": "AQAB",
"d": "C0G3QGI6OQ6tvbCNYGCqq043YI_8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhskURaDwk4-8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh_xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0",
"p": "APXcusFMQNHjh6KVD_hOUIw87lvK13WkDEeeuqAydai9Ig9JKEAAfV94W6Aftka7tGgE7ulg1vo3eJoLWJ1zvKM",
"q": "AOjX3OnPJnk0ZFUQBwhduCweRi37I6DAdLTnhDvcPTrrNWuKPg9uGwHjzFCJgKd8KBaDQ0X1rZTZLTqi3peT43s",
"dp": "AN9kBoA5o6_Rl9zeqdsIdWFmv4DB5lEqlEnC7HlAP-3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhc",
"dq": "ANtbSY6njfpPploQsF9sU26U0s7MsuLljM1E8uml8bVJE1mNsiu9MgpUvg39jEu9BtM2tDD7Y51AAIEmIQex1nM",
"qi": "XLE5O360x-MhsdFXx8Vwz4304-MJg-oGSJXCK_ZWYOB_FGXFRTfebxCsSYi0YwJo-oNu96bvZCuMplzRI1liZw"
}
]
}
JWT_OIDC_TEST_PRIVATE_KEY_PEM = """
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDfn1nKQshOSj8xw44oC2klFWSNLmK3BnHONCJ1bZfq0EQ5gIfg
tlvB+Px8Ya+VS3OnK7Cdi4iU1fxO9ktN6c6TjmmmFevk8wIwqLthmCSF3r+3+h4e
ddj7hucMsXWv05QUrCPoL6YUUz7Cgpz7ra24rpAmK5z7lsV+f3BEvXkrUQIDAQAB
AoGAC0G3QGI6OQ6tvbCNYGCqq043YI/8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhs
kURaDwk4+8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh/
xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0CQQD13LrBTEDR44ei
lQ/4TlCMPO5bytd1pAxHnrqgMnWovSIPSShAAH1feFugH7ZGu7RoBO7pYNb6N3ia
C1idc7yjAkEA6Nfc6c8meTRkVRAHCF24LB5GLfsjoMB0tOeEO9w9Ous1a4o+D24b
AePMUImAp3woFoNDRfWtlNktOqLel5PjewJBAN9kBoA5o6/Rl9zeqdsIdWFmv4DB
5lEqlEnC7HlAP+3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhcCQQDb
W0mOp436T6ZaELBfbFNulNLOzLLi5YzNRPLppfG1SRNZjbIrvTIKVL4N/YxLvQbT
NrQw+2OdQACBJiEHsdZzAkBcsTk7frTH4yGx0VfHxXDPjfTj4wmD6gZIlcIr9lZg
4H8UZcVFN95vEKxJiLRjAmj6g273pu9kK4ymXNEjWWJn
-----END RSA PRIVATE KEY-----"""
|
# Execution time : 0.003847 seconds
# Solution Explanation
# A simple brute-force approach is enough: the grid is padded with three rows
# and columns of zeros so every four-term product (row, column and both
# diagonals) can be taken without bounds checks.
import time
width = 40
from functools import reduce
def solution():
v = list()
v.append([0]*23)
v.append([0]*23)
v.append([0]*23)
for line in open('input_p011.in','r'):
v.append(list(map(int,line.split())))
v[-1].extend([0,0,0])
v.append([0]*23)
v.append([0]*23)
v.append([0]*23)
ans = 0
for it1 in range(3,23):
for it2 in range(20):
ans = max(ans,reduce(lambda a,b: a*b,[v[it1][it2+k] for k in range(4)]))
ans = max(ans,reduce(lambda a,b: a*b,[v[it1+k][it2] for k in range(4)]))
ans = max(ans,reduce(lambda a,b: a*b,[v[it1-k][it2+k] for k in range(4)]))
ans = max(ans,reduce(lambda a,b: a*b,[v[it1+k][it2+k] for k in range(4)]))
return ans
if __name__=="__main__":
start_ = time.time()
print(' Answer -> %s '.center(width,'-') % ( solution() ))
print(' %f seconds '.center(width,'-') % ( time.time() - start_))
|
import sys
config = {
"Database": {
"Address": "localhost",
"Username": "root",
"Password": "",
"Name": "Houdini",
"Driver": "PyMySQL" if sys.platform == "win32" else "MySQLdb"
},
"Redis": {
"Address": "127.0.0.1",
"Port": 6379
},
"Servers": {
"Login": {
"Address": "127.0.0.1",
"Port": 6112,
"World": False,
"Plugins": [
"Example"
],
"Logging": {
"General": "logs/login.log",
"Errors": "logs/login-errors.log",
"Level": "INFO"
},
"LoginFailureLimit": 5,
"LoginFailureTimer": 3600
},
"Wind": {
"Id": "100",
"Address": "127.0.0.1",
"Port": 9875,
"World": True,
"Capacity": 200,
"CacheExpiry": 3600,
"Plugins": [
"Commands",
"Bot",
"Rank"
],
"Logging": {
"General": "logs/wind.log",
"Errors": "logs/wind-errors.log",
"Level": "DEBUG"
}
}
},
"Tables": {
"Four": [
{ "RoomId": 220, "Tables": [205, 206, 207] },
{ "RoomId": 221, "Tables": [200, 201, 202, 203, 204] }
],
"Mancala": [
{ "RoomId": 111, "Tables": [100, 101, 102, 103, 104] }
],
"Treasure": [
{ "RoomId": 422, "Tables": [300, 301, 302, 303, 304, 305, 306, 307] }
]
},
"Waddles": {
"Sled": [
{ "RoomId": 230, "Waddles": [
{ "Id": 100, "Seats": 4 },
{ "Id": 101, "Seats": 3 },
{ "Id": 102, "Seats": 2 },
{ "Id": 103, "Seats": 2 }
]}
],
"Card": [
{ "RoomId": 320, "Waddles": [
{ "Id": 200, "Seats": 2 },
{ "Id": 201, "Seats": 2 },
{ "Id": 202, "Seats": 2 },
{ "Id": 203, "Seats": 2 }
]}
],
"CardFire": [
{ "RoomId": 812, "Waddles": [
{ "Id": 300, "Seats": 2 },
{ "Id": 301, "Seats": 2 },
{ "Id": 302, "Seats": 3 },
{ "Id": 303, "Seats": 4 }
]}
]
},
"Treasure": {
"Food": [115, 114, 109, 112, 110, 105, 113, 106, 108, 107, 111, 128],
"Furniture": [305, 313, 504, 506, 500, 503, 501, 507, 505, 502, 616, 542, 340, 150, 149, 369, 370, 300],
"Clothing": [3028, 232, 412, 112, 184, 1056, 6012, 118, 774, 366, 103, 498, 469, 1082,
5196, 790, 4039, 326, 105, 122, 5080, 111],
"Gold": {
"Clothing": [2139, 2137, 5385, 3185, 5384, 5386, 6209, 2138, 1735, 3186, 1734, 2136, 4994, 4993, 3187],
"Furniture": [2132, 2131, 2130, 2129]
},
"BorderTabby": {
"Clothing": [24073, 24075, 24078, 24074, 24080, 24076, 24081,
24071, 24072, 24077, 24079, 24070, 4414, 122],
"Furniture": [2180, 2182, 2183]
},
"Dinosaur": {
"Clothing": [24031, 24030, 24033, 24029],
"Furniture": [2180, 2182, 2183]
}
}
}
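# Illustrative usage sketch (editor addition): consumers index into the nested
# dictionary, e.g.
#
#     login = config["Servers"]["Login"]
#     address, port = login["Address"], login["Port"]  # "127.0.0.1", 6112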
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from setuptools import setup, find_packages
setup(name='haveibeenpwned-rapid7-plugin',
version='4.0.2',
description='Determine if a user, domain, or password has been leaked via data available in the Have I Been Pwned database',
author='rapid7',
author_email='',
url='',
packages=find_packages(),
install_requires=['komand'], # Add third-party dependencies to requirements.txt, not here!
scripts=['bin/icon_haveibeenpwned']
)
|