#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geekshop.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
"""
OpenVINO DL Workbench
Class for a job that creates and exports an inference report
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import json
import os
from contextlib import closing
from sqlalchemy.orm import Session
from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.enumerates import JobTypesEnum, StatusEnum
from wb.main.jobs.interfaces.ijob import IJob
from wb.main.jobs.interfaces.job_observers import ExportInferenceReportDBObserver
from wb.main.models import SingleInferenceInfoModel, DownloadableArtifactsModel, InferenceReportExportJobModel
class InferenceReportExportJob(IJob):
job_type = JobTypesEnum.export_inference_report
_job_model_class = InferenceReportExportJobModel
ext = '.csv'
def __init__(self, job_id: int, **unused_kwargs):
super().__init__(job_id=job_id)
export_project_report_db_observer = ExportInferenceReportDBObserver(job_id=self._job_id)
self._job_state_subject.attach(export_project_report_db_observer)
self._attach_default_db_and_socket_observers()
def run(self):
self._job_state_subject.update_state(log='Starting inference report creation job.',
status=StatusEnum.running,
progress=0)
with closing(get_db_session_for_celery()) as session:
session: Session
job_model: InferenceReportExportJobModel = self.get_job_model(session)
artifact: DownloadableArtifactsModel = job_model.shared_artifact
artifact_path = artifact.build_full_artifact_path(ext=self.ext)
inference_job: SingleInferenceInfoModel = job_model.inference
per_layer_data = json.loads(inference_job.runtime_representation)
# create report
with open(artifact_path, 'w', newline='') as csvfile:
report_writer = csv.writer(csvfile, delimiter=';')
report_writer.writerow(
['Execution Order', 'Layer Name', 'Layer Type', 'Execution Time', 'Runtime Precision'])
for layer in per_layer_data:
report_writer.writerow([
layer['details'][0]['executionParams']['execOrder'],
layer['layerName'],
layer['layerType'],
layer['execTime'][0] if layer['execTime'][0] != 'not_executed' else 0,
layer['runtimePrecision'],
])
artifact.update(artifact_path)
artifact.write_record(session)
self._job_state_subject.update_state(log='Finishing inference report job.', status=StatusEnum.ready,
progress=100)
self._job_state_subject.detach_all_observers()
def on_failure(self, exception: Exception):
with closing(get_db_session_for_celery()) as session:
job_model = self.get_job_model(session)
artifact = job_model.downloadable_artifact
artifact_path = artifact.build_full_artifact_path(ext=self.ext)
if os.path.isfile(artifact_path):
os.remove(artifact_path)
super().on_failure(exception)
|
import sys
class KaffeError(Exception):
pass
def print_stderr(msg):
sys.stderr.write('%s\n' % msg)
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest initialization for handlers. No actual tests"""
__author__ = 'lschumacher@google.com (Lee Schumacher)'
import main
import webob
import urllib
from google.appengine.ext import webapp
def initialize_handler(
handler_class, action, repo='haiti', environ=None, params=None):
"""Initialize handler_cless and return initialized handler.
"""
params_str = ('?' + urllib.urlencode(params)) if params else ''
request = webapp.Request(webob.Request.blank(
'/' + repo + '/' + action + params_str, environ=environ).environ)
response = webapp.Response()
return handler_class(request, response, main.setup_env(request))
|
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
class Copy(function_node.FunctionNode):
"""Copies the input variable onto the specified device."""
def __init__(self, out_device):
self.out_device = out_device
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1
)
def forward(self, inputs):
x, = inputs
self._in_device = cuda.get_device_from_array(x).id
if int(self.out_device) == -1:
return cuda.to_cpu(x),
else:
return cuda.to_gpu(x, self.out_device),
def backward(self, indexes, grad_outputs):
return Copy(self._in_device).apply(grad_outputs)
def copy(x, dst):
"""Copies the input variable onto the specified device.
This function copies the array of input variable onto the device specified
by ``dst``. When ``dst == -1``, it copies the array onto the host memory.
This function supports copies from host to host, from host to device,
from device to device and from device to host.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Variable to be copied.
dst (int): Target device specifier.
Returns:
~chainer.Variable: Output variable.
.. admonition:: Example
>>> import chainer.backends.cuda as cuda
>>> x = np.random.uniform(-1, 1, (5, 10))
>>> cuda.get_device_from_array(x).id
-1
>>> y = F.copy(x, 0) # from host to device0
>>> cuda.get_device_from_array(y.data).id
0
>>> z = F.copy(y, -1) # from device0 to host
>>> cuda.get_device_from_array(z.data).id
-1
"""
y, = Copy(dst).apply((x,))
return y
|
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from program_helper.ast.ops import Node
from utilities.vocab_building_dictionary import DELIM
class DSymtabMod(Node):
def __init__(self, val,
type_helper=None,
child=None, sibling=None):
super().__init__(val, child, sibling)
self.type_helper = type_helper if type_helper is not None else DELIM
self.type = DSymtabMod.name()
@staticmethod
def name():
return 'DSymtabMod'
|
from datetime import date
from flask_wtf import FlaskForm
from wtforms import StringField
class ProducaoFinalizadasForm(FlaskForm):
nome = StringField('Nome:')
data_comeco = StringField('Data de início:')
data_coleta = StringField('Data de coleta:')
|
# coding: utf-8
"""
Automox Console API
API for use with the Automox Console # noqa: E501
OpenAPI spec version: 2021-11-16
Contact: support@automox.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OneOfDeviceFiltersInnerValueItems(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""OneOfDeviceFiltersInnerValueItems - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OneOfDeviceFiltersInnerValueItems, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OneOfDeviceFiltersInnerValueItems):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
import ugame
import stage
import utils
GAME = None
#######################################################
# Game
class Game(stage.Stage):
"""Base class for a game and its display"""
# TODO: add game state machine
# TODO: make each screen a state, and make a transition between them when player overlaps with trigger zones
# TODO: have a combat state
def __init__(self, display=None, fps=12):
# require singleton
global GAME
if GAME:
raise ValueError("Only one Game is allowed at a time")
GAME = self
# NOTE: PyGamer display is 160x128
if display:
super().__init__(display, fps)
else:
super().__init__(ugame.display, fps)
self.midX = int(self.width*0.5)
self.midY = int(self.height*0.5)
self.spriteSize = 16 # static size of sprites in pixels using the stage library
self.bounceX = self.width-self.spriteSize
self.bounceY = self.height-self.spriteSize
self.tilesX = int(self.width/self.spriteSize) # number of tiles that will fit in game
self.tilesY = int(self.height/self.spriteSize)
self.map = None
self.updaters = []
self.sprites = []
self.forceRefresh = False # force a refresh on the next frame
self._pauseObject = None # object that receives updates while game is paused
self.framesToWaitAfterPause = 2
self._curFramesWaiting = 0
def addToUpdates(self, obj):
if isinstance(obj, list):
self.updaters.extend(obj)
else:
self.updaters.append(obj)
def removeFromUpdates(self, obj):
if not isinstance(obj, list):
obj = [obj]  # wrap a single object so it can be iterated
for o in obj:
self.updaters.remove(o)
def addToSprites(self, obj, updater=True):
if isinstance(obj, list):
self.sprites.extend(obj)
else:
self.sprites.append(obj)
if updater:
self.addToUpdates(obj)
def removeFromSprites(self, obj, updater=True):
if not isinstance(obj, list):
obj = [obj]  # wrap a single object so it can be iterated
for o in obj:
self.sprites.remove(o)
if updater:
self.removeFromUpdates(obj)
def pause(self, pauseObject):
self._pauseObject = pauseObject
def resume(self):
self._pauseObject = None
self._curFramesWaiting = 0
def gameLoop(self):
while True:
if self._pauseObject:
self._pauseObject.update()
elif self._curFramesWaiting < self.framesToWaitAfterPause:
ugame.buttons.get_pressed() # clear out button press cache
self._curFramesWaiting += 1
else:
for obj in self.updaters:
obj.update()
if not self.forceRefresh:
self.render_sprites(self.sprites)
else:
self.render_block(0, 0)
self.forceRefresh = False
self.tick()
#######################################################
# Map
class TileMap(stage.Grid):
"""A tile map for the whole screen, utilizing a tile set from the given bank"""
def __init__(self, bank, width=8, height=8, palette=None, buffer=None):
super().__init__(bank, width, height, palette, buffer)
self.shaking = 0
self.framesToShake = 4
self._curShakeFrame = 0
self.solidTypes = [] # tile types that should be treated as solid walls for collision
self.triggerTypes = [] # tile types that should trigger some action when overlapped
def fromHexList(self, tileList):
"""
Given a list of hex codes, update the tile map
Example:
tileList = [
"0123456789ABCDEF", # row 0
"0123456790ABCDEF", # row 1
...
]
"""
# validate input
if len(tileList) != self.height:
raise ValueError("Length of tileList is {} but expected {}".format(len(tileList), self.height))
# iterate through tile list
x = 0
y = 0
for row in tileList:
if len(row) != self.width:
raise ValueError("Length of row {} is {} but expected {}".format(y, len(row), self.width))
for tileValue in row:
self.tile(x, y, int(tileValue, 16))
x += 1
y += 1
x = 0
def shake(self, amount=4):
self.shaking = amount
self._curShakeFrame = 0
def handleTrigger(self, sprite, x, y, tileType):
"""Handle special actions based on the tile type"""
pass
def update(self):
if self.shaking != 0:
GAME.forceRefresh = True
if self._curShakeFrame % 2 == 0:
self.move(self.shaking, 0)
else:
self.move(-self.shaking, 0)
self._curShakeFrame += 1
if self._curShakeFrame >= self.framesToShake:
self._curShakeFrame = 0
self.shaking = 0
#######################################################
# Entities
class Moveable(stage.Sprite):
"""Base class for moveable sprites like a player or enemy"""
def __init__(self, bank, x, y):
super().__init__(bank, 0, x, y)
self.x = x
self.y = y
self.collider = utils.BoundingBox(self,2, 2, 12, 12)
self.animations = utils.StateMachine()
def getTilesInCollider(self, dx=0, dy=0):
"""Calculate the grid tiles that are underneath each corner of this sprite's bounding box"""
tiles = []
rect = utils.Rectangle(self.collider.x+dx, self.collider.y+dy, self.collider.width, self.collider.height)
# top left
point = rect.getTopLeft()
point[0] >>= 4 # divide by 16
point[1] >>= 4 # divide by 16
if point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY:
tiles.append(point)
# top right
point = rect.getTopRight()
point[0] >>= 4
point[1] >>= 4
if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles:
tiles.append(point)
# bottom left
point = rect.getBtmLeft()
point[0] >>= 4
point[1] >>= 4
if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles:
tiles.append(point)
# bottom right
point = rect.getBtmRight()
point[0] >>= 4
point[1] >>= 4
if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles:
tiles.append(point)
# return list of tiles
return tiles
def getMovement(self):
"""
Determine desired movement (whether AI or player controls) and return dx, dy for this frame
NOTE: tile collision currently only supports moving in one direction at a time (no diagonal)
"""
return 0, 0
def applyMovementAndAnims(self, dx, dy):
"""Apply the desired movement and animations to this sprite"""
# handle movement and constrain to the stage
self.x = max(min(self.x + dx, GAME.bounceX), 0)
self.y = max(min(self.y + dy, GAME.bounceY), 0)
# finish movement
self.move(self.x, self.y)
self.collider.update()
self.animations.update()
def checkTileCollision(self, dx, dy):
"""Check the game map for collisions with tiles. Works best by checking one axis at a time"""
if dx != 0:
# check map for impassable OR special handler tiles
tiles = self.getTilesInCollider(dx, 0)
for t in tiles:
tileType = GAME.map.tile(x=t[0], y=t[1])
if tileType in GAME.map.solidTypes:
if dx > 0:
self.x = ((t[0]-1) << 4) + self.collider.dx - 1
else:
self.x = ((t[0]+1) << 4) - self.collider.dx + 1
dx = 0
break
elif tileType in GAME.map.triggerTypes:
GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType)
if dy != 0:
# check map for impassable OR special handler tiles
tiles = self.getTilesInCollider(0, dy)
for t in tiles:
tileType = GAME.map.tile(x=t[0], y=t[1])
if tileType in GAME.map.solidTypes:
if dy > 0:
self.y = ((t[1]-1) << 4) + self.collider.dy - 1
else:
self.y = ((t[1]+1) << 4) - self.collider.dy + 1
dy = 0
break
elif tileType in GAME.map.triggerTypes:
GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType)
return dx, dy
def getAnimation(self, dx, dy):
"""Update the animation based on the movement and state"""
pass
def update(self):
super().update()
dx, dy = self.getMovement()
dx, dy = self.checkTileCollision(dx, dy)
self.getAnimation(dx, dy)
self.applyMovementAndAnims(dx, dy)
#######################################################
# Animation Helpers
class AnimState(utils.State):
"""
Base class for animation states in a state machine
Expects all the frames to be consecutive in the sprite sheet
Can delay a number of game frames between each animation frame (ex: delay of 1 with 12 fps means delay 1/12 sec between animation frames)
"""
LOOP_FOREVER = -1
ROTATE_MIRROR = 4
ROTATE_90CW = 1
ROTATE_90CCW = 2
def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0):
"""
Create the new state. By default, the animation will advance each game frame, and it will loop forever.
"""
super().__init__(name)
self.sprite = sprite
self.frameStart = frameStart
self.frameEnd = frameEnd
self._curFrame = frameStart
self.delay = delay
self._curDelay = 0
self.numTimes = numTimes
self._curTimes = 0
self.nextState = nextState
self.rotate = rotate
def enter(self, machine):
utils.log("Entering {} and setting frame to {}. Will repeat {} times and then go to state {}".format(self.name, self.frameStart, self.numTimes, self.nextState))
self.sprite.set_frame(self.frameStart, self.rotate)
self._curFrame = self.frameStart
self._curDelay = 0
def update(self, machine):
# handle delay in the animation
if self.delay > 0:
if self._curDelay < self.delay:
self._curDelay += 1
return
# advance the frame in the animation
self._curFrame += 1
self._curDelay = 0
# handle looping/exiting animation
if self._curFrame > self.frameEnd:
self._curFrame = self.frameStart
self._curTimes += 1
if self.numTimes != self.LOOP_FOREVER and self._curTimes > self.numTimes:
self.goToNextState(machine)
return
self.sprite.set_frame(self._curFrame, self.rotate)
def goToNextState(self, machine):
machine.goToState(self.nextState)
class AnimLoop(AnimState):
"""
Loop an animation for a sprite. Expects all the frames to be consecutive in the sprite sheet.
"""
def __init__(self, name, sprite, frameStart, frameEnd, delay=0, rotate=0):
super().__init__(name, sprite, frameStart, frameEnd, delay, rotate=rotate)
class AnimRepeatN(AnimState):
"""
Repeat an animation N times. Expects all the frames to be consecutive in the sprite sheet.
"""
def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0):
super().__init__(name, sprite, frameStart, frameEnd, delay, numTimes, nextState, rotate)
#######################################################
# GUI
class Dialog(TileMap):
"""A modal text dialog built using a tile map"""
def __init__(self, bank, width=8, height=2, text1=None, text2=None, sprite1=None, palette=None, buffer=None):
super().__init__(bank, width, height, palette, buffer)
self.showing = False
# first line of text
self.marginX = 4
self.marginY = 4
self.text1 = None
if text1:
self.text1 = stage.Text(width=len(text1), height=1)
self.text1.text(text1)
# second line of text
self.marginX2 = self.marginX
self.marginY2 = self.marginY + 15
self.text2 = None
if text2:
self.text2 = stage.Text(width=len(text2), height=1)
self.text2.text(text2)
# extra sprite
self.sprite1 = None
if sprite1:
self.sprite1 = sprite1
# frames to wait at start (avoids accidental button presses)
self.framesToWait = 2
self._curFramesWaiting = 0
def move(self, x, y, z=None):
if self.text1:
self.text1.move(x+self.marginX, y+self.marginY, z)
if self.text2:
self.text2.move(x+self.marginX2, y+self.marginY2, z)
super().move(x, y, z)
def show(self):
"""Display this dialog on top of all the other layers and pause the game"""
if self.showing:
return
GAME.layers.insert(0, self)
if self.text1:
GAME.layers.insert(0, self.text1)
if self.text2:
GAME.layers.insert(0, self.text2)
if self.sprite1:
GAME.layers.insert(0, self.sprite1)
GAME.forceRefresh = True
GAME.pause(self)
self.showing = True
self._curFramesWaiting = 0
def hide(self):
"""Hide this dialog and unpause the game"""
if not self.showing:
return
GAME.layers.remove(self)
if self.text1:
GAME.layers.remove(self.text1)
if self.text2:
GAME.layers.remove(self.text2)
if self.sprite1:
GAME.layers.remove(self.sprite1)
GAME.forceRefresh = True
GAME.resume()
self.showing = False
def update(self):
"""Update function called while the game is paused"""
if self._curFramesWaiting < self.framesToWait:
self._curFramesWaiting += 1
return
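# --- Hedged wiring sketch (not part of the original module) ---
# Hypothetical example of how Game, TileMap and Moveable fit together. The asset file name
# "tiles.bmp", the solid tile type, and the frame rate are illustrative assumptions.
def run_demo():
    bank = stage.Bank.from_bmp16("tiles.bmp")      # 16x16 sprite/tile bank (hypothetical asset)
    game = Game(fps=12)                            # uses ugame.display by default
    game.map = TileMap(bank, width=game.tilesX, height=game.tilesY)
    game.map.solidTypes = [1]                      # treat tile type 1 as an impassable wall
    hero = Moveable(bank, game.midX, game.midY)    # starts at the centre of the screen
    game.layers = [hero, game.map]                 # sprites render above the tile map
    game.addToSprites(hero)                        # also registers hero for per-frame updates
    game.addToUpdates(game.map)                    # lets the map handle screen shake, etc.
    game.gameLoop()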
|
"""
TODO: Add documentation describing what this file does
"""
from marshmallow import Schema, post_dump
class RootSchema(Schema):
SKIP_VALUES = [None]
@post_dump
def remove_skip_values(self, data, many, **kwargs):
return {
key: value for key, value in data.items()
if value not in self.SKIP_VALUES
}
@post_dump(pass_original=True)
def add_extra(self, serialized, original, many, **kwargs):
from kubi_ecs_logger.models.include import INCLUDE_FIELDS
for k, v in original.__dict__.items():
if k not in serialized and v is not None:
type_name = str(type(v).__name__).lower()
if type_name in INCLUDE_FIELDS:
schema = INCLUDE_FIELDS[type_name].schema
data = schema.dump(v)
if "kind" not in data:
data["kind"] = type_name
serialized[k] = data
elif isinstance(v, (int, float, str, bool, dict)):
if not str(k).startswith('_'):
serialized[k] = v
return serialized
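# --- Hedged usage sketch (not part of the original module) ---
# Hypothetical child schema and object illustrating the two post_dump hooks: fields that
# serialize to None are dropped by remove_skip_values, and plain extra attributes on the
# original object are picked up by add_extra. Assumes the kubi_ecs_logger package is
# importable, since add_extra imports INCLUDE_FIELDS from it at dump time.
from marshmallow import fields

class EventSchema(RootSchema):
    message = fields.Str()
    severity = fields.Str(allow_none=True)

class Event:
    def __init__(self, message, severity=None, host="web-01"):
        self.message = message
        self.severity = severity
        self.host = host  # not declared on the schema; added by add_extra

if __name__ == "__main__":
    # Expected result (key order may vary): {'message': 'started', 'host': 'web-01'}
    print(EventSchema().dump(Event("started")))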
|
import os
import io
import time
import base64
import functools
from PIL import Image
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from helpers import *
os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True"
class PythonPredictor:
def __init__(self, config):
# Import TF-Hub module
self.hub_module = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1")
def predict(self, payload):
# Preprocess image
hr_image = preprocess_image(payload["image_b64"])
# Run model
fake_image = self.hub_module(hr_image)
# convert to base64
img = get_image(tf.squeeze(fake_image))
im_file = io.BytesIO()
img.save(im_file, format="PNG")
im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8")
return im_bytes
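# --- Hedged local-test sketch (not part of the original predictor) ---
# Hypothetical example: encode an image file to base64, run the predictor, and decode the
# upscaled result back into a PIL image. The file names are illustrative assumptions.
if __name__ == "__main__":
    with open("input.png", "rb") as f:
        payload = {"image_b64": base64.b64encode(f.read()).decode("utf-8")}
    predictor = PythonPredictor(config={})          # config is unused by __init__
    upscaled_b64 = predictor.predict(payload)
    Image.open(io.BytesIO(base64.b64decode(upscaled_b64))).save("output.png")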
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------------------
# DISCLAIMER: This is just a slightly adjusted version of the EpsilonGreedyPolicy in TF-Agents.
# Most of the code here is directly copied from there.
# I changed it such that the policy in the epsilon case is not random, but sampled from
# the original policy distribution.
# ------------------------------------------------------------------------------------------
"""Policy implementation that generates epsilon-greedy actions from a policy.
TODO(kbanoop): Make policy state optional in the action method.
"""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
from typing import Optional, Text
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.policies import policy_utilities
from tf_agents.policies import greedy_policy
from tf_agents.policies import tf_policy
from tf_agents.trajectories import policy_step
from tf_agents.typing import types
from tf_agents.utils import nest_utils
tfd = tfp.distributions
class EpsilonGreedyPolicy(tf_policy.TFPolicy):
"""Returns epsilon-greedy samples of a given policy."""
def __init__(self,
policy: tf_policy.TFPolicy,
epsilon: types.FloatOrReturningFloat,
name: Optional[Text] = None):
"""Builds an epsilon-greedy MixturePolicy wrapping the given policy.
Args:
policy: A policy implementing the tf_policy.TFPolicy interface.
epsilon: The probability of taking the random action represented as a
float scalar, a scalar Tensor of shape=(), or a callable that returns a
float scalar or Tensor.
name: The name of this policy.
Raises:
ValueError: If epsilon is invalid.
"""
try:
observation_and_action_constraint_splitter = (
policy.observation_and_action_constraint_splitter)
except AttributeError:
observation_and_action_constraint_splitter = None
try:
accepts_per_arm_features = policy.accepts_per_arm_features
except AttributeError:
accepts_per_arm_features = False
self._greedy_policy = greedy_policy.GreedyPolicy(policy)
self._epsilon = epsilon
self._epsilon_policy = self._greedy_policy.wrapped_policy # this is my main change from the original code
super(EpsilonGreedyPolicy, self).__init__(
policy.time_step_spec,
policy.action_spec,
policy.policy_state_spec,
policy.info_spec,
emit_log_probability=policy.emit_log_probability,
observation_and_action_constraint_splitter=(
observation_and_action_constraint_splitter),
name=name)
@property
def wrapped_policy(self) -> tf_policy.TFPolicy:
return self._greedy_policy.wrapped_policy
def _variables(self):
return self._greedy_policy.variables()
def _get_epsilon(self):
if callable(self._epsilon):
return self._epsilon()
else:
return self._epsilon
def _action(self, time_step, policy_state, seed):
seed_stream = tfp.util.SeedStream(seed=seed, salt='epsilon_greedy')
greedy_action = self._greedy_policy.action(time_step, policy_state)
epsilon_action = self._epsilon_policy.action(time_step, (), seed_stream())
outer_shape = nest_utils.get_outer_shape(time_step, self._time_step_spec)
rng = tf.random.uniform(
outer_shape, maxval=1.0, seed=seed_stream(), name='epsilon_rng')
cond = tf.greater(rng, self._get_epsilon())
# Selects the action/info from the random policy with probability epsilon.
# TODO(b/133175894): tf.compat.v1.where only supports a condition which is
# either a scalar or a vector. Use tf.compat.v2 so that it can support any
# condition whose leading dimensions are the same as the other operands of
# tf.where.
outer_ndims = int(outer_shape.shape[0])
if outer_ndims >= 2:
raise ValueError(
'Only supports batched time steps with a single batch dimension')
action = tf.nest.map_structure(lambda g, r: tf.compat.v1.where(cond, g, r),
greedy_action.action, epsilon_action.action)
if greedy_action.info:
if not epsilon_action.info:
raise ValueError('Incompatible info field')
# Note that the objects in PolicyInfo may have different shapes, so we
# need to call nest_utils.where() on each type of object.
info = tf.nest.map_structure(lambda x, y: nest_utils.where(cond, x, y),
greedy_action.info, epsilon_action.info)
if self._emit_log_probability:
# At this point, info.log_probability contains the log prob of the
# action chosen, conditioned on the policy that was chosen. We want to
# emit the full log probability of the action, so we'll add in the log
# probability of choosing the policy.
random_log_prob = tf.nest.map_structure(
lambda t: tf.math.log(tf.zeros_like(t) + self._get_epsilon()),
info.log_probability)
greedy_log_prob = tf.nest.map_structure(
lambda t: tf.math.log(tf.ones_like(t) - self._get_epsilon()),
random_log_prob)
log_prob_of_chosen_policy = nest_utils.where(cond, greedy_log_prob,
random_log_prob)
log_prob = tf.nest.map_structure(lambda a, b: a + b,
log_prob_of_chosen_policy,
info.log_probability)
info = policy_step.set_log_probability(info, log_prob)
# Overwrite bandit policy info type.
if policy_utilities.has_bandit_policy_type(info, check_for_tensor=True):
# Generate mask of the same shape as bandit_policy_type (batch_size, 1).
# This is the opposite of `cond`, which is 1-D bool tensor (batch_size,)
# that is true when greedy policy was used, otherwise `cond` is false.
random_policy_mask = tf.reshape(tf.logical_not(cond),
tf.shape(info.bandit_policy_type))
bandit_policy_type = policy_utilities.bandit_policy_uniform_mask(
info.bandit_policy_type, mask=random_policy_mask)
info = policy_utilities.set_bandit_policy_type(
info, bandit_policy_type)
else:
if epsilon_action.info:
raise ValueError('Incompatible info field')
info = ()
# The state of the epsilon greedy policy is the state of the underlying
# greedy policy (the random policy carries no state).
# It is commonly assumed that the new policy state depends only
# on the previous state and "time_step"; the action (be it the greedy one
# or the random one) does not influence the new policy state.
state = greedy_action.state
return policy_step.PolicyStep(action, state, info)
def _distribution(self, time_step, policy_state):
raise NotImplementedError(
'EpsilonGreedyPolicy does not support distributions yet.')
|
from os import system
def comprar(comida, juguetes):
comprado = ""
while not comprado:
system("cls")
comprar = (input("Que quiere comprar? Alimentos | Juguetes : ")).lower()
if comprar == "alimento":
print(f"Carne: {comida['carne']['cantidad']}|Agua: {comida['agua']['cantidad']}|Huesos: {comida['hueso']['cantidad']}")
producto = (input("Que queres comprar?: ")).lower()
if producto in comida.keys():
cantidad = input("Cuánto quieres comprar?: ")
if cantidad.isdecimal():
comida[producto]['cantidad'] += int(cantidad)
comprado = producto
if comprar == "juguete":
print("Pelota | Soga | Muñeco")
producto = (input("Que quieres comprar?: ")).lower()
if producto in juguetes.keys():
juguetes[producto] = "si"
comprado = producto
|
"""
Third generation models implementation (VPIN)
"""
import pandas as pd
def get_vpin(volume: pd.Series, buy_volume: pd.Series, window: int = 1) -> pd.Series:
"""
Get Volume-Synchronized Probability of Informed Trading (VPIN) from bars, p. 292-293.
:param volume: (pd.Series) bar volume
:param buy_volume: (pd.Series) bar volume classified as buy (either tick rule, BVC or aggressor side methods applied)
:param window: (int) estimation window
:return: (pd.Series) VPIN series
"""
sell_volume = volume - buy_volume
volume_imbalance = abs(buy_volume - sell_volume)
return volume_imbalance.rolling(window=window).mean() / volume
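# --- Hedged usage sketch (not part of the original module) ---
# Made-up bar data: buy volume per bar plus total volume, with VPIN estimated over a
# rolling window of 3 bars. The first two values are NaN until the window fills.
if __name__ == "__main__":
    volume = pd.Series([100.0, 120.0, 90.0, 110.0, 130.0])
    buy_volume = pd.Series([60.0, 50.0, 45.0, 70.0, 65.0])
    print(get_vpin(volume, buy_volume, window=3))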
|
"""
github3.gists.comment
---------------------
Module containing the logic for a GistComment
"""
from github3.models import BaseComment
from github3.users import User
class GistComment(BaseComment):
"""This object represents a comment on a gist.
Two comment instances can be checked like so::
c1 == c2
c1 != c2
And is equivalent to::
c1.id == c2.id
c1.id != c2.id
See also: http://developer.github.com/v3/gists/comments/
"""
def __init__(self, comment, session=None):
super(GistComment, self).__init__(comment, session)
#: :class:`User <github3.users.User>` who made the comment
#: Unless it is not associated with an account
self.user = None
if comment.get('user'):
self.user = User(comment.get('user'), self) # (No coverage)
def __repr__(self):
return '<Gist Comment [{0}]>'.format(self.user.login)
|
import numpy as np
class NeuralNetwork(object):
def __init__(self, topology, epsilon, numLabels):
self.theta = []
self.topology = topology
self.numLabels = numLabels
self.gradientChecking = False
for layer in range(len(self.topology)):
if layer == 0:
continue
self.theta.append(np.random.rand(self.topology[layer], self.topology[layer - 1] + 1) * 2 * epsilon - epsilon)
def gradientDescent(self, iters, alpha, lamda, X, Y):
self.X = X
self.Y = Y
for i in range(iters):
(J, thetaGrad) = self.getCostAndGradient(lamda)
# gradient checking
if self.gradientChecking:
thetaCopy = self.theta.copy()
for i in range(len(self.topology) - 1):
for j in range(self.topology[i + 1]):
for k in range(self.topology[i]):
EPS = 0.00001
self.theta[i][j, k] += EPS
J2 = self.getCostAndGradient(lamda)[0]
self.theta[i][j, k] -= 2 * EPS
J1 = self.getCostAndGradient(lamda)[0]
print(str((J2 - J1) / (2 * EPS) - thetaGrad[i][j, k]))
self.theta = thetaCopy
# end
for layer in range(len(self.topology) - 1):
self.theta[layer] -= thetaGrad[layer] * alpha
print("Iter " + str(i) + ": " + str(J))
def predict(self, x):
x = x.reshape((x.shape[0], 1))
x = np.concatenate(([[1]], x))
for layer in range(1, len(self.topology)):
x = np.matmul(self.theta[layer - 1], x)
for i in range(x.shape[0]):
x[i, 0] = self.sigmoid(x[i, 0])
if layer != len(self.topology) - 1:
x = np.concatenate(([[1]], x))
prediction = -1
predictionSurety = -1
for i in range(self.numLabels):
if x[i, 0] > predictionSurety:
prediction = i
predictionSurety = x[i, 0]
return prediction
def getCostAndGradient(self, lamda):
J = 0
thetaGrad = []
for layer in range(len(self.topology)):
if layer == 0:
continue
thetaGrad.append(np.zeros((self.topology[layer], self.topology[layer - 1] + 1)))
m = self.X.shape[0]
for example in range(m):
x = self.X[example].copy()
x = x.reshape((x.shape[0], 1))
y = np.zeros(self.numLabels)
y[self.Y[example]] = 1
y = y.reshape((y.shape[0], 1))
a = []
z = []
delta = []
for layer in range(len(self.topology)):
if layer == 0:
a.append(np.concatenate(([[1]], x)))
z.append(np.concatenate(([[1]], x)))
delta.append(0)
continue
z.append(np.matmul(self.theta[layer - 1], a[layer - 1]))
a.append(z[layer].copy())
for i in range(self.topology[layer]):
a[layer][i, 0] = self.sigmoid(a[layer][i, 0])
if layer != len(self.topology) - 1:
a[layer] = np.concatenate(([[1]], a[layer]))
z[layer] = np.concatenate(([[1]], z[layer]))
delta.append(0)
for layer in range(len(self.topology) - 1, 0, -1):
if layer == len(self.topology) - 1:
delta[layer] = a[layer] - y
thetaGrad[layer - 1] += np.matmul(delta[layer], a[layer - 1].transpose())
continue
sigDerZ = z[layer].copy()
for i in range(self.topology[layer] + 1):
sigDerZ[i] = self.sigmoidDerivative(sigDerZ[i])
if layer >= len(self.topology) - 2:
delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1]) * sigDerZ
else:
delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1][1:, :]) * sigDerZ
thetaGrad[layer - 1] += np.matmul(delta[layer][1:, :], a[layer - 1].transpose())
J += np.sum(-(1 - y) * np.log(1 - a[len(self.topology) - 1])) - np.sum(y * np.log(a[len(self.topology) - 1]))
J /= m
for layer in range(len(self.topology) - 1):
thetaGrad[layer] *= (1 / m)
for i in range(len(self.topology) - 1):
for j in range(self.topology[i + 1]):
for k in range(1, self.topology[i]):
J += (lamda / (2 * m)) * self.theta[i][j, k] ** 2
thetaGrad[i][j, k] += (lamda / m) * self.theta[i][j, k]
return (J, thetaGrad)
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def sigmoidDerivative(self, x):
sig = self.sigmoid(x)
return sig * (1 - sig)
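# --- Hedged usage sketch (not part of the original class) ---
# Made-up toy data: a 2-4-2 network trained on XOR-style labels. The hyperparameters are
# illustrative, not tuned; gradientDescent prints the cost after every iteration.
if __name__ == "__main__":
    np.random.seed(0)
    X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    Y = np.array([0, 1, 1, 0])
    net = NeuralNetwork(topology=[2, 4, 2], epsilon=0.12, numLabels=2)
    net.gradientDescent(iters=500, alpha=1.0, lamda=0.0, X=X, Y=Y)
    print([net.predict(x) for x in X])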
|
"""“Write a for loop to print all the values in the half_lives list from Operations on Lists, all on a single line. half_lives refers to [87.74, 24110.0, 6537.0, 14.4, 376000.0]."""
half_lives = [87.74, 24110.0, 6537.0, 14.4, 376000.0]
for i in half_lives:
print(i , end=' ')
|
#!/usr/bin/env python3
"""Read a correct swagger file and check whether it conforms to a style guide."""
import argparse
import pathlib
from typing import List
import sys
import swagger_to.intermediate
import swagger_to.style
import swagger_to.swagger
def main() -> int:
"""Execute the main routine."""
parser = argparse.ArgumentParser(description="Reads a correct swagger file and checks that it conforms to the style guide.")
parser.add_argument("--swagger_path", help="path to the swagger file", required=True)
parser.add_argument("--verbose", help="if set, prints as much information as possible.", action="store_true")
parser.add_argument(
"--with_line_number",
help="if set, prints the errors with the corresponding file name and line number.",
action="store_true")
args = parser.parse_args()
assert isinstance(args.swagger_path, str)
assert isinstance(args.verbose, bool)
assert isinstance(args.with_line_number, bool)
swagger_path = pathlib.Path(args.swagger_path)
if not swagger_path.exists():
print("File not found error: Swagger file does not exist: {}".format(swagger_path))
return 2
swagger, errs = swagger_to.swagger.parse_yaml_file(path=swagger_path)
if errs:
print("Value error: Failed to parse Swagger file {}:\n{}".format(swagger_path, "\n".join(errs)))
return 2
intermediate_typedefs = swagger_to.intermediate.to_typedefs(swagger=swagger)
intermediate_params = swagger_to.intermediate.to_parameters(swagger=swagger, typedefs=intermediate_typedefs)
endpoints = swagger_to.intermediate.to_endpoints(
swagger=swagger, typedefs=intermediate_typedefs, params=intermediate_params)
result = swagger_to.style.perform(swagger=swagger, typedefs=intermediate_typedefs, endpoints=endpoints)
if result:
complaints = '\n'.join(
format_complaints(
complaints=result,
swagger_path=str(swagger_path),
verbose=args.verbose,
with_line_number=args.with_line_number))
print("Style checks failed: \n{}".format(complaints))
return 1
print("Style checks succeeded.")
return 0
def format_complaints(complaints: List[swagger_to.style.Complaint], swagger_path: str, verbose: bool,
with_line_number: bool) -> List[str]:
"""
Convert a list of complaints into a well-formatted list of error messages.
:param complaints:
:param swagger_path:
:param verbose:
:param with_line_number:
:return:
"""
if with_line_number:
complaints.sort(key=lambda complaint: complaint.line)
complaints_str = [] # type: List[str]
for complaint in complaints:
complaint_str = ''
if with_line_number:
complaint_str += "{}:{} ".format(swagger_path, complaint.line)
else:
complaint_str += "{}: ".format(complaint.where)
complaint_str += "{} ".format(complaint.message)
if verbose:
complaint_str += "\"{}\"".format(complaint.what.replace('\n', ' '))
complaints_str.append(complaint_str)
return complaints_str
if __name__ == "__main__":
sys.exit(main())
|
# coding: utf-8
"""
Katib
Swagger description for Katib # noqa: E501
The version of the OpenAPI document: v1beta1-0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubeflow.katib.configuration import Configuration
class V1beta1ExperimentSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'algorithm': 'V1beta1AlgorithmSpec',
'early_stopping': 'V1beta1EarlyStoppingSpec',
'max_failed_trial_count': 'int',
'max_trial_count': 'int',
'metrics_collector_spec': 'V1beta1MetricsCollectorSpec',
'nas_config': 'V1beta1NasConfig',
'objective': 'V1beta1ObjectiveSpec',
'parallel_trial_count': 'int',
'parameters': 'list[V1beta1ParameterSpec]',
'resume_policy': 'str',
'trial_template': 'V1beta1TrialTemplate'
}
attribute_map = {
'algorithm': 'algorithm',
'early_stopping': 'earlyStopping',
'max_failed_trial_count': 'maxFailedTrialCount',
'max_trial_count': 'maxTrialCount',
'metrics_collector_spec': 'metricsCollectorSpec',
'nas_config': 'nasConfig',
'objective': 'objective',
'parallel_trial_count': 'parallelTrialCount',
'parameters': 'parameters',
'resume_policy': 'resumePolicy',
'trial_template': 'trialTemplate'
}
def __init__(self, algorithm=None, early_stopping=None, max_failed_trial_count=None, max_trial_count=None, metrics_collector_spec=None, nas_config=None, objective=None, parallel_trial_count=None, parameters=None, resume_policy=None, trial_template=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ExperimentSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._algorithm = None
self._early_stopping = None
self._max_failed_trial_count = None
self._max_trial_count = None
self._metrics_collector_spec = None
self._nas_config = None
self._objective = None
self._parallel_trial_count = None
self._parameters = None
self._resume_policy = None
self._trial_template = None
self.discriminator = None
if algorithm is not None:
self.algorithm = algorithm
if early_stopping is not None:
self.early_stopping = early_stopping
if max_failed_trial_count is not None:
self.max_failed_trial_count = max_failed_trial_count
if max_trial_count is not None:
self.max_trial_count = max_trial_count
if metrics_collector_spec is not None:
self.metrics_collector_spec = metrics_collector_spec
if nas_config is not None:
self.nas_config = nas_config
if objective is not None:
self.objective = objective
if parallel_trial_count is not None:
self.parallel_trial_count = parallel_trial_count
if parameters is not None:
self.parameters = parameters
if resume_policy is not None:
self.resume_policy = resume_policy
if trial_template is not None:
self.trial_template = trial_template
@property
def algorithm(self):
"""Gets the algorithm of this V1beta1ExperimentSpec. # noqa: E501
:return: The algorithm of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1AlgorithmSpec
"""
return self._algorithm
@algorithm.setter
def algorithm(self, algorithm):
"""Sets the algorithm of this V1beta1ExperimentSpec.
:param algorithm: The algorithm of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1AlgorithmSpec
"""
self._algorithm = algorithm
@property
def early_stopping(self):
"""Gets the early_stopping of this V1beta1ExperimentSpec. # noqa: E501
:return: The early_stopping of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1EarlyStoppingSpec
"""
return self._early_stopping
@early_stopping.setter
def early_stopping(self, early_stopping):
"""Sets the early_stopping of this V1beta1ExperimentSpec.
:param early_stopping: The early_stopping of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1EarlyStoppingSpec
"""
self._early_stopping = early_stopping
@property
def max_failed_trial_count(self):
"""Gets the max_failed_trial_count of this V1beta1ExperimentSpec. # noqa: E501
Max failed trials to mark experiment as failed. # noqa: E501
:return: The max_failed_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._max_failed_trial_count
@max_failed_trial_count.setter
def max_failed_trial_count(self, max_failed_trial_count):
"""Sets the max_failed_trial_count of this V1beta1ExperimentSpec.
Max failed trials to mark experiment as failed. # noqa: E501
:param max_failed_trial_count: The max_failed_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:type: int
"""
self._max_failed_trial_count = max_failed_trial_count
@property
def max_trial_count(self):
"""Gets the max_trial_count of this V1beta1ExperimentSpec. # noqa: E501
Max completed trials to mark experiment as succeeded # noqa: E501
:return: The max_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._max_trial_count
@max_trial_count.setter
def max_trial_count(self, max_trial_count):
"""Sets the max_trial_count of this V1beta1ExperimentSpec.
Max completed trials to mark experiment as succeeded # noqa: E501
:param max_trial_count: The max_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:type: int
"""
self._max_trial_count = max_trial_count
@property
def metrics_collector_spec(self):
"""Gets the metrics_collector_spec of this V1beta1ExperimentSpec. # noqa: E501
:return: The metrics_collector_spec of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1MetricsCollectorSpec
"""
return self._metrics_collector_spec
@metrics_collector_spec.setter
def metrics_collector_spec(self, metrics_collector_spec):
"""Sets the metrics_collector_spec of this V1beta1ExperimentSpec.
:param metrics_collector_spec: The metrics_collector_spec of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1MetricsCollectorSpec
"""
self._metrics_collector_spec = metrics_collector_spec
@property
def nas_config(self):
"""Gets the nas_config of this V1beta1ExperimentSpec. # noqa: E501
:return: The nas_config of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1NasConfig
"""
return self._nas_config
@nas_config.setter
def nas_config(self, nas_config):
"""Sets the nas_config of this V1beta1ExperimentSpec.
:param nas_config: The nas_config of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1NasConfig
"""
self._nas_config = nas_config
@property
def objective(self):
"""Gets the objective of this V1beta1ExperimentSpec. # noqa: E501
:return: The objective of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1ObjectiveSpec
"""
return self._objective
@objective.setter
def objective(self, objective):
"""Sets the objective of this V1beta1ExperimentSpec.
:param objective: The objective of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1ObjectiveSpec
"""
self._objective = objective
@property
def parallel_trial_count(self):
"""Gets the parallel_trial_count of this V1beta1ExperimentSpec. # noqa: E501
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:return: The parallel_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._parallel_trial_count
@parallel_trial_count.setter
def parallel_trial_count(self, parallel_trial_count):
"""Sets the parallel_trial_count of this V1beta1ExperimentSpec.
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:param parallel_trial_count: The parallel_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:type: int
"""
self._parallel_trial_count = parallel_trial_count
@property
def parameters(self):
"""Gets the parameters of this V1beta1ExperimentSpec. # noqa: E501
List of hyperparameter configurations. # noqa: E501
:return: The parameters of this V1beta1ExperimentSpec. # noqa: E501
:rtype: list[V1beta1ParameterSpec]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1beta1ExperimentSpec.
List of hyperparameter configurations. # noqa: E501
:param parameters: The parameters of this V1beta1ExperimentSpec. # noqa: E501
:type: list[V1beta1ParameterSpec]
"""
self._parameters = parameters
@property
def resume_policy(self):
"""Gets the resume_policy of this V1beta1ExperimentSpec. # noqa: E501
Describes the resume policy, which usually takes effect after the experiment is terminated. # noqa: E501
:return: The resume_policy of this V1beta1ExperimentSpec. # noqa: E501
:rtype: str
"""
return self._resume_policy
@resume_policy.setter
def resume_policy(self, resume_policy):
"""Sets the resume_policy of this V1beta1ExperimentSpec.
Describes the resume policy, which usually takes effect after the experiment is terminated. # noqa: E501
:param resume_policy: The resume_policy of this V1beta1ExperimentSpec. # noqa: E501
:type: str
"""
self._resume_policy = resume_policy
@property
def trial_template(self):
"""Gets the trial_template of this V1beta1ExperimentSpec. # noqa: E501
:return: The trial_template of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1TrialTemplate
"""
return self._trial_template
@trial_template.setter
def trial_template(self, trial_template):
"""Sets the trial_template of this V1beta1ExperimentSpec.
:param trial_template: The trial_template of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1TrialTemplate
"""
self._trial_template = trial_template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ExperimentSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ExperimentSpec):
return True
return self.to_dict() != other.to_dict()
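# --- Hedged usage sketch (not part of the generated model) ---
# Illustrative field values: build a minimal experiment spec and serialize it with to_dict().
# Fields that were never set come back as None in the resulting dict.
if __name__ == "__main__":
    spec = V1beta1ExperimentSpec(
        max_trial_count=12,
        parallel_trial_count=3,
        max_failed_trial_count=3,
        resume_policy="LongRunning",
    )
    print(spec.to_dict())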
|
from collections import namedtuple
import os
import json
import numpy as np
from tqdm import tqdm
from data_generators.utils import load_image_rgb
# Copied from: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
#
# Cityscapes labels
#
#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------
# a label and all meta information
Label = namedtuple( 'Label' , [
'name' , # The identifier of this label, e.g. 'car', 'person', ... .
# We use them to uniquely name a class
'id' , # An integer ID that is associated with this label.
# The IDs are used to represent the label in ground truth images
# An ID of -1 means that this label does not have an ID and thus
# is ignored when creating ground truth images (e.g. license plate).
# Do not modify these IDs, since exactly these IDs are expected by the
# evaluation server.
'trainId' , # Feel free to modify these IDs as suitable for your method. Then create
# ground truth images with train IDs, using the tools provided in the
# 'preparation' folder. However, make sure to validate or submit results
# to our evaluation server using the regular IDs above!
# For trainIds, multiple labels might have the same ID. Then, these labels
# are mapped to the same class in the ground truth images. For the inverse
# mapping, we use the label that is defined first in the list below.
# For example, mapping all void-type classes to the same ID in training,
# might make sense for some approaches.
# Max value is 255!
'category' , # The name of the category that this label belongs to
'categoryId' , # The ID of this category. Used to create ground truth images
# on category level.
'hasInstances', # Whether this label distinguishes between single instances or not
'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
# during evaluations or not
'color' , # The color of this label
] )
def label2dict(label):
return {
'name': label.name, 'id': label.id, 'trainId': label.trainId,
'category': label.category, 'catId': label.categoryId, 'hasInstances': label.hasInstances,
'ignoreInEval': label.ignoreInEval, 'color': label.color
}
def save_labels(labels, fpath):
l = []
for label in labels:
l.append(label2dict(label))
fp = open(fpath, 'w')
json.dump(l, fp)
fp.close()
def load_labels(fpath):
fp = open(fpath, 'r')
l = json.load(fp)
fp.close()
labels = []
for item in l:
labels.append(
Label(
item['name'], item['id'], item['trainId'],
item['category'], item['catId'], item['hasInstances'],
item['ignoreInEval'], tuple(item['color']))
)
return labels
class KittiDataset:
def __init__(self):
self.image_ids = []
def load_kitti(self, dataset_dir, subset, tag='simple'):
'Initialization'
assert subset in ['train', 'val'], 'subset must be either train or val but {} is given'.format(subset)
self.labels = load_labels(os.path.join(dataset_dir, 'annotations', 'semantic_{}.json'.format(tag)))
# trainId to colors
self.trainId2colors = {label.trainId: [] for label in self.labels}
for label in self.labels:
self.trainId2colors[label.trainId].append(label.color)
# trainId to name
self.trainId2name = {label.trainId: label.name for label in self.labels}
# number of valid trainIds + background class
self.num_classes = max([label.trainId for label in self.labels if label.trainId >= 0 and label.trainId < 255]) + 2
self.class_names = [self.trainId2name[i] for i in range(self.num_classes - 1)]
self.image_dir = os.path.join(dataset_dir, subset, 'images')
self.label_dir = os.path.join(dataset_dir, subset, 'semantic_rgb')
assert os.path.exists(self.image_dir), 'No such directory: {}'.format(self.image_dir)
assert os.path.exists(self.label_dir), 'No such directory: {}'.format(self.label_dir)
self.image_files = sorted([x for x in os.listdir(self.image_dir) if x.lower().endswith('.png') or x.lower().endswith('.jpg')])
self.label_files = sorted([x for x in os.listdir(self.label_dir) if x.lower().endswith('.png')])
assert len(self.image_files) == len(self.label_files), \
'image - label size mismatch! There are {} image files and {} label files'.format(len(self.image_files), len(self.label_files))
self.num_images = len(self.image_files)
self.image_ids = np.arange(self.num_images)
def check_sanity(self):
for i in tqdm(self.image_ids):
assert self.image_files[i][:-4] == self.label_files[i][:-4],\
'image - label filename mismatch: {} - {}'.format(self.image_files[i], self.label_files[i])
img = load_image_rgb(os.path.join(self.image_dir, self.image_files[i]))
msk = load_image_rgb(os.path.join(self.label_dir, self.label_files[i]))
assert img.shape == msk.shape,\
'img.shape: {}, msk.shape: {}'.format(img.shape, msk.shape)
def load_image(self, image_id):
return load_image_rgb(os.path.join(self.image_dir, self.image_files[image_id]))
def load_mask(self, image_id):
rgb_mask = load_image_rgb(os.path.join(self.label_dir, self.label_files[image_id]))
mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1], self.num_classes - 1))
for cls in range(self.num_classes - 1):
colors = self.trainId2colors[cls]
cls_mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1]))
for color in colors:
cls_mask = np.logical_or(cls_mask, (rgb_mask == color).all(axis=2))
mask[:,:,cls] = cls_mask
return mask
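# --- Hedged usage sketch (not part of the original class) ---
# Hypothetical dataset root: load the training split, verify image/label pairing, and read
# one sample. The directory path is an assumption; it must follow the layout expected above
# (annotations/semantic_simple.json, train/images, train/semantic_rgb).
if __name__ == "__main__":
    dataset = KittiDataset()
    dataset.load_kitti("/data/kitti_semantics", subset="train", tag="simple")
    dataset.check_sanity()
    image = dataset.load_image(dataset.image_ids[0])
    mask = dataset.load_mask(dataset.image_ids[0])
    print(image.shape, mask.shape, dataset.class_names)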
|
"""PyMC4 continuous random variables for tensorflow."""
import tensorflow_probability as tfp
from pymc4.distributions import abstract
from pymc4.distributions.tensorflow.distribution import BackendDistribution
tfd = tfp.distributions
__all__ = [
"Beta",
"Cauchy",
"ChiSquared",
"Exponential",
"Gamma",
"Gumbel",
"HalfCauchy",
"HalfNormal",
"InverseGamma",
"InverseGaussian",
"Kumaraswamy",
"Laplace",
"LogNormal",
"Logistic",
"LogitNormal",
"Normal",
"Pareto",
"StudentT",
"Triangular",
"Uniform",
"VonMises",
]
class Normal(BackendDistribution, abstract.Normal):
__doc__ = r"""{}
Developer Notes
---------------
Parameter mappings to TensorFlow Probability are as follows:
- mu: loc
- sigma: scale
""".format(
abstract.Normal.__doc__
)
def _init_backend(self):
mu, sigma = self.conditions["mu"], self.conditions["sigma"]
self._backend_distribution = tfd.Normal(loc=mu, scale=sigma)
class HalfNormal(BackendDistribution, abstract.HalfNormal):
__doc__ = r"""{}
Developer Notes
---------------
Parameter mappings to TensorFlow Probability are as follows:
- sigma: scale
""".format(
abstract.HalfNormal.__doc__
)
def _init_backend(self):
sigma = self.conditions["sigma"]
self._backend_distribution = tfd.HalfNormal(scale=sigma)
class Beta(BackendDistribution, abstract.Beta):
def _init_backend(self):
alpha, beta = self.conditions["alpha"], self.conditions["beta"]
self._backend_distribution = tfd.Beta(concentration1=alpha, concentration0=beta)  # TFP: concentration1 is alpha, concentration0 is beta
class Cauchy(BackendDistribution, abstract.Cauchy):
def _init_backend(self):
alpha, beta = self.conditions["alpha"], self.conditions["beta"]
self._backend_distribution = tfd.Cauchy(loc=alpha, scale=beta)
class ChiSquared(BackendDistribution, abstract.ChiSquared):
def _init_backend(self):
nu = self.conditions["nu"]
self._backend_distribution = tfd.Chi2(df=nu)
class Exponential(BackendDistribution, abstract.Exponential):
def _init_backend(self):
lam = self.conditions["lam"]
self._backend_distribution = tfd.Exponential(rate=lam)
class Gamma(BackendDistribution, abstract.Gamma):
def _init_backend(self):
alpha, beta = self.conditions["alpha"], self.conditions["beta"]
self._backend_distribution = tfd.Gamma(concentration=alpha, rate=beta)
class Gumbel(BackendDistribution, abstract.Gumbel):
def _init_backend(self):
mu, beta = self.conditions["mu"], self.conditions["beta"]
self._backend_distribution = tfd.Gumbel(loc=mu, scale=beta)
class HalfCauchy(BackendDistribution, abstract.HalfCauchy):
def _init_backend(self):
beta = self.conditions["beta"]
self._backend_distribution = tfd.HalfCauchy(loc=0, scale=beta)
class InverseGamma(BackendDistribution, abstract.InverseGamma):
def _init_backend(self):
alpha, beta = self.conditions["alpha"], self.conditions["beta"]
self._backend_distribution = tfd.InverseGamma(concentration=alpha, scale=beta)
class InverseGaussian(BackendDistribution, abstract.InverseGaussian):
def _init_backend(self):
mu, lam = self.conditions["mu"], self.conditions["lam"]
self._backend_distribution = tfd.InverseGaussian(loc=mu, concentration=lam)
class Kumaraswamy(BackendDistribution, abstract.Kumaraswamy):
def _init_backend(self):
a, b = self.conditions["a"], self.conditions["b"]
self._backend_distribution = tfd.Kumaraswamy(concentration1=a, concentration0=b)
class Laplace(BackendDistribution, abstract.Laplace):
def _init_backend(self):
mu, b = self.conditions["mu"], self.conditions["b"]
self._backend_distribution = tfd.Laplace(loc=mu, scale=b)
class Logistic(BackendDistribution, abstract.Logistic):
def _init_backend(self):
mu, s = self.conditions["mu"], self.conditions["s"]
self._backend_distribution = tfd.Logistic(loc=mu, scale=s)
class LogitNormal(BackendDistribution, abstract.LogitNormal):
def _init_backend(self):
mu, sigma = self.conditions["mu"], self.conditions["sigma"]
self._backend_distribution = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=mu, scale=sigma),
bijector=tfp.bijectors.Sigmoid(),
name="LogitNormal",
)
class LogNormal(BackendDistribution, abstract.LogNormal):
def _init_backend(self):
mu, sigma = self.conditions["mu"], self.conditions["sigma"]
self._backend_distribution = tfd.LogNormal(loc=mu, scale=sigma)
class Pareto(BackendDistribution, abstract.Pareto):
def _init_backend(self):
alpha, m = self.conditions["alpha"], self.conditions["m"]
self._backend_distribution = tfd.Pareto(concentration=alpha, scale=m)
class StudentT(BackendDistribution, abstract.StudentT):
def _init_backend(self):
nu, mu, sigma = self.conditions["nu"], self.conditions["mu"], self.conditions["sigma"]
self._backend_distribution = tfd.StudentT(df=nu, loc=mu, scale=sigma)
class Triangular(BackendDistribution, abstract.Triangular):
def _init_backend(self):
lower, upper, c = self.conditions["lower"], self.conditions["upper"], self.conditions["c"]
self._backend_distribution = tfd.Triangular(low=lower, high=upper, peak=c)
class Uniform(BackendDistribution, abstract.Uniform):
def _init_backend(self):
lower, upper = self.conditions["lower"], self.conditions["upper"]
self._backend_distribution = tfd.Uniform(low=lower, high=upper)
class VonMises(BackendDistribution, abstract.VonMises):
def _init_backend(self):
mu, kappa = self.conditions["mu"], self.conditions["kappa"]
self._backend_distribution = tfd.VonMises(loc=mu, concentration=kappa)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import DatadogApiKey
from ._models_py3 import DatadogApiKeyListResponse
from ._models_py3 import DatadogHost
from ._models_py3 import DatadogHostListResponse
from ._models_py3 import DatadogHostMetadata
from ._models_py3 import DatadogInstallMethod
from ._models_py3 import DatadogLogsAgent
from ._models_py3 import DatadogMonitorResource
from ._models_py3 import DatadogMonitorResourceListResponse
from ._models_py3 import DatadogMonitorResourceUpdateParameters
from ._models_py3 import DatadogOrganizationProperties
from ._models_py3 import DatadogSetPasswordLink
from ._models_py3 import DatadogSingleSignOnProperties
from ._models_py3 import DatadogSingleSignOnResource
from ._models_py3 import DatadogSingleSignOnResourceListResponse
from ._models_py3 import ErrorResponseBody
from ._models_py3 import FilteringTag
from ._models_py3 import LinkedResource
from ._models_py3 import LinkedResourceListResponse
from ._models_py3 import MonitoredResource
from ._models_py3 import MonitoredResourceListResponse
from ._models_py3 import MonitoringTagRules
from ._models_py3 import MonitoringTagRulesListResponse
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import OperationResult
from ._models_py3 import ResourceProviderDefaultErrorResponse
from ._models_py3 import UserInfo
except (SyntaxError, ImportError):
from ._models import DatadogApiKey # type: ignore
from ._models import DatadogApiKeyListResponse # type: ignore
from ._models import DatadogHost # type: ignore
from ._models import DatadogHostListResponse # type: ignore
from ._models import DatadogHostMetadata # type: ignore
from ._models import DatadogInstallMethod # type: ignore
from ._models import DatadogLogsAgent # type: ignore
from ._models import DatadogMonitorResource # type: ignore
from ._models import DatadogMonitorResourceListResponse # type: ignore
from ._models import DatadogMonitorResourceUpdateParameters # type: ignore
from ._models import DatadogOrganizationProperties # type: ignore
from ._models import DatadogSetPasswordLink # type: ignore
from ._models import DatadogSingleSignOnProperties # type: ignore
from ._models import DatadogSingleSignOnResource # type: ignore
from ._models import DatadogSingleSignOnResourceListResponse # type: ignore
from ._models import ErrorResponseBody # type: ignore
from ._models import FilteringTag # type: ignore
from ._models import LinkedResource # type: ignore
from ._models import LinkedResourceListResponse # type: ignore
from ._models import MonitoredResource # type: ignore
from ._models import MonitoredResourceListResponse # type: ignore
from ._models import MonitoringTagRules # type: ignore
from ._models import MonitoringTagRulesListResponse # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import OperationResult # type: ignore
from ._models import ResourceProviderDefaultErrorResponse # type: ignore
from ._models import UserInfo # type: ignore
from ._microsoft_datadog_client_enums import (
LiftrResourceCategories,
ManagedIdentityTypes,
MarketplaceSubscriptionStatus,
MonitoringStatus,
ProvisioningState,
SingleSignOnStates,
TagAction,
)
__all__ = [
'DatadogApiKey',
'DatadogApiKeyListResponse',
'DatadogHost',
'DatadogHostListResponse',
'DatadogHostMetadata',
'DatadogInstallMethod',
'DatadogLogsAgent',
'DatadogMonitorResource',
'DatadogMonitorResourceListResponse',
'DatadogMonitorResourceUpdateParameters',
'DatadogOrganizationProperties',
'DatadogSetPasswordLink',
'DatadogSingleSignOnProperties',
'DatadogSingleSignOnResource',
'DatadogSingleSignOnResourceListResponse',
'ErrorResponseBody',
'FilteringTag',
'LinkedResource',
'LinkedResourceListResponse',
'MonitoredResource',
'MonitoredResourceListResponse',
'MonitoringTagRules',
'MonitoringTagRulesListResponse',
'OperationDisplay',
'OperationListResult',
'OperationResult',
'ResourceProviderDefaultErrorResponse',
'UserInfo',
'LiftrResourceCategories',
'ManagedIdentityTypes',
'MarketplaceSubscriptionStatus',
'MonitoringStatus',
'ProvisioningState',
'SingleSignOnStates',
'TagAction',
]
|
# Copyright (c) 2018-2022 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Startup message
To begin a session, the frontend opens a connection to the backend and sends a
Startup message.
"""
from __future__ import print_function, division, absolute_import
import platform
import os
from struct import pack
# noinspection PyUnresolvedReferences,PyCompatibility
import vertica_python
from ..message import BulkFrontendMessage
class Startup(BulkFrontendMessage):
message_id = None
def __init__(self, user, database, session_label, os_user_name):
BulkFrontendMessage.__init__(self)
try:
os_platform = platform.platform()
except Exception as e:
os_platform = ''
print("WARN: Cannot get the OS info: {}".format(str(e)))
try:
pid = str(os.getpid())
except Exception as e:
pid = '0'
print("WARN: Cannot get the process ID: {}".format(str(e)))
self.parameters = {
b'user': user,
b'database': database,
b'client_label': session_label,
b'client_type': 'vertica-python',
b'client_version': vertica_python.__version__,
b'client_os': os_platform,
b'client_os_user_name': os_user_name,
b'client_pid': pid,
}
def read_bytes(self):
# The fixed protocol version is followed by pairs of parameter name and value strings.
# A zero byte is required as a terminator after the last name/value pair.
# Parameters can appear in any order.
fixed_protocol_version = 3 << 16 | 5
bytes_ = pack('!I', fixed_protocol_version)
# The frontend sends a requested protocol version to the backend.
# Old servers (protocol < 3.7) ignore this value and use the fixed protocol version.
# New servers (protocol >= 3.7) would try to find the common protocol
# version in use for both client and server, and send back a ParameterStatus
# message (key='protocol_version', value=<effective protocol version>)
bytes_ += pack('!16sxIx', b'protocol_version', vertica_python.PROTOCOL_VERSION)
for k in self.parameters:
v = self.parameters[k].encode('utf-8')
bytes_ += pack('!{}sx{}sx'.format(len(k), len(v)), k, v)
bytes_ += pack('x')
return bytes_
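# Illustrative sketch of the bytes produced by read_bytes(); the values below are
# made-up examples, not captured from a real session:
#   pack('!I', 3 << 16 | 5)                      -> fixed protocol version 3.5
#   pack('!16sxIx', b'protocol_version', ...)    -> requested protocol version pair
#   b'user\x00alice\x00' b'database\x00dev\x00'  -> NUL-terminated name/value pairs
#   b'\x00'                                      -> final terminator byte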
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; its messages can be captured by enabling
# logging in your Ansible configuration.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_setting
short_description: Configure router settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify the router feature and setting category.
Examples include all parameters and values, which need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
router_setting:
description:
- Configure router settings.
default: null
suboptions:
hostname:
description:
- Hostname for this virtual domain router.
show-filter:
description:
- Prefix-list as filter for showing routes. Source router.prefix-list.name.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure router settings.
fortios_router_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
router_setting:
hostname: "myhostname"
show-filter: "<your_own_value> (source router.prefix-list.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_router_setting_data(json):
option_list = ['hostname', 'show-filter']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def router_setting(data, fos):
vdom = data['vdom']
router_setting_data = data['router_setting']
flattened_data = flatten_multilists_attributes(router_setting_data)
filtered_data = filter_router_setting_data(flattened_data)
return fos.set('router',
'setting',
data=filtered_data,
vdom=vdom)
def fortios_router(data, fos):
login(data)
if data['router_setting']:
resp = router_setting(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"router_setting": {
"required": False, "type": "dict",
"options": {
"hostname": {"required": False, "type": "str"},
"show-filter": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_router(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
import os
import posixpath
from enum import Enum
from fastapi import Path, HTTPException
from utils import security
class UploadPath(str, Enum):
default = "default"
UPLOAD_PATH_DICT = {
UploadPath.default: "default/"
}
def get_upload(upload_key: UploadPath = Path(..., description="Upload file chunk location")):
"""
Get the file upload directory
:param upload_key:
:return:
"""
root_path = posixpath.abspath(UPLOAD_PATH_DICT[upload_key])
def func(folder):
path = security.safe_join(root_path, folder)
os.makedirs(path, exist_ok=True)
return path
return func
class DownloadPath(str, Enum):
default = "default"
DOWNLOAD_PATH_DICT = {
DownloadPath.default: "default/"
}
def get_download(download_key: DownloadPath = Path(..., description="Download file chunk location")):
"""
Get the download file path
:param download_key:
:return:
"""
root_path = posixpath.abspath(DOWNLOAD_PATH_DICT[download_key])
def func(folder):
path = security.safe_join(root_path, folder)
if not posixpath.exists(path):
raise HTTPException(404, "The access file does not exist")
for filename in os.listdir(path):
return posixpath.join(path, filename), filename
return func
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Repeatmodeler(Package):
"""RepeatModeler is a de-novo repeat family identification and modeling
package."""
homepage = "http://www.repeatmasker.org/RepeatModeler/"
url = "http://www.repeatmasker.org/RepeatModeler/RepeatModeler-open-1.0.11.tar.gz"
version('1.0.11', sha256='7ff0d588b40f9ad5ce78876f3ab8d2332a20f5128f6357413f741bb7fa172193')
depends_on('perl', type=('build', 'run'))
depends_on('perl-json', type=('build', 'run'))
depends_on('perl-uri', type=('build', 'run'))
depends_on('perl-libwww-perl', type=('build', 'run'))
depends_on('repeatmasker', type='run')
depends_on('recon+repeatmasker', type='run')
depends_on('repeatscout', type='run')
depends_on('trf', type='run')
depends_on('nseg', type='run')
depends_on('ncbi-rmblastn', type='run')
def install(self, spec, prefix):
# like repeatmasker, another interactive installer
# questions:
# 1. <enter to continue>
# 2. <perl path, default is OK>
# 3. <source path, default is OK>
# 4. RepeatMasker bin path
# 5. RECON bin path
# 6. RepeatScout bin path
# 7. Nseg bin path
# 8. trf bin path
# 9. Add a search engine:
# 1. RMBlast -> Path, Default? (Y/N)
# 2. WUBlast/ABBlast -> Path, Default? (Y/N)
# 3. Done
config_answers = [
'', '', '',
spec['repeatmasker'].prefix.bin,
spec['recon'].prefix.bin,
spec['repeatscout'].prefix.bin,
spec['nseg'].prefix.bin,
spec['trf'].prefix.bin,
'1', spec['ncbi-rmblastn'].prefix.bin, 'Y',
'3',
]
config_filename = 'spack-config.in'
with open(config_filename, 'w') as f:
f.write('\n'.join(config_answers))
with open(config_filename, 'r') as f:
perl = which('perl')
perl('configure', input=f)
install_tree('.', prefix.bin)
|
import math
pi = math.pi
raio = float(input('What is the radius of the sphere?: '))
volume_esf = 4/3*pi*math.pow(raio, 3)
litro = 1
lata = litro*5
precolata = 50.00
totaltinta = volume_esf *lata
totalpreco = totaltinta * precolata
print(f'The volume of the sphere is {volume_esf: .2f}')
print(f'The amount of paint needed is {totaltinta} liters of paint')
print(f'The total to pay is: R$ {totalpreco: .2f}')
# STILL IN DOUBT ABOUT THIS ONE
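# Worked check using the script's own formula (assuming a radius of 3, and taking
# the "liters of paint = volume * 5" rule at face value, which may not be
# physically meaningful): volume = 4/3 * pi * 3**3 ≈ 113.10, paint ≈ 565.49 liters,
# total ≈ 565.49 * 50.00 = R$ 28274.33.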
|
import pycurl
import sys, getopt
from StringIO import StringIO
import json
import copy
from importCommon import *
from importNormativeTypes import *
import importCommon
#####################################################################################################################################################################################################
#
# Import all ONAP types from the onap-types import directory
#
# activation :
# python importOnapTypes.py [optional -s <scheme> | --scheme=<scheme>, default http] [-i <be host> | --ip=<be host>] [-p <be port> | --port=<be port>]
#                           [-u <admin user> | --user=<admin user>] [optional -v <true|false> | --updateversion=<true|false>, default true]
#
# shortest activation (be host = localhost, be port = 8080):
# python importOnapTypes.py
#
#####################################################################################################################################################################################################
def importOnapTypes(scheme, beHost, bePort, adminUser, fileDir, updateversion):
#Add desired type names to the list
onapTypes = []
responseCodes = [200, 201]
if(updateversion == 'false'):
responseCodes = [200, 201, 409]
results = []
for onapType in onapTypes:
result = createNormativeType(scheme, beHost, bePort, adminUser, fileDir, onapType, updateversion)
results.append(result)
if ( result[1] == None or result[1] not in responseCodes) :
print "Failed creating heat type " + onapType + ". " + str(result[1])
return results
def main(argv):
print 'Number of arguments:', len(sys.argv), 'arguments.'
beHost = 'localhost'
bePort = '8080'
adminUser = 'jh0003'
updateversion = 'true'
scheme = 'http'
try:
opts, args = getopt.getopt(argv,"i:p:u:v:h:",["ip=","port=","user=","updateversion="])
except getopt.GetoptError:
usage()
errorAndExit(2, 'Invalid input')
for opt, arg in opts:
#print opt, arg
if opt == '-h':
usage()
sys.exit(3)
elif opt in ("-i", "--ip"):
beHost = arg
elif opt in ("-p", "--port"):
bePort = arg
elif opt in ("-u", "--user"):
adminUser = arg
elif opt in ("-s", "--scheme"):
scheme = arg
elif opt in ("-v", "--updateversion"):
if (arg.lower() == "false" or arg.lower() == "no"):
updateversion = 'false'
print 'scheme =',scheme,',be host =',beHost,', be port =', bePort,', user =', adminUser
if ( beHost == None ):
usage()
sys.exit(3)
results = importOnapTypes(scheme, beHost, bePort, adminUser, "../../../import/tosca/onap-types/", updateversion)
print "-----------------------------"
for result in results:
print "{0:20} | {1:6}".format(result[0], result[1])
print "-----------------------------"
responseCodes = [200, 201]
if(updateversion == 'false'):
responseCodes = [200, 201, 409]
failedNormatives = filter(lambda x: x[1] == None or x[1] not in responseCodes, results)
if (len(failedNormatives) > 0):
errorAndExit(1, None)
else:
errorAndExit(0, None)
if __name__ == "__main__":
main(sys.argv[1:])
|
import numpy as np
from seren3 import config
from seren3.halos import Halo, HaloCatalogue
import logging
logger = logging.getLogger('seren3.halos.halos')
class AHFCatalogue(HaloCatalogue):
'''
Class to handle catalogues produced by AHF.
'''
# assume file structure like this
# ID(1) hostHalo(2) numSubStruct(3) Mvir(4) npart(5) Xc(6)
# Yc(7) Zc(8) VXc(9) VYc(10) VZc(11) Rvir(12) Rmax(13)
# r2(14) mbp_offset(15) com_offset(16) Vmax(17) v_esc(18)
# sigV(19) lambda(20) lambdaE(21) Lx(22) Ly(23) Lz(24)
# b(25) c(26) Eax(27) Eay(28) Eaz(29) Ebx(30) Eby(31) Ebz(32) Ecx(33)
# Ecy(34) Ecz(35) ovdens(36) nbins(37) fMhires(38) Ekin(39)
# Epot(40) SurfP(41) Phi0(42) cNFW(43)
# n_gas(44) M_gas(45) lambda_gas(46) lambdaE_gas(47)
# Lx_gas(48) Ly_gas(49) Lz_gas(50) b_gas(51)
# c_gas(52) Eax_gas(53) Eay_gas(54) Eaz_gas(55)
# Ebx_gas(56) Eby_gas(57) Ebz_gas(58) Ecx_gas(59)
# Ecy_gas(60) Ecz_gas(61) Ekin_gas(62) Epot_gas(63)
# n_star(64) M_star(65) lambda_star(66) lambdaE_star(67)
# Lx_star(68) Ly_star(69) Lz_star(70) b_star(71)
# c_star(72) Eax_star(73) Eay_star(74) Eaz_star(75)
# Ebx_star(76) Eby_star(77) Ebz_star(78) Ecx_star(79)
# Ecy_star(80) Ecz_star(81) Ekin_star(82) Epot_star(83)
halo_type = np.dtype([('id', np.int64), ('hosthalo', np.int64), ('numsubstruct', np.int64), ('mvir', 'f'),
('num_p', np.int64), ('pos', 'f', 3), ('vel', 'f', 3),
('rvir', 'f'), ('rmax', 'f'), ('r2', 'f'), ('mpb_offset', 'f'),
('com_offset', 'f'), ('v_max', 'f'), ('v_esc', 'f'), ('sigv', 'f'),
('bullock_spin', 'f'), ('spin', 'f'), ('l', 'f', 3), ('b', 'f'), ('c', 'f'),
('ea', 'f', 3), ('eb', 'f', 3), ('ec', 'f', 3), ('ovdens', 'f'),
('nbins', np.int64), ('fmhires', 'f'), ('ekin', 'f'), ('epot', 'f'),
('surfp', 'f'), ('phiO', 'f'), ('cnfw', 'f'), ('n_gas', np.int64),
('m_gas', 'f'), ('bullock_spin_gas', 'f'), ('spin_gas', 'f'),
('l_gas', 'f', 3), ('b_gas', 'f'), ('c_gas', 'f'),
('ea_gas', 'f', 3), ('eb_gas', 'f', 3), ('ec_gas', 'f', 3),
('ekin_gas', 'f'), ('epot_gas', 'f'), ('n_star', np.int64),
('m_star', 'f'), ('bullock_spin_star', 'f'), ('spin_star', 'f'),
('l_star', 'f', 3), ('b_star', 'f'), ('c_star', 'f'),
('ea_star', 'f', 3), ('eb_star', 'f', 3), ('ec_star', 'f', 3),
('ekin_star', 'f'), ('epot_star', 'f')])
units = {'mvir': 'Msol h**-1',
'pos': 'kpc a h**-1',
'vel': 'km s**-1',
'rvir': 'kpc a h**-1',
'rmax': 'kpc a h**-1',
'r2': 'kpc a h**-1',
'mpb_offset': 'kpc a h**-1',
'com_offset': 'kpc a h**-1',
'v_max': 'km s**-1',
'v_esc': 'km s**-1',
'sigv': 'km s**-1',
'b_to_a': 'kpc a h**-1',
'c_to_a': 'kpc a h**-1',
'ekin': 'Msol h**-1 (km s**-1)**2',
'epot': 'Msol h**-1 (km s**-1)**2',
'surfp': 'Msol h**-1 (km s**-1)**2',
'phiO': '(km s**-1)**2',
'm_gas': 'Msol h**-1',
'b_to_a_gas': 'kpc a h**-1',
'c_to_a_gas': 'kpc a h**-1',
'ekin_gas': 'Msol h**-1 (km s**-1)**2',
'epot_gas': 'Msol h**-1 (km s**-1)**2',
'm_star': 'Msol h**-1',
'b_to_a_star': 'kpc a h**-1',
'c_to_a_star': 'kpc a h**-1',
'ekin_star': 'Msol h**-1 (km s**-1)**2',
'epot_star': 'Msol h**-1 (km s**-1)**2',
}
def __init__(self, pymses_snapshot, filename=None, **kwargs):
super(AHFCatalogue, self).__init__(
pymses_snapshot, "AHF", filename=filename, **kwargs)
################## IMPLEMENT ABSTRACT FUNCTIONS ##################
def gadget_format_exists(self):
'''
Checks if ramses2gadget has been ran
'''
import glob
path = "%s/output_%05d/" % (self.base.path, self.base.ioutput)
return len(glob.glob("%s/ramses2gadget*" % path)) > 0
def run(self, **kwargs):
'''
Run ramses2gadget then AHF
'''
import subprocess, os
from seren3.utils.sge import ncpu
from seren3.utils import which
r2g = which("ramses2gadget")
ahf = which("AHF-v1.0-084")
tasks = []
# Write the config
path = "%s/AHF/%03d/" % (self.base.path, self.base.ioutput)
if os.path.isfile("%s/ahf.input" % path) is False:
if os.path.isfile("%s/ahf.input" % path) is False:
if self.write_cfg(**kwargs):
print "AHFCatalogue wrote a partial(!) config file."
else:
raise Exception("AHFCatalogue unable to write config file!")
# Check if GADGET data exists
print 'GADGET format exists: ', self.gadget_format_exists()
if self.gadget_format_exists() is False:
r2g_mode = kwargs.pop("r2g_mode", "g") # default to sim with gas
# Build the ramses2gadget input_dir
r2g_input_dir = "%s/output_%05d/" % (self.base.path, self.base.ioutput)
# Build exe string
r2g_exe = "{EXE} -{MODE} {INPUT_DIR} | tee {INPUT_DIR}/r2g.log".format(EXE=r2g, MODE=r2g_mode, INPUT_DIR=r2g_input_dir)
tasks.append(r2g_exe)
# Repeat for AHF
ahf_path = "%s/AHF/%03d/" % (self.base.path, self.base.ioutput)
ahf_input_fname = "%s/ahf.input" % ahf_path
if os.path.isdir("%s/halos" % ahf_path) is False:
os.mkdir("%s/halos" % ahf_path)
ahf_exe = "{EXE} {FNAME}".format(EXE=ahf, FNAME=ahf_input_fname)
tasks.append(ahf_exe)
# Run the tasks
NSLOTS = kwargs.get("NSLOTS", int(ncpu() / 4.))
for task in tasks:
mpi_task = "mpirun -np {NSLOTS} {EXE}".format(NSLOTS=NSLOTS, EXE=task)
print mpi_task
subprocess.check_output(mpi_task, shell=True)
subprocess.check_output("cat {AHF_PATH}/halos/*_halos > {AHF_PATH}/halos/all_halos".format(AHF_PATH=ahf_path), shell=True)
super(AHFCatalogue, self).__init__(self.base, "AHF", filename=None, **kwargs)
return True
@property
def ahf_path(self):
return "%s/%03d/halos/" % (self.finder_base_dir, self.base.ioutput)
def get_boxsize(self, **kwargs):
'''
Returns the boxsize, according to AHF, in Mpc a h**-1
'''
import glob
list_files = glob.glob("%s/*.log" % self.ahf_path)
with open(list_files[0], 'r') as f:
while True:
l = f.readline()
if l.startswith('simu.boxsize'):
box_size = float(l.split(':')[1])
return self.base.array(box_size, "Mpc a h**-1")
# def can_load(self, **kwargs):
# '''
# Check if hlist files exist
# '''
# import os
# if os.path.isfile('%s/all_halos' % self.ahf_path) is False:
# path = "%s/AHF/%03d/" % (self.base.path, self.base.ioutput)
# if os.path.isfile("%s/ahf.input" % path) is False:
# if self.write_cfg(**kwargs):
# print "AHFCatalogue wrote a partial(!) config file."
# else:
# raise Exception("AHFCatalogue unable to write config file!")
# else:
# print "AHFCatalogue not found - ahf.input already written!"
# return False
# return True
def can_load(self, **kwargs):
import os
if os.path.isfile("%s/all_halos" % self.ahf_path):
return True, "exists"
else:
return False, "Cannot locate all_halos file"
def get_filename(self, **kwargs):
return "%s/all_halos" % self.ahf_path
def load(self, within_r=None, center=np.array([0.5, 0.5, 0.5]), **kwargs):
# Ensures file is closed at the end. If within_r is specified, it must be in code units
with open(self.filename, 'r') as f:
haloprops = np.loadtxt(f, dtype=self.halo_type, comments="#")
if within_r:
d = np.array([np.sqrt( (center[0] - (h['pos'][0]/self.boxsize/1.e3))**2 + \
(center[1] - (h['pos'][1]/self.boxsize/1.e3))**2 + \
(center[2] - (h['pos'][2]/self.boxsize/1.e3))**2 ) for h in haloprops])
idx = np.where(d <= within_r)
haloprops = haloprops[idx]
self._nhalos = len(haloprops)
self._haloprops = haloprops
#for h in xrange(self._nhalos):
# self._halos[h] = Halo(self._haloprops[h]['id'], self, self._haloprops[h])
def _get_halo(self, item):
haloprops = self._haloprops[item]
return Halo(haloprops, self.base, self.units, self.get_boxsize())
def write_cfg(self, **kwargs):
'''
Internal function to write an appropriate AHF input file
'''
import os
path = "%s/AHF/%03d/" % (self.base.path, self.base.ioutput)
if os.path.isdir(path) is False:
if os.path.isdir("%s/AHF/" % self.base.path) is False:
os.mkdir("%s/AHF/" % self.base.path)
os.mkdir(path)
with open("%s/ahf.input" % path, "w") as f:
f.write("[AHF]\n")
f.write("ic_filename = %s/output_%05d/ramses2gadget_%03d.\n" % (self.base.path, self.base.ioutput, self.base.ioutput))
f.write("ic_filetype = 61\n") # GADGET
f.write("outfile_prefix = %s/AHF/%03d/halos/ahf_\n" % (self.base.path, self.base.ioutput))
LgridDomain = kwargs.pop("LgridDomain", 128)
LgridMax = kwargs.pop("LgridMax", 16777216)
NperDomCell = kwargs.pop("NperDomCell", 5.0)
NperRefCell = kwargs.pop("NperRefCell", 5.0)
VescTune = kwargs.pop("VescTune", 1.5)
NminPerHalo = kwargs.pop("NminPerHalo", 20)
RhoVir = kwargs.pop("RhoVir", 0)
Dvir = kwargs.pop("Dvir", 200)
MaxGatherRad = kwargs.pop("MaxGatherRad", 3.0)
LevelDomainDecomp = kwargs.pop("LevelDomainDecomp", 6)
NcpuReading = kwargs.pop("NcpuReading", 1)
GADGET_LUNIT = kwargs.pop("GADGET_LUNIT", 1e-3)
GADGET_MUNIT = kwargs.pop("GADGET_MUNIT", 1e10)
f.write("LgridDomain = %d\n" % LgridDomain)
f.write("LgridMax = %d\n" % LgridMax)
f.write("NperDomCell = %f\n" % NperDomCell)
f.write("NperRefCell = %f\n" % NperRefCell)
f.write("VescTune = %f\n" % VescTune)
f.write("NminPerHalo = %d\n" % NminPerHalo)
f.write("RhoVir = %f\n" % RhoVir)
f.write("Dvir = %f\n" % Dvir)
f.write("MaxGatherRad = %f\n" % MaxGatherRad)
f.write("LevelDomainDecomp = %d\n" % LevelDomainDecomp)
f.write("NcpuReading = %d\n" % NcpuReading)
f.write("[GADGET]\n")
f.write("GADGET_LUNIT = %e\n" % GADGET_LUNIT)
f.write("GADGET_MUNIT = %e\n" % GADGET_MUNIT)
# Any params we missed
# for key in kwargs.keys():
# f.write("%s = %s\n" % (key, kwargs[key]))
logger.info(
"%sCatalogue wrote a partial(!) config file. Exiting" % self.finder)
return True
class RockstarCatalogue(HaloCatalogue):
'''
Class to handle catalogues produced by Rockstar
Reads the out.list files
'''
# halo_type = np.dtype( [('id', np.int64), ('descid', np.int64), \
# ('mvir', 'f'), ('vmax', 'f'), \
# ('vrms', 'f'), ('rvir', 'f'), \
# ('rs', 'f'), ('np', 'f'), \
# ('pos', 'f', 3), ('vel', 'f', 3), \
# ('J', 'f', 3), ('spin', 'f'), \
# ('rs_klypin', 'f'), ('mvir_all', 'f'), \
# ('m200b', 'f'), ('m200c', 'f'), \
# ('m500c', 'f'), ('m2500c', 'f'), \
# ('r200b', 'f'), ('r200c', 'f'), \
# ('r500c', 'f'), ('r2500c', 'f'), \
# ('xoff', 'f'), ('voff', 'f'), \
# ('spin_bullock', 'f'), ('b_to_a', 'f'), \
# ('c_to_a', 'f'), ('A', 'f', 3), \
# ('b_to_a_500c', 'f'), ('c_to_a_500c', 'f'), \
# ('A500c', 'f', 3), ('T/U', 'f'), \
# ('m_pe_behroozi', 'f'), ('M_pe_Diemer', 'f'), \
# ('halfmass_radius', 'f')] )
halo_type = np.dtype( [('id', np.int64), ('descid', np.int64), \
('mvir', 'f'), ('vmax', 'f'), \
('vrms', 'f'), ('rvir', 'f'), \
('rs', 'f'), ('np', 'f'), \
('pos', 'f', 3), ('vel', 'f', 3), \
('J', 'f', 3), ('spin', 'f'), \
('rs_klypin', 'f'), ('mvir_all', 'f'), \
('m200b', 'f'), ('m200c', 'f'), \
('m500c', 'f'), ('m2500c', 'f'), \
('xoff', 'f'), ('voff', 'f'), \
('spin_bullock', 'f'), ('b_to_a', 'f'), \
('c_to_a', 'f'), ('A', 'f', 3), \
('b_to_a_500c', 'f'), ('c_to_a_500c', 'f'), \
('A500c', 'f', 3), ('T/U', 'f'), \
('m_pe_behroozi', 'f'), ('M_pe_Diemer', 'f'), \
('halfmass_radius', 'f')] )
units = {'sam_mvir': 'Msol h**-1',
'mvir': 'Msol h**-1',
'rvir': 'kpc a h**-1',
'rs': 'kpc a h**-1',
'vrms': 'km s**-1',
'vmax': 'km s**-1',
'pos': 'Mpc a h**-1',
'vel': 'km s**-1',
'J': 'Msol h**-1 Mpc h**-1 km s**-1',
'mvir_all': 'Msol h**-1',
'm200b': 'Msol h**-1',
'm200c': 'Msol h**-1',
'm500c': 'Msol h**-1',
'm2500c': 'Msol h**-1',
'm_alt': 'Msol h**-1',
#'r_alt': 'kpc a h**-1',
'xoff': 'kpc a h**-1',
'voff': 'km s**-1',
'A': 'kpc a h**-1',
'halfmass_r': 'kpc a h**-1',
'macc': 'Msol h**-1',
'mpeak': 'Msol h**-1',
'vacc': 'km s**-1',
'vpeak': 'km s**-1',
'acc_rate_inst': 'Msol h**-1 yr**-1',
'acc_rate_100myr': 'Msol h**-1 100Myr**-1',
'first_acc_mvir': 'Msol h**-1',
'first_acc_vmax': 'km s**-1',
'vmax_at_mpeak': 'km s**-1'}
def __init__(self, pymses_snapshot, **kwargs):
super(RockstarCatalogue, self).__init__(pymses_snapshot, "Rockstar", **kwargs)
def can_load(self, **kwargs):
import os
# return os.path.isdir("%s/%s/" % (self.base.path, config.get("halo", "rockstar_base"))) and os.path.isfile(self.get_rockstar_info_fname())
if os.path.isdir(self.finder_base_dir):
if os.path.isfile(self.get_rockstar_info_fname()):
return True, "exists"
else:
return False, "Cannot locate info file"
else:
return False, "rockstar directory doesn't exist"
def get_rockstar_info_fname(self):
return "%s/info_rockstar.txt" % self.finder_base_dir
def get_filename(self, **kwargs):
'''
Returns the rockstar catalogue filename
'''
rockstar_info_fname = self.get_rockstar_info_fname()
base_aexp = 1./(1. + self.base.z)
if kwargs.get("strict_so", False):
# Used for accurate comparisons of the halo mass function.
# Uses strict spherical-overdensities for mass calculation, instead
# of FOF group.
self.finder_base_dir = "%s/rockstar_strict_so_mass/" % self.base.path
out_num = []
aexp = []
with open(rockstar_info_fname, "r") as f:
for line in f:
split_line = line.split('\t')
out_num.append( int(split_line[0]) )
aexp.append( float(split_line[1]) )
aexp = np.array(aexp)
idx_closest = (np.abs(aexp - base_aexp)).argmin()
out_fname = "out_%i.list" % (out_num[idx_closest])
#print 'RockstarCatalogue: matched to %s' % out_fname
fname = "%s/%s" % (self.finder_base_dir, out_fname)
return fname
def get_boxsize(self, **kwargs):
'''
Returns boxsize according to rockstar
'''
import re
with open(self.filename, 'r') as f:
for line in f:
if line.startswith('#Box size:'):
boxsize = re.findall("\d+\.\d+", line)[0]
return self.base.array(float(boxsize), "Mpc a h**-1") # Mpc a h**-1
def load(self, **kwargs):
# Ensures file is closed at the end
with open(self.filename, 'r') as f:
haloprops = np.loadtxt(f, dtype=self.halo_type, comments="#")
self._nhalos = len(haloprops)
self._haloprops = haloprops
def _get_halo(self, item):
haloprops = self._haloprops[item]
return Halo(haloprops, self.base, self.units, self.get_boxsize())
class ConsistentTreesCatalogue(HaloCatalogue):
halo_type = np.dtype([('aexp', 'f'),
('id', np.int64),
('desc_aexp', 'f'),
('desc_id', 'f'),
('num_prog', np.int64),
('pid', np.int64),
('upid', np.int64),
('desc_pid', np.int64),
('phantom', 'f'),
('sam_mvir', 'f'),
('mvir', 'f'),
('rvir', 'f'),
('rs', 'f'),
('vrms', 'f'),
('mmp', np.int64), # Bool - most massive progenitor
('scale_of_last_mm', 'f'),
('vmax', 'f'),
('pos', 'f', 3),
('vel', 'f', 3),
('J', 'f', 3),
('spin', 'f'),
('breadth_first_id',
np.int64),
('depth_first_id',
np.int64),
('tree_root_id', np.int64),
('orig_halo_id', np.int64),
('snap_num', np.int64),
('next_coprog_depth_first_id',
np.int64),
('last_prog_depth_first_id',
np.int64),
('last_mainlead_depth_first_id',
np.int64),
('tidal_force', 'f'),
('tidal_id', np.int64),
('rs_klypin', 'f'),
('mvir_all', 'f'),
('m_alt', 'f', 4),
#('r_alt', 'f', 4),
('xoff', 'f'),
('voff', 'f'),
('spin_bullock', 'f'),
('b_to_a', 'f'),
('c_to_a', 'f'),
('A', 'f', 3),
('b_to_a_500c', 'f'),
('c_to_a_500c', 'f'),
('A_500c', 'f', 3),
('T/|U|', 'f'),
('m_pe_behroozi', 'f'),
('m_pe_diemer', 'f'),
('halfmass_r', 'f'),
# Consistent Trees Version 1.0 - Mass at accretion
('macc', 'f'),
('mpeak', 'f'),
# Consistent Trees Version 1.0 - Vmax at accretion
('vacc', 'f'),
('vpeak', 'f'),
('halfmass_scale', 'f'),
('acc_rate_inst', 'f'),
('acc_rate_100myr', 'f'),
('acc_rate_1tdyn', 'f'),
('acc_rate_2tdyn', 'f'),
('acc_rate_mpeak', 'f'),
('mpeak_scale', 'f'),
('acc_scale', 'f'),
('first_acc_scale', 'f'),
('first_acc_mvir', 'f'),
('first_acc_vmax', 'f'),
('vmax_at_mpeak', 'f'),
('tidal_force_tdyn', 'f'),
('log_vmax_vmax_tdyn_dmpeak', 'f'),
('time_to_future_merger', 'f'),
('future_merger_mmp_id', 'f')])
units = {
'sam_mvir': 'Msol h**-1',
'mvir': 'Msol h**-1',
'rvir': 'kpc a h**-1',
'rs': 'kpc a h**-1',
'vrms': 'km s**-1',
'vmax': 'km s**-1',
'pos': 'Mpc a h**-1',
'vel': 'km s**-1',
'J': 'Msol h**-1 Mpc h**-1 km s**-1',
'mvir_all': 'Msol h**-1',
'm_alt': 'Msol h**-1',
#'r_alt': 'kpc a h**-1',
'xoff': 'kpc a h**-1',
'voff': 'km s**-1',
'A': 'kpc a h**-1',
'halfmass_r': 'kpc a h**-1',
'macc': 'Msol h**-1',
'mpeak': 'Msol h**-1',
'vacc': 'km s**-1',
'vpeak': 'km s**-1',
'acc_rate_inst': 'Msol h**-1 yr**-1',
'acc_rate_100myr': 'Msol h**-1 100Myr**-1',
'first_acc_mvir': 'Msol h**-1',
'first_acc_vmax': 'km s**-1',
'vmax_at_mpeak': 'km s**-1'
}
def __init__(self, pymses_snapshot, **kwargs):
super(ConsistentTreesCatalogue, self).__init__(pymses_snapshot, "ConsistentTrees", **kwargs)
def can_load(self, **kwargs):
import glob
if len(glob.glob("%s/hlist_*" % self.finder_base_dir)) > 0.:
return True, "exists"
else:
return False, "Unable to locate hlists files"
def get_filename(self, **kwargs):
import glob, math
from seren3.exceptions import CatalogueNotFoundException
# Filename is hlist_aexp.list
# Look through the outputs and find the closest expansion factor
aexp = self.base.cosmo['aexp']
if kwargs.get("strict_so", False):
# Used for accurate comparisons of the halo mass function.
# Uses strict spherical-overdensities for mass calculation, instead
# of FOF group.
self.finder_base_dir = "%s/rockstar_strict_so_mass/hlists/" % self.base.path
# Scan halo files for available expansion factors
outputs = glob.glob( "%s/hlist_*" % (self.finder_base_dir) )
if len(outputs) == 0:
raise IOError("ConsistentTreesCatalogue: No outputs found")
aexp_hlist = np.zeros(len(outputs))
for i in range(len(outputs)):
output = outputs[i]
# Trim the aexp from the string
aexp_hfile = float(output.split('/')[-1][6:-5])
aexp_hlist[i] = aexp_hfile
# Find the closest match
idx = np.argmin(np.abs(aexp_hlist - aexp))
if min(aexp_hlist[idx] / aexp, aexp / aexp_hlist[idx]) < 0.995:
raise CatalogueNotFoundException("Unable to locate catalogue close to this snapshot.\nHlist aexp: %f, Snap aexp: %f" % (aexp_hlist[idx], aexp))
return outputs[idx]
def get_boxsize(self, **kwargs):
'''
Returns boxsize according to rockstar in Mpc a / h
'''
import re
with open(self.filename, 'r') as f:
for line in f:
if line.startswith('#Full box size'):
boxsize = re.findall("\d+\.\d+", line)[0]
return self.base.array(float(boxsize), "Mpc a h**-1") # Mpc a / h
def load(self, **kwargs):
# Ensures file is closed at the end
with open(self.filename, 'r') as f:
haloprops = np.loadtxt(f, dtype=self.halo_type, comments="#")
self._nhalos = len(haloprops)
self._haloprops = haloprops
def _get_halo(self, item):
haloprops = self._haloprops[item]
return Halo(haloprops, self.base, self.units, self.get_boxsize())
@staticmethod
def _find_mmp(hid, prog_halos):
'''
Returns the id for the most massive progenitor
'''
search_key = lambda halos: halos[:]["desc_id"] == hid
progs = prog_halos.search(search_key)
if len(progs) > 1:
mmp_search_key = lambda x: x["mvir"]
progs_sorted = sorted(progs, key=mmp_search_key, reverse=True)
return progs_sorted[0].hid
elif len(progs) == 1:
return progs[0].hid
else:
return None
def find_mmp(self, halo, back_to_iout=None):
'''
Locates the most massive progenitor
'''
from seren3 import load_snapshot
if back_to_iout is None:
back_to_iout = self.base.ioutput-1
hid = halo.hid
ioutputs = range(back_to_iout, self.base.ioutput)[::-1]
last = self.base.ioutput
for iout_prog in ioutputs:
# Start with the previous snapshot, find the most massive progenitor and use that
prog_snap = load_snapshot(self.base.path, iout_prog)
prog_halos = prog_snap.halos(finder='ctrees')
mmp_id = self._find_mmp(hid, prog_halos)
if mmp_id is None:
print 'Unable to find progenitor in output %i.\nReturning last known progenitor (output %i)' % (iout_prog, last)
return hid, prog_halos
else:
hid = mmp_id
last = iout_prog
return hid, prog_halos
def iterate_progenitors(self, halo, back_to_aexp=0., verbose=True):
'''
Iterates through list of progenitors without loading halo catalogues completely
'''
import numpy as np
import glob
from seren3.utils import natural_sort
from seren3.core.simulation import Simulation
outputs = natural_sort(glob.glob("%s/hlist_*" % self.finder_base_dir))
aexp_hlist = np.zeros(len(outputs))
for i in range(len(outputs)):
output = outputs[i]
# Trim aexp from string
aexp_hfile = float(output.split('/')[-1][6:-5])
aexp_hlist[i] = aexp_hfile
idx_start = np.abs( aexp_hlist - self.base.info["aexp"] ).argmin()
idx_end = np.abs( aexp_hlist - back_to_aexp ).argmin()
# print idx_start, idx_end
hid = int(halo.hid)
sim = Simulation(halo.base.path)
aexp_to_z = lambda aexp: (1./aexp) - 1.
z_start = aexp_to_z(aexp_hlist[idx_start])
z_end = aexp_to_z(aexp_hlist[idx_end])
# return idx_start, idx_end, z_start, z_end, outputs, aexp_hlist
iout_start = sim.redshift(z_start)
iout_end = sim.redshift(z_end)
for iout in range(iout_end, iout_start)[::-1]:
isnap = sim[iout]
ihalos = isnap.halos()
mmp_props = None
mmp_mass = 0.
ihalo = None
for i in range(len(ihalos)):
props = ihalos._haloprops[i]
# if (props["desc_id"] == hid) and (props["mvir"] > mmp_mass):
if (props["desc_id"] == hid) and (props["mmp"]):
# This halo is a candidate for mmp
# mmp_props = props
# mmp_mass = props["mvir"]
ihalo = i
break
# if (mmp_props != None):
# yield Halo(mmp_props, isnap, self.units, self.get_boxsize()) # the mmp
# hid = int(mmp_props["id"])
if (ihalo != None):
iprog = ihalos[ihalo]
yield iprog
hid = int(iprog["id"])
else:
if (verbose):
print "No descentent found - exiting"
break
# Loop through hlists in reverse and locate progenitors
# for i in range(idx_end, idx_start+1)[::-1]:
# mmp_props = None
# mmp_mass = 0.
# with open( outputs[i], "r" ) as f:
# # print outputs[i]
# haloprops = np.loadtxt(f, dtype=self.halo_type, comments="#")
# for props in haloprops:
# # if (props["desc_id"] == hid) and (props["mvir"] > mmp_mass):
# if (props["desc_id"] == hid) and (props["mmp"]):
# # This halo is a candidate for mmp
# mmp_props = props
# mmp_mass = props["mvir"]
# if (mmp_props != None):
# sim = Simulation(halo.base.path)
# # print aexp_hlist[::-1][i]
# # z = (1./aexp_hlist[::-1][i]) - 1.
# z = (1./aexp_hlist[i]) - 1.
# prog_snap = sim[sim.redshift(z)]
# yield Halo(mmp_props, prog_snap, self.units, self.get_boxsize()) # the mmp
# hid = int(mmp_props["id"])
# else:
# if (verbose):
# print "No descentent found - exiting"
# break
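# Hedged usage sketch (the snapshot object, halo index and printed field are
# illustrative, not taken from this repo's tests):
# cat = snapshot.halos(finder="ctrees")
# halo = cat[0]
# for prog in cat.iterate_progenitors(halo, back_to_aexp=0.1):
#     print prog["mvir"]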
|
from datetime import datetime, timezone
from http import HTTPStatus
from json import dumps, load
from logging import getLogger
from os import environ
from unittest.mock import MagicMock, patch
from mypy_boto3_events import EventBridgeClient
from mypy_boto3_lambda import LambdaClient
from mypy_boto3_sns.type_defs import MessageAttributeValueTypeDef
from pytest import mark
from pytest_subtests import SubTests
from backend.api_keys import EVENT_KEY
from backend.api_responses import STATUS_CODE_KEY
from backend.aws_message_attributes import DATA_TYPE_STRING
from backend.notify_status_update.task import (
EVENT_DETAIL_KEY,
MESSAGE_ATTRIBUTE_DATASET_KEY,
MESSAGE_ATTRIBUTE_STATUS_KEY,
SLACK_URL_ENV_NAME,
STEP_FUNCTION_ARN_KEY,
STEP_FUNCTION_STARTDATE_KEY,
STEP_FUNCTION_STOPDATE_KEY,
WEBHOOK_MESSAGE_BLOCKS_KEY,
lambda_handler,
publish_sns_message,
)
from backend.resources import ResourceName
from backend.step_function import Outcome
from backend.step_function_keys import (
ASSET_UPLOAD_KEY,
DATASET_ID_KEY,
DATASET_PREFIX_KEY,
ERRORS_KEY,
INPUT_KEY,
JOB_STATUS_FAILED,
JOB_STATUS_RUNNING,
JOB_STATUS_SUCCEEDED,
METADATA_UPLOAD_KEY,
NEW_VERSION_S3_LOCATION,
OUTPUT_KEY,
STATUS_KEY,
STEP_FUNCTION_KEY,
UPDATE_DATASET_KEY,
UPLOAD_STATUS_KEY,
VALIDATION_KEY,
VERSION_ID_KEY,
)
from .aws_utils import any_arn_formatted_string, any_lambda_context, any_s3_url
from .general_generators import any_https_url
from .stac_generators import any_dataset_id, any_dataset_prefix, any_dataset_version_id
STEP_FUNCTION_START_MILLISECOND_TIMESTAMP = round(
datetime(
2001, 2, 3, hour=4, minute=5, second=6, microsecond=789876, tzinfo=timezone.utc
).timestamp()
* 1000
)
STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP = STEP_FUNCTION_START_MILLISECOND_TIMESTAMP + 10
@patch("backend.notify_status_update.task.WebhookClient.send")
@patch("backend.notify_status_update.task.get_import_status_given_arn")
def should_notify_slack_with_finished_details_when_url_set(
step_func_status_mock: MagicMock, webhook_client_mock: MagicMock
) -> None:
# Given
webhook_client_mock.return_value.status_code = HTTPStatus.OK
step_func_status_mock.return_value = {
STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_SUCCEEDED},
VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
}
mock_slack_url = any_https_url()
with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch(
"backend.notify_status_update.task.publish_sns_message"
):
# When
notify_status_update_input = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_SUCCEEDED,
STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(),
INPUT_KEY: dumps(
{
DATASET_ID_KEY: any_dataset_id(),
DATASET_PREFIX_KEY: any_dataset_prefix(),
VERSION_ID_KEY: any_dataset_version_id(),
}
),
OUTPUT_KEY: dumps(
{
UPLOAD_STATUS_KEY: {
VALIDATION_KEY: "",
ASSET_UPLOAD_KEY: "",
METADATA_UPLOAD_KEY: "",
},
UPDATE_DATASET_KEY: {NEW_VERSION_S3_LOCATION: any_s3_url()},
}
),
STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP,
STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP,
}
}
lambda_handler(notify_status_update_input, any_lambda_context())
# Then assert there are 15 slack_sdk message 'blocks' sent to webhook url
webhook_client_mock.assert_called_once()
assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 15
@patch("backend.notify_status_update.task.WebhookClient.send")
def should_not_notify_slack_when_step_function_running(webhook_client_mock: MagicMock) -> None:
# Given
webhook_client_mock.return_value.status_code = HTTPStatus.OK
mock_slack_url = any_https_url()
with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch(
"backend.notify_status_update.task.publish_sns_message"
):
# When
notify_status_update_input = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_RUNNING,
STEP_FUNCTION_STOPDATE_KEY: None,
}
}
lambda_handler(notify_status_update_input, any_lambda_context())
# Then
webhook_client_mock.assert_not_called()
@patch("backend.notify_status_update.task.WebhookClient.send")
@patch("backend.notify_status_update.task.get_import_status_given_arn")
def should_notify_slack_when_step_function_failed(
step_func_status_mock: MagicMock, webhook_client_mock: MagicMock
) -> None:
# Given
webhook_client_mock.return_value.status_code = HTTPStatus.OK
mock_slack_url = any_https_url()
step_func_status_mock.return_value = {
STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_FAILED},
VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
}
with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch(
"backend.notify_status_update.task.publish_sns_message"
):
# When
notify_status_update_input = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_FAILED,
STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(),
INPUT_KEY: dumps(
{
DATASET_ID_KEY: any_dataset_id(),
DATASET_PREFIX_KEY: any_dataset_prefix(),
VERSION_ID_KEY: any_dataset_version_id(),
}
),
STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP,
STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP,
},
OUTPUT_KEY: None,
}
lambda_handler(notify_status_update_input, any_lambda_context())
# Then assert there are 13 slack_sdk message 'blocks' sent to webhook url
webhook_client_mock.assert_called_once()
assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 13
@patch("backend.notify_status_update.task.WebhookClient.send")
def should_log_and_not_post_to_slack_when_url_not_set(
webhook_client_mock: MagicMock, subtests: SubTests
) -> None:
# Given
logger = getLogger("backend.notify_status_update.task")
with patch("backend.notify_status_update.task.publish_sns_message"), patch.object(
logger, "debug"
) as logger_mock:
# When
lambda_handler({}, any_lambda_context())
# Then
with subtests.test("no slack message"):
assert not webhook_client_mock.called
with subtests.test("log created"):
expected_log = dumps({EVENT_KEY: {}})
logger_mock.assert_any_call(expected_log)
@patch("backend.notify_status_update.task.get_param")
def should_publish_sns_message(get_param_mock: MagicMock) -> None:
# Given
get_param_mock.return_value = topic_arn = any_arn_formatted_string()
dataset_prefix = any_dataset_prefix()
publish_sns_message_input = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_SUCCEEDED,
INPUT_KEY: dumps(
{
DATASET_PREFIX_KEY: dataset_prefix,
}
),
}
}
expected_sns_call = {
"TopicArn": topic_arn,
"Message": dumps(publish_sns_message_input),
"MessageAttributes": {
MESSAGE_ATTRIBUTE_DATASET_KEY: MessageAttributeValueTypeDef(
DataType=DATA_TYPE_STRING, StringValue=dataset_prefix
),
MESSAGE_ATTRIBUTE_STATUS_KEY: MessageAttributeValueTypeDef(
DataType=DATA_TYPE_STRING, StringValue=JOB_STATUS_SUCCEEDED
),
},
}
# When
with patch("backend.notify_status_update.task.SNS_CLIENT.publish") as sns_client_mock:
publish_sns_message(publish_sns_message_input)
# Then
assert sns_client_mock.call_args[1] == expected_sns_call
@mark.infrastructure
def should_launch_notify_slack_endpoint_lambda_function(
lambda_client: LambdaClient, events_client: EventBridgeClient
) -> None:
notify_status_lambda_arn = events_client.list_targets_by_rule(
Rule=ResourceName.CLOUDWATCH_RULE_NAME.value
)["Targets"][0]["Arn"]
# When
body = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_FAILED,
INPUT_KEY: dumps(
{
DATASET_ID_KEY: any_dataset_id(),
DATASET_PREFIX_KEY: any_dataset_prefix(),
}
),
},
OUTPUT_KEY: None,
}
resp = load(
lambda_client.invoke(
FunctionName=notify_status_lambda_arn,
Payload=dumps(body).encode(),
)["Payload"]
)
assert resp.get(STATUS_CODE_KEY) == HTTPStatus.OK, resp
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 08:55:39 2020
@author: rolly
"""
import config
from twilio.rest import Client
#https://api.whatsapp.com/send?phone=14155238886&text=join%20actual-nor&source=&data=
def sendMsg(num,msg):
client = Client(config.account_sid, config.auth_token)
message = client.messages.create(
to="whatsapp:+"+num,
from_="whatsapp:+14155238886",
body=msg)
print(message.sid)
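# Hedged usage sketch (the phone number is a placeholder in international digits,
# not a real recipient; credentials come from config.py):
# sendMsg('6281234567890', 'Hello from the Twilio WhatsApp sandbox')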
|
import logging
from typing import Any, Dict, List, Optional, Union
from ..models import SnsModel
from ..types import Model
from .base import BaseEnvelope
logger = logging.getLogger(__name__)
class SnsEnvelope(BaseEnvelope):
"""SNS Envelope to extract array of Records
The record's body parameter is a string, though it can also be a JSON encoded string.
Regardless of its type it'll be parsed into a BaseModel object.
Note: Records will be parsed the same way, so if model is str,
all items in the list will be parsed as str and not as JSON (and vice versa)
"""
def parse(self, data: Optional[Union[Dict[str, Any], Any]], model: Model) -> List[Optional[Model]]:
"""Parses records found with model provided
Parameters
----------
data : Dict
Lambda event to be parsed
model : Model
Data model provided to parse after extracting data using envelope
Returns
-------
List
List of records parsed with model provided
"""
logger.debug(f"Parsing incoming data with SNS model {SnsModel}")
parsed_envelope = SnsModel.parse_obj(data)
output = []
logger.debug(f"Parsing SNS records in `body` with {model}")
for record in parsed_envelope.Records:
output.append(self._parse(data=record.Sns.Message, model=model))
return output
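# Hedged usage sketch (the model and event below are illustrative, not part of
# this module):
#
# from pydantic import BaseModel
#
# class OrderModel(BaseModel):
#     order_id: str
#
# records = SnsEnvelope().parse(data=sns_event, model=OrderModel)
# # -> list of OrderModel instances, one per SNS record's Message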
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_datacenter
short_description: Manage VMware vSphere Datacenters
description:
- This module can be used to manage (create, delete) VMware vSphere Datacenters.
version_added: 2.0
author:
- Joseph Callen (@jcpowermac)
- Kamil Szczygiel (@kamsz)
notes:
- Tested on vSphere 6.0, 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter the cluster will be created in.
required: True
state:
description:
- If the datacenter should be present or absent.
choices: [ present, absent ]
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create Datacenter
vmware_datacenter:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
state: present
delegate_to: localhost
- name: Delete Datacenter
vmware_datacenter:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
state: absent
delegate_to: localhost
register: datacenter_delete_result
'''
RETURN = """#
"""
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, find_datacenter_by_name, vmware_argument_spec, wait_for_task
from ansible.module_utils._text import to_native
class VmwareDatacenterManager(PyVmomi):
def __init__(self, module):
super(VmwareDatacenterManager, self).__init__(module)
self.datacenter_name = self.params.get('datacenter_name')
self.datacenter_obj = self.get_datacenter()
def ensure(self):
state = self.module.params.get('state')
if state == 'present':
self.create_datacenter()
if state == 'absent':
self.destroy_datacenter()
def get_datacenter(self):
try:
datacenter_obj = find_datacenter_by_name(self.content, self.datacenter_name)
return datacenter_obj
except (vmodl.MethodFault, vmodl.RuntimeFault) as runtime_fault:
self.module.fail_json(msg="Failed to get datacenter '%s'"
" due to : %s" % (self.datacenter_name,
to_native(runtime_fault.msg)))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to get datacenter"
" '%s' due to generic error: %s" % (self.datacenter_name,
to_native(generic_exc)))
def create_datacenter(self):
folder = self.content.rootFolder
changed = False
try:
if not self.datacenter_obj and not self.module.check_mode:
changed = True
folder.CreateDatacenter(name=self.datacenter_name)
self.module.exit_json(changed=changed)
except vim.fault.DuplicateName as duplicate_name:
self.module.exit_json(changed=changed)
except vim.fault.InvalidName as invalid_name:
self.module.fail_json(msg="Specified datacenter name '%s' is an"
" invalid name : %s" % (self.datacenter_name,
to_native(invalid_name.msg)))
except vmodl.fault.NotSupported as not_supported:
# This should never happen
self.module.fail_json(msg="Trying to create a datacenter '%s' on"
" an incorrect folder object : %s" % (self.datacenter_name,
to_native(not_supported.msg)))
except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
self.module.fail_json(msg="Failed to create a datacenter"
" '%s' due to : %s" % (self.datacenter_name,
to_native(runtime_fault.msg)))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to create a datacenter"
" '%s' due to generic error: %s" % (self.datacenter_name,
to_native(generic_exc)))
def destroy_datacenter(self):
results = dict(changed=False)
try:
if self.datacenter_obj and not self.module.check_mode:
task = self.datacenter_obj.Destroy_Task()
changed, result = wait_for_task(task)
results['changed'] = changed
results['result'] = result
self.module.exit_json(**results)
except (vim.fault.VimFault, vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
self.module.fail_json(msg="Failed to delete a datacenter"
" '%s' due to : %s" % (self.datacenter_name,
to_native(runtime_fault.msg)))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to delete a datacenter"
" '%s' due to generic error: %s" % (self.datacenter_name,
to_native(generic_exc)))
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
dict(
datacenter_name=dict(required=True, type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
vmware_datacenter_mgr = VmwareDatacenterManager(module)
vmware_datacenter_mgr.ensure()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=190)),
('body', models.TextField()),
('slug', models.SlugField(max_length=190)),
('status', models.IntegerField(default=0, choices=[(0, b'Draft'), (1, b'Published')])),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(related_name=b'blog_article_author', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_at'],
},
bases=(models.Model,),
),
]
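# Hedged sketch (illustrative addition, not part of the migration): the Article model
# that this initial migration corresponds to, reconstructed from the field list above
# under Django 1.x conventions. The app layout and import location are assumptions;
# kept as a comment so the migration module stays import-safe.
#
#     from django.conf import settings
#     from django.db import models
#     from django.utils import timezone
#
#     class Article(models.Model):
#         STATUS_CHOICES = ((0, 'Draft'), (1, 'Published'))
#
#         title = models.CharField(max_length=190)
#         body = models.TextField()
#         slug = models.SlugField(max_length=190)
#         status = models.IntegerField(default=0, choices=STATUS_CHOICES)
#         created_at = models.DateTimeField(default=timezone.now)
#         author = models.ForeignKey(settings.AUTH_USER_MODEL,
#                                    related_name='blog_article_author')
#
#         class Meta:
#             ordering = ['-created_at']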
|
#!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Auxiliary module for testing gflags.py.
The purpose of this module is to define a few flags. We want to make
sure the unit tests for gflags.py involve more than one module.
"""
__author__ = 'salcianu@google.com (Alex Salcianu)'
import gflags
from gflags import _helpers
FLAGS = gflags.FLAGS
def DefineFlags(flag_values=FLAGS):
"""Defines some flags.
Args:
flag_values: The FlagValues object we want to register the flags
with.
"""
# The 'tmod_bar_' prefix (short for 'test_module_bar') ensures there
# is no name clash with the existing flags.
gflags.DEFINE_boolean('tmod_bar_x', True, 'Boolean flag.',
flag_values=flag_values)
gflags.DEFINE_string('tmod_bar_y', 'default', 'String flag.',
flag_values=flag_values)
gflags.DEFINE_boolean('tmod_bar_z', False,
'Another boolean flag from module bar.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_bar_t', 4, 'Sample int flag.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_bar_u', 5, 'Sample int flag.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_bar_v', 6, 'Sample int flag.',
flag_values=flag_values)
def RemoveOneFlag(flag_name, flag_values=FLAGS):
"""Removes the definition of one flag from gflags.FLAGS.
Note: if the flag is not defined in gflags.FLAGS, this function does
not do anything (in particular, it does not raise any exception).
Motivation: We use this function for cleanup *after* a test: if
there was a failure during a test and not all flags were declared,
we do not want the cleanup code to crash.
Args:
flag_name: A string, the name of the flag to delete.
flag_values: The FlagValues object we remove the flag from.
"""
if flag_name in flag_values.FlagDict():
flag_values.__delattr__(flag_name)
def NamesOfDefinedFlags():
"""Returns: List of names of the flags declared in this module."""
return ['tmod_bar_x',
'tmod_bar_y',
'tmod_bar_z',
'tmod_bar_t',
'tmod_bar_u',
'tmod_bar_v']
def RemoveFlags(flag_values=FLAGS):
"""Deletes the flag definitions done by the above DefineFlags().
Args:
flag_values: The FlagValues object we remove the flags from.
"""
for flag_name in NamesOfDefinedFlags():
RemoveOneFlag(flag_name, flag_values=flag_values)
def GetModuleName():
"""Uses GetCallingModule() to return the name of this module.
For checking that _GetCallingModule works as expected.
Returns:
A string, the name of this module.
"""
return _helpers.GetCallingModule()
def ExecuteCode(code, global_dict):
"""Executes some code in a given global environment.
For testing of _GetCallingModule.
Args:
code: A string, the code to be executed.
global_dict: A dictionary, the global environment that code should
be executed in.
"""
# Indeed, using exec generates a lint warning. But some user code
# actually uses exec, and we have to test for it ...
exec(code, global_dict) # pylint: disable=exec-used
def DisclaimKeyFlags():
"""Disclaims flags declared in this module."""
gflags.DISCLAIM_key_flags()
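# Hedged usage sketch (illustrative addition, not part of the helper module): how a test
# typically drives DefineFlags/RemoveFlags. The scratch FlagValues instance and the sample
# command line below are assumptions for illustration; tests may equally use gflags.FLAGS.
if __name__ == '__main__':
    scratch_flags = gflags.FlagValues()
    DefineFlags(flag_values=scratch_flags)
    # Parse a fake command line against the scratch FlagValues object.
    scratch_flags(['argv0', '--tmod_bar_t=7', '--notmod_bar_x'])
    assert scratch_flags.tmod_bar_t == 7
    assert scratch_flags.tmod_bar_x is False
    # Clean up so repeated definitions do not clash on flag names.
    RemoveFlags(flag_values=scratch_flags)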
|
from pluto.control.modes import mode
from pluto.control.modes.processes import process_manager
from protos import broker_pb2_grpc
class LiveControlMode(mode.ControlCommandHandler):
def __init__(self, server, framework_url, process_factory):
super(LiveControlMode, self).__init__(framework_url, process_factory)
broker_pb2_grpc.add_BrokerServicer_to_server(self._broker, server)
def _create_process_manager(self):
return process_manager.LiveProcessManager()
def _accept_loop(self, loop):
# todo: only accept LiveLoop type or subtypes
return False
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'err-stackstorm'
copyright = '2019, err-stackstorm contributors'
author = 'err-stackstorm contributors'
# The full version, including alpha/beta/rc tags
release = '2.1.4'
# -- General configuration ---------------------------------------------------
master_doc = "index"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
## @file
# This file is used to define each component of INF file
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import EdkLogger
from CommonDataClass.CommonClass import LibraryClassClass
from CommonDataClass.ModuleClass import *
from String import *
from DataType import *
from Identification import *
from Dictionary import *
from BuildToolError import *
from Misc import sdict
import GlobalData
from Table.TableInf import TableInf
import Database
from Parsing import *
from Common.LongFilePathSupport import OpenLongFilePath as open
#
# Global variable
#
Section = {TAB_UNKNOWN.upper() : MODEL_UNKNOWN,
TAB_INF_DEFINES.upper() : MODEL_META_DATA_HEADER,
TAB_BUILD_OPTIONS.upper() : MODEL_META_DATA_BUILD_OPTION,
TAB_INCLUDES.upper() : MODEL_EFI_INCLUDE,
TAB_LIBRARIES.upper() : MODEL_EFI_LIBRARY_INSTANCE,
TAB_LIBRARY_CLASSES.upper() : MODEL_EFI_LIBRARY_CLASS,
TAB_PACKAGES.upper() : MODEL_META_DATA_PACKAGE,
TAB_NMAKE.upper() : MODEL_META_DATA_NMAKE,
TAB_INF_FIXED_PCD.upper() : MODEL_PCD_FIXED_AT_BUILD,
TAB_INF_PATCH_PCD.upper() : MODEL_PCD_PATCHABLE_IN_MODULE,
TAB_INF_FEATURE_PCD.upper() : MODEL_PCD_FEATURE_FLAG,
TAB_INF_PCD_EX.upper() : MODEL_PCD_DYNAMIC_EX,
TAB_INF_PCD.upper() : MODEL_PCD_DYNAMIC,
TAB_SOURCES.upper() : MODEL_EFI_SOURCE_FILE,
TAB_GUIDS.upper() : MODEL_EFI_GUID,
TAB_PROTOCOLS.upper() : MODEL_EFI_PROTOCOL,
TAB_PPIS.upper() : MODEL_EFI_PPI,
TAB_DEPEX.upper() : MODEL_EFI_DEPEX,
TAB_BINARIES.upper() : MODEL_EFI_BINARY_FILE,
TAB_USER_EXTENSIONS.upper() : MODEL_META_DATA_USER_EXTENSION
}
gComponentType2ModuleType = {
"LIBRARY" : "BASE",
"SECURITY_CORE" : "SEC",
"PEI_CORE" : "PEI_CORE",
"COMBINED_PEIM_DRIVER" : "PEIM",
"PIC_PEIM" : "PEIM",
"RELOCATABLE_PEIM" : "PEIM",
"PE32_PEIM" : "PEIM",
"BS_DRIVER" : "DXE_DRIVER",
"RT_DRIVER" : "DXE_RUNTIME_DRIVER",
"SAL_RT_DRIVER" : "DXE_SAL_DRIVER",
"APPLICATION" : "UEFI_APPLICATION",
"LOGO" : "BASE",
}
gNmakeFlagPattern = re.compile("(?:EBC_)?([A-Z]+)_(?:STD_|PROJ_|ARCH_)?FLAGS(?:_DLL|_ASL|_EXE)?", re.UNICODE)
gNmakeFlagName2ToolCode = {
"C" : "CC",
"LIB" : "SLINK",
"LINK" : "DLINK",
}
class InfHeader(ModuleHeaderClass):
_Mapping_ = {
#
# Required Fields
#
TAB_INF_DEFINES_BASE_NAME : "Name",
TAB_INF_DEFINES_FILE_GUID : "Guid",
TAB_INF_DEFINES_MODULE_TYPE : "ModuleType",
TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION : "UefiSpecificationVersion",
TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION : "UefiSpecificationVersion",
TAB_INF_DEFINES_EDK_RELEASE_VERSION : "EdkReleaseVersion",
#
# Optional Fields
#
TAB_INF_DEFINES_INF_VERSION : "InfVersion",
TAB_INF_DEFINES_BINARY_MODULE : "BinaryModule",
TAB_INF_DEFINES_COMPONENT_TYPE : "ComponentType",
TAB_INF_DEFINES_MAKEFILE_NAME : "MakefileName",
TAB_INF_DEFINES_BUILD_NUMBER : "BuildNumber",
TAB_INF_DEFINES_BUILD_TYPE : "BuildType",
TAB_INF_DEFINES_FFS_EXT : "FfsExt",
TAB_INF_DEFINES_FV_EXT : "FvExt",
TAB_INF_DEFINES_SOURCE_FV : "SourceFv",
TAB_INF_DEFINES_VERSION_NUMBER : "VersionNumber",
TAB_INF_DEFINES_VERSION_STRING : "VersionString",
TAB_INF_DEFINES_VERSION : "Version",
TAB_INF_DEFINES_PCD_IS_DRIVER : "PcdIsDriver",
TAB_INF_DEFINES_TIANO_EDK_FLASHMAP_H : "TianoEdkFlashMap_h",
TAB_INF_DEFINES_SHADOW : "Shadow",
# TAB_INF_DEFINES_LIBRARY_CLASS : "LibraryClass",
# TAB_INF_DEFINES_ENTRY_POINT : "ExternImages",
# TAB_INF_DEFINES_UNLOAD_IMAGE : "ExternImages",
# TAB_INF_DEFINES_CONSTRUCTOR : ,
# TAB_INF_DEFINES_DESTRUCTOR : ,
# TAB_INF_DEFINES_DEFINE : "Define",
# TAB_INF_DEFINES_SPEC : "Specification",
# TAB_INF_DEFINES_CUSTOM_MAKEFILE : "CustomMakefile",
# TAB_INF_DEFINES_MACRO :
}
def __init__(self):
ModuleHeaderClass.__init__(self)
self.VersionNumber = ''
self.VersionString = ''
#print self.__dict__
def __setitem__(self, key, value):
self.__dict__[self._Mapping_[key]] = value
def __getitem__(self, key):
return self.__dict__[self._Mapping_[key]]
## "in" test support
def __contains__(self, key):
return key in self._Mapping_
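## Hedged usage sketch (illustrative addition, not part of the class): the _Mapping_ table
## lets the parser address an InfHeader like a dictionary keyed by [Defines] key names while
## the values land on plain attributes; kept as a comment because the TAB_* keys come from
## DataType and nothing here should execute at import time.
#
#     header = InfHeader()
#     if TAB_INF_DEFINES_BASE_NAME in header:                # __contains__ checks _Mapping_
#         header[TAB_INF_DEFINES_BASE_NAME] = 'HelloWorld'   # __setitem__ -> header.Name
#     assert header.Name == header[TAB_INF_DEFINES_BASE_NAME]  # __getitem__ reads it back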
## InfObject
#
# This class defined basic Inf object which is used by inheriting
#
# @param object: Inherited from object class
#
class InfObject(object):
def __init__(self):
object.__init__(self)
## Inf
#
# This class defined the structure used in Inf object
#
# @param InfObject: Inherited from InfObject class
# @param Filename: Input value for Filename of Inf file, default is None
# @param IsMergeAllArches: Input value for IsMergeAllArches
# True is to merge all arches
# False is not to merge all arches
# default is False
# @param IsToModule: Input value for IsToModule
# True is to transfer to ModuleObject automatically
# False is not to transfer to ModuleObject automatically
# default is False
# @param WorkspaceDir: Input value for current workspace directory, default is None
#
# @var Identification: To store value for Identification, it is a structure as Identification
# @var UserExtensions: To store value for UserExtensions
# @var Module: To store value for Module, it is a structure as ModuleClass
# @var WorkspaceDir: To store value for WorkspaceDir
# @var KeyList: To store value for KeyList, a list for all Keys used in Inf
#
class Inf(InfObject):
def __init__(self, Filename=None, IsToDatabase=False, IsToModule=False, WorkspaceDir=None, Database=None, SupArchList=DataType.ARCH_LIST):
self.Identification = Identification()
self.Module = ModuleClass()
self.UserExtensions = ''
self.WorkspaceDir = WorkspaceDir
self.SupArchList = SupArchList
self.IsToDatabase = IsToDatabase
self.Cur = Database.Cur
self.TblFile = Database.TblFile
self.TblInf = Database.TblInf
self.FileID = -1
#self.TblInf = TableInf(Database.Cur)
self.KeyList = [
TAB_SOURCES, TAB_BUILD_OPTIONS, TAB_BINARIES, TAB_INCLUDES, TAB_GUIDS,
TAB_PROTOCOLS, TAB_PPIS, TAB_LIBRARY_CLASSES, TAB_PACKAGES, TAB_LIBRARIES,
TAB_INF_FIXED_PCD, TAB_INF_PATCH_PCD, TAB_INF_FEATURE_PCD, TAB_INF_PCD,
TAB_INF_PCD_EX, TAB_DEPEX, TAB_NMAKE, TAB_INF_DEFINES
]
#
# Uppercase all keys to make section parsing case-insensitive
#
self.KeyList = map(lambda c: c.upper(), self.KeyList)
#
# Init RecordSet
#
self.RecordSet = {}
for Key in self.KeyList:
self.RecordSet[Section[Key]] = []
#
# Load Inf file if filename is not None
#
if Filename is not None:
self.LoadInfFile(Filename)
#
# Transfer to Module Object if IsToModule is True
#
if IsToModule:
self.InfToModule()
## Transfer to Module Object
#
# Transfer all contents of an Inf file to a standard Module Object
#
def InfToModule(self):
#
# Init global information for the file
#
ContainerFile = self.Identification.FileFullPath
#
# Generate Package Header
#
self.GenModuleHeader(ContainerFile)
#
# Generate BuildOptions
#
self.GenBuildOptions(ContainerFile)
#
# Generate Includes
#
self.GenIncludes(ContainerFile)
#
# Generate Libraries
#
self.GenLibraries(ContainerFile)
#
# Generate LibraryClasses
#
self.GenLibraryClasses(ContainerFile)
#
# Generate Packages
#
self.GenPackages(ContainerFile)
#
# Generate Nmakes
#
self.GenNmakes(ContainerFile)
#
# Generate Pcds
#
self.GenPcds(ContainerFile)
#
# Generate Sources
#
self.GenSources(ContainerFile)
#
# Generate UserExtensions
#
self.GenUserExtensions(ContainerFile)
#
# Generate Guids
#
self.GenGuidProtocolPpis(DataType.TAB_GUIDS, ContainerFile)
#
# Generate Protocols
#
self.GenGuidProtocolPpis(DataType.TAB_PROTOCOLS, ContainerFile)
#
# Generate Ppis
#
self.GenGuidProtocolPpis(DataType.TAB_PPIS, ContainerFile)
#
# Generate Depexes
#
self.GenDepexes(ContainerFile)
#
# Generate Binaries
#
self.GenBinaries(ContainerFile)
## Parse [Defines] section
#
# Parse [Defines] section into InfDefines object
#
# @param InfFile The path of the INF file
# @param Section The title of "Defines" section
# @param Lines The content of "Defines" section
#
def ParseDefines(self, InfFile, Section, Lines):
TokenList = Section.split(TAB_SPLIT)
if len(TokenList) == 3:
RaiseParserError(Section, "Defines", InfFile, "[xx.yy.%s] format (with platform) is not supported")
if len(TokenList) == 2:
Arch = TokenList[1].upper()
else:
Arch = TAB_ARCH_COMMON
if Arch not in self.Defines:
self.Defines[Arch] = InfDefines()
GetSingleValueOfKeyFromLines(Lines, self.Defines[Arch].DefinesDictionary,
TAB_COMMENT_SPLIT, TAB_EQUAL_SPLIT, False, None)
## Load Inf file
#
# Load the file if it exists
#
# @param Filename: Input value for filename of Inf file
#
def LoadInfFile(self, Filename):
#
# Insert a record for file
#
Filename = NormPath(Filename)
self.Identification.FileFullPath = Filename
(self.Identification.FileRelativePath, self.Identification.FileName) = os.path.split(Filename)
self.FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_INF)
#
# Init InfTable
#
#self.TblInf.Table = "Inf%s" % self.FileID
#self.TblInf.Create()
#
# Init common data
#
IfDefList, SectionItemList, CurrentSection, ArchList, ThirdList, IncludeFiles = \
[], [], TAB_UNKNOWN, [], [], []
LineNo = 0
#
# Parse file content
#
IsFindBlockComment = False
ReservedLine = ''
for Line in open(Filename, 'r'):
LineNo = LineNo + 1
#
# Remove comment block
#
if Line.find(TAB_COMMENT_EDK_START) > -1:
ReservedLine = GetSplitList(Line, TAB_COMMENT_EDK_START, 1)[0]
IsFindBlockComment = True
if Line.find(TAB_COMMENT_EDK_END) > -1:
Line = ReservedLine + GetSplitList(Line, TAB_COMMENT_EDK_END, 1)[1]
ReservedLine = ''
IsFindBlockComment = False
if IsFindBlockComment:
continue
#
# Remove comments at tail and remove spaces again
#
Line = CleanString(Line)
if Line == '':
continue
#
# Find a new section tab
# First insert previous section items
# And then parse the content of the new section
#
if Line.startswith(TAB_SECTION_START) and Line.endswith(TAB_SECTION_END):
if Line[1:3] == "--":
continue
Model = Section[CurrentSection.upper()]
#
# Insert items data of previous section
#
InsertSectionItemsIntoDatabase(self.TblInf, self.FileID, Filename, Model, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList, self.RecordSet)
#
# Parse the new section
#
SectionItemList = []
ArchList = []
ThirdList = []
CurrentSection = ''
LineList = GetSplitValueList(Line[len(TAB_SECTION_START):len(Line) - len(TAB_SECTION_END)], TAB_COMMA_SPLIT)
for Item in LineList:
ItemList = GetSplitValueList(Item, TAB_SPLIT)
if CurrentSection == '':
CurrentSection = ItemList[0]
else:
if CurrentSection != ItemList[0]:
EdkLogger.error("Parser", PARSER_ERROR, "Different section names '%s' and '%s' are found in one section definition, this is not allowed." % (CurrentSection, ItemList[0]), File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
if CurrentSection.upper() not in self.KeyList:
RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
CurrentSection = TAB_UNKNOWN
continue
ItemList.append('')
ItemList.append('')
if len(ItemList) > 5:
RaiseParserError(Line, CurrentSection, Filename, '', LineNo)
else:
if ItemList[1] != '' and ItemList[1].upper() not in ARCH_LIST_FULL:
EdkLogger.error("Parser", PARSER_ERROR, "Invalid Arch definition '%s' found" % ItemList[1], File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
ArchList.append(ItemList[1].upper())
ThirdList.append(ItemList[2])
continue
#
# Not in any defined section
#
if CurrentSection == TAB_UNKNOWN:
ErrorMsg = "%s is not in any defined section" % Line
EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=Filename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
#
# Add a section item
#
SectionItemList.append([Line, LineNo])
# End of parse
#End of For
#
# Insert items data of last section
#
Model = Section[CurrentSection.upper()]
InsertSectionItemsIntoDatabase(self.TblInf, self.FileID, Filename, Model, CurrentSection, SectionItemList, ArchList, ThirdList, IfDefList, self.RecordSet)
#
# Replace all DEFINE macros with its actual values
#
ParseDefineMacro2(self.TblInf, self.RecordSet, GlobalData.gGlobalDefines)
## Show detailed information of Module
#
# Print all members and their values of Module class
#
def ShowModule(self):
M = self.Module
for Arch in M.Header.keys():
print '\nArch =', Arch
print 'Filename =', M.Header[Arch].FileName
print 'FullPath =', M.Header[Arch].FullPath
print 'BaseName =', M.Header[Arch].Name
print 'Guid =', M.Header[Arch].Guid
print 'Version =', M.Header[Arch].Version
print 'InfVersion =', M.Header[Arch].InfVersion
print 'UefiSpecificationVersion =', M.Header[Arch].UefiSpecificationVersion
print 'EdkReleaseVersion =', M.Header[Arch].EdkReleaseVersion
print 'ModuleType =', M.Header[Arch].ModuleType
print 'BinaryModule =', M.Header[Arch].BinaryModule
print 'ComponentType =', M.Header[Arch].ComponentType
print 'MakefileName =', M.Header[Arch].MakefileName
print 'BuildNumber =', M.Header[Arch].BuildNumber
print 'BuildType =', M.Header[Arch].BuildType
print 'FfsExt =', M.Header[Arch].FfsExt
print 'FvExt =', M.Header[Arch].FvExt
print 'SourceFv =', M.Header[Arch].SourceFv
print 'PcdIsDriver =', M.Header[Arch].PcdIsDriver
print 'TianoEdkFlashMap_h =', M.Header[Arch].TianoEdkFlashMap_h
print 'Shadow =', M.Header[Arch].Shadow
print 'LibraryClass =', M.Header[Arch].LibraryClass
for Item in M.Header[Arch].LibraryClass:
print Item.LibraryClass, DataType.TAB_VALUE_SPLIT.join(Item.SupModuleList)
print 'CustomMakefile =', M.Header[Arch].CustomMakefile
print 'Define =', M.Header[Arch].Define
print 'Specification =', M.Header[Arch].Specification
for Item in self.Module.ExternImages:
print '\nEntry_Point = %s, UnloadImage = %s' % (Item.ModuleEntryPoint, Item.ModuleUnloadImage)
for Item in self.Module.ExternLibraries:
print 'Constructor = %s, Destructor = %s' % (Item.Constructor, Item.Destructor)
print '\nBuildOptions =', M.BuildOptions
for Item in M.BuildOptions:
print Item.ToolChainFamily, Item.ToolChain, Item.Option, Item.SupArchList
print '\nIncludes =', M.Includes
for Item in M.Includes:
print Item.FilePath, Item.SupArchList
print '\nLibraries =', M.Libraries
for Item in M.Libraries:
print Item.Library, Item.SupArchList
print '\nLibraryClasses =', M.LibraryClasses
for Item in M.LibraryClasses:
print Item.LibraryClass, Item.RecommendedInstance, Item.FeatureFlag, Item.SupModuleList, Item.SupArchList, Item.Define
print '\nPackageDependencies =', M.PackageDependencies
for Item in M.PackageDependencies:
print Item.FilePath, Item.SupArchList, Item.FeatureFlag
print '\nNmake =', M.Nmake
for Item in M.Nmake:
print Item.Name, Item.Value, Item.SupArchList
print '\nPcds =', M.PcdCodes
for Item in M.PcdCodes:
print '\tCName=', Item.CName, 'TokenSpaceGuidCName=', Item.TokenSpaceGuidCName, 'DefaultValue=', Item.DefaultValue, 'ItemType=', Item.ItemType, Item.SupArchList
print '\nSources =', M.Sources
for Source in M.Sources:
print Source.SourceFile, 'Fam=', Source.ToolChainFamily, 'Pcd=', Source.FeatureFlag, 'Tag=', Source.TagName, 'ToolCode=', Source.ToolCode, Source.SupArchList
print '\nUserExtensions =', M.UserExtensions
for UserExtension in M.UserExtensions:
print UserExtension.UserID, UserExtension.Identifier, UserExtension.Content
print '\nGuids =', M.Guids
for Item in M.Guids:
print Item.CName, Item.SupArchList, Item.FeatureFlag
print '\nProtocols =', M.Protocols
for Item in M.Protocols:
print Item.CName, Item.SupArchList, Item.FeatureFlag
print '\nPpis =', M.Ppis
for Item in M.Ppis:
print Item.CName, Item.SupArchList, Item.FeatureFlag
print '\nDepex =', M.Depex
for Item in M.Depex:
print Item.Depex, Item.SupArchList, Item.Define
print '\nBinaries =', M.Binaries
for Binary in M.Binaries:
print 'Type=', Binary.FileType, 'Target=', Binary.Target, 'Name=', Binary.BinaryFile, 'FeatureFlag=', Binary.FeatureFlag, 'SupArchList=', Binary.SupArchList
## Convert [Defines] section content to ModuleHeaderClass
#
# Convert [Defines] section content to ModuleHeaderClass
#
# @param Defines The content under [Defines] section
# @param ModuleHeader An object of ModuleHeaderClass
# @param Arch The supported ARCH
#
def GenModuleHeader(self, ContainerFile):
EdkLogger.debug(2, "Generate ModuleHeader ...")
File = self.Identification.FileFullPath
#
# Update all defines item in database
#
RecordSet = self.RecordSet[MODEL_META_DATA_HEADER]
for Record in RecordSet:
ValueList = GetSplitValueList(Record[0], TAB_EQUAL_SPLIT)
if len(ValueList) != 2:
RaiseParserError(Record[0], 'Defines', ContainerFile, '<Key> = <Value>', Record[2])
ID, Value1, Value2, Arch, LineNo = Record[3], ValueList[0], ValueList[1], Record[1], Record[2]
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Value1), ConvertToSqlString2(Value2), ID)
self.TblInf.Exec(SqlCommand)
for Arch in DataType.ARCH_LIST:
ModuleHeader = InfHeader()
ModuleHeader.FileName = self.Identification.FileName
ModuleHeader.FullPath = self.Identification.FileFullPath
DefineList = QueryDefinesItem2(self.TblInf, Arch, self.FileID)
NotProcessedDefineList = []
for D in DefineList:
if D[0] in ModuleHeader:
ModuleHeader[D[0]] = GetSplitValueList(D[1])[0]
else:
NotProcessedDefineList.append(D)
if ModuleHeader.ComponentType == "LIBRARY":
Lib = LibraryClassClass()
Lib.LibraryClass = ModuleHeader.Name
Lib.SupModuleList = DataType.SUP_MODULE_LIST
ModuleHeader.LibraryClass.append(Lib)
# some key DEFINE entries need to be resolved first
for D in NotProcessedDefineList:
if D[0] == TAB_INF_DEFINES_LIBRARY_CLASS:
List = GetSplitValueList(D[1], DataType.TAB_VALUE_SPLIT, 1)
Lib = LibraryClassClass()
Lib.LibraryClass = CleanString(List[0])
if len(List) == 1:
Lib.SupModuleList = DataType.SUP_MODULE_LIST
elif len(List) == 2:
Lib.SupModuleList = GetSplitValueList(CleanString(List[1]), ' ')
ModuleHeader.LibraryClass.append(Lib)
elif D[0] == TAB_INF_DEFINES_CUSTOM_MAKEFILE:
List = D[1].split(DataType.TAB_VALUE_SPLIT)
if len(List) == 2:
ModuleHeader.CustomMakefile[CleanString(List[0])] = CleanString(List[1])
else:
RaiseParserError(D[1], 'CUSTOM_MAKEFILE of Defines', File, 'CUSTOM_MAKEFILE=<Family>|<Filename>', D[2])
elif D[0] == TAB_INF_DEFINES_ENTRY_POINT:
Image = ModuleExternImageClass()
Image.ModuleEntryPoint = CleanString(D[1])
self.Module.ExternImages.append(Image)
elif D[0] == TAB_INF_DEFINES_UNLOAD_IMAGE:
Image = ModuleExternImageClass()
Image.ModuleUnloadImage = CleanString(D[1])
self.Module.ExternImages.append(Image)
elif D[0] == TAB_INF_DEFINES_CONSTRUCTOR:
LibraryClass = ModuleExternLibraryClass()
LibraryClass.Constructor = CleanString(D[1])
self.Module.ExternLibraries.append(LibraryClass)
elif D[0] == TAB_INF_DEFINES_DESTRUCTOR:
LibraryClass = ModuleExternLibraryClass()
LibraryClass.Destructor = CleanString(D[1])
self.Module.ExternLibraries.append(LibraryClass)
elif D[0] == TAB_INF_DEFINES_DEFINE:
List = D[1].split(DataType.TAB_EQUAL_SPLIT)
if len(List) != 2:
RaiseParserError(D[1], 'DEFINE of Defines', File, 'DEFINE <Word> = <Word>', D[2])
else:
ModuleHeader.Define[CleanString(List[0])] = CleanString(List[1])
elif D[0] == TAB_INF_DEFINES_SPEC:
List = D[1].split(DataType.TAB_EQUAL_SPLIT)
if len(List) != 2:
RaiseParserError(D[1], 'SPEC of Defines', File, 'SPEC <Word> = <Version>', D[2])
else:
ModuleHeader.Specification[CleanString(List[0])] = CleanString(List[1])
#
# Get version of INF
#
if ModuleHeader.InfVersion != "":
# EdkII inf
VersionNumber = ModuleHeader.VersionNumber
VersionString = ModuleHeader.VersionString
if len(VersionNumber) > 0 and len(VersionString) == 0:
EdkLogger.warn(2000, 'VERSION_NUMBER is deprecated; INF file %s should be modified to use VERSION_STRING instead.' % self.Identification.FileFullPath)
ModuleHeader.Version = VersionNumber
if len(VersionString) > 0:
if len(VersionNumber) > 0:
EdkLogger.warn(2001, 'INF file %s defines both VERSION_NUMBER and VERSION_STRING, using VERSION_STRING' % self.Identification.FileFullPath)
ModuleHeader.Version = VersionString
else:
# Edk inf
ModuleHeader.InfVersion = "0x00010000"
if ModuleHeader.ComponentType in gComponentType2ModuleType:
ModuleHeader.ModuleType = gComponentType2ModuleType[ModuleHeader.ComponentType]
elif ModuleHeader.ComponentType != '':
EdkLogger.error("Parser", PARSER_ERROR, "Unsupported Edk component type [%s]" % ModuleHeader.ComponentType, ExtraData=File, RaiseError=EdkLogger.IsRaiseError)
self.Module.Header[Arch] = ModuleHeader
## GenBuildOptions
#
# Gen BuildOptions of Inf
# [<Family>:]<ToolFlag>=Flag
#
# @param ContainerFile: The Inf file full path
#
def GenBuildOptions(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_BUILD_OPTIONS)
BuildOptions = {}
#
# Get all BuildOptions
#
RecordSet = self.RecordSet[MODEL_META_DATA_BUILD_OPTION]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(Family, ToolChain, Flag) = GetBuildOption(Record[0], ContainerFile, Record[2])
MergeArches(BuildOptions, (Family, ToolChain, Flag), Arch)
#
# Update to Database
#
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Family), ConvertToSqlString2(ToolChain), ConvertToSqlString2(Flag), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in BuildOptions.keys():
BuildOption = BuildOptionClass(Key[0], Key[1], Key[2])
BuildOption.SupArchList = BuildOptions[Key]
self.Module.BuildOptions.append(BuildOption)
## GenIncludes
#
# Gen Includes of Inf
#
#
# @param ContainerFile: The Inf file full path
#
def GenIncludes(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_INCLUDES)
Includes = sdict()
#
# Get all Includes
#
RecordSet = self.RecordSet[MODEL_EFI_INCLUDE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
MergeArches(Includes, Record[0], Arch)
for Key in Includes.keys():
Include = IncludeClass()
Include.FilePath = NormPath(Key)
Include.SupArchList = Includes[Key]
self.Module.Includes.append(Include)
## GenLibraries
#
# Gen Libraries of Inf
#
#
# @param ContainerFile: The Inf file full path
#
def GenLibraries(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARIES)
Libraries = sdict()
#
# Get all Includes
#
RecordSet = self.RecordSet[MODEL_EFI_LIBRARY_INSTANCE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
MergeArches(Libraries, Record[0], Arch)
for Key in Libraries.keys():
Library = ModuleLibraryClass()
# replace macro and remove file extension
Library.Library = Key.rsplit('.', 1)[0]
Library.SupArchList = Libraries[Key]
self.Module.Libraries.append(Library)
## GenLibraryClasses
#
# Get LibraryClass of Inf
# <LibraryClassKeyWord>|<LibraryInstance>
#
# @param ContainerFile: The Inf file full path
#
def GenLibraryClasses(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_LIBRARY_CLASSES)
LibraryClasses = {}
#
# Get all LibraryClasses
#
RecordSet = self.RecordSet[MODEL_EFI_LIBRARY_CLASS]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(LibClassName, LibClassIns, Pcd, SupModelList) = GetLibraryClassOfInf([Record[0], Record[4]], ContainerFile, self.WorkspaceDir, Record[2])
MergeArches(LibraryClasses, (LibClassName, LibClassIns, Pcd, SupModelList), Arch)
#
# Update to Database
#
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(LibClassName), ConvertToSqlString2(LibClassIns), ConvertToSqlString2(SupModelList), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in LibraryClasses.keys():
KeyList = Key[0].split(DataType.TAB_VALUE_SPLIT)
LibraryClass = LibraryClassClass()
LibraryClass.LibraryClass = Key[0]
LibraryClass.RecommendedInstance = NormPath(Key[1])
LibraryClass.FeatureFlag = Key[2]
LibraryClass.SupArchList = LibraryClasses[Key]
LibraryClass.SupModuleList = GetSplitValueList(Key[3])
self.Module.LibraryClasses.append(LibraryClass)
## GenPackages
#
# Gen Packages of Inf
#
#
# @param ContainerFile: The Inf file full path
#
def GenPackages(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_PACKAGES)
Packages = {}
#
# Get all Packages
#
RecordSet = self.RecordSet[MODEL_META_DATA_PACKAGE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(Package, Pcd) = GetPackage(Record[0], ContainerFile, self.WorkspaceDir, Record[2])
MergeArches(Packages, (Package, Pcd), Arch)
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Package), ConvertToSqlString2(Pcd), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in Packages.keys():
Package = ModulePackageDependencyClass()
Package.FilePath = NormPath(Key[0])
Package.SupArchList = Packages[Key]
Package.FeatureFlag = Key[1]
self.Module.PackageDependencies.append(Package)
## GenNmakes
#
# Gen Nmakes of Inf
#
#
# @param ContainerFile: The Inf file full path
#
def GenNmakes(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_NMAKE)
Nmakes = sdict()
#
# Get all Nmakes
#
RecordSet = self.RecordSet[MODEL_META_DATA_NMAKE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
MergeArches(Nmakes, Record[0], Arch)
for Key in Nmakes.keys():
List = GetSplitValueList(Key, DataType.TAB_EQUAL_SPLIT, MaxSplit=1)
if len(List) != 2:
RaiseParserError(Key, 'Nmake', ContainerFile, '<MacroName> = <Value>')
continue
Nmake = ModuleNmakeClass()
Nmake.Name = List[0]
Nmake.Value = List[1]
Nmake.SupArchList = Nmakes[Key]
self.Module.Nmake.append(Nmake)
# convert Edk format to EdkII format
if Nmake.Name == "IMAGE_ENTRY_POINT":
Image = ModuleExternImageClass()
Image.ModuleEntryPoint = Nmake.Value
self.Module.ExternImages.append(Image)
elif Nmake.Name == "DPX_SOURCE":
Source = ModuleSourceFileClass(NormPath(Nmake.Value), "", "", "", "", Nmake.SupArchList)
self.Module.Sources.append(Source)
else:
ToolList = gNmakeFlagPattern.findall(Nmake.Name)
if len(ToolList) != 1:
EdkLogger.warn("\nParser", "Don't know how to handle MACRO: %s" % Nmake.Name,
ExtraData=ContainerFile)
else:
if ToolList[0] in gNmakeFlagName2ToolCode:
Tool = gNmakeFlagName2ToolCode[ToolList[0]]
else:
Tool = ToolList[0]
BuildOption = BuildOptionClass("MSFT", "*_*_*_%s_FLAGS" % Tool, Nmake.Value)
BuildOption.SupArchList = Nmake.SupArchList
self.Module.BuildOptions.append(BuildOption)
## GenPcds
#
# Gen Pcds of Inf
# <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
#
# @param ContainerFile: The Dec file full path
#
def GenPcds(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_PCDS)
Pcds = {}
PcdToken = {}
#
# Get all Guids
#
RecordSet1 = self.RecordSet[MODEL_PCD_FIXED_AT_BUILD]
RecordSet2 = self.RecordSet[MODEL_PCD_PATCHABLE_IN_MODULE]
RecordSet3 = self.RecordSet[MODEL_PCD_FEATURE_FLAG]
RecordSet4 = self.RecordSet[MODEL_PCD_DYNAMIC_EX]
RecordSet5 = self.RecordSet[MODEL_PCD_DYNAMIC]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet1:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
if self.Module.Header[Arch].LibraryClass != {}:
pass
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_FIXED_AT_BUILD, ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet2:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_PATCHABLE_IN_MODULE, ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet3:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_FEATURE_FLAG, ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet4:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], TAB_PCDS_DYNAMIC_EX, ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
for Record in RecordSet5:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(TokenGuidCName, TokenName, Value, Type) = GetPcdOfInf(Record[0], "", ContainerFile, Record[2])
MergeArches(Pcds, (TokenGuidCName, TokenName, Value, Type), Arch)
PcdToken[Record[3]] = (TokenGuidCName, TokenName)
#
# Update to database
#
if self.IsToDatabase:
for Key in PcdToken.keys():
SqlCommand = """update %s set Value2 = '%s' where ID = %s""" % (self.TblInf.Table, ".".join((PcdToken[Key][0], PcdToken[Key][1])), Key)
self.TblInf.Exec(SqlCommand)
for Key in Pcds.keys():
Pcd = PcdClass()
Pcd.CName = Key[1]
Pcd.TokenSpaceGuidCName = Key[0]
Pcd.DefaultValue = Key[2]
Pcd.ItemType = Key[3]
Pcd.SupArchList = Pcds[Key]
self.Module.PcdCodes.append(Pcd)
## GenSources
#
# Gen Sources of Inf
# <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]
#
# @param ContainerFile: The Dec file full path
#
def GenSources(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_SOURCES)
Sources = {}
#
# Get all Nmakes
#
RecordSet = self.RecordSet[MODEL_EFI_SOURCE_FILE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(Filename, Family, TagName, ToolCode, Pcd) = GetSource(Record[0], ContainerFile, self.Identification.FileRelativePath, Record[2])
MergeArches(Sources, (Filename, Family, TagName, ToolCode, Pcd), Arch)
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s', Value4 = '%s', Value5 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Filename), ConvertToSqlString2(Family), ConvertToSqlString2(TagName), ConvertToSqlString2(ToolCode), ConvertToSqlString2(Pcd), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in Sources.keys():
Source = ModuleSourceFileClass(Key[0], Key[2], Key[3], Key[1], Key[4], Sources[Key])
self.Module.Sources.append(Source)
## GenUserExtensions
#
# Gen UserExtensions of Inf
#
def GenUserExtensions(self, ContainerFile):
# #
# # UserExtensions
# #
# if self.UserExtensions != '':
# UserExtension = UserExtensionsClass()
# Lines = self.UserExtensions.splitlines()
# List = GetSplitValueList(Lines[0], DataType.TAB_SPLIT, 2)
# if len(List) != 3:
# RaiseParserError(Lines[0], 'UserExtensions', File, "UserExtensions.UserId.'Identifier'")
# else:
# UserExtension.UserID = List[1]
# UserExtension.Identifier = List[2][0:-1].replace("'", '').replace('\"', '')
# for Line in Lines[1:]:
# UserExtension.Content = UserExtension.Content + CleanString(Line) + '\n'
# self.Module.UserExtensions.append(UserExtension)
pass
## GenDepexes
#
# Gen Depex of Inf
#
# @param ContainerFile: The Inf file full path
#
def GenDepexes(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_DEPEX)
Depex = {}
#
# Get all Depexes
#
RecordSet = self.RecordSet[MODEL_EFI_DEPEX]
#
# Go through each arch
#
for Arch in self.SupArchList:
Line = ''
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
Line = Line + Record[0] + ' '
if Line != '':
MergeArches(Depex, Line, Arch)
for Key in Depex.keys():
Dep = ModuleDepexClass()
Dep.Depex = Key
Dep.SupArchList = Depex[Key]
self.Module.Depex.append(Dep)
## GenBinaries
#
# Gen Binary of Inf
# <FileType>|<Filename>|<Target>[|<TokenSpaceGuidCName>.<PcdCName>]
#
# @param ContainerFile: The Dec file full path
#
def GenBinaries(self, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % TAB_BINARIES)
Binaries = {}
#
# Get all Guids
#
RecordSet = self.RecordSet[MODEL_EFI_BINARY_FILE]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(FileType, Filename, Target, Pcd) = GetBinary(Record[0], ContainerFile, self.Identification.FileRelativePath, Record[2])
MergeArches(Binaries, (FileType, Filename, Target, Pcd), Arch)
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s', Value3 = '%s', Value4 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(FileType), ConvertToSqlString2(Filename), ConvertToSqlString2(Target), ConvertToSqlString2(Pcd), Record[3])
self.TblInf.Exec(SqlCommand)
for Key in Binaries.keys():
Binary = ModuleBinaryFileClass(NormPath(Key[1]), Key[0], Key[2], Key[3], Binaries[Key])
self.Module.Binaries.append(Binary)
## GenGuids
#
# Gen Guids of Inf
# <CName>=<GuidValue>
#
# @param ContainerFile: The Inf file full path
#
def GenGuidProtocolPpis(self, Type, ContainerFile):
EdkLogger.debug(2, "Generate %s ..." % Type)
Lists = {}
#
# Get all Items
#
RecordSet = self.RecordSet[Section[Type.upper()]]
#
# Go through each arch
#
for Arch in self.SupArchList:
for Record in RecordSet:
if Record[1] == Arch or Record[1] == TAB_ARCH_COMMON:
(Name, Value) = GetGuidsProtocolsPpisOfInf(Record[0], Type, ContainerFile, Record[2])
MergeArches(Lists, (Name, Value), Arch)
if self.IsToDatabase:
SqlCommand = """update %s set Value1 = '%s', Value2 = '%s'
where ID = %s""" % (self.TblInf.Table, ConvertToSqlString2(Name), ConvertToSqlString2(Value), Record[3])
self.TblInf.Exec(SqlCommand)
ListMember = None
if Type == TAB_GUIDS:
ListMember = self.Module.Guids
elif Type == TAB_PROTOCOLS:
ListMember = self.Module.Protocols
elif Type == TAB_PPIS:
ListMember = self.Module.Ppis
for Key in Lists.keys():
ListClass = GuidProtocolPpiCommonClass()
ListClass.CName = Key[0]
ListClass.SupArchList = Lists[Key]
ListClass.FeatureFlag = Key[1]
ListMember.append(ListClass)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
EdkLogger.Initialize()
EdkLogger.SetLevel(EdkLogger.DEBUG_0)
W = os.getenv('WORKSPACE')
F = os.path.join(W, 'MdeModulePkg/Application/HelloWorld/HelloWorld.inf')
Db = Database.Database('Inf.db')
Db.InitDatabase()
P = Inf(os.path.normpath(F), True, True, W, Db)
P.ShowModule()
Db.Close()
|
import pytest
jax = pytest.importorskip("jax", minversion="0.2")
jnp = jax.numpy
import numpy as np
import pennylane as qml
from pennylane.devices.default_qubit_jax import DefaultQubitJax
pytestmark = pytest.mark.usefixtures("tape_mode")
class TestQNodeIntegration:
"""Integration tests for default.qubit.jax. This test ensures it integrates
properly with the PennyLane UI, in particular the new QNode."""
def test_defines_correct_capabilities(self):
"""Test that the device defines the right capabilities"""
dev = qml.device("default.qubit.jax", wires=1)
cap = dev.capabilities()
capabilities = {
"model": "qubit",
"supports_finite_shots": True,
"supports_tensor_observables": True,
"returns_probs": True,
"returns_state": True,
"supports_reversible_diff": False,
"supports_inverse_operations": True,
"supports_analytic_computation": True,
"passthru_interface": "jax",
}
assert cap == capabilities
def test_defines_correct_capabilities_directly_from_class(self):
"""Test that the device defines the right capabilities"""
dev = DefaultQubitJax(wires=1)
cap = dev.capabilities()
assert cap["supports_reversible_diff"] == False
assert cap["passthru_interface"] == "jax"
def test_load_device(self):
"""Test that the plugin device loads correctly"""
dev = qml.device("default.qubit.jax", wires=2)
assert dev.num_wires == 2
assert dev.shots == 1000
assert dev.analytic
assert dev.short_name == "default.qubit.jax"
assert dev.capabilities()["passthru_interface"] == "jax"
def test_qubit_circuit(self, tol):
"""Test that the device provides the correct
result for a simple circuit."""
p = jnp.array(0.543)
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliY(0))
expected = -jnp.sin(p)
if not qml.tape_mode_active():
assert isinstance(circuit, qml.qnodes.PassthruQNode)
assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)
def test_qubit_circuit_with_jit(self, tol):
"""Test that the device provides the correct
result for a simple circuit under a jax.jit."""
p = jnp.array(0.543)
dev = qml.device("default.qubit.jax", wires=1)
@jax.jit
@qml.qnode(dev, interface="jax")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliY(0))
expected = -jnp.sin(p)
# Do not test isinstance here since the @jax.jit changes the function
# type.
# Just test that it works and spits out the right value.
assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)
def test_correct_state(self, tol):
"""Test that the device state is correct after applying a
quantum function on the device"""
dev = qml.device("default.qubit.jax", wires=2)
state = dev.state
expected = jnp.array([1, 0, 0, 0])
assert jnp.allclose(state, expected, atol=tol, rtol=0)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.Hadamard(wires=0)
qml.RZ(jnp.pi / 4, wires=0)
return qml.expval(qml.PauliZ(0))
circuit()
state = dev.state
amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)
expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])
assert jnp.allclose(state, expected, atol=tol, rtol=0)
def test_correct_state_returned(self, tol):
"""Test that the device state is correct after applying a
quantum function on the device"""
if not qml.tape_mode_active():
pytest.skip("Only supported in tape mode")
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.Hadamard(wires=0)
qml.RZ(jnp.pi / 4, wires=0)
return qml.state()
state = circuit()
amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)
expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])
assert jnp.allclose(state, expected, atol=tol, rtol=0)
def test_sampling_with_jit(self):
"""Test that sampling works with a jax.jit"""
@jax.jit
def circuit(key):
dev = qml.device("default.qubit.jax", wires=1, prng_key=key)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def inner_circuit():
qml.Hadamard(0)
return qml.sample(qml.PauliZ(wires=0))
return inner_circuit()
a = circuit(jax.random.PRNGKey(0))
b = circuit(jax.random.PRNGKey(0))
c = circuit(jax.random.PRNGKey(1))
np.testing.assert_array_equal(a, b)
assert not np.all(a == c)
def test_sampling_op_by_op(self):
"""Test that op-by-op sampling works as a new user would expect"""
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.Hadamard(0)
return qml.sample(qml.PauliZ(wires=0))
a = circuit()
b = circuit()
assert not np.all(a == b)
def test_gates_dont_crash(self):
"""Test for gates that weren't covered by other tests. """
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.CRZ(0.0, wires=[0, 1])
qml.CRot(1.0, 0.0, 0.0, wires=[0, 1])
qml.CRY(0.0, wires=[0, 1])
return qml.sample(qml.PauliZ(wires=0))
circuit() # Just don't crash.
def test_diagonal_doesnt_crash(self):
"""Test that diagonal gates can be used."""
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.DiagonalQubitUnitary(np.array([1.0, 1.0]), wires=0)
return qml.sample(qml.PauliZ(wires=0))
circuit() # Just don't crash.
class TestPassthruIntegration:
"""Tests for integration with the PassthruQNode"""
@pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
def test_jacobian_variable_multiply(self, tol, jacobian_transform):
"""Test that jacobian of a QNode with an attached default.qubit.jax device
gives the correct result in the case of parameters multiplied by scalars"""
x = 0.43316321
y = 0.2162158
z = 0.75110998
weights = jnp.array([x, y, z])
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax")
def circuit(p):
qml.RX(3 * p[0], wires=0)
qml.RY(p[1], wires=0)
qml.RX(p[2] / 2, wires=0)
return qml.expval(qml.PauliZ(0))
if not qml.tape_mode_active():
assert isinstance(circuit, qml.qnodes.PassthruQNode)
res = circuit(weights)
expected = jnp.cos(3 * x) * jnp.cos(y) * jnp.cos(z / 2) - jnp.sin(3 * x) * jnp.sin(z / 2)
assert jnp.allclose(res, expected, atol=tol, rtol=0)
grad_fn = jacobian_transform(circuit, 0)
res = grad_fn(jnp.array(weights))
expected = jnp.array(
[
-3
* (jnp.sin(3 * x) * jnp.cos(y) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.sin(z / 2)),
-jnp.cos(3 * x) * jnp.sin(y) * jnp.cos(z / 2),
-0.5
* (jnp.sin(3 * x) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.cos(y) * jnp.sin(z / 2)),
]
)
assert jnp.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
def test_jacobian_repeated(self, tol, jacobian_transform):
"""Test that jacobian of a QNode with an attached default.qubit.jax device
gives the correct result in the case of repeated parameters"""
x = 0.43316321
y = 0.2162158
z = 0.75110998
p = jnp.array([x, y, z])
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax")
def circuit(x):
qml.RX(x[1], wires=0)
qml.Rot(x[0], x[1], x[2], wires=0)
return qml.expval(qml.PauliZ(0))
res = circuit(p)
expected = jnp.cos(y) ** 2 - jnp.sin(x) * jnp.sin(y) ** 2
assert jnp.allclose(res, expected, atol=tol, rtol=0)
grad_fn = jacobian_transform(circuit, 0)
res = grad_fn(p)
expected = jnp.array(
[-jnp.cos(x) * jnp.sin(y) ** 2, -2 * (jnp.sin(x) + 1) * jnp.sin(y) * jnp.cos(y), 0]
)
assert jnp.allclose(res, expected, atol=tol, rtol=0)
def test_state_differentiability(self, tol):
"""Test that the device state can be differentiated"""
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(a):
qml.RY(a, wires=0)
return qml.expval(qml.PauliZ(0))
a = jnp.array(0.54)
def cost(a):
"""A function of the device quantum state, as a function
of input QNode parameters."""
circuit(a)
res = jnp.abs(dev.state) ** 2
return res[1] - res[0]
grad = jax.grad(cost)(a)
expected = jnp.sin(a)
assert jnp.allclose(grad, expected, atol=tol, rtol=0)
def test_prob_differentiability(self, tol):
"""Test that the device probability can be differentiated"""
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(a, b):
qml.RX(a, wires=0)
qml.RY(b, wires=1)
qml.CNOT(wires=[0, 1])
return qml.probs(wires=[1])
a = jnp.array(0.54)
b = jnp.array(0.12)
def cost(a, b):
prob_wire_1 = circuit(a, b).squeeze()
return prob_wire_1[1] - prob_wire_1[0]
res = cost(a, b)
expected = -jnp.cos(a) * jnp.cos(b)
assert jnp.allclose(res, expected, atol=tol, rtol=0)
grad = jax.jit(jax.grad(cost, argnums=(0, 1)))(a, b)
expected = [jnp.sin(a) * jnp.cos(b), jnp.cos(a) * jnp.sin(b)]
assert jnp.allclose(grad, expected, atol=tol, rtol=0)
def test_backprop_gradient(self, tol):
"""Tests that the gradient of the qnode is correct"""
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(a, b):
qml.RX(a, wires=0)
qml.CRX(b, wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
a = jnp.array(-0.234)
b = jnp.array(0.654)
res = circuit(a, b)
expected_cost = 0.5 * (jnp.cos(a) * jnp.cos(b) + jnp.cos(a) - jnp.cos(b) + 1)
assert jnp.allclose(res, expected_cost, atol=tol, rtol=0)
res = jax.grad(lambda x, y: circuit(x, y).reshape(()), argnums=(0, 1))(a, b)
expected_grad = jnp.array(
[-0.5 * jnp.sin(a) * (jnp.cos(b) + 1), 0.5 * jnp.sin(b) * (1 - jnp.cos(a))]
)
assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)
@pytest.mark.parametrize("operation", [qml.U3, qml.U3.decomposition])
@pytest.mark.parametrize("diff_method", ["backprop"])
def test_jax_interface_gradient(self, operation, diff_method, tol):
"""Tests that the gradient of an arbitrary U3 gate is correct
using the Jax interface, using a variety of differentiation methods."""
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, diff_method=diff_method, interface="jax")
def circuit(x, weights, w=None):
"""In this example, a mixture of scalar
arguments, array arguments, and keyword arguments are used."""
qml.QubitStateVector(1j * jnp.array([1, -1]) / jnp.sqrt(2), wires=w)
operation(x, weights[0], weights[1], wires=w)
return qml.expval(qml.PauliX(w))
# Check that the correct QNode type is being used.
if not qml.tape_mode_active():
if diff_method == "backprop":
assert isinstance(circuit, qml.qnodes.PassthruQNode)
assert not hasattr(circuit, "jacobian")
else:
assert not isinstance(circuit, qml.qnodes.PassthruQNode)
assert hasattr(circuit, "jacobian")
def cost(params):
"""Perform some classical processing"""
return (circuit(params[0], params[1:], w=0) ** 2).reshape(())
theta = 0.543
phi = -0.234
lam = 0.654
params = jnp.array([theta, phi, lam])
res = cost(params)
expected_cost = (
jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi)
) ** 2
assert jnp.allclose(res, expected_cost, atol=tol, rtol=0)
res = jax.grad(cost)(params)
expected_grad = (
jnp.array(
[
jnp.sin(theta) * jnp.cos(lam) * jnp.cos(phi),
jnp.cos(theta) * jnp.cos(lam) * jnp.sin(phi) + jnp.sin(lam) * jnp.cos(phi),
jnp.cos(theta) * jnp.sin(lam) * jnp.cos(phi) + jnp.cos(lam) * jnp.sin(phi),
]
)
* 2
* (jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi))
)
assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)
@pytest.mark.parametrize("interface", ["autograd", "tf", "torch"])
def test_error_backprop_wrong_interface(self, interface, tol):
"""Tests that an error is raised if diff_method='backprop' but not using
the Jax interface"""
dev = qml.device("default.qubit.jax", wires=1)
def circuit(x, w=None):
qml.RZ(x, wires=w)
return qml.expval(qml.PauliX(w))
error_type = qml.QuantumFunctionError if qml.tape_mode_active() else ValueError
with pytest.raises(
error_type,
match="default.qubit.jax only supports diff_method='backprop' when using the jax interface",
):
qml.qnode(dev, diff_method="backprop", interface=interface)(circuit)
class TestHighLevelIntegration:
"""Tests for integration with higher level components of PennyLane."""
def test_template_integration(self):
"""Test that a PassthruQNode using default.qubit.jax works with templates."""
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(weights):
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])
return qml.expval(qml.PauliZ(0))
weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))
grad = jax.grad(lambda a: circuit(a).reshape(()))(weights)
assert grad.shape == weights.shape
def test_qnode_collection_integration(self):
"""Test that a PassthruQNode using default.qubit.jax works with QNodeCollections."""
dev = qml.device("default.qubit.jax", wires=2)
def ansatz(weights, **kwargs):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
qml.CNOT(wires=[0, 1])
obs_list = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)]
qnodes = qml.map(ansatz, obs_list, dev, interface="jax")
if not qml.tape_mode_active():
assert qnodes.interface == "jax"
weights = jnp.array([0.1, 0.2])
def cost(weights):
return jnp.sum(jnp.array(qnodes(weights)))
grad = jax.grad(cost)(weights)
assert grad.shape == weights.shape
def test_non_backprop_error(self):
"""Test that an error is raised in tape mode if the diff method is not backprop"""
if not qml.tape_mode_active():
pytest.skip("Test only applies in tape mode")
dev = qml.device("default.qubit.jax", wires=2)
def circuit(weights):
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])
return qml.expval(qml.PauliZ(0))
qnode = qml.QNode(circuit, dev, interface="jax", diff_method="parameter-shift")
weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))
with pytest.raises(qml.QuantumFunctionError, match="The JAX interface can only be used with"):
qnode(weights)
class TestOps:
"""Unit tests for operations supported by the default.qubit.jax device"""
@pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
def test_multirz_jacobian(self, jacobian_transform):
"""Test that the patched numpy functions are used for the MultiRZ
operation and the jacobian can be computed."""
wires = 4
dev = qml.device("default.qubit.jax", wires=wires)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(param):
qml.MultiRZ(param, wires=[0, 1])
return qml.probs(wires=list(range(wires)))
param = 0.3
res = jacobian_transform(circuit)(param)
assert jnp.allclose(res, jnp.zeros(wires ** 2))
def test_full_subsystem(self, mocker):
"""Test applying a state vector to the full subsystem"""
dev = DefaultQubitJax(wires=["a", "b", "c"])
state = jnp.array([1, 0, 0, 0, 1, 0, 1, 1]) / 2.0
state_wires = qml.wires.Wires(["a", "b", "c"])
spy = mocker.spy(dev, "_scatter")
dev._apply_state_vector(state=state, device_wires=state_wires)
assert jnp.all(dev._state.flatten() == state)
spy.assert_not_called()
def test_partial_subsystem(self, mocker):
"""Test applying a state vector to a subset of wires of the full subsystem"""
dev = DefaultQubitJax(wires=["a", "b", "c"])
state = jnp.array([1, 0, 1, 0]) / jnp.sqrt(2.0)
state_wires = qml.wires.Wires(["a", "c"])
spy = mocker.spy(dev, "_scatter")
dev._apply_state_vector(state=state, device_wires=state_wires)
res = jnp.sum(dev._state, axis=(1,)).flatten()
assert jnp.all(res == state)
spy.assert_called()
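# Illustrative, standalone sketch (not part of the test suite): it assumes the
# module-level imports used by the tests above (qml, jnp, jax) and a PennyLane
# version where "default.qubit.jax" supports diff_method="backprop".
if __name__ == "__main__":
    demo_dev = qml.device("default.qubit.jax", wires=1)

    @qml.qnode(demo_dev, interface="jax", diff_method="backprop")
    def demo_circuit(x):
        qml.RX(x, wires=0)
        return qml.expval(qml.PauliZ(0))

    x0 = jnp.array(0.4)
    # d/dx <Z> for RX(x) is -sin(x); reshape to a scalar as the tests above do.
    print(jax.grad(lambda x: demo_circuit(x).reshape(()))(x0))  # approx -sin(0.4)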
|
import pytest
from pytest_mock_resources.fixture.database.generic import assign_fixture_credentials
from pytest_mock_resources.fixture.database.relational.generic import EngineManager
from pytest_mock_resources.fixture.database.relational.postgresql import (
_create_clean_database,
get_sqlalchemy_engine,
)
from pytest_mock_resources.patch.redshift import psycopg2, sqlalchemy
def create_redshift_fixture(*ordered_actions, scope="function", tables=None, session=None):
"""Produce a Redshift fixture.
Any number of fixture functions can be created. Under the hood they will all share the same
database server.
Arguments:
ordered_actions: Any number of ordered actions to be run on test setup.
scope: Passthrough pytest's fixture scope.
tables: Subsets the tables created by `ordered_actions`. This is generally
most useful when a model-base was specified in `ordered_actions`.
session: Whether to return a session instead of an engine directly. This can
either be a bool or a callable capable of producing a session.
"""
from pytest_mock_resources.fixture.database.relational.redshift.udf import REDSHIFT_UDFS
ordered_actions = ordered_actions + (REDSHIFT_UDFS,)
@pytest.fixture(scope=scope)
def _(_redshift_container, pmr_postgres_config):
database_name = _create_clean_database(pmr_postgres_config)
engine = get_sqlalchemy_engine(pmr_postgres_config, database_name)
assign_fixture_credentials(
engine,
drivername="postgresql+psycopg2",
host=pmr_postgres_config.host,
port=pmr_postgres_config.port,
database=database_name,
username=pmr_postgres_config.username,
password=pmr_postgres_config.password,
)
engine = sqlalchemy.substitute_execute_with_custom_execute(engine)
engine_manager = EngineManager(
engine, ordered_actions, tables=tables, default_schema="public"
)
with psycopg2.patch_connect(pmr_postgres_config):
for engine in engine_manager.manage(session=session):
yield engine
return _
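# Illustrative usage sketch (an assumption, not part of this module): in a test
# suite's conftest.py the factory above is typically called once and the result
# exposed under a fixture name, e.g.:
#
#     from pytest_mock_resources import create_redshift_fixture, Statements
#
#     redshift = create_redshift_fixture(
#         Statements("CREATE TABLE example (id INTEGER)"),
#         scope="session",
#     )
#
# Tests then request `redshift` as an argument to receive the engine (or a
# session, when session=True is passed to the factory).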
|
from __future__ import annotations
from typing import NoReturn
from . import LinearRegression
from ...base import BaseEstimator
import numpy as np
class PolynomialFitting(BaseEstimator):
"""
Polynomial Fitting using Least Squares estimation
"""
    def __init__(self, k: int) -> None:
"""
Instantiate a polynomial fitting estimator
Parameters
----------
k : int
Degree of polynomial to fit
"""
super().__init__()
self.degree = k
self.linear_regression_model = LinearRegression(
include_intercept=False)
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit Least Squares model to polynomial transformed samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
x = self.__transform(X)
self.linear_regression_model.fit(x, y)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
x = self.__transform(X)
return self.linear_regression_model.predict(x)
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under MSE loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under MSE loss function
"""
x = self.__transform(X)
return self.linear_regression_model.loss(x, y)
def __transform(self, X: np.ndarray) -> np.ndarray:
"""
Transform given input according to the univariate polynomial
transformation
Parameters
----------
X: ndarray of shape (n_samples,)
Returns
-------
transformed: ndarray of shape (n_samples, k+1)
Vandermonde matrix of given samples up to degree k
"""
return np.vander(X, N=self.degree+1, increasing=True)
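
if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; assumes BaseEstimator exposes the
    # public fit/predict wrappers that delegate to _fit/_predict, and that
    # LinearRegression from this package is implemented).
    xs = np.linspace(-1, 1, 20)
    ys = 1 + 2 * xs + 3 * xs ** 2  # noiseless samples of a degree-2 polynomial
    model = PolynomialFitting(k=2)
    model.fit(xs, ys)
    print(model.predict(np.array([0.0, 0.5])))  # expected approximately [1.0, 2.75]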
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.campaign import Campaign
from facebook_business.api import FacebookAdsApi
access_token = '<ACCESS_TOKEN>'
app_secret = '<APP_SECRET>'
app_id = '<APP_ID>'
id = '<ID>'
FacebookAdsApi.init(access_token=access_token)
fields = [
]
params = {
'name': 'My First Campaign',
'objective': 'PAGE_LIKES',
'status': 'PAUSED',
}
print(AdAccount(id).create_campaign(
  fields=fields,
  params=params,
))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PriceInformation(object):
def __init__(self):
self._amount = None
self._type = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = PriceInformation()
if 'amount' in d:
o.amount = d['amount']
if 'type' in d:
o.type = d['type']
return o
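
if __name__ == "__main__":
    # Small round-trip sketch (illustrative only; the field values are made up):
    # build the object from a plain dict and convert it back to the dict form
    # used by the Alipay SDK.
    info = PriceInformation.from_alipay_dict({"amount": "12.50", "type": "SALE"})
    print(info.to_alipay_dict())  # {'amount': '12.50', 'type': 'SALE'}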
|
# Copyright 2020 The StackStorm Authors.
# Copyright (C) 2020 Extreme Networks, Inc - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2tests.fixturesloader import FixturesLoader
from st2api.controllers.v1.traces import TracesController
from tests.base import APIControllerWithRBACTestCase
from st2tests.api import APIControllerWithIncludeAndExcludeFilterTestCase
http_client = six.moves.http_client
__all__ = [
'TraceControllerRBACTestCase'
]
FIXTURES_PACK = 'generic'
TEST_FIXTURES = {
'traces': ['trace_for_test_enforce.yaml', 'trace_for_test_enforce_2.yaml',
'trace_for_test_enforce_3.yaml'],
}
class TraceControllerRBACTestCase(APIControllerWithRBACTestCase,
APIControllerWithIncludeAndExcludeFilterTestCase):
# Attributes used by APIControllerWithIncludeAndExcludeFilterTestCase
get_all_path = '/v1/traces'
controller_cls = TracesController
include_attribute_field_name = 'trace_tag'
exclude_attribute_field_name = 'start_timestamp'
rbac_enabled = True
fixtures_loader = FixturesLoader()
def setUp(self):
super(TraceControllerRBACTestCase, self).setUp()
self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES)
file_name = 'trace_for_test_enforce.yaml'
TraceControllerRBACTestCase.TRACE_1 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'traces': [file_name]})['traces'][file_name]
file_name = 'trace_for_test_enforce_2.yaml'
        TraceControllerRBACTestCase.TRACE_2 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'traces': [file_name]})['traces'][file_name]
file_name = 'trace_for_test_enforce_3.yaml'
        TraceControllerRBACTestCase.TRACE_3 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'traces': [file_name]})['traces'][file_name]
# Insert mock users, roles and assignments
# Users
user_1_db = UserDB(name='trace_list')
user_1_db = User.add_or_update(user_1_db)
self.users['trace_list'] = user_1_db
user_2_db = UserDB(name='trace_view')
user_2_db = User.add_or_update(user_2_db)
self.users['trace_view'] = user_2_db
# Roles
# trace_list
grant_db = PermissionGrantDB(resource_uid=None,
resource_type=ResourceType.TRACE,
permission_types=[PermissionType.TRACE_LIST])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_1_db = RoleDB(name='trace_list', permission_grants=permission_grants)
role_1_db = Role.add_or_update(role_1_db)
self.roles['trace_list'] = role_1_db
# trace_view on trace 1
trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
grant_db = PermissionGrantDB(resource_uid=trace_uid,
resource_type=ResourceType.TRACE,
permission_types=[PermissionType.TRACE_VIEW])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_1_db = RoleDB(name='trace_view', permission_grants=permission_grants)
role_1_db = Role.add_or_update(role_1_db)
self.roles['trace_view'] = role_1_db
# Role assignments
role_assignment_db = UserRoleAssignmentDB(
user=self.users['trace_list'].name,
role=self.roles['trace_list'].name,
source='assignments/%s.yaml' % self.users['trace_list'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
role_assignment_db = UserRoleAssignmentDB(
user=self.users['trace_view'].name,
role=self.roles['trace_view'].name,
source='assignments/%s.yaml' % self.users['trace_view'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
def test_get_all_no_permissions(self):
user_db = self.users['no_permissions']
self.use_user(user_db)
resp = self.app.get('/v1/traces', expect_errors=True)
expected_msg = ('User "no_permissions" doesn\'t have required permission "trace_list"')
self.assertEqual(resp.status_code, http_client.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_get_one_no_permissions(self):
user_db = self.users['no_permissions']
self.use_user(user_db)
trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
resp = self.app.get('/v1/traces/%s' % (trace_id), expect_errors=True)
expected_msg = ('User "no_permissions" doesn\'t have required permission "trace_view"'
' on resource "%s"' % (trace_uid))
self.assertEqual(resp.status_code, http_client.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_get_all_permission_success_get_one_no_permission_failure(self):
user_db = self.users['trace_list']
self.use_user(user_db)
# trace_list permission, but no trace_view permission
resp = self.app.get('/v1/traces')
self.assertEqual(resp.status_code, http_client.OK)
self.assertEqual(len(resp.json), 3)
trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
resp = self.app.get('/v1/traces/%s' % (trace_id), expect_errors=True)
expected_msg = ('User "trace_list" doesn\'t have required permission "trace_view"'
' on resource "%s"' % (trace_uid))
self.assertEqual(resp.status_code, http_client.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_get_one_permission_success_get_all_no_permission_failure(self):
user_db = self.users['trace_view']
self.use_user(user_db)
# trace_view permission, but no trace_list permission
trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
resp = self.app.get('/v1/traces/%s' % (trace_id))
self.assertEqual(resp.status_code, http_client.OK)
self.assertEqual(resp.json['uid'], trace_uid)
resp = self.app.get('/v1/traces', expect_errors=True)
expected_msg = ('User "trace_view" doesn\'t have required permission "trace_list"')
self.assertEqual(resp.status_code, http_client.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def _insert_mock_models(self):
trace_ids = [trace['id'] for trace in self.models['traces'].values()]
return trace_ids
|
import requests
import json
url = "https://www.cbr-xml-daily.ru/daily_json.js"
response = requests.get(url)
data = json.loads(response.text)
print(data)
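# Illustrative extension (assumption: the daily feed exposes a "Valute" mapping
# keyed by currency code, each entry carrying a numeric "Value" field).
usd = data.get("Valute", {}).get("USD", {})
if usd:
    print("USD rate:", usd.get("Value"))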
|
from django.db import models
from authors import settings
from authors.apps.articles.models import Article
from authors.apps.profiles.models import Profile
# Create your models here.
class ReportArticle(models.Model):
"""model for reporting an article"""
reporter = models.ForeignKey(Profile, on_delete=models.CASCADE)
article = models.ForeignKey(Article, to_field="slug", on_delete=models.CASCADE)
violation_subject = models.CharField(max_length=100, blank=False, null=False)
violation_report = models.CharField(max_length=300, blank=True, null=True)
report_status = models.CharField(max_length=20, default='pending')
submission_date = models.DateTimeField(auto_now_add=True, editable=False)
|
# -*- coding: utf-8 -*-
import re
from six.moves import http_client
from six.moves import urllib
from wsgiref.headers import Headers
class Request(object):
def __init__(self, environ):
self.environ = environ
@property
def path(self):
return self.environ['PATH_INFO']
@property
def args(self):
""" 把查询参数转成字典形式 """
get_arguments = urllib.parse.parse_qs(self.environ['QUERY_STRING'])
return {k: v[0] for k, v in get_arguments.items()}
class Response(object):
def __init__(self, response=None, status=200, charset='utf-8', content_type='text/html'):
self.response = [] if response is None else response
self.charset = charset
self.headers = Headers([])
content_type = '{content_type}; charset={charset}'.format(content_type=content_type, charset=charset)
self.headers.add_header('content-type', content_type)
self._status = status
@property
def status(self):
status_string = http_client.responses.get(self._status, 'UNKNOWN')
return '{status} {status_string}'.format(status=self._status, status_string=status_string)
def __iter__(self):
for val in self.response:
if isinstance(val, bytes):
yield val
else:
yield val.encode(self.charset)
# Try out a new application style that combines Request and Response:
def request_response_application(func):
def application(environ, start_response):
request = Request(environ)
response = func(request)
start_response(
response.status,
response.headers.items()
)
return iter(response)
return application
class NotFoundError(Exception):
""" url pattern not found """
pass
class DecoratorRouter:
def __init__(self):
        self.routing_table = []  # stores (url pattern, callable) pairs
def match(self, path):
for (pattern, callback) in self.routing_table:
m = re.match(pattern, path)
if m:
return (callback, m.groups())
raise NotFoundError()
def __call__(self, pattern):
        def _(func):
            self.routing_table.append((pattern, func))
            return func  # return the original function so the decorator keeps it usable
        return _
routers = DecoratorRouter()
@routers(r'/hello/(.*)/$')
def hello(request, name):
return Response("<h1>Hello, {name}</h1>".format(name=name))
@routers(r'/goodbye/(.*)/$')
def goodbye(request, name):
return Response("<h1>Goodbye, {name}</h1>".format(name=name))
class Application(object):
def __init__(self, routers, **kwargs):
self.routers = routers
def __call__(self, environ, start_response):
try:
request = Request(environ)
            callback, args = self.routers.match(request.path)
response = callback(request, *args)
except NotFoundError:
response = Response("<h1>Not found</h1>", status=404)
start_response(response.status, response.headers.items())
return iter(response)
application = Application(routers)
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('127.0.0.1', 8000, application)
httpd.serve_forever()
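# Quick manual check (assuming the development server above is running locally):
#   curl http://127.0.0.1:8000/hello/world/    -> <h1>Hello, world</h1>
#   curl http://127.0.0.1:8000/goodbye/world/  -> <h1>Goodbye, world</h1>
# Any unmatched path is answered by the 404 Response built in Application.__call__.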
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_ips_rule_settings
short_description: Configure IPS rule setting in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify ips feature and rule_settings category.
      Examples include all parameters, and values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
ips_rule_settings:
description:
- Configure IPS rule setting.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
id:
description:
- Rule ID.
required: true
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPS rule setting.
fortios_ips_rule_settings:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
ips_rule_settings:
id: "3"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_ips_rule_settings_data(json):
option_list = ['id']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def ips_rule_settings(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
    elif 'state' in data['ips_rule_settings'] and data['ips_rule_settings']['state']:
state = data['ips_rule_settings']['state']
else:
state = True
ips_rule_settings_data = data['ips_rule_settings']
filtered_data = underscore_to_hyphen(filter_ips_rule_settings_data(ips_rule_settings_data))
if state == "present":
return fos.set('ips',
'rule-settings',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('ips',
'rule-settings',
mkey=filtered_data['id'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_ips(data, fos):
if data['ips_rule_settings']:
resp = ips_rule_settings(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"ips_rule_settings": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"id": {"required": True, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_ips(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_ips(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
import torch
import torchvision
# An instance of your model.
model = torchvision.models.resnet18()
# An example input you would normally provide to your model's forward() method.
example = torch.rand(1, 3, 224, 224)
# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
traced_script_module = torch.jit.trace(model, example)
# save
traced_script_module.save("model.pt")
|
import glob
import logging
import os
from typing import Any, Dict, List, Optional
from django.conf import settings
from zerver.lib.storage import static_path
# See https://jackstromberg.com/2013/01/useraccountcontrol-attributeflag-values/
# for docs on what these values mean.
LDAP_USER_ACCOUNT_CONTROL_NORMAL = '512'
LDAP_USER_ACCOUNT_CONTROL_DISABLED = '514'
def generate_dev_ldap_dir(mode: str, num_users: int=8) -> Dict[str, Dict[str, Any]]:
mode = mode.lower()
ldap_data = []
for i in range(1, num_users+1):
name = 'LDAP User %d' % (i,)
email = 'ldapuser%d@zulip.com' % (i,)
phone_number = '999999999%d' % (i,)
birthdate = '19%02d-%02d-%02d' % (i, i, i)
ldap_data.append((name, email, phone_number, birthdate))
profile_images = [open(path, "rb").read() for path in
glob.glob(os.path.join(static_path("images/team"), "*"))]
ldap_dir = {}
for i, user_data in enumerate(ldap_data):
email = user_data[1].lower()
email_username = email.split('@')[0]
common_data = {
'cn': [user_data[0]],
'userPassword': [email_username],
'phoneNumber': [user_data[2]],
'birthDate': [user_data[3]],
}
if mode == 'a':
ldap_dir['uid=' + email + ',ou=users,dc=zulip,dc=com'] = dict(
uid=[email],
thumbnailPhoto=[profile_images[i % len(profile_images)]],
userAccountControl=[LDAP_USER_ACCOUNT_CONTROL_NORMAL],
**common_data)
elif mode == 'b':
ldap_dir['uid=' + email_username + ',ou=users,dc=zulip,dc=com'] = dict(
uid=[email_username],
jpegPhoto=[profile_images[i % len(profile_images)]],
**common_data)
elif mode == 'c':
ldap_dir['uid=' + email_username + ',ou=users,dc=zulip,dc=com'] = dict(
uid=[email_username],
email=[email],
**common_data)
return ldap_dir
def init_fakeldap(directory: Optional[Dict[str, Dict[str, List[str]]]]=None) -> None: # nocoverage
# We only use this in development. Importing mock inside
# this function is an import time optimization, which
# avoids the expensive import of the mock module (slow
# because its dependency pbr uses pkgresources, which is
# really slow to import.)
from unittest import mock
from fakeldap import MockLDAP
# Silent `django_auth_ldap` logger in dev mode to avoid
# spammy user not found log messages.
ldap_auth_logger = logging.getLogger('django_auth_ldap')
ldap_auth_logger.setLevel(logging.CRITICAL)
fakeldap_logger = logging.getLogger('fakeldap')
fakeldap_logger.setLevel(logging.CRITICAL)
ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = directory or generate_dev_ldap_dir(settings.FAKE_LDAP_MODE,
settings.FAKE_LDAP_NUM_USERS)
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
from django.core.paginator import Paginator
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.views.decorators.cache import cache_page
from django.db.models import Count
from .forms import CreatePost, CreateComment
from .models import Post, User, Comment, Follow
def _create_paginator(request, post):
paginator = Paginator(post, 10)
page_number = request.GET.get("page")
page = paginator.get_page(page_number)
return page, paginator
def _search_text(request):
keyword = request.GET.get("q", None)
posts_list = Post.objects.select_related(
"author", "group").filter(
text__contains=keyword
).prefetch_related("comments")
data_paginator = _create_paginator(request, posts_list)
return data_paginator
@cache_page(20, key_prefix="index_page")
def index(request):
if request.GET.get("q") is None:
posts_list = Post.objects.order_by("-pub_date")\
.all()\
.select_related("author", "group", )\
.prefetch_related("comments",)
data_paginator = _create_paginator(request, posts_list)
else:
data_paginator = _search_text(request)
return render(request, "index.html", {"page": data_paginator[0],
"paginator": data_paginator[1],
"title": "Последние обновления",
"description": "Последние обновления на сайте",
"changing_it": "index"})
@login_required
def new_post(request):
content = {"title_name": "Новый пост", "btn_name": "Добавить пост"}
if request.method == "POST":
form = CreatePost(request.POST, files=request.FILES or None)
if form.is_valid():
author = request.user
form.cleaned_data['author'] = author
            data_clean = form.cleaned_data
            post = Post.objects.create(**data_clean)
messages.success(request, "Пост добавлен")
return redirect("index")
else:
form = CreatePost()
return render(request, "add_post.html", {"form": form, "content": content})
def profile(request, username):
user_name = get_object_or_404(User, username=username)
following = None
if request.user != AnonymousUser():
following = Follow.objects.filter(user=request.user, author=user_name)
posts = Post.objects.filter(author_id__username=user_name)\
.select_related("author", "group")\
.prefetch_related("comments")
data_paginator = _create_paginator(request, posts)
return render(request, "profile.html", {"page": data_paginator[0],
"paginator": data_paginator[1],
"author": user_name,
"following": following})
def post_view(request, username, post_id):
profile_person = get_object_or_404(User, username=username)
select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
# comments = select_post.comments.all()
comments = list(Comment.objects.filter(post_id=post_id).select_related("author", "post"))
return render(request, "post.html", {"user_post": select_post,
"author": profile_person,
"comments": comments})
def post_edit(request, username, post_id):
content = {"title_name": "Редактировать запись", "btn_name": "Сохранить"}
profile_person = get_object_or_404(User, username=username)
select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
if request.user != profile_person:
return redirect("post", username=username, post_id=post_id)
form = CreatePost(request.POST or None,
instance=select_post,
files=request.FILES or None)
if form.is_valid():
form.save()
print("Post can editable")
return redirect("post", username=username, post_id=post_id)
return render(request, "add_post.html", {"form": form,
"selected_post": select_post,
"content": content})
def page_not_found(request, exception):
return render(request, "misc/404.html", {"path": request.path}, status=404)
def server_error(request):
return render(request, "misc/500.html", status=500)
@login_required
def add_comment(request, username, post_id):
profile_person = get_object_or_404(User, username=username)
select_post = get_object_or_404(Post, pk=post_id, author=profile_person)
if request.method == "POST":
form = CreateComment(request.POST)
if form.is_valid():
author = request.user
form.cleaned_data["post"] = select_post
form.cleaned_data["author"] = author
data_clean = form.cleaned_data
comment = Comment.objects.create(**data_clean)
messages.success(request, "Коммент поставлен")
return redirect("post", username=username, post_id=post_id)
else:
form = CreateComment()
return render(request, "comments.html", {"form": form})
@login_required
def follow_index(request):
my_follow = Post.objects.filter(author__following__user=request.user)\
.select_related("author", "group")\
.prefetch_related("comments")
data_paginator = _create_paginator(request, my_follow)
return render(request, "index.html", {"page": data_paginator[0],
"paginator": data_paginator[1],
"title": "Подписки",
"description": "Последние обновления твоих людей",
"changing_it": "follow"})
@login_required
def profile_follow(request, username):
author = get_object_or_404(User, username=username)
if request.user != author:
Follow.objects.get_or_create(author=author, user=request.user)
return redirect("profile", username=username)
@login_required
def profile_unfollow(request, username):
author = get_object_or_404(User, username=username)
if request.user != author:
Follow.objects.filter(author=author, user=request.user).delete()
return redirect('profile', username=username)
|
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo import MongoClient
__all__ = ['PymongoConnection', 'MotorConnection']
class PymongoConnection:
def __init__(self, host="127.0.0.1", port="27017", db="default", user=None, password=None):
"""Create database connection."""
if user and password:
self.db_client = MongoClient(f"mongodb://{user}:{password}@{host}:{port}")
else:
self.db_client = MongoClient(f"mongodb://{host}:{port}")
self.db_name = db
def get_db_client(self) -> MongoClient:
"""Return database client instance."""
return self.db_client
def get_db(self):
"""Return database instance."""
return self.get_db_client()[self.db_name]
def close_db(self):
"""Close database connection."""
self.db_client.close()
class MotorConnection:
def __init__(self, host="127.0.0.1", port="27017", db="default", user=None, password=None):
"""Create database connection."""
if user and password:
self.db_client = AsyncIOMotorClient(f"mongodb://{user}:{password}@{host}:{port}")
else:
self.db_client = AsyncIOMotorClient(f"mongodb://{host}:{port}")
self.db_name = db
def get_db_client(self) -> AsyncIOMotorClient:
"""Return database client instance."""
return self.db_client
def get_db(self):
"""Return database instance."""
return self.get_db_client()[self.db_name]
def close_db(self):
"""Close database connection."""
self.db_client.close()
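
if __name__ == "__main__":
    # Minimal sketch (assumes a MongoDB instance reachable on localhost:27017).
    conn = PymongoConnection(db="example")
    db = conn.get_db()
    db.items.insert_one({"name": "demo"})
    print(db.items.count_documents({"name": "demo"}))
    conn.close_db()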
|
import base64
import json
import os
import os.path
import shlex
import string
from datetime import datetime
from distutils.version import StrictVersion
from .. import errors
from .. import tls
from ..constants import DEFAULT_HTTP_HOST
from ..constants import DEFAULT_UNIX_SOCKET
from ..constants import DEFAULT_NPIPE
from ..constants import BYTE_UNITS
from urllib.parse import splitnport, urlparse
def create_ipam_pool(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_pool has been removed. Please use a '
'docker.types.IPAMPool object instead.'
)
def create_ipam_config(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_config has been removed. Please use a '
'docker.types.IPAMConfig object instead.'
)
def decode_json_header(header):
data = base64.b64decode(header)
data = data.decode('utf-8')
return json.loads(data)
def compare_version(v1, v2):
"""Compare docker versions
>>> v1 = '1.9'
>>> v2 = '1.10'
>>> compare_version(v1, v2)
1
>>> compare_version(v2, v1)
-1
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0
elif s1 > s2:
return -1
else:
return 1
def version_lt(v1, v2):
return compare_version(v1, v2) > 0
def version_gte(v1, v2):
return not version_lt(v1, v2)
def _convert_port_binding(binding):
result = {'HostIp': '', 'HostPort': ''}
if isinstance(binding, tuple):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
elif isinstance(binding[0], str):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
elif isinstance(binding, dict):
if 'HostPort' in binding:
result['HostPort'] = binding['HostPort']
if 'HostIp' in binding:
result['HostIp'] = binding['HostIp']
else:
raise ValueError(binding)
else:
result['HostPort'] = binding
if result['HostPort'] is None:
result['HostPort'] = ''
else:
result['HostPort'] = str(result['HostPort'])
return result
def convert_port_bindings(port_bindings):
result = {}
for k, v in iter(port_bindings.items()):
key = str(k)
if '/' not in key:
key += '/tcp'
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
result[key] = [_convert_port_binding(v)]
return result
def convert_volume_binds(binds):
if isinstance(binds, list):
return binds
result = []
for k, v in binds.items():
if isinstance(k, bytes):
k = k.decode('utf-8')
if isinstance(v, dict):
if 'ro' in v and 'mode' in v:
raise ValueError(
'Binding cannot contain both "ro" and "mode": {}'
.format(repr(v))
)
bind = v['bind']
if isinstance(bind, bytes):
bind = bind.decode('utf-8')
if 'ro' in v:
mode = 'ro' if v['ro'] else 'rw'
elif 'mode' in v:
mode = v['mode']
else:
mode = 'rw'
result.append(
str('{0}:{1}:{2}').format(k, bind, mode)
)
else:
if isinstance(v, bytes):
v = v.decode('utf-8')
result.append(
str('{0}:{1}:rw').format(k, v)
)
return result
def convert_tmpfs_mounts(tmpfs):
if isinstance(tmpfs, dict):
return tmpfs
if not isinstance(tmpfs, list):
raise ValueError(
'Expected tmpfs value to be either a list or a dict, found: {}'
.format(type(tmpfs).__name__)
)
result = {}
for mount in tmpfs:
if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
name = mount
options = ""
else:
raise ValueError(
"Expected item in tmpfs list to be a string, found: {}"
.format(type(mount).__name__)
)
result[name] = options
return result
def convert_service_networks(networks):
if not networks:
return networks
if not isinstance(networks, list):
raise TypeError('networks parameter must be a list.')
result = []
for n in networks:
if isinstance(n, str):
n = {'Target': n}
result.append(n)
return result
def parse_repository_tag(repo_name):
parts = repo_name.rsplit('@', 1)
if len(parts) == 2:
return tuple(parts)
parts = repo_name.rsplit(':', 1)
if len(parts) == 2 and '/' not in parts[1]:
return tuple(parts)
return repo_name, None
def parse_host(addr, is_win32=False, tls=False):
path = ''
port = None
host = None
# Sensible defaults
if not addr and is_win32:
return DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
parsed_url = urlparse(addr)
proto = parsed_url.scheme
if not proto or any([x not in string.ascii_letters + '+' for x in proto]):
# https://bugs.python.org/issue754016
parsed_url = urlparse('//' + addr, 'tcp')
proto = 'tcp'
if proto == 'fd':
raise errors.DockerException('fd protocol is not implemented')
# These protos are valid aliases for our library but not for the
# official spec
if proto == 'http' or proto == 'https':
tls = proto == 'https'
proto = 'tcp'
elif proto == 'http+unix':
proto = 'unix'
if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
"Invalid bind address protocol: {}".format(addr)
)
if proto == 'tcp' and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(
'Invalid bind address format: {}'.format(addr)
)
if any([
parsed_url.params, parsed_url.query, parsed_url.fragment,
parsed_url.password
]):
raise errors.DockerException(
'Invalid bind address format: {}'.format(addr)
)
if parsed_url.path and proto == 'ssh':
raise errors.DockerException(
'Invalid bind address format: no path allowed for this protocol:'
' {}'.format(addr)
)
else:
path = parsed_url.path
if proto == 'unix' and parsed_url.hostname is not None:
# For legacy reasons, we consider unix://path
# to be valid and equivalent to unix:///path
path = '/'.join((parsed_url.hostname, path))
if proto in ('tcp', 'ssh'):
# parsed_url.hostname strips brackets from IPv6 addresses,
# which can be problematic hence our use of splitnport() instead.
host, port = splitnport(parsed_url.netloc)
if port is None or port < 0:
if proto != 'ssh':
raise errors.DockerException(
'Invalid bind address format: port is required:'
' {}'.format(addr)
)
port = 22
if not host:
host = DEFAULT_HTTP_HOST
# Rewrite schemes to fit library internals (requests adapters)
if proto == 'tcp':
proto = 'http{}'.format('s' if tls else '')
elif proto == 'unix':
proto = 'http+unix'
if proto in ('http+unix', 'npipe'):
return "{}://{}".format(proto, path).rstrip('/')
return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')
def parse_devices(devices):
device_list = []
for device in devices:
if isinstance(device, dict):
device_list.append(device)
continue
if not isinstance(device, str):
raise errors.DockerException(
'Invalid device type {0}'.format(type(device))
)
device_mapping = device.split(':')
if device_mapping:
path_on_host = device_mapping[0]
if len(device_mapping) > 1:
path_in_container = device_mapping[1]
else:
path_in_container = path_on_host
if len(device_mapping) > 2:
permissions = device_mapping[2]
else:
permissions = 'rwm'
device_list.append({
'PathOnHost': path_on_host,
'PathInContainer': path_in_container,
'CgroupPermissions': permissions
})
return device_list
def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
if not environment:
environment = os.environ
host = environment.get('DOCKER_HOST')
# empty string for cert path is the same as unset.
cert_path = environment.get('DOCKER_CERT_PATH') or None
# empty string for tls verify counts as "false".
# Any value or 'unset' counts as true.
tls_verify = environment.get('DOCKER_TLS_VERIFY')
if tls_verify == '':
tls_verify = False
else:
tls_verify = tls_verify is not None
enable_tls = cert_path or tls_verify
params = {}
if host:
params['base_url'] = host
if not enable_tls:
return params
if not cert_path:
cert_path = os.path.join(os.path.expanduser('~'), '.docker')
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
assert_hostname = False
params['tls'] = tls.TLSConfig(
client_cert=(os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem'),
verify=tls_verify,
ssl_version=ssl_version,
assert_hostname=assert_hostname,
)
return params
def convert_filters(filters):
result = {}
for k, v in iter(filters.items()):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
result[k] = [
str(item) if not isinstance(item, str) else item
for item in v
]
return json.dumps(result)
def datetime_to_timestamp(dt):
"""Convert a UTC datetime to a Unix timestamp"""
delta = dt - datetime.utcfromtimestamp(0)
return delta.seconds + delta.days * 24 * 3600
def parse_bytes(s):
if isinstance(s, (int, float,)):
return s
if len(s) == 0:
return 0
if s[-2:-1].isalpha() and s[-1].isalpha():
if s[-1] == "b" or s[-1] == "B":
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
# Check if the variable is a string representation of an int
# without a units part. Assuming that the units are bytes.
if suffix.isdigit():
digits_part = s
suffix = 'b'
else:
digits_part = s[:-1]
if suffix in units.keys() or suffix.isdigit():
try:
digits = float(digits_part)
except ValueError:
raise errors.DockerException(
'Failed converting the string value for memory ({0}) to'
' an integer.'.format(digits_part)
)
# Reconvert to long for the final result
s = int(digits * units[suffix])
else:
raise errors.DockerException(
'The specified value for memory ({0}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
return s
def normalize_links(links):
if isinstance(links, dict):
links = iter(links.items())
return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
The format of each line should be "key=value".
"""
environment = {}
with open(env_file, 'r') as f:
for line in f:
if line[0] == '#':
continue
line = line.strip()
if not line:
continue
parse_line = line.split('=', 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
else:
raise errors.DockerException(
'Invalid line in environment file {0}:\n{1}'.format(
env_file, line))
return environment
def split_command(command):
return shlex.split(command)
def format_environment(environment):
def format_env(key, value):
if value is None:
return key
if isinstance(value, bytes):
value = value.decode('utf-8')
return u'{key}={value}'.format(key=key, value=value)
return [format_env(*var) for var in iter(environment.items())]
def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task
if task:
return [
'{} {}'.format(v, k) for k, v in sorted(iter(extra_hosts.items()))
]
return [
'{}:{}'.format(k, v) for k, v in sorted(iter(extra_hosts.items()))
]
def create_host_config(self, *args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_host_config has been removed. Please use a '
'docker.types.HostConfig object instead.'
)
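
if __name__ == "__main__":
    # Illustrative sketch of a few pure helpers defined above (for demonstration
    # only; not part of the public docker-py API surface).
    print(parse_repository_tag("ubuntu:20.04"))  # ('ubuntu', '20.04')
    print(convert_volume_binds({"/tmp/data": {"bind": "/data", "mode": "ro"}}))
    print(format_environment({"DEBUG": "1", "TOKEN": None}))  # ['DEBUG=1', 'TOKEN']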
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
# generated by datamodel-codegen:
# filename: airbyte_protocol.yaml
from __future__ import annotations
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from pydantic import AnyUrl, BaseModel, Extra, Field
class Type(Enum):
RECORD = "RECORD"
STATE = "STATE"
LOG = "LOG"
SPEC = "SPEC"
CONNECTION_STATUS = "CONNECTION_STATUS"
CATALOG = "CATALOG"
class AirbyteRecordMessage(BaseModel):
class Config:
extra = Extra.allow
stream: str = Field(..., description="the name of this record's stream")
data: Dict[str, Any] = Field(..., description="the record data")
emitted_at: int = Field(
...,
description="when the data was emitted from the source. epoch in millisecond.",
)
namespace: Optional[str] = Field(None, description="the namespace of this record's stream")
class AirbyteStateMessage(BaseModel):
class Config:
extra = Extra.allow
data: Dict[str, Any] = Field(..., description="the state data")
class Level(Enum):
FATAL = "FATAL"
ERROR = "ERROR"
WARN = "WARN"
INFO = "INFO"
DEBUG = "DEBUG"
TRACE = "TRACE"
class AirbyteLogMessage(BaseModel):
class Config:
extra = Extra.allow
level: Level = Field(..., description="the type of logging")
message: str = Field(..., description="the log message")
class Status(Enum):
SUCCEEDED = "SUCCEEDED"
FAILED = "FAILED"
class AirbyteConnectionStatus(BaseModel):
class Config:
extra = Extra.allow
status: Status
message: Optional[str] = None
class SyncMode(Enum):
full_refresh = "full_refresh"
incremental = "incremental"
class DestinationSyncMode(Enum):
append = "append"
overwrite = "overwrite"
append_dedup = "append_dedup"
class OAuth2Specification(BaseModel):
class Config:
extra = Extra.allow
rootObject: Optional[List[Union[str, int]]] = Field(
None,
description="A list of strings representing a pointer to the root object which contains any oauth parameters in the ConnectorSpecification.\nExamples:\nif oauth parameters were contained inside the top level, rootObject=[] If they were nested inside another object {'credentials': {'app_id' etc...}, rootObject=['credentials'] If they were inside a oneOf {'switch': {oneOf: [{client_id...}, {non_oauth_param]}}, rootObject=['switch', 0] ",
)
oauthFlowInitParameters: Optional[List[List[str]]] = Field(
None,
description="Pointers to the fields in the rootObject needed to obtain the initial refresh/access tokens for the OAuth flow. Each inner array represents the path in the rootObject of the referenced field. For example. Assume the rootObject contains params 'app_secret', 'app_id' which are needed to get the initial refresh token. If they are not nested in the rootObject, then the array would look like this [['app_secret'], ['app_id']] If they are nested inside an object called 'auth_params' then this array would be [['auth_params', 'app_secret'], ['auth_params', 'app_id']]",
)
oauthFlowOutputParameters: Optional[List[List[str]]] = Field(
None,
description="Pointers to the fields in the rootObject which can be populated from successfully completing the oauth flow using the init parameters. This is typically a refresh/access token. Each inner array represents the path in the rootObject of the referenced field.",
)
class AuthType(Enum):
oauth2_0 = "oauth2.0"
class AuthSpecification(BaseModel):
auth_type: Optional[AuthType] = None
oauth2Specification: Optional[OAuth2Specification] = Field(
None,
description="If the connector supports OAuth, this field should be non-null.",
)
class AuthFlowType(Enum):
oauth2_0 = "oauth2.0"
oauth1_0 = "oauth1.0"
class OAuthConfigSpecification(BaseModel):
oauth_user_input_from_connector_config_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations used as input to OAuth.\nMust be a valid non-nested JSON that refers to properties from ConnectorSpecification.connectionSpecification\nusing special annotation 'path_in_connector_config'.\nThese are input values the user is entering through the UI to authenticate to the connector, that might also shared\nas inputs for syncing data via the connector.\n\nExamples:\n\nif no connector values is shared during oauth flow, oauth_user_input_from_connector_config_specification=[]\nif connector values such as 'app_id' inside the top level are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['app_id']\n }\n }\nif connector values such as 'info.app_id' nested inside another object are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['info', 'app_id']\n }\n }",
)
complete_oauth_output_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations produced by the OAuth flows as they are\nreturned by the distant OAuth APIs.\nMust be a valid JSON describing the fields to merge back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_output_specification={\n refresh_token: {\n type: string,\n path_in_connector_config: ['credentials', 'refresh_token']\n }\n }",
)
complete_oauth_server_input_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations.\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nserver when completing an OAuth flow (typically exchanging an auth code for refresh token).\n\nExamples:\n\n complete_oauth_server_input_specification={\n client_id: {\n type: string\n },\n client_secret: {\n type: string\n }\n }",
)
complete_oauth_server_output_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations that\nalso need to be merged back into the connector configuration at runtime.\nThis is a subset configuration of `complete_oauth_server_input_specification` that filters fields out to retain only the ones that\nare necessary for the connector to function with OAuth. (some fields could be used during oauth flows but not needed afterwards, therefore\nthey would be listed in the `complete_oauth_server_input_specification` but not `complete_oauth_server_output_specification`)\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nconnector when using OAuth flow APIs.\nThese fields are to be merged back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_server_output_specification={\n client_id: {\n type: string,\n path_in_connector_config: ['credentials', 'client_id']\n },\n client_secret: {\n type: string,\n path_in_connector_config: ['credentials', 'client_secret']\n }\n }",
)
class AirbyteStream(BaseModel):
class Config:
extra = Extra.allow
name: str = Field(..., description="Stream's name.")
json_schema: Dict[str, Any] = Field(..., description="Stream schema using Json Schema specs.")
supported_sync_modes: Optional[List[SyncMode]] = None
source_defined_cursor: Optional[bool] = Field(
None,
description="If the source defines the cursor field, then any other cursor field inputs will be ignored. If it does not, either the user_provided one is used, or the default one is used as a backup.",
)
default_cursor_field: Optional[List[str]] = Field(
None,
description="Path to the field that will be used to determine if a record is new or modified since the last sync. If not provided by the source, the end user will have to specify the comparable themselves.",
)
source_defined_primary_key: Optional[List[List[str]]] = Field(
None,
description="If the source defines the primary key, paths to the fields that will be used as a primary key. If not provided by the source, the end user will have to specify the primary key themselves.",
)
namespace: Optional[str] = Field(
None,
description="Optional Source-defined namespace. Currently only used by JDBC destinations to determine what schema to write to. Airbyte streams from the same sources should have the same namespace.",
)
class ConfiguredAirbyteStream(BaseModel):
class Config:
extra = Extra.allow
stream: AirbyteStream
sync_mode: SyncMode
cursor_field: Optional[List[str]] = Field(
None,
description="Path to the field that will be used to determine if a record is new or modified since the last sync. This field is REQUIRED if `sync_mode` is `incremental`. Otherwise it is ignored.",
)
destination_sync_mode: DestinationSyncMode
primary_key: Optional[List[List[str]]] = Field(
None,
description="Paths to the fields that will be used as primary key. This field is REQUIRED if `destination_sync_mode` is `*_dedup`. Otherwise it is ignored.",
)
class AdvancedAuth(BaseModel):
auth_flow_type: Optional[AuthFlowType] = None
predicate_key: Optional[List[str]] = Field(
None,
description="Json Path to a field in the connectorSpecification that should exist for the advanced auth to be applicable.",
)
predicate_value: Optional[str] = Field(
None,
description="Value of the predicate_key fields for the advanced auth to be applicable.",
)
oauth_config_specification: Optional[OAuthConfigSpecification] = None
class ConnectorSpecification(BaseModel):
class Config:
extra = Extra.allow
documentationUrl: Optional[AnyUrl] = None
changelogUrl: Optional[AnyUrl] = None
connectionSpecification: Dict[str, Any] = Field(
...,
description="ConnectorDefinition specific blob. Must be a valid JSON string.",
)
supportsIncremental: Optional[bool] = Field(None, description="If the connector supports incremental mode or not.")
supportsNormalization: Optional[bool] = Field(False, description="If the connector supports normalization or not.")
supportsDBT: Optional[bool] = Field(False, description="If the connector supports DBT or not.")
supported_destination_sync_modes: Optional[List[DestinationSyncMode]] = Field(
None, description="List of destination sync modes supported by the connector"
)
authSpecification: Optional[AuthSpecification] = Field(None, description="deprecated, switching to advanced_auth instead")
advanced_auth: Optional[AdvancedAuth] = Field(
None,
description="Additional and optional specification object to describe what an 'advanced' Auth flow would need to function.\n - A connector should be able to fully function with the configuration as described by the ConnectorSpecification in a 'basic' mode.\n - The 'advanced' mode provides easier UX for the user with UI improvements and automations. However, this requires further setup on the\n server side by instance or workspace admins beforehand. The trade-off is that the user does not have to provide as many technical\n inputs anymore and the auth process is faster and easier to complete.",
)
class AirbyteCatalog(BaseModel):
class Config:
extra = Extra.allow
streams: List[AirbyteStream]
class ConfiguredAirbyteCatalog(BaseModel):
class Config:
extra = Extra.allow
streams: List[ConfiguredAirbyteStream]
class AirbyteMessage(BaseModel):
class Config:
extra = Extra.allow
type: Type = Field(..., description="Message type")
log: Optional[AirbyteLogMessage] = Field(
None,
description="log message: any kind of logging you want the platform to know about.",
)
spec: Optional[ConnectorSpecification] = None
connectionStatus: Optional[AirbyteConnectionStatus] = None
catalog: Optional[AirbyteCatalog] = Field(None, description="catalog message: the catalog")
record: Optional[AirbyteRecordMessage] = Field(None, description="record message: the record")
state: Optional[AirbyteStateMessage] = Field(
None,
description="schema message: the state. Must be the last message produced. The platform uses this information",
)
class AirbyteProtocol(BaseModel):
airbyte_message: Optional[AirbyteMessage] = None
configured_airbyte_catalog: Optional[ConfiguredAirbyteCatalog] = None
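if __name__ == "__main__":
    # Illustrative sketch only (not part of the generated protocol module). It assumes the
    # SyncMode and DestinationSyncMode enums defined earlier in this file expose the standard
    # Airbyte members (full_refresh/incremental and append/overwrite/append_dedup).
    stream = AirbyteStream(
        name="users",
        json_schema={"type": "object", "properties": {"id": {"type": "string"}}},
        supported_sync_modes=[SyncMode.incremental],
    )
    configured = ConfiguredAirbyteStream(
        stream=stream,
        sync_mode=SyncMode.incremental,  # cursor_field is required for incremental syncs
        cursor_field=["updated_at"],
        destination_sync_mode=DestinationSyncMode.append_dedup,
        primary_key=[["id"]],  # required for *_dedup destination sync modes
    )
    catalog = ConfiguredAirbyteCatalog(streams=[configured])
    print(catalog.json())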
|
import os, base64, collections, sqlite3
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from AsciiDammit import asciiDammit
app = Flask(__name__)
bootstrap = Bootstrap(app)
import util as wpu
configDict = {}
appDataDirDict = {}
appName = "waypointapp"
@app.route('/')
def index():
appNames = appDataDirDict.keys()
return render_template('index.html', appNames=appNames)
@app.route('/reportAppIndex/<appName>')
def reportAppIndex(appName):
'''
Lists the runs for the assay.
'''
answer = []
for app_name, app_dir in appDataDirDict.items():
if appName == app_name:
dirname, dirnames, filenames = next(os.walk(app_dir))
# ignore the folder named "scrap"
answer.extend([(app_name, run_id) for run_id in [x for x in dirnames if x != "scrap"]])
return render_template('reportAppIndex.html', app_name=appName, answer=answer)
@app.route('/report_app/<app_name>/<run_id>')
def report_app(app_name, run_id):
return reportHelper(appDataDirDict[app_name], run_id, app_name)
def reportHelper(localAppDatadir, run_id, app_name):
# list all files in the report folder
dirname, dirnames, filenames = next(os.walk(localAppDatadir+'/'+run_id))
filepaths = ["file://localhost/"+dirname+"/"+z for z in filenames ]
# identify all png files in the directory and encode it into database
images = [x for x in filenames if str(x).endswith('.png')]
imagepaths = [dirname+"/"+x for x in images]
imagetags = []
for ipath in imagepaths:
        with open(ipath, 'rb') as img_file:
            data_uri = base64.b64encode(img_file.read()).decode('ascii')
img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri)
imagetags.append(img_tag)
# identify waypoint databases in the folder
databases = [dirname+'/'+x for x in filenames if str(x).endswith('waypoint.sqlite') ]
dbTables = collections.OrderedDict()
colnames = {}
if databases:
for db in databases:
conn = sqlite3.connect(db)
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table';")
tblNms = sorted([tblNm[0] for tblNm in c.fetchall()])
# reorder tblNms according to tableOrder
            matches = [d for d in configDict['applications'] if d['appName'] == app_name]
            x = matches[0] if matches else None
if x and 'tableOrder' in x.keys():
tableOrder = x['tableOrder']
tn_in_db = []
for tn in tableOrder:
if tn in tblNms:
tn_in_db.append(tn)
tblNms.remove(tn)
tblNms = tn_in_db + tblNms
tblTags= ["#%s"%tblNm for tblNm in tblNms]
# Iterate over individual tables and retrieve the row data for display
for tblNm in tblNms:
rowcount = [row for row in c.execute("SELECT count(*) row_count FROM %s"%tblNm)][0][0]
if rowcount < 500:
rows = c.execute('select * from %s'%tblNm)
# force ascii conversion for display
colnames[tblNm] = [asciiDammit(description[0]) for description in c.description]
dbTables[tblNm] = [[wpu.renderHtmlTableCell(x) for x in row] for row in rows]
conn.close()
return render_template('report.html', dbpaths=databases, run_id=run_id, tableNames=tblTags, filenames=filenames, filepaths=filepaths, imagetags=imagetags, dbTables=dbTables, colnames=colnames, app_name=app_name)
if __name__ == '__main__':
# read in the configuration file, then run the server
configDict, appDataDirDict = wpu.loadConfig(configFile = 'appconfig.json')
app.run(debug=True, host='0.0.0.0', port=5757)
|
"""
Created: 16 August 2018
Last Updated: 16 August 2018
Dan Marley
daniel.edison.marley@cernSPAMNOT.ch
Texas A&M University
-----
Class for performing deep learning in pytorch
Designed for running on desktop at TAMU
with specific set of software installed
--> not guaranteed to work in CMSSW environment!
Does not use ROOT directly.
Instead, this is setup to use flat ntuples
that are accessed via uproot.
> UPROOT: https://github.com/scikit-hep/uproot
> KERAS: https://keras.io/
> TENSORFLOW: https://www.tensorflow.org/
> PYTORCH: http://pytorch.org/
> LWTNN: https://github.com/lwtnn/lwtnn
"""
import json
import util
import datetime
import collections
from deepLearning import DeepLearning
import uproot
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as tf
from torch.autograd import Variable
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_curve
class LeopardNet(nn.Module):
"""Neural Network for Leopard in PyTorch
Adapted from (16 August 2018)
https://github.com/thongonary/surf18-tutorial/blob/master/tuto-8-torch.ipynb
"""
def __init__(self,layers):
super(LeopardNet,self).__init__()
self.dense = nn.ModuleList()
for l,layer in enumerate(layers):
self.dense.append( nn.Linear(layer['in'],layer['out']) )
def forward(self, x):
"""All the computation steps of the input are defined in this function"""
nlayers = len(self.dense)
for i,d in enumerate(self.dense):
x = d(x)
x = tf.relu(x) if i!=nlayers-1 else tf.sigmoid(x)
return x
class DeepLearningTorch(DeepLearning):
"""Deep Learning pytorch class"""
def __init__(self):
DeepLearning.__init__(self)
## PyTorch objects
self.loss_fn = None # pytorch loss function
self.torch_opt = None # pytorch optimizer
def initialize(self): #,config):
"""Initialize a few parameters after they've been set by user"""
DeepLearning.initialize(self)
return
## Specific functions to perform training/inference tasks
def build_model(self):
"""Construct the NN model -- only Keras support for now"""
self.msg_svc.INFO("DLPYTORCH : Build the neural network model")
## Declare the model
layers = []
layers.append( {'in':int(self.input_dim),'out':int(self.nNodes[0])} )
for i,n in enumerate(self.nNodes):
if i==len(self.nNodes)-1: continue
layers.append( {'in':int(n),'out':int(self.nNodes[i+1])} )
layers.append( {'in':int(self.nNodes[-1]),'out':self.output_dim} )
self.model = LeopardNet(layers)
self.model.cuda()
self.loss_fn = torch.nn.BCELoss()
self.torch_opt = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate) #1e-4)
return
def train_epoch(self,X,Y):
""""""
losses = []
for beg_i in range(0, len(X), self.batch_size):
x_batch = torch.from_numpy(X[beg_i:beg_i+self.batch_size,:])
y_batch = torch.from_numpy(Y[beg_i:beg_i+self.batch_size])
x_batch = Variable(x_batch).cuda()
y_batch = Variable(y_batch).float().unsqueeze_(-1).cuda() # modify dimensions (X,) -> (X,1)
self.torch_opt.zero_grad()
y_hat = self.model(x_batch) # forward
loss = self.loss_fn(y_hat, y_batch) # compute loss
loss.backward() # compute gradients
self.torch_opt.step() # update weights
losses.append(loss.data.cpu().numpy())
return losses
def train_model(self):
"""Setup for training the model using k-fold cross-validation"""
X = self.df[self.features].values
Y = self.df['target'].values
        kfold = StratifiedKFold(n_splits=self.kfold_splits, shuffle=True,
                                random_state=getattr(self, 'seed', None))  # 'seed' was previously undefined here
nsplits = kfold.get_n_splits(X,Y)
cvpredictions = [] # compare outputs from each cross-validation
self.msg_svc.INFO("DLPYTORCH : Fitting K-Fold cross validations")
for ind,(train,test) in enumerate(kfold.split(X,Y)):
self.msg_svc.INFO("DLPYTORCH : - Fitting K-Fold {0}".format(ind))
Y_train = Y[train]
Y_test = Y[test]
# -- store test/train data from each k-fold as histograms (to compare later)
h_tests = {}
h_trains = {}
            for n,v in self.targets.items():
                h_tests[n]  = ROOT.TH1D("test_"+n,"test_"+n,10,0,10)
                h_trains[n] = ROOT.TH1D("train_"+n,"train_"+n,10,0,10)
            # fill histogram for each target (NOTE: ROOT must be importable for these monitoring histograms)
            for n,v in self.targets.items():
                [h_tests[n].Fill(i) for i in X[test][np.where(Y_test==v)]]
                [h_trains[n].Fill(i) for i in X[train][np.where(Y_train==v)]]
## Fit the model to training data & save the history
self.model.train()
e_losses = []
for t in range(self.epochs):
e_losses += self.train_epoch(X[train],Y_train)
self.msg_svc.INFO("DLPYTORCH : Epoch {0} -- Loss {1}".format(t,e_losses[-1]))
self.histories.append(e_losses)
# evaluate the model
self.msg_svc.DEBUG("DLPYTORCH : Evaluate the model: ")
self.model.eval()
# Evaluate training sample
self.msg_svc.INFO("DLPYTORCH : Predictions from training sample")
train_predictions = self.predict(X[train])
self.train_predictions.append(train_predictions)
# Evaluate test sample
self.msg_svc.INFO("DLPYTORCH : Predictions from testing sample")
test_predictions = self.predict(X[test])
self.test_predictions.append(test_predictions)
# Make ROC curve from test sample
self.msg_svc.INFO("DLPYTORCH : Make ROC curves")
fpr,tpr,_ = roc_curve(Y[test], test_predictions)
self.fpr.append(fpr)
self.tpr.append(tpr)
# Plot the predictions to compare test/train
self.msg_svc.INFO("DLPYTORCH : Plot the train/test predictions")
self.plotter.prediction(h_trains,h_tests) # compare DNN prediction for different targets
self.msg_svc.INFO("DLPYTORCH : Finished K-Fold cross-validation: ")
self.accuracy = {'mean':np.mean(cvpredictions),'std':np.std(cvpredictions)}
self.msg_svc.INFO("DLPYTORCH : - Accuracy: {0:.2f}% (+/- {1:.2f}%)".format(np.mean(cvpredictions), np.std(cvpredictions)))
return
def predict(self,data=None):
"""Return the prediction from a test sample"""
self.msg_svc.DEBUG("DLPYTORCH : Get the DNN prediction")
if data is None:
self.msg_svc.ERROR("DLPYTORCH : predict() given NoneType data. Returning -999.")
return -999.
data = torch.from_numpy(data)
return self.model( Variable(data,volatile=True).cuda() )
def load_model(self,from_lwtnn=False):
"""Load existing model to make plots or predictions"""
output = self.output_dir+'/'+self.model_name
self.model.load_state_dict(torch.load(output))
self.model.eval()
return
def save_model(self,to_lwtnn=False):
"""Save the model for use later"""
output = self.output_dir+'/'+self.model_name
torch.save(self.model.state_dict(),output)
return
## THE END ##
|
import torch
import os.path as osp
import sys
from torch.autograd import Variable
cur_dir = osp.dirname(osp.abspath(__file__))
sys.path.insert(0, cur_dir)
import torch_nndistance as NND
p1 = torch.rand(10, 1000, 3)
p2 = torch.rand(10, 1500, 3)
points1 = Variable(p1, requires_grad=True)
points2 = p2
points1 = points1.cuda()
print(points1.requires_grad)
points2 = points2.cuda()
dist1, dist2 = NND.nnd(points1, points2)
print(dist1, dist2)
loss = torch.sum(dist1)
print("loss", loss)
loss.backward()
print(points1.grad, points2.grad)
print("====================")
points1 = Variable(p1.cuda(), requires_grad=True)
points2 = p2.cuda()
dist1, dist2 = NND.nnd(points1, points2)
print(dist1, dist2)
loss = torch.sum(dist1)
print("loss", loss)
loss.backward()
print(points1.grad, points2.grad)
|
from geographiclib.geodesic import Geodesic
from pyproj import CRS, Transformer
from .geometry import Vector, Line
def azimuth(p1: Vector, p2: Vector):
""":return: azimuth of geodesic through p1 and p2 in p1 with WGS84"""
res = Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)
return res['azi1']
def dist_m(a, b):
"""
:param a: lon lat point
:param b: lon lat point
:return: distance between a and b in meters
"""
res = Geodesic.WGS84.Inverse(a.y, a.x, b.y, b.x)
return res['s12']
def mercator_project(origin: Vector, azimuth, points: [Vector], ellps='WGS84'):
"""
Perform a oblique mercator projection of a given list of points with the
pseudoequator defined by the given line.
Formulas from DOI 10.3133/pp1395 p.69 (Map projections: A working manual)
:param origin: (lon, lat) that will become (0, 0) in projection
:param azimuth: azimuth in degrees of origin defining the direction of the
geodesic that becomes the new equator (y=0) in projection
:param points: iterable of (lon,lat) Vector instance
:param ellps: proj ellipsoid identifier for ellipsoid to use as model for
the globe. Defaults to WGS84.
:return: iterable of (x, y) Vector instances in the coordinate system with
unit 1 meter
"""
base = CRS.from_user_input(4326)
mercator = CRS(f'+proj=omerc +lonc={origin.x} +lat_0={origin.y} '
f'+alpha={azimuth} +gamma=0 +ellps={ellps}')
t = Transformer.from_crs(base, mercator)
for p in points:
res = t.transform(p.y, p.x)
yield Vector(res[1], res[0])
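# Usage sketch (illustrative only; assumes the Vector class from .geometry is constructed
# as Vector(x=lon, y=lat)). From a sibling module within the package:
#
#   paris = Vector(2.3522, 48.8566)
#   berlin = Vector(13.4050, 52.5200)
#   az = azimuth(paris, berlin)            # initial bearing of the geodesic Paris -> Berlin
#   d = dist_m(paris, berlin)              # geodesic distance in meters
#   projected = list(mercator_project(paris, az, [paris, berlin]))
#   # 'projected' holds (x, y) Vectors in meters; per the docstring, the origin (Paris)
#   # maps to (0, 0) and the geodesic through it becomes the new equator y = 0.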
|
from enum import Enum
class Color(Enum):
red = 1
green = 2
blue = 3
print(Color.red.name, Color.red.name.upper())
print(Color.red.name.<warning descr="Unresolved attribute reference 'foo' for class 'str'">foo</warning>)
print(Color.red.value.<warning descr="Unresolved attribute reference 'foo' for class 'int'">foo</warning>)
print(Color.red.<warning descr="Unresolved attribute reference 'foo' for class 'Color'">foo</warning>)
print(Color.__members__.items())
print(Color.__members__.<warning descr="Unresolved attribute reference 'foo' for class 'dict'">foo</warning>)
|
"""
Segment_tree creates a segment tree with a given array and function,
allowing queries to be done later in log(N) time
function takes 2 values and returns a same type value
"""
class SegmentTree:
def __init__(self, arr, function):
self.segment = [0 for x in range(3 * len(arr) + 3)]
self.arr = arr
self.fn = function
        self.make_tree(0, 0, len(arr) - 1)
def make_tree(self, i, l, r):
if l == r:
self.segment[i] = self.arr[l]
elif l < r:
self.make_tree(2 * i + 1, l, int((l + r) / 2))
self.make_tree(2 * i + 2, int((l + r) / 2) + 1, r)
self.segment[i] = self.fn(self.segment[2 * i + 1], self.segment[2 * i + 2])
def __query(self, i, L, R, l, r):
if l > R or r < L or L > R or l > r:
return None
if L >= l and R <= r:
return self.segment[i]
val1 = self.__query(2 * i + 1, L, int((L + R) / 2), l, r)
val2 = self.__query(2 * i + 2, int((L + R + 2) / 2), R, l, r)
print(L, R, " returned ", val1, val2)
if val1 != None:
if val2 != None:
return self.fn(val1, val2)
return val1
return val2
def query(self, L, R):
return self.__query(0, 0, len(self.arr) - 1, L, R)
"""
Example -
mytree = SegmentTree([2,4,5,3,4],max)
mytree.query(2,4)
mytree.query(0,3) ...
mytree = SegmentTree([4,5,2,3,4,43,3],lambda a, b: a + b)   # the function must take two values, so builtin sum() does not fit
mytree.query(1,6)
...
"""
|
import unittest
from rfapi.error import JsonParseError, MissingAuthError
class ApiClientTest(unittest.TestCase):
def test_json_parse_error(self):
resp = type('', (object,), {"content": ""})()
msg = "Could not parse"
e = JsonParseError(msg, resp)
self.assertEqual(str(e), msg)
def test_missing_auth_error(self):
e = MissingAuthError()
self.assertTrue("API" in str(e))
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def is_valid(array, exception=False):
    """
    Args:
        array (#ak.Array, #ak.Record, #ak.layout.Content, #ak.layout.Record, #ak.ArrayBuilder, #ak.layout.ArrayBuilder):
            Array or record to check.
        exception (bool): If True, validity errors raise exceptions.
    Returns True if there are no errors and False if there is an error.
    Checks for errors in the structure of the array, such as indexes that run
    beyond the length of a node's `content`, etc. Either an error is raised or
    the function returns a boolean.
    See also #ak.validity_error.
    """
    # Body restored from the commented-out version; assumes ak.validity_error
    # (the companion function referenced in the docstring) is available.
    out = ak.validity_error(array, exception=exception)
    return out is None
|
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import os,sys,inspect
import imageio
sys.path.insert(1, os.path.join(sys.path[0], '..')) #go up a dir to import
import CodePy2.funmath as funmath
#import imageio
n = 1.0
sizes = [i/n for i in range(33*int(n))]
xvals = sizes
filenames = []
for expectedlength in sizes:
yvals = []
fig = plt.figure()
for i in sizes:
variance = 1
strength = 1
yvals.append(funmath.getnormval(i,expectedlength,strength,variance))
        # matplotlib.mlab.normpdf was removed in newer matplotlib; the peak of a normal
        # pdf with this variance is 1/sqrt(2*pi*variance).
        maxval = 1.0 / np.sqrt(2 * np.pi * variance)
#yvals[-1] = yvals[-1]*strength/maxval
plt.plot(xvals,yvals)
plt.grid(True)
plt.ylabel('Adjusted weight (A)')
    plt.xlabel('Manhattan distance (M)')
plt.axis([0, 30, 0, 30])
plt.title('Gaussian adjusted matching distances')
plt.suptitle('variance = '+str(variance)+', w = '+str(expectedlength))
filename = 'gaussian/'+'gaussian-'+str(int(expectedlength*n))+'.png'
plt.savefig(filename)
filenames.append(filename)
plt.close()
#plt.show()
#os.system("avconv -y -f image2 -i figs/gaussian-%d.png -r 10 -s 800x600 gaussianvideo.avi")
#turn into gif
images = []
for filename in filenames:
images.append(imageio.imread(filename))
imageio.mimsave('xbar_demo.gif', images)
|
import copy
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.utils import clones
class LayerNormGoogle(nn.Module):
def __init__(self, features, epsilon=1e-6):
super(LayerNormGoogle, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.epsilon = epsilon
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.epsilon) + self.b_2
class EncoderBlockGoogle(nn.Module):
def __init__(self, layer, num_layers):
super(EncoderBlockGoogle, self).__init__()
self.layers = clones(layer, num_layers)
self.norm = LayerNormGoogle(layer.size)
def forward(self, x, mask):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class ResidualConnectionGoogle(nn.Module):
def __init__(self, size, keep_prob):
super(ResidualConnectionGoogle, self).__init__()
self.norm = LayerNormGoogle(size)
# TODO: Use dropout interface
self.dropout = nn.Dropout(keep_prob)
def forward(self, input, sublayer):
return input + self.dropout(sublayer(self.norm(input)))
class EncoderLayerGoogle(nn.Module):
def __init__(self, size, attention, feed_forward, keep_prob):
super(EncoderLayerGoogle, self).__init__()
self.size = size
self.attention = attention
self.feed_forward = feed_forward
# Each encoder layer has two sublayers
self.sublayer = clones(ResidualConnectionGoogle(size, keep_prob), 2)
def forward(self, x, mask):
x = self.sublayer[0](x, lambda x: self.attention(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class EncoderClassifier(nn.Module):
def __init__(self, embedding, encoder, classifier, device, is_average=True):
super(EncoderClassifier, self).__init__()
self.embedding = embedding
self.encoder = encoder
self.classifier = classifier
self.device = device
self.is_average = is_average
def forward(self, x, mask=None):
kl_loss = torch.Tensor([0.0])
# Initial x.size() = [length, batch_size]
x = x.permute(1, 0)
# After permute x.size = [batch_size, length]
x = self.embedding(x)
if "cuda" in str(self.device):
x = x.cuda()
kl_loss = kl_loss.cuda()
x = self.encoder(x, mask)
if self.is_average:
# Averaged sentence representation
x = torch.mean(x, dim=1)
x = self.classifier(x)
return x, kl_loss
class Classifier(nn.Module):
def __init__(self, d_model, d_hidden, num_classes, keep_prob):
super(Classifier, self).__init__()
self.linear1 = nn.Linear(d_model, d_hidden)
self.dropout = nn.Dropout(keep_prob)
self.relu = nn.ReLU()
self.linear2 = nn.Linear(d_hidden, num_classes)
def forward(self, x):
x = self.dropout(self.relu(self.linear1(x)))
x = self.linear2(x)
return x
class MultiHeadedAttentionGoogle(nn.Module):
def __init__(self, heads=8, d_model=512, keep_prob=0.1):
super(MultiHeadedAttentionGoogle, self).__init__()
assert d_model % heads == 0
self.d_k = d_model // heads
self.heads = heads
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(keep_prob)
def attention(self, query, key, value, mask=None):
# Dot product attention
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if self.dropout is not None:
p_attn = self.dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(self, query, key, value, mask=None):
num_batches = query.size(0)
if mask is not None:
mask = mask.unsqueeze(1)
# Apply linear projection on the input sequence and split the heads.
query, key, value = [linear(x).view(num_batches, -1, self.heads, self.d_k).transpose(1, 2)
for linear, x in zip(self.linears, (query, key, value))]
# Apply attention on the projected and splitted vectors
x, self.attn = self.attention(query, key, value, mask=mask)
# Concat vectors and apply linear
x = x.transpose(1, 2).contiguous().view(num_batches, -1, self.heads * self.d_k)
return self.linears[-1](x)
class PositionalFeedForwardGoogle(nn.Module):
def __init__(self, d_model, d_ff, keep_prob=0.1):
super(PositionalFeedForwardGoogle, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(keep_prob)
self.relu = nn.ReLU()
def forward(self, input):
return self.w_2(self.dropout(self.relu(self.w_1(input))))
class Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, padding_id, use_pretrained_embed, pretrained_weights,
optional_sqrt_mul=False):
super(Embeddings, self).__init__()
# Initialize embeddings
self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_id).cpu()
if use_pretrained_embed:
            # nn.Embedding.from_pretrained is a constructor (classmethod); the original call
            # discarded its result, so copy the pretrained weights in place instead.
            self.embedding.weight.data.copy_(pretrained_weights)
self.embed_dim = embed_dim
self.optional_sqrt_mul = optional_sqrt_mul
def forward(self, input):
if self.optional_sqrt_mul:
return self.embedding(input) * math.sqrt(self.embed_dim)
else:
return self.embedding(input)
class PositionalEncodingGoogle(nn.Module):
def __init__(self, d_model, keep_prob=0.1, max_len=5000):
super(PositionalEncodingGoogle, self).__init__()
self.dropout = nn.Dropout(keep_prob)
positional_encoding = torch.zeros(max_len, d_model)
pos = torch.arange(0., max_len).unsqueeze(1)
# Log space
div_term = torch.exp(torch.arange(0., d_model, 2) * (-math.log(10000) / d_model))
positional_encoding[:, 0::2] = torch.sin(pos * div_term)
positional_encoding[:, 1::2] = torch.cos(pos * div_term)
positional_encoding = positional_encoding.unsqueeze(0)
self.register_buffer("pe", positional_encoding)
def forward(self, input):
return self.dropout(input + Variable(self.pe[:, :input.size(1)], requires_grad=False))
class TransformerGoogle:
def __init__(self, args):
super(TransformerGoogle, self).__init__()
self.args_common = args["common_model_properties"]
self.args_specific = args["transformer_google"]
# Device
self.device = self.args_common["device"]
# Input/Output dimensions
self.vocab_size = self.args_common["vocab_size"]
self.embed_dim = self.args_common["embed_dim"]
self.num_class = self.args_common["num_class"]
# Embedding parameters
self.padding_id = self.args_common["padding_id"]
# Condition parameters
self.use_pretrained_embed = self.args_common["use_pretrained_embed"]
self.use_embed_sqrt_mul = self.args_specific["use_embed_sqrt_mul"]
# Pretrained embedding weights
self.pretrained_weights = self.args_common["pretrained_weights"]
# Dropout probabilities for each individual part of the full model.
self.keep_prob_encoder = self.args_specific["keep_prob_encoder"]
self.keep_prob_pe = self.args_specific["keep_prob_pe"]
        self.keep_prob_pff = self.args_specific["keep_prob_pff"]
self.keep_prob_attn = self.args_specific["keep_prob_attn"]
self.keep_prob_clf = self.args_specific["keep_prob_clf"]
# Condition parameter for the transformer type (It only supports classification for now)
self.transformer_type = self.args_specific["transformer_type"]
# Number of parallel attention layers for MultiHeadedAttention
self.heads = self.args_specific["heads"]
# Number of encoder layers
self.num_encoder_layers = self.args_specific["num_encoder_layers"]
# Number of hidden count units for Position-Wise Feed-Forward Network
self.num_hidden_pos_ff = self.args_specific["num_hidden_pos_ff"]
# Maximum length of an input
self.max_length = self.args_specific["max_length"]
if self.transformer_type == "classifier":
self.model = self.create_classifier_transformer()
else:
raise ValueError("Transformer can be created as classifier for now!")
def create_classifier_transformer(self):
c = copy.deepcopy
# Initialize individual parts of the full model
# attention = torch.nn.MultiheadAttention(num_heads=self.heads, embed_dim=self.embed_dim,
# dropout=self.keep_prob_attn)
attention = MultiHeadedAttentionGoogle(heads=self.heads, d_model=self.embed_dim, keep_prob=self.keep_prob_attn)
ff = PositionalFeedForwardGoogle(d_model=self.embed_dim, d_ff=self.num_hidden_pos_ff,
                                         keep_prob=self.keep_prob_pff)
embeddings = Embeddings(self.embed_dim, self.vocab_size, self.padding_id, self.use_pretrained_embed,
self.pretrained_weights, optional_sqrt_mul=self.use_embed_sqrt_mul)
positional_embeddings = PositionalEncodingGoogle(d_model=self.embed_dim, keep_prob=self.keep_prob_pe,
max_len=self.max_length)
# Initialize the full model
model = EncoderClassifier(nn.Sequential(embeddings, c(positional_embeddings)),
EncoderBlockGoogle(
EncoderLayerGoogle(self.embed_dim, c(attention), c(ff), self.keep_prob_encoder),
self.num_encoder_layers),
Classifier(self.embed_dim, d_hidden=self.embed_dim // 2, num_classes=self.num_class,
keep_prob=self.keep_prob_clf),
device=self.device)
# Initialize model parameters
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
if __name__ == '__main__':
print("Transformer tests")
plt.figure(figsize=(15, 5))
pe = PositionalEncodingGoogle(20, 0)
y = pe.forward(Variable(torch.zeros(1, 100, 20)))
plt.plot(np.arange(100), y[0, :, 4:8].data.numpy())
plt.legend(["dim %d" % p for p in [4, 5, 6, 7]])
plt.show()
|
from tensorflow import keras
from constants import TRADING_DAYS_PER_WEEK, INDEX_RETURN_INDICATOR_NUMBER
from ..constants import *
MODEL_NAME = 'ifcp_model_ver1_2'
ROLLING_WINDOW_SIZE = TRADING_DAYS_PER_WEEK
def build_model():
fund1_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND1_RETURN_NAME)
fund1_benchmark_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND1_BENCHMARK_RETURN_NAME)
fund2_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND2_RETURN_NAME)
fund2_benchmark_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND2_BENCHMARK_RETURN_NAME)
fund1_performance = keras.layers.subtract([fund1_return, fund1_benchmark_return], name='fund1_performance')
fund2_performance = keras.layers.subtract([fund2_return, fund2_benchmark_return], name='fund2_performance')
fund1_attributes = keras.layers.concatenate(
[fund1_return, fund1_benchmark_return, fund1_performance], name='fund1_attributes')
fund2_attributes = keras.layers.concatenate(
[fund2_return, fund2_benchmark_return, fund2_performance], name='fund2_attributes')
fund_attributes_gru = keras.layers.GRU(
12,
kernel_regularizer=keras.regularizers.l2(0.01),
recurrent_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01),
name='fund_attributes_gru',
)
fund1_attributes_after_gru = fund_attributes_gru(fund1_attributes)
fund2_attributes_after_gru = fund_attributes_gru(fund2_attributes)
fund_attributes_after_gru = keras.layers.concatenate(
[fund1_attributes_after_gru, fund2_attributes_after_gru], name='fund_attributes_after_gru')
auxiliary_output = keras.layers.Dense(1, activation='sigmoid', name=AUXILIARY_OUTPUT_NAME)(
fund_attributes_after_gru)
index_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, INDEX_RETURN_INDICATOR_NUMBER), name=INDEX_RETURN_NAME)
index_return_gru = keras.layers.GRU(
35,
kernel_regularizer=keras.regularizers.l2(0.01),
recurrent_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01),
name='index_return_gru',
)
index_return_after_gru = index_return_gru(index_return)
merge = keras.layers.concatenate([fund_attributes_after_gru, index_return_after_gru], name='merge')
x = keras.layers.Dense(64, activation='relu',
kernel_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01))(merge)
x = keras.layers.Dense(64, activation='relu',
kernel_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01))(x)
x = keras.layers.Dense(16, activation='relu',
kernel_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01))(x)
main_output = keras.layers.Dense(1, activation='sigmoid', name=MAIN_OUTPUT_NAME)(x)
model = keras.Model(inputs=[
fund1_return, fund1_benchmark_return, fund2_return, fund2_benchmark_return, index_return],
outputs=[main_output, auxiliary_output])
return model
|
# flake8: noqa
from fugue.extensions.creator.creator import Creator
from fugue.extensions.creator.convert import creator, _to_creator
|
# -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
公众号: 开源优测
Email: lymking@foxmail.com
"""
import os
import time
import tempfile
import subprocess
class Process:
def __init__(self, command):
self._command = command
self._process = None
self._error = None
self._out_file = None
self._out_path = None
self._out_fd = None
print(command)
def start(self):
self._out_fd, self._out_path = tempfile.mkstemp(prefix='rfproc_', suffix='.txt', text=True)
self._out_file = open(self._out_path)
try:
self._process = subprocess.Popen(self._command, stdout=self._out_fd,
stderr=subprocess.STDOUT)
except OSError as err:
self._error = str(err)
def is_finished(self):
return self._error is not None or self._process.poll() is not None
def stop(self):
self._process.kill()
def wait(self):
if self._process is not None:
self._process.wait()
def get_output(self, wait_until_finished=False):
"""Returns the output produced by the process.
If ``wait_until_finished`` is True, blocks until the process is
finished and returns all output. Otherwise the currently available
output is returned immediately.
Currently available output depends on buffering and might not include
everything that has been written by the process.
"""
if self._error:
self._close_outputs()
return self._error
if wait_until_finished:
self._process.wait()
output = self._out_file.read()
if self.is_finished():
self._close_outputs()
return output
def _close_outputs(self):
self._out_file.close()
os.close(self._out_fd)
self._remove_tempfile()
def _remove_tempfile(self, attempts=10):
try:
os.remove(self._out_path)
except OSError:
if not attempts:
raise
time.sleep(1)
self._remove_tempfile(attempts - 1)
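if __name__ == '__main__':
    # Illustrative sketch only: run a short command and print whatever it wrote to stdout.
    # The command below is an arbitrary example, not part of the original module.
    proc = Process(['python', '--version'])
    proc.start()
    print(proc.get_output(wait_until_finished=True))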
|
"""Inventory requests."""
from collections import defaultdict
from typing import Any, MutableMapping
import requests
from linnapi.request import LinnworksAPIRequest
class GetStockItemIDsBySKU(LinnworksAPIRequest):
"""Return the stock item ID for a SKU."""
URL = "https://eu-ext.linnworks.net/api/Inventory/GetStockItemIdsBySKU"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
skus: list[str] = kwargs["skus"]
return {"request": {"SKUS": skus}}
class GetStockLevel(LinnworksAPIRequest):
"""Return the current stock level for a product by stock item ID."""
URL = "https://eu-ext.linnworks.net/api/Stock/GetStockLevel"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
stock_item_id: str = kwargs["stock_item_id"]
return {"stockItemId": stock_item_id}
class GetStockLevelBatch(LinnworksAPIRequest):
"""Return the stock level for multiple products by stock item ID."""
URL = "https://eu-ext.linnworks.net/api/Stock/GetStockLevel_Batch"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
stock_item_ids: list[str] = kwargs["stock_item_ids"]
return {"request": {"StockItemIDs": stock_item_ids}}
class SetStockLevelBySKU(LinnworksAPIRequest):
"""Update the stock level for a product."""
URL = "https://eu-ext.linnworks.net/api/Stock/UpdateStockLevelsBySKU"
METHOD = LinnworksAPIRequest.POST
@classmethod
def params(cls, *args: Any, **kwargs: Any) -> dict[str, Any]:
"""Return request URL parameters."""
return {"changeSource": str(kwargs["change_source"])}
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
location_id: str = kwargs["location_id"]
changes: tuple[tuple[str, int]] = kwargs["changes"]
stock_levels = [
{"SKU": str(sku), "LocationID": location_id, "Level": int(level)}
for sku, level in changes
]
return {"stockLevels": stock_levels}
class AddImageToInventoryItem(LinnworksAPIRequest):
"""
Adds an image to a stock item.
Either `item_number` or `stock_item_id` must be passed.
Kwargs:
image_url (str): The URL of the image to be added.
item_number (str): The SKU of the product to add the image to.
stock_item_id (str): The ID (GUID) of the product to add the image to.
is_main (bool): Is the image the main image for the product.
"""
URL = "https://eu-ext.linnworks.net/api/Inventory/AddImageToInventoryItem"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
item_number: str = kwargs.get("item_number", "")
stock_item_id: str = kwargs.get("stock_item_id", "")
is_main: bool = kwargs["is_main"]
image_url: str = kwargs["image_url"]
request_data = {
"IsMain": is_main,
"ImageUrl": image_url,
}
if not item_number and not stock_item_id:
            raise ValueError("Either `stock_item_id` or `item_number` must be passed.")
if item_number:
request_data["ItemNumber"] = item_number
if stock_item_id:
request_data["StockItemId"] = stock_item_id
return {"request": request_data}
class UpdateImages(LinnworksAPIRequest):
"""
Update properties on images.
Kwargs:
row_id (str): ID (GUID) of image, passed as "pkRowId". Required.
stock_item_id (str): The ID (GUID) of the stock item to which the image
            belongs. Required.
        is_main (bool): Set whether the image is the main image or not, passed as "IsMain".
sort_order (int): The position of the image, passed as "SortOrder".
"""
URL = "https://eu-ext.linnworks.net/api/Inventory/UpdateImages"
METHOD = LinnworksAPIRequest.POST
@classmethod
def item_json(cls, **kwargs: Any) -> dict[str, Any]:
"""Return request data for a single image."""
row_id = kwargs.get("row_id")
is_main = kwargs.get("is_main")
sort_order = kwargs.get("sort_order")
checksum_value = kwargs.get("checksum_value")
raw_checksum = kwargs.get("raw_checksum")
stock_item_id = kwargs.get("stock_item_id")
stock_item_int_id = kwargs.get("stock_item_id_int")
image_data = {
"pkRowId": row_id,
"IsMain": is_main,
"SortOrder": sort_order,
"ChecksumValue": checksum_value,
"RawChecksum": raw_checksum,
"StockItemId": stock_item_id,
"StockItemIntId": stock_item_int_id,
}
return {key: value for key, value in image_data.items() if value is not None}
@classmethod
def multi_json(
cls, requests: list[MutableMapping[Any, Any]]
) -> dict[str, Any] | list[Any]:
"""Return request JSON with multiple updates."""
return {"images": [cls.item_json(**request) for request in requests]}
@classmethod
def parse_response(
cls, response: requests.models.Response, *args: Any, **kwargs: Any
) -> str:
"""Parse the request response."""
return response.text
class GetInventoryItemImages(LinnworksAPIRequest):
"""
Use this call to Get inventory item images.
Args:
        inventory_item_id (str): The ID (GUID) of the stock item to retrieve images for,
passed as "InventoryItemId".
"""
URL = "https://eu-ext.linnworks.net/api/Inventory/GetInventoryItemImages"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
inventory_item_id = kwargs.get("inventory_item_id")
return {"inventoryItemId": inventory_item_id}
class DeleteImagesFromInventoryItem(LinnworksAPIRequest):
"""
Remove an image from an inventory item.
Kwargs:
image_id (str): ID (GUID) of image, passed as "pkRowId". Required.
stock_item_id (str): The ID (GUID) of the stock item to which the image
            belongs. Required.
"""
URL = "https://eu-ext.linnworks.net/api/Inventory/DeleteImagesFromInventoryItem"
METHOD = LinnworksAPIRequest.POST
@classmethod
def item_json(cls, **kwargs: Any) -> dict[str, Any]:
"""Return request data for a single image."""
stock_item_id = kwargs["stock_item_id"]
image_url = kwargs["image_url"]
return {stock_item_id: [image_url]}
@classmethod
def multi_json(
cls, requests: list[MutableMapping[Any, Any]]
) -> dict[str, Any] | list[Any]:
"""Return request JSON with multiple updates."""
stock_items = defaultdict(list)
for request in requests:
for key, images in cls.item_json(**request).items():
stock_items[key].extend(images)
return {"inventoryItemImages": dict(stock_items)}
@classmethod
def parse_response(
cls, response: requests.models.Response, *args: Any, **kwargs: Any
) -> str:
"""Parse the request response."""
return response.text
class GetItemChangesHistory(LinnworksAPIRequest):
"""Get the stock change history for an item.
    Kwargs:
        stock_item_id (str): The ID (GUID) of the stock item to retrieve history for.
        location_id (str): The ID (GUID) of the stock location. Defaults to an empty string.
        entries_per_page (int): The number of entries to return per page. Defaults to 500.
        page_number (int): The page number to return. Defaults to 1.
    """
URL = "https://eu-ext.linnworks.net/api/Stock/GetItemChangesHistory"
METHOD = LinnworksAPIRequest.POST
@classmethod
def params(cls, *args: Any, **kwargs: Any) -> dict[str, Any]:
"""Return request JSON post data."""
stock_item_id = kwargs.get("stock_item_id")
location_id = kwargs.get("location_id", "")
entries_per_page = kwargs.get("entries_per_page", 500)
page_number = kwargs.get("page_number", 1)
return {
"stockItemId": stock_item_id,
"locationId": location_id,
"entriesPerPage": entries_per_page,
"pageNumber": page_number,
}
|
from flask import Flask, render_template, request
from dashboard_forms import Dashform
#import create_pickle as p_j
import json
import os
app = Flask(__name__)
app.secret_key = 'dash_flask_key'
creddir = os.path.join(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))), 'credentials/dash_id.json')
# creddir_2 = os.path.join(os.path.dirname(
# os.path.dirname(os.path.realpath(__file__))), 'credentials')
tempdir = os.path.join(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))), 'www/templates/dash_id_template.json')
def Convert(string):
li = list(string.split(","))
k = []
for i in li:
        k.append(str(i).replace(' ', ''))  # the original discarded the result of replace()
return k
def formatting(string):
string = string.replace("[", "")
string = string.replace("]", "")
string = string.replace("'", "")
string = string.replace(" ", "")
return string
def json_exists(file_name):
return os.path.exists(file_name)
def getinfo():
data = []
if json_exists(creddir):
with open(creddir, "r") as rdash_id:
data = json.load(rdash_id)
return data
else:
with open(tempdir, "r") as f1, open(creddir, "w+") as f2:
f2.write(f1.read())
            f2.close()
with open(creddir, "r") as rdash_id:
data = json.load(rdash_id)
return data
def save_json(res):
with open(creddir, 'r') as f:
data = json.load(f)
data["Transit"]["T_URL"] = res["T_URL"]
data["Transit"]["T_API_KEY"] = res["T_API_KEY"]
data["Transit"]["Stops"] = Convert(res["Stops"])
data["Transit"]["T_BUS"] = res["T_BUS"]
data["Transit"]["T_BUS_TIME"] = res["T_BUS_TIME"]
data["Weather"]["W_URL"] = res["W_URL"]
data["Weather"]["UNITS"] = res["UNITS"]
data["Weather"]["W_API_KEY"] = res["W_API_KEY"]
data["Geolocation"]["G_URL"] = res["G_URL"]
data["Geolocation"]["G_API_KEY"] = res["G_API_KEY"]
data["Currency"]["C_URL_1"] = res["C_URL_1"]
data["Currency"]["C_API_KEY_1"] = res["C_API_KEY_1"]
data["Currency"]["C_URL_3"] = res["C_URL_3"]
data["Currency"]["C_URL_4"] = res["C_URL_4"]
data["Currency"]["CURR_CHECK"] = Convert(res["CURR_CHECK"])
data["Stocks"]["STOCK_W_URL"] = res["STOCK_W_URL"]
data["Stocks"]["STOCK_WE_URL"] = res["STOCK_WE_URL"]
data["Stocks"]["STOCK_API"] = res["STOCK_API"]
data["Stocks"]["STOCK_CHECK"] = Convert(res["STOCK_CHECK"])
data["Tasklist"]["gsheet_json"] = res["gsheet_json"]
data["Tasklist"]["sheetname"] = res["sheetname"]
data["G_Meetings"]["CREDENTIALS_FILE"] = res["CREDENTIALS_FILE"]
data["News"]["NEWS_URL"] = res["NEWS_URL"]
data["News"]["NEWS_API"] = res["NEWS_API"]
data["News"]["NEWS_SOURCES"] = str(res["NEWS_SOURCES"]).replace(' ', '')
data["System"]["waking_time"] = res["waking_time"]
data["System"]["sleeping_time"] = res["sleeping_time"]
data["System"]["mod_1_choice"] = res["mod_1_choice"]
data["System"]["mod_2_choice"] = res["mod_2_choice"]
data["System"]["mod_3_choice"] = res["mod_3_choice"]
data["System"]["mod_4_choice"] = res["mod_4_choice"]
data["System"]["refresh_time"] = res["refresh_time"]
data["System"]["awake"] = res["awake"]
os.remove(creddir)
with open(creddir, 'w+') as f:
json.dump(data, f, indent=4)
@ app.route('/', methods=['POST', 'GET'])
def login():
form = Dashform()
d_data = getinfo()
form.res_msg.label = ""
if request.method == 'POST':
form.res_msg.label = ""
if request.form['btn'] == 'Submit':
results = request.form
save_json(results)
form.res_msg.label = "Information saved successfully"
'''elif request.form['btn'] == 'Generate Pickle File':
results = request.form
p_j.get_calendar_service(results["CREDENTIALS_FILE"], creddir_2)
'''
d_data = getinfo()
form.T_URL.data = str(d_data["Transit"]["T_URL"])
form.T_API_KEY.data = str(d_data["Transit"]["T_API_KEY"])
form.Stops.data = formatting(str(d_data["Transit"]["Stops"]))
form.T_BUS.data = str(d_data["Transit"]["T_BUS"])
form.T_BUS_TIME.data = str(d_data["Transit"]["T_BUS_TIME"])
form.W_URL.data = str(d_data["Weather"]["W_URL"])
form.W_API_KEY.data = str(d_data["Weather"]["W_API_KEY"])
form.UNITS.data = str(d_data["Weather"]["UNITS"])
form.C_URL_1.data = str(d_data["Currency"]["C_URL_1"])
form.C_API_KEY_1.data = str(d_data["Currency"]["C_API_KEY_1"])
form.C_URL_3.data = str(d_data["Currency"]["C_URL_3"])
form.C_URL_4.data = str(d_data["Currency"]["C_URL_4"])
form.CURR_CHECK.data = formatting(str(d_data["Currency"]["CURR_CHECK"]))
form.STOCK_W_URL.data = str(d_data["Stocks"]["STOCK_W_URL"])
form.STOCK_WE_URL.data = str(d_data["Stocks"]["STOCK_WE_URL"])
form.STOCK_API.data = str(d_data["Stocks"]["STOCK_API"])
form.STOCK_CHECK.data = formatting(str(d_data["Stocks"]["STOCK_CHECK"]))
form.G_URL.data = str(d_data["Geolocation"]["G_URL"])
form.G_API_KEY.data = str(d_data["Geolocation"]["G_API_KEY"])
form.gsheet_json.data = str(d_data["Tasklist"]["gsheet_json"])
form.sheetname.data = str(d_data["Tasklist"]["sheetname"])
form.CREDENTIALS_FILE.data = str(d_data["G_Meetings"]["CREDENTIALS_FILE"])
form.NEWS_URL.data = str(d_data["News"]["NEWS_URL"])
form.NEWS_API.data = str(d_data["News"]["NEWS_API"])
form.NEWS_SOURCES.data = formatting(str(d_data["News"]["NEWS_SOURCES"]))
form.waking_time.data = str(d_data["System"]["waking_time"])
form.sleeping_time.data = str(d_data["System"]["sleeping_time"])
form.mod_1_choice.data = str(d_data["System"]["mod_1_choice"])
form.mod_2_choice.data = str(d_data["System"]["mod_2_choice"])
form.mod_3_choice.data = str(d_data["System"]["mod_3_choice"])
form.mod_4_choice.data = str(d_data["System"]["mod_4_choice"])
form.refresh_time.data = str(d_data["System"]["refresh_time"])
form.awake.data = str(d_data["System"]["awake"])
return render_template('Settings.html', form=form)
elif request.method == 'GET':
# populate the form on start
d_data = getinfo()
form.res_msg.label = ""
form.T_URL.data = str(d_data["Transit"]["T_URL"])
form.T_API_KEY.data = str(d_data["Transit"]["T_API_KEY"])
form.Stops.data = formatting(str(d_data["Transit"]["Stops"]))
form.T_BUS.data = str(d_data["Transit"]["T_BUS"])
form.T_BUS_TIME.data = str(d_data["Transit"]["T_BUS_TIME"])
form.W_URL.data = str(d_data["Weather"]["W_URL"])
form.W_API_KEY.data = str(d_data["Weather"]["W_API_KEY"])
form.UNITS.data = str(d_data["Weather"]["UNITS"])
form.C_URL_1.data = str(d_data["Currency"]["C_URL_1"])
form.C_API_KEY_1.data = str(d_data["Currency"]["C_API_KEY_1"])
form.C_URL_3.data = str(d_data["Currency"]["C_URL_3"])
form.C_URL_4.data = str(d_data["Currency"]["C_URL_4"])
form.CURR_CHECK.data = formatting(str(d_data["Currency"]["CURR_CHECK"]))
form.STOCK_W_URL.data = str(d_data["Stocks"]["STOCK_W_URL"])
form.STOCK_WE_URL.data = str(d_data["Stocks"]["STOCK_WE_URL"])
form.STOCK_API.data = str(d_data["Stocks"]["STOCK_API"])
form.STOCK_CHECK.data = formatting(str(d_data["Stocks"]["STOCK_CHECK"]))
form.G_URL.data = str(d_data["Geolocation"]["G_URL"])
form.G_API_KEY.data = str(d_data["Geolocation"]["G_API_KEY"])
form.gsheet_json.data = str(d_data["Tasklist"]["gsheet_json"])
form.sheetname.data = str(d_data["Tasklist"]["sheetname"])
form.CREDENTIALS_FILE.data = str(d_data["G_Meetings"]["CREDENTIALS_FILE"])
form.NEWS_URL.data = str(d_data["News"]["NEWS_URL"])
form.NEWS_API.data = str(d_data["News"]["NEWS_API"])
form.NEWS_SOURCES.data = formatting(str(d_data["News"]["NEWS_SOURCES"]))
form.waking_time.data = str(d_data["System"]["waking_time"])
form.sleeping_time.data = str(d_data["System"]["sleeping_time"])
form.mod_1_choice.data = str(d_data["System"]["mod_1_choice"])
form.mod_2_choice.data = str(d_data["System"]["mod_2_choice"])
form.mod_3_choice.data = str(d_data["System"]["mod_3_choice"])
form.mod_4_choice.data = str(d_data["System"]["mod_4_choice"])
form.refresh_time.data = str(d_data["System"]["refresh_time"])
form.awake.data = str(d_data["System"]["awake"])
return render_template('Settings.html', form=form)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@ app.route('/shutdown', methods=['GET'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
class BaseDatabaseClient:
"""Encapsulate backend-specific methods for opening a client shell."""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self, parameters):
raise NotImplementedError(
"subclasses of BaseDatabaseClient must provide a runshell() method"
)
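# Minimal subclass sketch (illustrative, not Django source): a hypothetical backend whose
# interactive client is the "psql" binary. Real backends also translate the connection's
# settings_dict into command-line arguments and environment variables before running it.
#
#   import subprocess
#
#   class DatabaseClient(BaseDatabaseClient):
#       executable_name = "psql"
#
#       def runshell(self, parameters):
#           args = [self.executable_name, *parameters]
#           subprocess.run(args, check=True)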
|
"""
Unit tests for symupy.api.stream
"""
# ============================================================================
# STANDARD IMPORTS
# ============================================================================
import pytest
# ============================================================================
# INTERNAL IMPORTS
# ============================================================================
from symupy.runtime.logic.publisher import Publisher
from symupy.runtime.logic.subscriber import Subscriber
# ============================================================================
# TESTS AND DEFINITIONS
# ============================================================================
@pytest.fixture
def default_channel():
return ("default",)
@pytest.fixture
def channels():
return ("channel 1", "channel 2")
def test_default_constructor(default_channel):
p = Publisher()
assert p.channels == default_channel
def test_default_attach_observer(default_channel):
p = Publisher()
s = Subscriber(p)
assert p.channels == default_channel
assert p._channels[default_channel[0]][s] == s.update
def test_constructor_channels(channels):
p = Publisher(channels)
assert p.channels == channels
def test_attach_observer(channels):
p = Publisher(channels)
s = Subscriber(p, channels[0])
assert p.channels == channels
assert p._channels[channels[0]][s] == s.update
def test_attach_detach_observer(channels):
p = Publisher(channels)
s = Subscriber(p, channels[0])
assert p._channels[channels[0]][s] == s.update
def test_context_publisher(channels):
with Publisher(channels) as p:
s1 = Subscriber(p, channels[0])
s2 = Subscriber(p, channels[0])
p.dispatch(channels[0])
assert s1._call == 1
assert s2._call == 1
def test_context_observer(channels):
with Publisher(channels) as p:
with Subscriber(p, channels[0]), Subscriber(p, channels[1]):
p.dispatch(channels[0])
def test_context_dispatch(channels):
pass
|
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{
'includes': [
'../../common.gypi',
],
'targets': [
{
'target_name': 'ionimage_test',
'includes': [ '../../dev/test_target.gypi' ],
'sources' : [
'conversionutils_test.cc',
'ninepatch_test.cc',
'renderutils_test.cc',
],
'dependencies' : [
'image_tests_assets',
'<(ion_dir)/image/image.gyp:ionimage_for_tests',
'<(ion_dir)/base/base.gyp:ionbase_for_tests',
'<(ion_dir)/external/gtest.gyp:iongtest_safeallocs',
'<(ion_dir)/port/port.gyp:ionport',
'<(ion_dir)/gfx/gfx.gyp:iongfx_for_tests',
'<(ion_dir)/gfxutils/gfxutils.gyp:iongfxutils_for_tests',
'<(ion_dir)/portgfx/portgfx.gyp:ionportgfx_for_tests',
],
},
{
'target_name': 'image_tests_assets',
'type': 'static_library',
'includes': [
'../../dev/zipasset_generator.gypi',
],
'sources' : [
'data/images.iad',
],
'dependencies' : [
'<(ion_dir)/base/base.gyp:ionbase_for_tests',
],
},
],
}
|
exec(open("Modified_data/next_level.py").read())
|
from .base import lr
from . import het
from .merge import merge
from .permutation import permutation
|
# encoding: utf-8
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from dateutil.tz import tzlocal
from core.models import Event, Venue
from programme.models import ProgrammeEventMeta, TimeBlock, SpecialStartTime
class Command(BaseCommand):
args = ''
help = 'Setup tracon8 specific stuff'
    def handle(self, *args, **options):
tz = tzlocal()
venue, unused = Venue.objects.get_or_create(name='Tampere-talo')
event, unused = Event.objects.get_or_create(slug='tracon8', defaults=dict(
name='Tracon 8',
name_genitive='Tracon 8 -tapahtuman',
name_illative='Tracon 8 -tapahtumaan',
name_inessive='Tracon 8 -tapahtumassa',
homepage_url='http://2013.tracon.fi',
organization_name='Tracon ry',
organization_url='http://ry.tracon.fi',
start_time=datetime(2013, 9, 14, 10, 0, tzinfo=tz),
end_time=datetime(2013, 9, 15, 18, 0, tzinfo=tz),
venue=venue,
))
admin_group_name = "{installation_name}-{event_slug}-programme-admins".format(
installation_name=settings.KOMPASSI_INSTALLATION_SLUG,
event_slug=event.slug,
)
admin_group, unused = Group.objects.get_or_create(name=admin_group_name)
programme_event_meta, unused = ProgrammeEventMeta.objects.get_or_create(event=event, defaults=dict(
public=True,
admin_group=admin_group
))
# v5
if not programme_event_meta.contact_email:
programme_event_meta.contact_email = 'ohjelma@tracon.fi'
programme_event_meta.save()
# v6
for start_time, end_time in [
(
datetime(2013, 9, 14, 11, 0, 0, tzinfo=tz),
datetime(2013, 9, 15, 1 , 0, 0, tzinfo=tz)
),
(
datetime(2013, 9, 15, 9 , 0, 0, tzinfo=tz),
datetime(2013, 9, 15, 17, 0, 0, tzinfo=tz)
)
]:
TimeBlock.objects.get_or_create(
event=event,
start_time=start_time,
defaults=dict(
end_time=end_time
)
)
SpecialStartTime.objects.get_or_create(
event=event,
start_time=datetime(2013, 9, 14, 10, 30, 0, tzinfo=tz),
)
|
import svm as SVM
import numpy as np
data_dict = { -1:np.array( [[10,9,1],
[2,8,1],
[3,8,1],]),
1:np.array( [[5,1,1],
[6,-1,1],
[7,3,1],])}
svm = SVM.Support_Vector_Machine()
svm.fit(data=data_dict)
predict_us = [[0,10,1],
[1,3,1],
[3,4,1],
[3,5,1],
[5,5,1],
[5,6,1],
[6,-5,1],
[5,8,1]]
for p in predict_us:
svm.predict(p)
svm.visualize()
|
#!/usr/bin/python3
import configparser
config = configparser.ConfigParser()
config.read('eve-conf.ini')
def int_imp(inp):
while True:
try:
int(inp)
break
except ValueError:
print('Input has to be a number.')
inp = input('Select again: ')
return int(inp)
def section_select(config):
csections = config.sections()
for section in csections:
print('{:>2}. {}'.format(csections.index(section),section))
num = len(csections)
    print('{:>2}. View All'.format(num))
    num2 = num + 1
    print('{:>2}. Save File'.format(num2))
    num3 = num2 + 1
    print('{:>2}. Exit'.format(num3))
while True:
inp = input('Select section to edit/option: ')
inp = int_imp(inp)
print()
if inp == num:
print_conf(config)
break
elif inp == num2:
save_file(config)
break
elif inp == num3:
print('Editor Closed')
break
elif inp < 0 or inp > num3:
print('Try again')
else:
item_editor(config, csections[inp])
break
def menu():
print()
print('Menu')
print('{:>2}. Edit a Section'.format(0))
print('{:>2}. View File'.format(1))
print('{:>2}. Save File'.format(2))
print('{:>2}. Exit'.format(3))
while True:
inp = input('Select option: ')
inp = int_imp(inp)
print()
if inp == 0:
section_select(config)
break
elif inp == 1:
print_conf(config)
break
elif inp == 2:
save_file(config)
break
elif inp == 3:
print('Editor Closed')
break
elif inp < 0 or inp > 3:
print('Try again')
def print_conf(config):
csections = config.sections()
for section in csections:
print()
print('Section: %s' % (csections[csections.index(section)]))
items = config.items(csections[csections.index(section)])
for item in items:
print('{:>2}. {:<24}: {}'.format(items.index(item),item[0], item[1]))
menu()
def save_file(config):
with open('eve-conf.ini', 'w') as cfgfile:
config.write(cfgfile)
cfgfile.close()
print('Config Saved')
menu()
def item_editor(config, section):
csections = config.sections()
items = config.items(section)
print('Section: {}'.format(section))
for item in items:
print('{:>2}. {:<24}: {}'.format(items.index(item),item[0], item[1]))
print()
menu_b = items.index(item) + 1
print('{:>2}. Back'.format(menu_b))
inp2 = input('Select key to edit: ')
inp2 = int_imp(inp2)
if inp2 == menu_b:
menu()
elif inp2 < 0 or inp2 > menu_b:
        print('Try again')
item_editor(config, section)
else:
inp2 = int_imp(inp2)
change = input('New value: ')
old_value = config[section][items[inp2][0]]
config.set(section,items[inp2][0],change)
print()
print('Section: %s' % (section))
items = config.items(section)
for item in items:
print('{:>2}. {:<24}: {}'.format(items.index(item),item[0], item[1]))
        conf = input('Confirm Change [y,N]: ')
if conf == 'y' or conf == 'Y':
print('Config File Edited.')
else:
config.set(section,items[inp2][0],old_value)
print('Config File Not Changed.')
print()
another = input('Edit another key in this section [y,N]: ')
if another == 'y' or another == 'Y':
print()
item_editor(config,section)
else:
menu()
section_select(config)
|
import argparse
import numpy as np
import cv2
from TauLidarCommon.frame import FrameType
from TauLidarCamera.camera import Camera
def setup(serialPort=None):
port = None
camera = None
# if no serial port is specified, scan for available Tau Camera devices
if serialPort is None:
ports = Camera.scan() ## Scan for available Tau Camera devices
if len(ports) > 0:
port = ports[0]
else:
port = serialPort
if port is not None:
Camera.setRange(0, 4500) ## points in the distance range to be colored
camera = Camera.open(port) ## Open the first available Tau Camera
camera.setModulationChannel(0) ## autoChannelEnabled: 0, channel: 0
camera.setIntegrationTime3d(0, 1000) ## set integration time 0: 1000
camera.setMinimalAmplitude(0, 10)   ## set minimal amplitude 0: 10
cameraInfo = camera.info()
print("\nToF camera opened successfully:")
print(" model: %s" % cameraInfo.model)
print(" firmware: %s" % cameraInfo.firmware)
print(" uid: %s" % cameraInfo.uid)
print(" resolution: %s" % cameraInfo.resolution)
print(" port: %s" % cameraInfo.port)
print("\nPress Esc key over GUI or Ctrl-c in terminal to shutdown ...")
cv2.namedWindow('Depth Map')
cv2.namedWindow('Amplitude')
cv2.moveWindow('Depth Map', 20, 20)
cv2.moveWindow('Amplitude', 20, 360)
return camera
def run(camera):
while True:
frame = camera.readFrame(FrameType.DISTANCE_AMPLITUDE)
if frame:
mat_depth_rgb = np.frombuffer(frame.data_depth_rgb, dtype=np.uint16, count=-1, offset=0).reshape(frame.height, frame.width, 3)
mat_depth_rgb = mat_depth_rgb.astype(np.uint8)
mat_amplitude = np.frombuffer(frame.data_amplitude, dtype=np.float32, count=-1, offset=0).reshape(frame.height, frame.width)
mat_amplitude = mat_amplitude.astype(np.uint8)
# Upscaling the image
upscale = 4
depth_img = cv2.resize(mat_depth_rgb, (frame.width*upscale, frame.height*upscale))
amplitude_img = cv2.resize(mat_amplitude, (frame.width*upscale, frame.height*upscale))
cv2.imshow('Depth Map', depth_img)
cv2.imshow('Amplitude', amplitude_img)
if cv2.waitKey(1) == 27: break
def cleanup(camera):
print('\nShutting down ...')
cv2.destroyAllWindows()
camera.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Sample program to demonstrate acquiring frames with both distance / depth and amplitude data from the Tau LiDAR Camera')
parser.add_argument('--port', metavar='<serial port>', default=None,
help='Specify a serial port for the Tau Camera')
args = parser.parse_args()
camera = setup(args.port)
if camera:
try:
run(camera)
except Exception as e:
print(e)
cleanup(camera)
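# A minimal usage sketch (assumptions: the file is saved as distance_amplitude.py
# and a Tau LiDAR Camera is attached; the port name below is only a placeholder):
#
#   python distance_amplitude.py                      # auto-scan for a camera
#   python distance_amplitude.py --port /dev/ttyACM0  # use a specific serial port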
|
# Created by Thomas Jones on 06/11/15 - thomas@tomtecsolutions.com
# branding.py, a plugin for minqlx to brand your server.
# This plugin is released to everyone, for any purpose. It comes with no warranty, no guarantee it works, it's released AS IS.
# You can modify everything, except for lines 1-4 and the !tomtec_versions code. They're there to indicate I whacked this together originally. Please make it better :D
"""
Branding.py is a minqlx plugin that permits you to personalise your server with your own information.
Simply put the plugin in the 'minqlx-plugins' folder, !load the plugin, and set these cvars:
qlx_serverBrandName - Where the map name usually appears, the text set in this cvar will appear instead.
qlx_serverBrandTopField - Where the map author credit (line 1) appears, the text set in this cvar will appear after the credit.
qlx_serverBrandBottomField - Where the map author credit (line 2) appears, the text set in this cvar will appear after the credit.
qlx_connectMessage - Shown on the 'awaiting challenge' screen when a player first connects to the server.
qlx_loadedMessage - Centre-printed to a player once they reach the menu after connecting and click Join or Spectate.
qlx_countdownMessage - Shown mid-screen when the countdown begins (like qlx_loadedMessage is).
qlx_endOfGameMessage - Printed in the text box on the left when the game finishes.
qlx_brandingPrependMapName - This cvar will put the map name before your qlx_serverBrandName. Default: 0
qlx_brandingAppendGameType - Will add the game type after your qlx_serverBrandName. Default: 0
qlx_rainbowBrandName - Make the entire map name (qlx_serverBrandName) appear in rainbow colouring. Default: 0
Once set, change maps, and you'll see the map loading screen is changed.
"""
import minqlx
class branding(minqlx.Plugin):
def __init__(self):
self.add_hook("new_game", self.brand_map)
self.add_hook("player_connect", self.player_connect)
self.add_hook("player_loaded", self.player_loaded)
self.add_hook("game_countdown", self.game_countdown)
self.add_hook("game_end", self.game_end)
self.add_command("tomtec_versions", self.cmd_showversion)
self.set_cvar_once("qlx_brandingPrependMapName", "0")
self.set_cvar_once("qlx_brandingAppendGameType", "0")
self.set_cvar_once("qlx_rainbowBrandName", "0")
self.plugin_version = "2.1"
self.playerConnectedYetList = []
def brand_map(self):
if self.get_cvar("qlx_serverBrandName") == None:
self.set_cvar("qlx_serverBrandName", self.game.map_title)
if self.get_cvar("qlx_brandingPrependMapName", bool):
topBranding = self.game.map_title + " " + self.get_cvar("qlx_serverBrandName")
else:
topBranding = self.get_cvar("qlx_serverBrandName")
if self.get_cvar("qlx_brandingAppendGameType", bool):
minqlx.set_configstring(3, topBranding + " " + self.game.type)
else:
minqlx.set_configstring(3, topBranding)
if self.get_cvar("qlx_serverBrandTopField") != None:
cs = self.game.map_subtitle1
if cs:
cs += " - "
minqlx.set_configstring(678, cs + (self.get_cvar("qlx_serverBrandTopField")))
if self.get_cvar("qlx_serverBrandBottomField") != None:
cs = self.game.map_subtitle2
if cs:
cs += " - "
minqlx.set_configstring(679, cs + (self.get_cvar("qlx_serverBrandBottomField")))
if self.get_cvar("qlx_rainbowBrandName", bool):
# Thanks Mino for this bit!
def rotating_colors():
i = 0
while True:
res = (i % 7) + 1
i += 1
yield res
map_name = self.clean_text(minqlx.get_configstring(3))
r = rotating_colors()
res = ""
for i in range(len(map_name)):
res += "^{}{}".format(next(r), map_name[i])
minqlx.set_configstring(3, res)
def player_connect(self, player):
if self.get_cvar("qlx_connectMessage") != None:
if player not in self.playerConnectedYetList:
self.playerConnectedYetList.append(player)
return "{}\n^7This server is running ^4branding.py^7. ^2http://github.com/tjone270/Quake-Live^7.\n".format(self.get_cvar("qlx_connectMessage"))
def player_loaded(self, player):
if self.get_cvar("qlx_loadedMessage") != None:
self.center_print(self.get_cvar("qlx_loadedMessage"), player.id)
try:
self.playerConnectedYetList.remove(player)
except ValueError:
pass
def game_countdown(self):
if self.get_cvar("qlx_countdownMessage") != None:
self.center_print(self.get_cvar("qlx_countdownMessage"))
def game_end(self, data):
if self.get_cvar("qlx_endOfGameMessage") != None:
self.msg(self.get_cvar("qlx_endOfGameMessage"))
def cmd_showversion(self, player, msg, channel):
channel.reply("^4branding.py^7 - version {}, created by Thomas Jones on 06/11/2015.".format(self.plugin_version))
|
import os
from django.utils.translation import gettext_lazy as _
######################
# CARTRIDGE SETTINGS #
######################
# The following settings are already defined in cartridge.shop.defaults
# with default values, but are common enough to be put here, commented
# out, for conveniently overriding. Please consult the settings
# documentation for a full list of settings Cartridge implements:
# http://cartridge.jupo.org/configuration.html#default-settings
# Sequence of available credit card types for payment.
# SHOP_CARD_TYPES = ("Mastercard", "Visa", "Diners", "Amex")
# Setting to turn on featured images for shop categories. Defaults to False.
# SHOP_CATEGORY_USE_FEATURED_IMAGE = True
# If True, the checkout process is split into separate
# billing/shipping and payment steps.
# SHOP_CHECKOUT_STEPS_SPLIT = True
# If True, the checkout process has a final confirmation step before
# completion.
# SHOP_CHECKOUT_STEPS_CONFIRMATION = True
# Controls the formatting of monetary values according to the locale
# module in the Python standard library. If an empty string is
# used, will fall back to the system's locale.
# SHOP_CURRENCY_LOCALE = ""
# Dotted package path and name of the function that
# is called on submit of the billing/shipping checkout step. This
# is where shipping calculation can be performed and set using the
# function ``cartridge.shop.utils.set_shipping``.
# SHOP_HANDLER_BILLING_SHIPPING = \
# "cartridge.shop.checkout.default_billship_handler"
# Dotted package path and name of the function that
# is called once an order is successful and all of the order
# object's data has been created. This is where any custom order
# processing should be implemented.
# SHOP_HANDLER_ORDER = "cartridge.shop.checkout.default_order_handler"
# Dotted package path and name of the function that
# is called on submit of the payment checkout step. This is where
# integration with a payment gateway should be implemented.
# SHOP_HANDLER_PAYMENT = "cartridge.shop.checkout.default_payment_handler"
# Sequence of value/name pairs for order statuses.
# SHOP_ORDER_STATUS_CHOICES = (
# (1, "Unprocessed"),
# (2, "Processed"),
# )
# Sequence of value/name pairs for types of product options,
# eg Size, Colour. NOTE: Increasing the number of these will
# require database migrations!
# SHOP_OPTION_TYPE_CHOICES = (
# (1, "Size"),
# (2, "Colour"),
# )
# Sequence of indexes from the SHOP_OPTION_TYPE_CHOICES setting that
# control how the options should be ordered in the admin,
# eg for "Colour" then "Size" given the above:
# SHOP_OPTION_ADMIN_ORDER = (2, 1)
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "media-library"),)),
# (_("Shop"), ("shop.Product", "shop.ProductOption", "shop.DiscountCode",
# "shop.Sale", "shop.Order")),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.db.models.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "UTC"
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (("en", _("English")),)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
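# As a minimal local sketch (illustrative only, not a project default), the
# default database could point at an SQLite file:
#
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.sqlite3",
#         "NAME": "dev.db",
#     }
# }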
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, MEDIA_URL.strip("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(PROJECT_ROOT, "templates")],
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.static",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
],
"loaders": [
"mezzanine.template.loaders.host_themes.Loader",
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
},
]
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.messages",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"cartridge.shop",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
# "mezzanine.twitter",
# "mezzanine.accounts",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"cartridge.shop.middleware.ShopMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
import imp
import sys
module_name = "%s.local_settings" % PROJECT_APP
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been defined so far, in
# order to provide some better defaults where applicable.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
|
# HitObject class
class HitObject:
def __init__(self, start_x, start_y, end_x, end_y, time, object_type):
self.start_x = start_x
self.start_y = start_y
self.end_x = end_x
self.end_y = end_y
self.time = time
self.object_type = object_type # hit_circle, even_repeat_slider, odd_repeat_slider, spinner
# Finds the line number in which the hit objects start.
def find_start(lines):
line_number = 0
for x in lines:
if x == "[HitObjects]":
return line_number + 1
line_number += 1
# Converts a line from .osu file into HitObject.
def convert_hit_object(line):
split_line = line.split(",")
start_x = int(split_line[0])
start_y = int(split_line[1])
end_x = int(split_line[0])
end_y = int(split_line[1])
time = int(split_line[2])
if int(split_line[3]) & 0b1:
object_type = "hit_circle"
elif int(split_line[3]) & 0b1000:
object_type = "spinner"
elif int(split_line[6]) % 2 == 0:
object_type = "even_repeat_slider"
else:
object_type = "odd_repeat_slider"
slider_point_list = split_line[5].split("|")
end_point = slider_point_list[-1].split(":")
end_x = int(end_point[0])
end_y = int(end_point[1])
return HitObject(start_x, start_y, end_x, end_y, time, object_type)
# Computes the distance snap from the distance between two objects and the time gap separating them.
def calculate_distance_snap(first_object, second_object):
first_x = first_object.end_x
first_y = first_object.end_y
first_time = first_object.time
second_x = second_object.start_x
second_y = second_object.start_y
second_time = second_object.time
difference_x = abs(first_x - second_x)
difference_y = abs(first_y - second_y)
difference_time = second_time - first_time
calculation_time = difference_time
if difference_time < 100: # 2x bonus for objects unsingletappable (Detected as streams)
calculation_time = difference_time / 2.0
elif difference_time < 120: # For the grey spot around 300bpm which can be either jumps or streams.
calculation_time = difference_time / (((120 - difference_time) ** 2) / 400.0 + 1)
calculation_time = 1.0 / calculation_time
# 1/time has to be used for calculation as smaller time difference means bigger distance snap.
distance = (difference_x ** 2 + difference_y ** 2) ** 0.5
return distance * calculation_time
# Calculates weighting of objects.
def calculate_weighting(average_distance, max_distance, distance_snap):
second_half = max_distance - average_distance # used to calculate distance snap above the average
if distance_snap < average_distance:
raw_weight = (distance_snap / average_distance) / 2.0 # this is the raw weighting, range from 0 to 1
# if distance snap is under the average, put it somewhere between 0 and 0.5
else:
raw_weight = ((distance_snap - average_distance) / second_half) / 2.0 + 0.5
# if distance snap is above average, put it somewhere between 0.5 and 1
# spacing below ~0.67 is weighted just as much as spacing above it, so only relatively
# BIG jumps will make much of a difference
print((raw_weight * 1.5) ** 1.7)
return (raw_weight * 1.5) ** 1.7
# Calculates nerf/buff based on percentage change from old objects.
def calculate_percentage_change(old_percentage):
if old_percentage < 0.65:
# Nerf all maps which reach under 65%.
# 55% would get around 5% nerf, while 50% would get around 10% nerf.
return 1 - (((0.65 - old_percentage) ** 1.5) / 0.524)
else:
return 1
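# A rough usage sketch for the helpers above (illustrative only; "example.osu"
# is a placeholder path, and the averaging/percentage steps are handled elsewhere
# in the full script):
#
# with open("example.osu", encoding="utf-8") as f:
#     lines = f.read().splitlines()
# hit_objects = [convert_hit_object(line)
#                for line in lines[find_start(lines):] if line.strip()]
# snaps = [calculate_distance_snap(a, b)
#          for a, b in zip(hit_objects, hit_objects[1:])]
# average_snap, max_snap = sum(snaps) / len(snaps), max(snaps)
# weights = [calculate_weighting(average_snap, max_snap, s) for s in snaps]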
|
r"""
Isomorphisms between Weierstrass models of elliptic curves
AUTHORS:
- Robert Bradshaw (2007): initial version
- John Cremona (Jan 2008): isomorphisms, automorphisms and twists
in all characteristics
"""
#*****************************************************************************
# Copyright (C) 2007 Robert Bradshaw <robertwb@math.washington.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.categories.morphism import Morphism
from constructor import EllipticCurve
from sage.categories.homset import Hom
class baseWI:
r"""
This class implements the basic arithmetic of isomorphisms between
Weierstrass models of elliptic curves. These are specified by
lists of the form `[u,r,s,t]` (with `u\not=0`) which specifies a
transformation `(x,y) \mapsto (x',y')` where
`(x,y) = (u^2x'+r , u^3y' + su^2x' + t).`
INPUT:
- ``u,r,s,t`` (default (1,0,0,0)) -- standard parameters of an isomorphism between Weierstrass models.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: baseWI()
(1, 0, 0, 0)
sage: baseWI(2,3,4,5)
(2, 3, 4, 5)
sage: R.<u,r,s,t>=QQ[]; baseWI(u,r,s,t)
(u, r, s, t)
"""
def __init__(self, u=1, r=0, s=0, t=0):
r"""
Constructor: check for valid parameters (defaults to identity)
INPUT:
- ``u,r,s,t`` (default (1,0,0,0)) -- standard parameters of an isomorphism between Weierstrass models.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: baseWI()
(1, 0, 0, 0)
sage: baseWI(2,3,4,5)
(2, 3, 4, 5)
sage: R.<u,r,s,t>=QQ[]; baseWI(u,r,s,t)
(u, r, s, t)
"""
if u==0:
raise ValueError("u!=0 required for baseWI")
self.u=u; self.r=r; self.s=s; self.t=t
def __cmp__(self, other):
"""
Standard comparison function.
The ordering is just lexicographic on the tuple `(u,r,s,t)`.
.. note::
In a list of automorphisms, there is no guarantee that the
identity will be first!
EXAMPLE::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: baseWI(1,2,3,4)==baseWI(1,2,3,4)
True
sage: baseWI(1,2,3,4)<baseWI(1,2,3,5)
True
sage: baseWI(1,2,3,4)>baseWI(1,2,3,4)
False
It will never return equality if other is of another type::
sage: baseWI() == 1
False
"""
if not isinstance(other, baseWI):
return cmp(type(self), type(other))
return cmp(self.tuple(), other.tuple())
def tuple(self):
r"""
Returns the parameters `u,r,s,t` as a tuple.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: u,r,s,t=baseWI(2,3,4,5).tuple()
sage: w=baseWI(2,3,4,5)
sage: u,r,s,t=w.tuple()
sage: u
2
"""
return (self.u,self.r,self.s,self.t)
def __mul__(self, other):
r"""
Returns the composition of this isomorphism and another.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: baseWI(1,2,3,4)*baseWI(5,6,7,8)
(5, 56, 22, 858)
sage: baseWI()*baseWI(1,2,3,4)*baseWI()
(1, 2, 3, 4)
"""
u1,r1,s1,t1=other.tuple()
u2,r2,s2,t2=self.tuple()
return baseWI(u1*u2,(u1**2)*r2+r1,u1*s2+s1,(u1**3)*t2+s1*(u1**2)*r2+t1)
def __invert__(self):
r"""
Returns the inverse of this isomorphism.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: w=baseWI(2,3,4,5)
sage: ~w
(1/2, -3/4, -2, 7/8)
sage: w*~w
(1, 0, 0, 0)
sage: ~w*w
(1, 0, 0, 0)
sage: R.<u,r,s,t>=QQ[]; w=baseWI(u,r,s,t)
sage: ~w
(1/u, (-r)/u^2, (-s)/u, (r*s - t)/u^3)
sage: ~w*w
(1, 0, 0, 0)
"""
u,r,s,t=self.tuple()
return baseWI(1/u,-r/(u**2),-s/u,(r*s-t)/(u**3))
def __repr__(self):
r"""
Returns the string representation of this isomorphism.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: baseWI(2,3,4,5)
(2, 3, 4, 5)
"""
return self.tuple().__repr__()
def is_identity(self):
r"""
Returns True if this is the identity isomorphism.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: w=baseWI(); w.is_identity()
True
sage: w=baseWI(2,3,4,5); w.is_identity()
False
"""
return self.tuple()==(1,0,0,0)
def __call__(self, EorP):
r"""
Base application of isomorphisms to curves and points: a
baseWI `w` may be applied to a list `[a1,a2,a3,a4,a6]`
representing the `a`-invariants of an elliptic curve `E`,
returning the `a`-invariants of `w(E)`; or to `P=[x,y]` or
`P=[x,y,z]` representing a point in `\mathbb{A}^2` or
`\mathbb{P}^2`, returning the transformed point.
INPUT:
- ``EorP`` -- either an elliptic curve, or a point on an elliptic curve.
OUTPUT:
The transformed curve or point.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: E=EllipticCurve([0,0,1,-7,6])
sage: w=baseWI(2,3,4,5);
sage: w(E.ainvs())
[4, -7/4, 11/8, -3/2, -9/32]
sage: P=E(-2,3)
sage: w(P.xy())
[-5/4, 9/4]
sage: EllipticCurve(w(E.ainvs()))(w(P.xy()))
(-5/4 : 9/4 : 1)
"""
u,r,s,t=self.tuple()
if len(EorP)==5:
a1,a2,a3,a4,a6=EorP
a6 += r*(a4 + r*(a2 + r)) - t*(a3 + r*a1 + t)
a4 += -s*a3 + 2*r*a2 - (t + r*s)*a1 + 3*r*r - 2*s*t
a3 += r*a1 + 2*t
a2 += -s*a1 + 3*r - s*s
a1 += 2*s
return [a1/u,a2/u**2,a3/u**3,a4/u**4,a6/u**6]
if len(EorP)==2:
x,y=EorP
x-=r
y-=(s*x+t)
return [x/u**2,y/u**3]
if len(EorP)==3:
x,y,z=EorP
x-=r*z
y-=(s*x+t*z)
return [x/u**2,y/u**3,z]
raise ValueError("baseWI(a) only for a=(x,y), (x:y:z) or (a1,a2,a3,a4,a6)")
def isomorphisms(E,F,JustOne=False):
r"""
Returns one or all isomorphisms between two elliptic curves.
INPUT:
- ``E``, ``F`` (EllipticCurve) -- Two elliptic curves.
- ``JustOne`` (bool) If True, returns one isomorphism, or None if
the curves are not isomorphic. If False, returns a (possibly
empty) list of isomorphisms.
OUTPUT:
Either None, or a 4-tuple `(u,r,s,t)` representing an isomorphism,
or a list of these.
.. note::
This function is not intended for users, who should use the
interface provided by ``ell_generic``.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a3'))
[(-1, 0, 0, -1), (1, 0, 0, 0)]
sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a3'),JustOne=True)
(1, 0, 0, 0)
sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a1'))
[]
sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a1'),JustOne=True)
"""
from ell_generic import is_EllipticCurve
if not is_EllipticCurve(E) or not is_EllipticCurve(F):
raise ValueError("arguments are not elliptic curves")
K = E.base_ring()
# if not K == F.base_ring(): return []
j=E.j_invariant()
if j != F.j_invariant():
if JustOne: return None
return []
from sage.rings.all import PolynomialRing
x=PolynomialRing(K,'x').gen()
a1E, a2E, a3E, a4E, a6E = E.ainvs()
a1F, a2F, a3F, a4F, a6F = F.ainvs()
char=K.characteristic()
if char==2:
if j==0:
ulist=(x**3-(a3E/a3F)).roots(multiplicities=False)
ans=[]
for u in ulist:
slist=(x**4+a3E*x+(a2F**2+a4F)*u**4+a2E**2+a4E).roots(multiplicities=False)
for s in slist:
r=s**2+a2E+a2F*u**2
tlist= (x**2 + a3E*x + r**3 + a2E*r**2 + a4E*r + a6E + a6F*u**6).roots(multiplicities=False)
for t in tlist:
if JustOne: return (u,r,s,t)
ans.append((u,r,s,t))
if JustOne: return None
ans.sort()
return ans
else:
ans=[]
u=a1E/a1F
r=(a3E+a3F*u**3)/a1E
slist=[s[0] for s in (x**2+a1E*x+(r+a2E+a2F*u**2)).roots()]
for s in slist:
t = (a4E+a4F*u**4 + s*a3E + r*s*a1E + r**2)
if JustOne: return (u,r,s,t)
ans.append((u,r,s,t))
if JustOne: return None
ans.sort()
return ans
b2E, b4E, b6E, b8E = E.b_invariants()
b2F, b4F, b6F, b8F = F.b_invariants()
if char==3:
if j==0:
ulist=(x**4-(b4E/b4F)).roots(multiplicities=False)
ans=[]
for u in ulist:
s=a1E-a1F*u
t=a3E-a3F*u**3
rlist=(x**3-b4E*x+(b6E-b6F*u**6)).roots(multiplicities=False)
for r in rlist:
if JustOne: return (u,r,s,t+r*a1E)
ans.append((u,r,s,t+r*a1E))
if JustOne: return None
ans.sort()
return ans
else:
ulist=(x**2-(b2E/b2F)).roots(multiplicities=False)
ans=[]
for u in ulist:
r = (b4F*u**4 -b4E)/b2E
s = (a1E-a1F*u)
t = (a3E-a3F*u**3 + a1E*r)
if JustOne: return (u,r,s,t)
ans.append((u,r,s,t))
if JustOne: return None
ans.sort()
return ans
# now char!=2,3:
c4E,c6E = E.c_invariants()
c4F,c6F = F.c_invariants()
if j==0:
m,um = 6,c6E/c6F
elif j==1728:
m,um=4,c4E/c4F
else:
m,um=2,(c6E*c4F)/(c6F*c4E)
ulist=(x**m-um).roots(multiplicities=False)
ans=[]
for u in ulist:
s = (a1F*u - a1E)/2
r = (a2F*u**2 + a1E*s + s**2 - a2E)/3
t = (a3F*u**3 - a1E*r - a3E)/2
if JustOne: return (u,r,s,t)
ans.append((u,r,s,t))
if JustOne: return None
ans.sort()
return ans
class WeierstrassIsomorphism(baseWI,Morphism):
r"""
Class representing a Weierstrass isomorphism between two elliptic curves.
"""
def __init__(self, E=None, urst=None, F=None):
r"""
Constructor for the WeierstrassIsomorphism class.
INPUT:
- ``E`` -- an EllipticCurve, or None (see below).
- ``urst`` -- a 4-tuple `(u,r,s,t)`, or None (see below).
- ``F`` -- an EllipticCurve, or None (see below).
Given two Elliptic Curves ``E`` and ``F`` (represented by
Weierstrass models as usual), and a transformation ``urst``
from ``E`` to ``F``, construct an isomorphism from ``E`` to
``F``. An exception is raised if ``urst(E)!=F``. At most one
of ``E``, ``F``, ``urst`` can be None. If ``F==None`` then
``F`` is constructed as ``urst(E)``. If ``E==None`` then
``E`` is constructed as ``urst^-1(F)``. If ``urst==None``
then an isomorphism from ``E`` to ``F`` is constructed if
possible, and an exception is raised if they are not
isomorphic. Otherwise ``urst`` can be a tuple of length 4 or
an object of type ``baseWI``.
Users will not usually need to use this class directly, but instead use
methods such as ``isomorphism`` of elliptic curves.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: WeierstrassIsomorphism(EllipticCurve([0,1,2,3,4]),(-1,2,3,4))
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 + 2*y = x^3 + x^2 + 3*x + 4 over Rational Field
To: Abelian group of points on Elliptic Curve defined by y^2 - 6*x*y - 10*y = x^3 - 2*x^2 - 11*x - 2 over Rational Field
Via: (u,r,s,t) = (-1, 2, 3, 4)
sage: E=EllipticCurve([0,1,2,3,4])
sage: F=EllipticCurve(E.cremona_label())
sage: WeierstrassIsomorphism(E,None,F)
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 + 2*y = x^3 + x^2 + 3*x + 4 over Rational Field
To: Abelian group of points on Elliptic Curve defined by y^2 = x^3 + x^2 + 3*x + 5 over Rational Field
Via: (u,r,s,t) = (1, 0, 0, -1)
sage: w=WeierstrassIsomorphism(None,(1,0,0,-1),F)
sage: w._domain_curve==E
True
"""
from ell_generic import is_EllipticCurve
if E!=None:
if not is_EllipticCurve(E):
raise ValueError("First argument must be an elliptic curve or None")
if F!=None:
if not is_EllipticCurve(F):
raise ValueError("Third argument must be an elliptic curve or None")
if urst!=None:
if len(urst)!=4:
raise ValueError("Second argument must be [u,r,s,t] or None")
if len([par for par in [E,urst,F] if par!=None])<2:
raise ValueError("At most 1 argument can be None")
if F==None: # easy case
baseWI.__init__(self,*urst)
F=EllipticCurve(baseWI.__call__(self,list(E.a_invariants())))
Morphism.__init__(self, Hom(E(0).parent(), F(0).parent()))
self._domain_curve = E
self._codomain_curve = F
return
if E==None: # easy case in reverse
baseWI.__init__(self,*urst)
inv_urst=baseWI.__invert__(self)
E=EllipticCurve(baseWI.__call__(inv_urst,list(F.a_invariants())))
Morphism.__init__(self, Hom(E(0).parent(), F(0).parent()))
self._domain_curve = E
self._codomain_curve = F
return
if urst==None: # try to construct the morphism
urst=isomorphisms(E,F,True)
if urst==None:
raise ValueError("Elliptic curves not isomorphic.")
baseWI.__init__(self, *urst)
Morphism.__init__(self, Hom(E(0).parent(), F(0).parent()))
self._domain_curve = E
self._codomain_curve = F
return
# none of the parameters is None:
baseWI.__init__(self,*urst)
if F!=EllipticCurve(baseWI.__call__(self,list(E.a_invariants()))):
raise ValueError("second argument is not an isomorphism from first argument to third argument")
else:
Morphism.__init__(self, Hom(E(0).parent(), F(0).parent()))
self._domain_curve = E
self._codomain_curve = F
return
def __cmp__(self, other):
r"""
Standard comparison function for the WeierstrassIsomorphism class.
EXAMPLE::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: E=EllipticCurve('389a1')
sage: F=E.change_weierstrass_model(1,2,3,4)
sage: w1=E.isomorphism_to(F)
sage: w1==w1
True
sage: w2 = F.automorphisms()[0] *w1
sage: w1==w2
False
::
sage: E=EllipticCurve_from_j(GF(7)(0))
sage: F=E.change_weierstrass_model(2,3,4,5)
sage: a=E.isomorphisms(F)
sage: b=[w*a[0] for w in F.automorphisms()]
sage: b.sort()
sage: a==b
True
sage: c=[a[0]*w for w in E.automorphisms()]
sage: c.sort()
sage: a==c
True
"""
if not isinstance(other, WeierstrassIsomorphism):
return cmp(type(self), type(other))
t = cmp(self._domain_curve, other._domain_curve)
if t: return t
t = cmp(self._codomain_curve, other._codomain_curve)
if t: return t
return baseWI.__cmp__(self,other)
def __call__(self, P):
r"""
Call function for WeierstrassIsomorphism class.
INPUT:
- ``P`` (Point) -- a point on the domain curve.
OUTPUT:
(Point) the transformed point on the codomain curve.
EXAMPLES::
sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *
sage: E=EllipticCurve('37a1')
sage: w=WeierstrassIsomorphism(E,(2,3,4,5))
sage: P=E(0,-1)
sage: w(P)
(-3/4 : 3/4 : 1)
sage: w(P).curve()==E.change_weierstrass_model((2,3,4,5))
True
"""
if P[2] == 0:
return self._codomain_curve(0)
else:
return self._codomain_curve.point(baseWI.__call__(self,tuple(P._coords)), check=False)
def __invert__(self):
r"""
Returns the inverse of this WeierstrassIsomorphism.
EXAMPLES::
sage: E = EllipticCurve('5077')
sage: F = E.change_weierstrass_model([2,3,4,5]); F
Elliptic Curve defined by y^2 + 4*x*y + 11/8*y = x^3 - 7/4*x^2 - 3/2*x - 9/32 over Rational Field
sage: w = E.isomorphism_to(F)
sage: P = E(-2,3,1)
sage: w(P)
(-5/4 : 9/4 : 1)
sage: ~w
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 + 4*x*y + 11/8*y = x^3 - 7/4*x^2 - 3/2*x - 9/32 over Rational Field
To: Abelian group of points on Elliptic Curve defined by y^2 + y = x^3 - 7*x + 6 over Rational Field
Via: (u,r,s,t) = (1/2, -3/4, -2, 7/8)
sage: Q = w(P); Q
(-5/4 : 9/4 : 1)
sage: (~w)(Q)
(-2 : 3 : 1)
"""
winv=baseWI.__invert__(self).tuple()
return WeierstrassIsomorphism(self._codomain_curve, winv, self._domain_curve)
def __mul__(self,other):
r"""
Returns the composition of this WeierstrassIsomorphism and the other.
WeierstrassIsomorphisms can be composed using ``*`` if the
codomain and domain match: `(w1*w2)(X)=w1(w2(X))`, so we require
``w1.domain()==w2.codomain()``.
EXAMPLES::
sage: E1 = EllipticCurve('5077')
sage: E2 = E1.change_weierstrass_model([2,3,4,5])
sage: w1 = E1.isomorphism_to(E2)
sage: E3 = E2.change_weierstrass_model([6,7,8,9])
sage: w2 = E2.isomorphism_to(E3)
sage: P = E1(-2,3,1)
sage: (w2*w1)(P)==w2(w1(P))
True
"""
if self._domain_curve==other._codomain_curve:
w=baseWI.__mul__(self,other)
return WeierstrassIsomorphism(other._domain_curve, w.tuple(), self._codomain_curve)
else:
raise ValueError("Domain of first argument must equal codomain of second")
def __repr__(self):
r"""
Returns the string representation of this WeierstrassIsomorphism.
OUTPUT:
(string) The underlying morphism, together with an extra line
showing the `(u,r,s,t)` parameters.
EXAMPLES::
sage: E1 = EllipticCurve('5077')
sage: E2 = E1.change_weierstrass_model([2,3,4,5])
sage: E1.isomorphism_to(E2)
Generic morphism:
From: Abelian group of points on Elliptic Curve defined by y^2 + y = x^3 - 7*x + 6 over Rational Field
To: Abelian group of points on Elliptic Curve defined by y^2 + 4*x*y + 11/8*y = x^3 - 7/4*x^2 - 3/2*x - 9/32 over Rational Field
Via: (u,r,s,t) = (2, 3, 4, 5)
"""
return Morphism.__repr__(self)+"\n Via: (u,r,s,t) = "+baseWI.__repr__(self)
|
"""
Settings specific to prod-like deployable code, reading values from system environment variables.
"""
import os
from conf.configs import common
from conf.settings import PROJECT_ID
__author__ = "Alex Laird"
__copyright__ = "Copyright 2018, Helium Edu"
__version__ = "1.1.15"
# Define the base working directory of the application
BASE_DIR = os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", ".."))
# Application definition
INSTALLED_APPS = common.INSTALLED_APPS
MIDDLEWARE = common.MIDDLEWARE + (
"rollbar.contrib.django.middleware.RollbarNotifierMiddleware",
)
TEMPLATES = common.TEMPLATES
if common.DEBUG:
TEMPLATES[0]["OPTIONS"]["context_processors"] += (
"django.template.context_processors.debug",
)
#############################
# Django configuration
#############################
# Security
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
# Logging
ROLLBAR = {
"access_token": os.environ.get("PLATFORM_ROLLBAR_POST_SERVER_ITEM_ACCESS_TOKEN"),
"environment": os.environ.get("ENVIRONMENT"),
"branch": "main",
"root": BASE_DIR,
}
if not common.DEBUG:
ADMINS = (
(common.PROJECT_NAME, common.ADMIN_EMAIL_ADDRESS),
)
MANAGERS = ADMINS
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S"
},
},
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse",
}
},
"handlers": {
"rollbar": {
"level": "WARN",
"class": "rollbar.logger.RollbarHandler",
"filters": ["require_debug_false"],
},
"django": {
"level": "ERROR",
"class": "logging.handlers.RotatingFileHandler",
"filename": f"/var/log/{PROJECT_ID}/django.log",
"maxBytes": 50000000,
"backupCount": 3,
"formatter": "standard",
},
f"{PROJECT_ID}_app": {
"level": "INFO",
"class": "logging.handlers.RotatingFileHandler",
"filename": f"/var/log/{PROJECT_ID}/app.log",
"maxBytes": 50000000,
"backupCount": 3,
"formatter": "standard",
},
},
"loggers": {
"django.request": {
"handlers": ["django", "rollbar"],
"level": "ERROR",
"propagate": False,
},
"{%PROJECT_ID_LOWER%}.app": {
"handlers": [f"{PROJECT_ID}_app", "rollbar"],
"level": "INFO",
},
}
}
# Cache
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": os.environ.get("{%PROJECT_ID_UPPER%}_REDIS_HOST"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
# Database
DATABASES = {
"default": {
"NAME": os.environ.get("{%PROJECT_ID_UPPER%}_DB_NAME"),
"ENGINE": "django.db.backends.mysql",
"HOST": os.environ.get("{%PROJECT_ID_UPPER%}_DB_HOST"),
"USER": os.environ.get("{%PROJECT_ID_UPPER%}_DB_USER"),
"PASSWORD": os.environ.get("{%PROJECT_ID_UPPER%}_DB_PASSWORD"),
}
}
|
import operator
from functools import reduce
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.db.models import Q, Sum
from django.shortcuts import HttpResponse, get_object_or_404, redirect, render
from django.views.generic import View
from django.views.generic.base import TemplateView
from .forms import RecipeForm
from .models import (Purchase, Recipe, Subscription)
from .utils import paginator_data
User = get_user_model()
def index(request):
'''View that renders the main page'''
# get the list of tags from the GET request
tags = request.GET.getlist('tag')
if tags:
# filter by the combination of selected tags
query = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
recipies = Recipe.objects.filter(query).order_by('-date_pub')
else:
recipies = Recipe.objects.all().order_by('-date_pub')
# Since the paginator is used on almost every page, the common logic
# lives in a helper function in utils.py
page, paginator = paginator_data(request, recipies)
return render(request, 'index.html', context={'page': page,
'paginator': paginator,
'tags': tags})
def recipe_detail(request, slug):
'''View that renders a single recipe page'''
recipe = get_object_or_404(Recipe, slug__iexact=slug)
return render(request, 'recipe_detail.html', context={'recipe': recipe})
def profile_index(request, username):
'''A user's personal page'''
author = get_object_or_404(User, username=username)
user = request.user
tags = request.GET.getlist('tag')
if tags:
# filter by the combination of selected tags
query = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
recipies = author.recipes.filter(query).order_by('-date_pub')
else:
recipies = author.recipes.all().order_by('-date_pub')
following = Subscription.objects.filter(user__username=user,
author=author).count()
return render(request, 'profile.html', context={'recipies': recipies,
'author': author,
'user': user,
'following': following,
'tags': tags})
@login_required
def subscription_index(request):
'''The user's subscriptions page'''
follow_authors = User.objects.filter(
following__user=request.user).prefetch_related('recipes')
page, paginator = paginator_data(request, follow_authors)
return render(request, 'subscription_index.html',
context={'page': page, 'paginator': paginator, })
@login_required
def favorite_index(request):
'''The user's favorites page'''
tags = request.GET.getlist('tag')
if tags:
# filter by the combination of selected tags
query = reduce(operator.or_, (Q(tags__contains=tag) for tag in tags))
recipies = Recipe.objects.filter(query).order_by('-date_pub').filter(
favorites__user=request.user).select_related('author')
else:
recipies = Recipe.objects.all().order_by('-date_pub').filter(
favorites__user=request.user).select_related('author')
page, paginator = paginator_data(request, recipies)
return render(request, 'favorite_index.html',
context={'page': page,
'paginator': paginator,
'tags': tags})
@login_required
def purchase_index(request):
'''Shopping list'''
recipies = Recipe.objects.filter(
purchases__user=request.user)
return render(request, 'purchase_index.html', context={
'recipies': recipies})
@login_required
def get_purchase_list(request):
'''Download a txt file with the ingredient list of the selected recipes'''
file_name = 'Purchase_list.txt'
txt = ''
purchase = Purchase.objects.filter(user=request.user)
ingredients = purchase.values('recipe__ingredients__title',
'recipe__ingredients__dimension').annotate(
total_amount=Sum('recipe__ingredients__ingredient_recipe__amount'))
result = set()
for ingredient in ingredients:
if ingredient['recipe__ingredients__title'] not in result:
item = (f'{ingredient["recipe__ingredients__title"]} '
f'{ingredient["total_amount"]} '
f'{ingredient["recipe__ingredients__dimension"]}'
)
result.add(ingredient['recipe__ingredients__title'])
txt += item + '\n'
response = HttpResponse(txt, content_type='text/plain; charset=utf-8')
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response
class RecipeCreateUpdate(View):
'''Create or edit a recipe'''
def get(self, request, slug=None):
if slug:
recipe = get_object_or_404(Recipe,
author__username=(self.request.
user.username),
slug__iexact=slug)
form = RecipeForm(instance=recipe)
title = 'Редактирование рецепта'
botton_name = 'Изменить рецепт'
context = {
'form': form,
'botton_name': botton_name,
'title': title,
'recipe': recipe,
}
else:
form = RecipeForm()
title = 'Создание рецепта'
botton_name = 'Создать рецепт'
context = {
'form': form,
'botton_name': botton_name,
'title': title
}
template = 'recipe_create_or_update.html'
return render(request, template, context)
def post(self, request, slug=None):
if slug:
recipe = get_object_or_404(Recipe,
author__username=(self.request.
user.username),
slug__iexact=slug)
if request.user != recipe.author:
return redirect('index')
bound_form = RecipeForm(request.POST or None,
files=request.FILES or None,
instance=recipe,
initial={"request": request})
context = {
'form': bound_form,
'title': 'Редактирование рецепта',
'botton_name': 'Редактирование рецепта',
'recipe': recipe
}
else:
bound_form = RecipeForm(request.POST or None,
files=request.FILES or None,
initial={"request": request})
context = {
'form': bound_form,
'title': 'Создание рецепта',
'botton_name': 'Создать рецепт'
}
if bound_form.is_valid():
new_recipe = bound_form.save(commit=False)
new_recipe.tags = request.POST.getlist('tags')
new_recipe.save()  # save(commit=False) does not hit the database, so persist explicitly
return redirect(new_recipe)
return render(request, 'recipe_create_or_update.html',
context=context)
class RecipeDelete(View):
'''Delete a recipe'''
def get(self, request, pk):
recipe = get_object_or_404(Recipe, author=request.user, id=pk)
recipe.delete()
return redirect('index')
class About(TemplateView):
'''About the author'''
template_name = 'about.html'
class Technologies(TemplateView):
'''Technologies'''
template_name = 'technologies.html'
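# A sketch of how these views might be wired up in urls.py (the path strings,
# URL names and the module path 'recipes.views' are assumptions for
# illustration, not taken from this project's actual URLconf):
#
# from django.urls import path
# from recipes import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('recipe/new/', views.RecipeCreateUpdate.as_view(), name='recipe_create'),
#     path('recipe/<slug:slug>/', views.recipe_detail, name='recipe_detail'),
#     path('recipe/<int:pk>/delete/', views.RecipeDelete.as_view(), name='recipe_delete'),
#     path('purchases/download/', views.get_purchase_list, name='purchase_list'),
# ]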
|
import pandas as pd
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from backend.services.toxic_comment_jigsaw.application.ai.model import BERTClassifier
from backend.services.toxic_comment_jigsaw.application.ai.training.src.dataset import BERTDataset
from backend.services.toxic_comment_jigsaw.application.ai.training.src.preprocess import Preprocess
from backend.services.toxic_comment_jigsaw.application.ai.training.src.engine import Engine
from backend.services.toxic_comment_jigsaw.application.ai.settings import Settings
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.utils.data import DataLoader
class Train:
def __init__(self):
# initialize required class
self.settings = Settings
self.engine = Engine()
self.preprocess = Preprocess()
# initialize required variables
self.bert_classifier = None
self.optimizer = None
self.scheduler = None
self.train_data_loader = None
self.val_data_loader = None
self.total_steps = None
self.best_accuracy = 0
def __initialize(self):
# Instantiate Bert Classifier
self.bert_classifier = BERTClassifier(freeze_bert=False)
self.bert_classifier.to(self.settings.DEVICE)
# Create the optimizer
self.optimizer = AdamW(self.bert_classifier.parameters(),
lr=5e-5, # Default learning rate
eps=1e-8 # Default epsilon value
)
# Set up the learning rate scheduler
self.scheduler = get_linear_schedule_with_warmup(self.optimizer,
num_warmup_steps=0, # Default value
num_training_steps=self.total_steps)
def create_data_loaders(self, dataset):
pass
def load_data(self):
train_df = pd.read_csv(self.settings.TRAIN_DATA).fillna("none")
train_df['comment_text'] = train_df['comment_text'].apply(lambda x: self.preprocess.clean_text(x))
X = list(train_df['comment_text'])
y = np.array(train_df.loc[:, 'toxic':])
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.20, random_state=self.settings.RANDOM_STATE)
# training dataset
train_dataset = BERTDataset(X_train, y_train)
# validation dataset
val_dataset = BERTDataset(X_val, y_val)
self.train_data_loader = DataLoader(train_dataset,
batch_size=self.settings.TRAIN_BATCH_SIZE,
shuffle=True,
num_workers=self.settings.TRAIN_NUM_WORKERS)
self.val_data_loader = DataLoader(val_dataset,
batch_size=self.settings.VALID_BATCH_SIZE,
shuffle=True,
num_workers=self.settings.VAL_NUM_WORKERS)
self.total_steps = int(len(X_train) / self.settings.TRAIN_BATCH_SIZE * self.settings.EPOCHS)
def train(self):
for epochs in range(self.settings.EPOCHS):
# calling the training function in engine.py file
self.engine.train_fn(data_loader=self.train_data_loader,
model=self.bert_classifier,
optimizer=self.optimizer,
device=self.settings.DEVICE,
schedular=self.scheduler)
# calling the evaluation function from the engine.py file to compute evaluation
val_loss, val_accuracy = self.engine.eval_fn(data_loader=self.val_data_loader,
model=self.bert_classifier,
device=self.settings.DEVICE)
# updating the accuracy
if val_accuracy > self.best_accuracy:
torch.save(self.bert_classifier.state_dict(), self.settings.MODEL_PATH)
self.best_accuracy = val_accuracy
def run(self):
try:
print("Loading and Preparing the Dataset-----!! ")
self.load_data()
print("Dataset Successfully Loaded and Prepared-----!! ")
print()
print("-" * 70)
print("Loading and Initializing the Bert Model -----!! ")
self.__initialize()
print("Model Successfully Loaded and Initialized-----!! ")
print()
print("-" * 70)
print("------------------Starting Training-----------!!")
self.engine.set_seed()
self.train()
print("Training complete-----!!!")
except BaseException as ex:
print("Following Exception Occurred---!! ", str(ex))
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Guidelines for writing new hacking checks
- Use only for Rally specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to tests/unit/test_hacking.py
"""
import functools
import re
import tokenize
re_assert_equal_end_with_true_or_false = re.compile(
r"assertEqual\(.*?, \s+(True|False)\)$")
re_assert_equal_start_with_true_or_false = re.compile(
r"assertEqual\((True|False),")
re_assert_true_instance = re.compile(
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
r"(\w|\.|\'|\"|\[|\])+\)\)")
re_assert_equal_type = re.compile(
r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
r"(\w|\.|\'|\"|\[|\])+\)")
re_assert_equal_end_with_none = re.compile(r"assertEqual\(.*?,\s+None\)$")
re_assert_equal_start_with_none = re.compile(r"assertEqual\(None,")
re_assert_not_equal_end_with_none = re.compile(
r"assertNotEqual\(.*?,\s+None\)$")
re_assert_not_equal_start_with_none = re.compile(r"assertNotEqual\(None,")
re_assert_true_false_with_in_or_not_in = re.compile(
r"assert(True|False)\("
r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
re_assert_true_false_with_in_or_not_in_spaces = re.compile(
r"assert(True|False)\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
r"[\[|'|\"](, .*)?\)")
re_assert_equal_in_end_with_true_or_false = re.compile(
r"assertEqual\((\w|[][.'\"])+( not)? in (\w|[][.'\", ])+, (True|False)\)")
re_assert_equal_in_start_with_true_or_false = re.compile(
r"assertEqual\((True|False), (\w|[][.'\"])+( not)? in (\w|[][.'\", ])+\)")
re_no_construct_dict = re.compile(
r"\sdict\(\)")
re_no_construct_list = re.compile(
r"\slist\(\)")
re_str_format = re.compile(r"""
% # start of specifier
\(([^)]+)\) # mapping key, in group 1
[#0 +\-]? # optional conversion flag
(?:-?\d*)? # optional minimum field width
(?:\.\d*)? # optional precision
[hLl]? # optional length modifier
[A-z%] # conversion modifier
""", re.X)
re_raises = re.compile(
r"\s:raise[^s] *.*$|\s:raises *:.*$|\s:raises *[^:]+$")
re_db_import = re.compile(r"^from rally.common import db")
re_objects_import = re.compile(r"^from rally.common import objects")
re_old_type_class = re.compile(r"^\s*class \w+(\(\))?:")
re_datetime_alias = re.compile(r"^(from|import) datetime(?!\s+as\s+dt$)")
re_log_warn = re.compile(r"(.)*LOG\.(warn)\(\s*('|\"|_)")
def skip_ignored_lines(func):
@functools.wraps(func)
def wrapper(logical_line, physical_line, filename):
line = physical_line.strip()
if not line or line.startswith("#") or line.endswith("# noqa"):
return
yield next(func(logical_line, physical_line, filename))
return wrapper
def _parse_assert_mock_str(line):
point = line.find(".assert_")
if point == -1:
point = line.find(".called_once_with(")
if point != -1:
end_pos = line[point:].find("(") + point
return point, line[point + 1: end_pos], line[: point]
else:
return None, None, None
@skip_ignored_lines
def check_assert_methods_from_mock(logical_line, physical_line, filename):
"""Ensure that ``assert_*`` methods from ``mock`` library is used correctly
N301 - base error number
N302 - related to nonexistent "assert_called"
N303 - related to nonexistent "assert_called_once"
N304 - related to nonexistent "called_once_with"
"""
correct_names = ["assert_any_call", "assert_called_once_with",
"assert_called_with", "assert_has_calls",
"assert_not_called"]
ignored_files = ["./tests/unit/test_hacking.py"]
if filename.startswith("./tests") and filename not in ignored_files:
pos, method_name, obj_name = _parse_assert_mock_str(logical_line)
if pos:
if method_name not in correct_names:
error_number = "N301"
msg = ("%(error_number)s:'%(method)s' is not present in `mock`"
" library. %(custom_msg)s For more details, visit "
"http://www.voidspace.org.uk/python/mock/ .")
if method_name == "assert_called":
error_number = "N302"
custom_msg = ("Maybe, you should try to use "
"'assertTrue(%s.called)' instead." %
obj_name)
elif method_name == "assert_called_once":
# For more details, see a bug in Rally:
# https://bugs.launchpad.net/rally/+bug/1305991
error_number = "N303"
custom_msg = ("Maybe, you should try to use "
"'assertEqual(1, %s.call_count)' "
"or '%s.assert_called_once_with()'"
" instead." % (obj_name, obj_name))
elif method_name == "called_once_with":
error_number = "N304"
custom_msg = ("Maybe, you should try to use "
"'%s.assert_called_once_with()'"
" instead." % obj_name)
else:
custom_msg = ("Correct 'assert_*' methods: '%s'."
% "', '".join(correct_names))
yield (pos, msg % {
"error_number": error_number,
"method": method_name,
"custom_msg": custom_msg})
@skip_ignored_lines
def check_import_of_logging(logical_line, physical_line, filename):
"""Check correctness import of logging module
N310
"""
excluded_files = ["./rally/common/logging.py",
"./tests/unit/test_logging.py",
"./tests/ci/rally_verify.py",
"./tests/ci/sync_requirements.py"]
forbidden_imports = ["from oslo_log",
"import oslo_log",
"import logging"]
if filename not in excluded_files:
for forbidden_import in forbidden_imports:
if logical_line.startswith(forbidden_import):
yield (0, "N310 Wrong module for logging is imported. Please "
"use `rally.common.logging` instead.")
@skip_ignored_lines
def check_import_of_config(logical_line, physical_line, filename):
"""Check correctness import of config module
N311
"""
excluded_files = ["./rally/common/cfg.py"]
forbidden_imports = ["from oslo_config",
"import oslo_config"]
if filename not in excluded_files:
for forbidden_import in forbidden_imports:
if logical_line.startswith(forbidden_import):
yield (0, "N311 Wrong module for config is imported. Please "
"use `rally.common.cfg` instead.")
@skip_ignored_lines
def no_use_conf_debug_check(logical_line, physical_line, filename):
"""Check for "cfg.CONF.debug"
    Rally has two DEBUG levels:
    - Full DEBUG, which includes all debug messages from all OpenStack services
    - Rally DEBUG, which includes only Rally debug messages
    so we should use a custom check to detect debug mode instead of CONF.debug
N312
"""
excluded_files = ["./rally/common/logging.py"]
point = logical_line.find("CONF.debug")
if point != -1 and filename not in excluded_files:
yield(point, "N312 Don't use `CONF.debug`. "
"Function `rally.common.logging.is_debug` "
"should be used instead.")
@skip_ignored_lines
def assert_true_instance(logical_line, physical_line, filename):
"""Check for assertTrue(isinstance(a, b)) sentences
N320
"""
if re_assert_true_instance.match(logical_line):
yield (0, "N320 assertTrue(isinstance(a, b)) sentences not allowed, "
"you should use assertIsInstance(a, b) instead.")
@skip_ignored_lines
def assert_equal_type(logical_line, physical_line, filename):
"""Check for assertEqual(type(A), B) sentences
N321
"""
if re_assert_equal_type.match(logical_line):
yield (0, "N321 assertEqual(type(A), B) sentences not allowed, "
"you should use assertIsInstance(a, b) instead.")
@skip_ignored_lines
def assert_equal_none(logical_line, physical_line, filename):
"""Check for assertEqual(A, None) or assertEqual(None, A) sentences
N322
"""
res = (re_assert_equal_start_with_none.search(logical_line) or
re_assert_equal_end_with_none.search(logical_line))
if res:
yield (0, "N322 assertEqual(A, None) or assertEqual(None, A) "
"sentences not allowed, you should use assertIsNone(A) "
"instead.")
@skip_ignored_lines
def assert_true_or_false_with_in(logical_line, physical_line, filename):
"""Check assertTrue/False(A in/not in B) with collection contents
Check for assertTrue/False(A in B), assertTrue/False(A not in B),
assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
sentences.
N323
"""
res = (re_assert_true_false_with_in_or_not_in.search(logical_line) or
re_assert_true_false_with_in_or_not_in_spaces.search(logical_line))
if res:
yield (0, "N323 assertTrue/assertFalse(A in/not in B)sentences not "
"allowed, you should use assertIn(A, B) or assertNotIn(A, B)"
" instead.")
@skip_ignored_lines
def assert_equal_in(logical_line, physical_line, filename):
"""Check assertEqual(A in/not in B, True/False) with collection contents
Check for assertEqual(A in B, True/False), assertEqual(True/False, A in B),
assertEqual(A not in B, True/False) or assertEqual(True/False, A not in B)
sentences.
N324
"""
res = (re_assert_equal_in_end_with_true_or_false.search(logical_line) or
re_assert_equal_in_start_with_true_or_false.search(logical_line))
if res:
yield (0, "N324: Use assertIn/NotIn(A, B) rather than "
"assertEqual(A in/not in B, True/False) when checking "
"collection contents.")
@skip_ignored_lines
def assert_not_equal_none(logical_line, physical_line, filename):
"""Check for assertNotEqual(A, None) or assertEqual(None, A) sentences
N325
"""
res = (re_assert_not_equal_start_with_none.search(logical_line) or
re_assert_not_equal_end_with_none.search(logical_line))
if res:
yield (0, "N325 assertNotEqual(A, None) or assertNotEqual(None, A) "
"sentences not allowed, you should use assertIsNotNone(A) "
"instead.")
@skip_ignored_lines
def assert_equal_true_or_false(logical_line, physical_line, filename):
"""Check for assertEqual(A, True/False) sentences
Check for assertEqual(A, True/False) sentences or
assertEqual(True/False, A)
N326
"""
res = (re_assert_equal_end_with_true_or_false.search(logical_line) or
re_assert_equal_start_with_true_or_false.search(logical_line))
if res:
yield (0, "N326 assertEqual(A, True/False) or "
"assertEqual(True/False, A) sentences not allowed,"
"you should use assertTrue(A) or assertFalse(A) instead.")
@skip_ignored_lines
def check_no_direct_rally_objects_import(logical_line, physical_line,
filename):
"""Check if rally.common.objects are properly imported.
If you import "from rally.common import objects" you are able to use
objects directly like objects.Task.
N340
"""
if filename == "./rally/common/objects/__init__.py":
return
if filename == "./rally/common/objects/endpoint.py":
return
if (logical_line.startswith("from rally.common.objects")
or logical_line.startswith("import rally.common.objects.")):
yield (0, "N340: Import objects module:"
"`from rally.common import objects`. "
"After that you can use directly objects e.g. objects.Task")
@skip_ignored_lines
def check_no_oslo_deprecated_import(logical_line, physical_line, filename):
"""Check if oslo.foo packages are not imported instead of oslo_foo ones.
Libraries from oslo.foo namespace are deprecated because of namespace
problems.
N341
"""
if (logical_line.startswith("from oslo.")
or logical_line.startswith("import oslo.")):
yield (0, "N341: Import oslo module: `from oslo_xyz import ...`. "
"The oslo.xyz namespace was deprecated, use oslo_xyz "
"instead")
@skip_ignored_lines
def check_quotes(logical_line, physical_line, filename):
"""Check that single quotation marks are not used
N350
"""
in_string = False
in_multiline_string = False
    single_quotes_are_used = False
    check_triple = (
lambda line, i, char: (
i + 2 < len(line) and
(char == line[i] == line[i + 1] == line[i + 2])
)
)
i = 0
while i < len(logical_line):
char = logical_line[i]
if in_string:
if char == "\"":
in_string = False
if char == "\\":
i += 1 # ignore next char
elif in_multiline_string:
            if check_triple(logical_line, i, "\""):
i += 2 # skip next 2 chars
in_multiline_string = False
elif char == "#":
break
elif char == "'":
            single_quotes_are_used = True
break
elif char == "\"":
            if check_triple(logical_line, i, "\""):
in_multiline_string = True
i += 3
continue
in_string = True
i += 1
    if single_quotes_are_used:
        yield (i, "N350 Remove single quotes")
@skip_ignored_lines
def check_no_constructor_data_struct(logical_line, physical_line, filename):
"""Check that data structs (lists, dicts) are declared using literals
N351
"""
match = re_no_construct_dict.search(logical_line)
if match:
yield (0, "N351 Remove dict() construct and use literal {}")
match = re_no_construct_list.search(logical_line)
if match:
yield (0, "N351 Remove list() construct and use literal []")
def check_dict_formatting_in_string(logical_line, tokens):
"""Check that strings do not use dict-formatting with a single replacement
N352
"""
# NOTE(stpierre): Can't use @skip_ignored_lines here because it's
# a stupid decorator that only works on functions that take
# (logical_line, filename) as arguments.
if (not logical_line or
logical_line.startswith("#") or
logical_line.endswith("# noqa")):
return
current_string = ""
in_string = False
for token_type, text, start, end, line in tokens:
if token_type == tokenize.STRING:
if not in_string:
current_string = ""
in_string = True
current_string += text.strip("\"")
elif token_type == tokenize.OP:
if not current_string:
continue
# NOTE(stpierre): The string formatting operator % has
# lower precedence than +, so we assume that the logical
# string has concluded whenever we hit an operator of any
# sort. (Most operators don't work for strings anyway.)
# Some string operators do have higher precedence than %,
# though, so you can technically trick this check by doing
# things like:
#
# "%(foo)s" * 1 % {"foo": 1}
# "%(foo)s"[:] % {"foo": 1}
#
# It also will produce false positives if you use explicit
# parenthesized addition for two strings instead of
# concatenation by juxtaposition, e.g.:
#
# ("%(foo)s" + "%(bar)s") % vals
#
# But if you do any of those things, then you deserve all
# of the horrible things that happen to you, and probably
# many more.
in_string = False
if text == "%":
format_keys = set()
for match in re_str_format.finditer(current_string):
format_keys.add(match.group(1))
if len(format_keys) == 1:
yield (0,
"N353 Do not use mapping key string formatting "
"with a single key")
if text != ")":
# NOTE(stpierre): You can have a parenthesized string
# followed by %, so a closing paren doesn't obviate
# the possibility for a substitution operator like
# every other operator does.
current_string = ""
elif token_type in (tokenize.NL, tokenize.COMMENT):
continue
else:
in_string = False
if token_type == tokenize.NEWLINE:
current_string = ""
@skip_ignored_lines
def check_using_unicode(logical_line, physical_line, filename):
"""Check crosspython unicode usage
N353
"""
if re.search(r"\bunicode\(", logical_line):
yield (0, "N353 'unicode' function is absent in python3. Please "
"use 'six.text_type' instead.")
def check_raises(physical_line, filename):
"""Check raises usage
N354
"""
ignored_files = ["./tests/unit/test_hacking.py",
"./tests/hacking/checks.py"]
if filename not in ignored_files:
if re_raises.search(physical_line):
return (0, "N354 ':Please use ':raises Exception: conditions' "
"in docstrings.")
@skip_ignored_lines
def check_old_type_class(logical_line, physical_line, filename):
"""Use new-style Python classes
N355
"""
if re_old_type_class.search(logical_line):
yield (0, "N355 This class does not inherit from anything and thus "
"will be an old-style class by default. Try to inherit from "
"``object`` or another new-style class.")
@skip_ignored_lines
def check_datetime_alias(logical_line, physical_line, filename):
"""Ensure using ``dt`` as alias for ``datetime``
N356
"""
if re_datetime_alias.search(logical_line):
yield (0, "N356 Please use ``dt`` as alias for ``datetime``.")
@skip_ignored_lines
def check_no_six_iteritems(logical_line, physical_line, filename):
"""Check no six.iteritems
N357
"""
if re.search(r"\six.iteritems\(\)", logical_line):
yield (0, "N357 Use dict.items() instead of six.iteritems()")
@skip_ignored_lines
def check_db_imports_in_cli(logical_line, physical_line, filename):
"""Ensure that CLI modules do not use ``rally.common.db``
N360
"""
if (not filename.startswith("./rally/cli")
or filename == "./rally/cli/commands/db.py"):
return
if re_db_import.search(logical_line):
yield (0, "N360 CLI modules do not allow to work with "
"`rally.common.db``.")
@skip_ignored_lines
def check_objects_imports_in_cli(logical_line, physical_line, filename):
"""Ensure that CLI modules do not use ``rally.common.objects``
N361
"""
if not filename.startswith("./rally/cli"):
return
if re_objects_import.search(logical_line):
yield (0, "N361 CLI modules do not allow to work with "
"`rally.common.objects``.")
@skip_ignored_lines
def check_log_warn(logical_line, physical_line, filename):
    """Check that deprecated LOG.warn is not used
    N313
    """
    if re_log_warn.search(logical_line):
        yield (0, "N313 LOG.warn is deprecated, please use LOG.warning")
@skip_ignored_lines
def check_opts_import_path(logical_line, physical_line, filename):
"""Ensure that we load opts from correct paths only
N342
"""
excluded_files = ["./rally_openstack/__init__.py"]
forbidden_methods = [".register_opts("]
if filename not in excluded_files:
for forbidden_method in forbidden_methods:
if logical_line.find(forbidden_method) != -1:
yield (0, "N342 All options should be loaded from correct "
"paths only - rally_openstack.cfg module.")
def factory(register):
register(check_assert_methods_from_mock)
register(check_import_of_logging)
register(check_import_of_config)
register(no_use_conf_debug_check)
register(assert_true_instance)
register(assert_equal_type)
register(assert_equal_none)
register(assert_true_or_false_with_in)
register(assert_equal_in)
register(assert_equal_true_or_false)
register(check_no_direct_rally_objects_import)
register(check_no_oslo_deprecated_import)
register(check_quotes)
register(check_no_constructor_data_struct)
register(check_dict_formatting_in_string)
register(check_using_unicode)
register(check_raises)
register(check_datetime_alias)
register(check_db_imports_in_cli)
register(check_objects_imports_in_cli)
register(check_old_type_class)
register(check_no_six_iteritems)
register(check_log_warn)
register(check_opts_import_path)
|
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='carafe_layer_cuda',
ext_modules=[
CUDAExtension('carafe_layer_cuda', [
'src/carafe_layer_cuda.cpp',
'src/carafe_layer_kernel.cu',
])
],
cmdclass={
'build_ext': BuildExtension
})
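# Typical build invocation for this extension (a sketch; assumes PyTorch and a matching
# CUDA toolkit are installed):
#   pip install .
# or, for an in-place development build:
#   python setup.py build_ext --inplace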
|
from numpy import *
import numpy as np
# from numba import jit
# @jit
def detrending_coeff(win_len, order):
    # Least-squares detrending: fit a polynomial of the given order over a
    # window of win_len points centred on zero and return the coefficient
    # matrix (A^T A)^-1 A^T (the pseudo-inverse of A) together with the design matrix A.
    # e.g. win_len = 51, order = 2
    n = (win_len - 1) // 2
    A = mat(ones((win_len, order + 1)))
    x = np.arange(-n, n + 1)
    for j in range(0, order + 1):
        A[:, j] = mat(x ** j).T
    coeff_output = (A.T * A).I * A.T
    return coeff_output, A
# coeff_output,A = detrending_coeff(5,2)
# print(coeff_output)
# print(A)
|
# Author: Javad Amirian
# Email: amiryan.j@gmail.com
import xml.etree.ElementTree as et
import numpy as np
import pandas as pd
from opentraj.toolkit.core.trajdataset import TrajDataset
from opentraj.toolkit.utils.calibration.camera_calibration_tsai import *
def load_pets(path, **kwargs):
"""
    :param path: path to the annotation file
    :param kwargs: optional keyword arguments: calib_path, title, scene_id, fps, sampling_rate, use_kalman
    :param calib_path: path to the camera calibration file
    :return: TrajDataset object
"""
traj_dataset = TrajDataset()
annot_xtree = et.parse(path)
annot_xroot = annot_xtree.getroot() # dataset
cp, cc = None, None # calibration parameters
# load calibration
calib_path = kwargs.get('calib_path', "")
if calib_path:
cp = CameraParameters()
cc = CalibrationConstants()
calib_xtree = et.parse(calib_path)
calib_xroot = calib_xtree.getroot() # Camera
geometry_node = calib_xroot.find("Geometry")
width = int(geometry_node.attrib["width"])
height = int(geometry_node.attrib["height"])
cp.Ncx = float(geometry_node.attrib["ncx"])
cp.Nfx = float(geometry_node.attrib["nfx"])
cp.dx = float(geometry_node.attrib["dx"])
cp.dy = float(geometry_node.attrib["dy"])
cp.dpx = float(geometry_node.attrib["dpx"])
cp.dpy = float(geometry_node.attrib["dpy"])
intrinsic_node = calib_xroot.find("Intrinsic")
cc.f = float(intrinsic_node.attrib["focal"])
cc.kappa1 = float(intrinsic_node.attrib["kappa1"]) # 1st order radial distortion
cp.Cx = float(intrinsic_node.attrib["cx"])
cp.Cy = float(intrinsic_node.attrib["cy"])
cp.sx = float(intrinsic_node.attrib["sx"])
extrinsic_node = calib_xroot.find("Extrinsic")
cc.Tx = float(extrinsic_node.attrib["tx"])
cc.Ty = float(extrinsic_node.attrib["ty"])
cc.Tz = float(extrinsic_node.attrib["tz"])
cc.Rx = float(extrinsic_node.attrib["rx"])
cc.Ry = float(extrinsic_node.attrib["ry"])
cc.Rz = float(extrinsic_node.attrib["rz"])
cc.calc_rr() # Calculate Rotation Matrix
loaded_data = [] # frame_id, agent_id, pos_x, pos_y, xc, yc, h, w
for frame_node in annot_xroot:
objectlist_node = frame_node.find("objectlist") # .text
object_nodes = objectlist_node.findall("object")
frame_id = int(frame_node.attrib.get("number"))
for obj_node in object_nodes:
agent_id = obj_node.attrib["id"]
box_node = obj_node.find("box")
xc = float(box_node.attrib["xc"])
yc = float(box_node.attrib["yc"])
h = float(box_node.attrib["h"])
w = float(box_node.attrib["w"])
x_ground = xc
y_ground = yc + h/2
if cp:
pos_x, pos_y = image_coord_to_world_coord(x_ground, y_ground, 0, cp, cc)
else:
pos_x, pos_y = np.nan, np.nan
loaded_data.append([frame_id, agent_id, pos_x / 1000., pos_y / 1000., xc, yc, h, w])
data_columns = ["frame_id", "agent_id", "pos_x", "pos_y",
"xc", "yc", "h", "w"]
raw_dataset = pd.DataFrame(np.array(loaded_data), columns=data_columns)
traj_dataset.title = kwargs.get('title', "PETS")
# copy columns
traj_dataset.data[["frame_id", "agent_id",
"pos_x", "pos_y"]] = \
raw_dataset[["frame_id", "agent_id",
"pos_x", "pos_y"]]
traj_dataset.data["scene_id"] = kwargs.get('scene_id', 0)
traj_dataset.data["label"] = "pedestrian"
# post-process
fps = kwargs.get('fps', 7)
sampling_rate = kwargs.get('sampling_rate', 1)
use_kalman = kwargs.get('use_kalman', False)
traj_dataset.postprocess(fps=fps, sampling_rate=sampling_rate, use_kalman=use_kalman)
return traj_dataset
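# Minimal usage sketch (file names below are hypothetical):
#   dataset = load_pets("PETS2009/annotations.xml", calib_path="PETS2009/camera.xml", fps=7)
#   dataset.data[["frame_id", "agent_id", "pos_x", "pos_y"]].head()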
|
"""
Production settings for Estatisticas Facebook project.
- Use WhiteNoise for serving static files
- Use Amazon's S3 for storing uploaded media
- Use mailgun to send emails
- Use Redis for cache
- Use sentry for error logging
"""
import logging
from .base import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven sentry client
# See https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat', ]
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ]
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
RAVEN_MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware']
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['danieldourado.com', ])
# END SITE CONFIGURATION
INSTALLED_APPS += ['gunicorn', ]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
control = 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIRY, AWS_EXPIRY)
AWS_HEADERS = {
'Cache-Control': bytes(control, encoding='latin-1')
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='Estatisticas Facebook <noreply@danieldourado.com>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Estatisticas Facebook]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
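# e.g. with REDIS_URL='redis://127.0.0.1:6379' this yields 'redis://127.0.0.1:6379/0' (cache DB 0)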
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry', ],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console', ],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry', ],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import os
import posixpath
import numpy as np
import pandas
import tables
import warnings
from pyiron_base import GenericParameters, Settings
from pyiron.atomistics.job.potentials import PotentialAbstract, find_potential_file_base
__author__ = "Jan Janssen"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "janssen@mpie.de"
__status__ = "development"
__date__ = "Sep 1, 2017"
s = Settings()
class VaspPotentialAbstract(PotentialAbstract):
"""
Args:
potential_df:
default_df:
selected_atoms:
"""
def __init__(self, potential_df=None, default_df=None, selected_atoms=None):
if potential_df is None:
potential_df = self._get_potential_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp.csv"},
backward_compatibility_name="vasppotentials",
)
super(VaspPotentialAbstract, self).__init__(
potential_df=potential_df,
default_df=default_df,
selected_atoms=selected_atoms,
)
def default(self):
if self._default_df is not None:
return pandas.concat(
[
self._potential_df[
(
self._potential_df["Name"]
== self._default_df.loc[atom].values[0]
)
]
for atom in self._selected_atoms
]
)
return None
def find_default(self, element):
if isinstance(element, set):
element = element
elif isinstance(element, list):
element = set(element)
elif isinstance(element, str):
element = set([element])
else:
raise TypeError("Only, str, list and set supported!")
element_lst = list(element)
if self._default_df is not None:
merged_lst = list(set(self._selected_atoms + element_lst))
return pandas.concat(
[
self._potential_df[
(
self._potential_df["Name"]
== self._default_df.loc[atom].values[0]
)
]
for atom in merged_lst
]
)
return None
def find(self, element):
if isinstance(element, set):
element = element
elif isinstance(element, list):
element = set(element)
elif isinstance(element, str):
element = set([element])
else:
raise TypeError("Only, str, list and set supported!")
element_lst = list(element)
merged_lst = list(set(self._selected_atoms + element_lst))
return pandas.concat(
[super(VaspPotentialAbstract, self).find({atom}) for atom in merged_lst]
)
def list(self):
if len(self._selected_atoms) != 0:
return pandas.concat(
[
super(VaspPotentialAbstract, self).find({atom})
for atom in self._selected_atoms
]
)
else:
return pandas.DataFrame({})
def list_potential_names(self):
df = self.list()
if len(df) != 0:
return list(self.list()["Name"])
else:
return []
@staticmethod
def _return_potential_file(file_name):
for resource_path in s.resource_paths:
resource_path_potcar = os.path.join(
resource_path, "vasp", "potentials", file_name
)
if os.path.exists(resource_path_potcar):
return resource_path_potcar
return None
def __dir__(self):
return [val.replace("-", "_") for val in self.list_potential_names()]
def __getitem__(self, item):
item_replace = item.replace("_gga_pbe", "-gga-pbe").replace("_lda", "-lda")
if item_replace in self.list_potential_names():
df = self.list()
return self._return_potential_file(
file_name=list(df[df["Name"] == item_replace]["Filename"])[0][0]
)
selected_atoms = self._selected_atoms + [item]
return VaspPotentialAbstract(
potential_df=self._potential_df,
default_df=self._default_df,
selected_atoms=selected_atoms,
)
class VaspPotentialFile(VaspPotentialAbstract):
"""
The Potential class is derived from the PotentialAbstract class, but instead of loading the potentials from a list,
the potentials are loaded from a file.
Args:
xc (str): Exchange correlation functional ['PBE', 'LDA']
"""
def __init__(self, xc=None, selected_atoms=None):
potential_df = self._get_potential_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp.csv"},
backward_compatibility_name="vasppotentials",
)
if xc == "PBE":
default_df = self._get_potential_default_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp_pbe_default.csv"},
backward_compatibility_name="defaultvasppbe",
)
potential_df = potential_df[(potential_df["Model"] == "gga-pbe")]
elif xc == "GGA":
default_df = self._get_potential_default_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp_pbe_default.csv"},
backward_compatibility_name="defaultvasppbe",
)
potential_df = potential_df[(potential_df["Model"] == "gga-pbe")]
elif xc == "LDA":
default_df = self._get_potential_default_df(
plugin_name="vasp",
file_name_lst={"potentials_vasp_lda_default.csv"},
backward_compatibility_name="defaultvasplda",
)
potential_df = potential_df[(potential_df["Model"] == "lda")]
else:
raise ValueError(
                'The exchange correlation functional has to be set and it can be "GGA", "PBE" or "LDA"'
)
super(VaspPotentialFile, self).__init__(
potential_df=potential_df,
default_df=default_df,
selected_atoms=selected_atoms,
)
def add_new_element(self, parent_element, new_element):
"""
Adding a new user defined element with a different POTCAR file. It is assumed that the file exists
Args:
parent_element (str): Parent element
            new_element (str): Name of the new element (the name of the folder where the new POTCAR file exists)
"""
ds = self.find_default(element=parent_element)
ds["Species"].values[0][0] = new_element
path_list = ds["Filename"].values[0][0].split("/")
path_list[-2] = new_element
name_list = ds["Name"].values[0].split("-")
name_list[0] = new_element
ds["Name"].values[0] = "-".join(name_list)
ds["Filename"].values[0][0] = "/".join(path_list)
self._potential_df = self._potential_df.append(ds)
if new_element not in self._default_df.index.values:
ds = pandas.Series()
ds.name = new_element
ds["Name"] = "-".join(name_list)
self._default_df = self._default_df.append(ds)
else:
self._default_df.loc[new_element] = "-".join(name_list)
class VaspPotential(object):
"""
The Potential class is derived from the PotentialAbstract class, but instead of loading the potentials from a list,
the potentials are loaded from a file.
Args:
path (str): path to the potential list
"""
def __init__(self, selected_atoms=None):
self.pbe = VaspPotentialFile(xc="PBE", selected_atoms=selected_atoms)
self.lda = VaspPotentialFile(xc="LDA", selected_atoms=selected_atoms)
class VaspPotentialSetter(object):
def __init__(self, element_lst):
super(VaspPotentialSetter, self).__setattr__("_element_lst", element_lst)
super(VaspPotentialSetter, self).__setattr__(
"_potential_dict", {el: None for el in element_lst}
)
def __getattr__(self, item):
if item in self._element_lst:
return item
else:
raise AttributeError
def __setitem__(self, key, value):
self.__setattr__(key=key, value=value)
def __setattr__(self, key, value):
if key in self._element_lst:
self._potential_dict[key] = value
else:
raise AttributeError
def to_dict(self):
return self._potential_dict
def __repr__(self):
return self._potential_dict.__repr__()
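# Usage sketch (illustrative; the element names are arbitrary):
#   setter = VaspPotentialSetter(["Fe", "Al"])
#   setter.Fe = "Fe_pv"      # accepted because "Fe" is in the element list
#   setter.to_dict()         # -> {"Fe": "Fe_pv", "Al": None}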
def find_potential_file(path):
return find_potential_file_base(
path=path,
resource_path_lst=s.resource_paths,
rel_path=os.path.join("vasp", "potentials")
)
def get_enmax_among_species(symbol_lst, return_list=False, xc="PBE"):
"""
DEPRECATED: Please use `get_enmax_among_potentials`.
Given a list of species symbols, finds the largest applicable encut.
Args:
symbol_lst (list): The list of species symbols.
        return_list (bool): Whether to return the list of all ENMAX values (in the same order as `symbol_lst`) along
            with the largest value. (Default is False.)
xc ("GGA"/"PBE"/"LDA"): The exchange correlation functional for which the POTCARs were generated. (Default is "PBE".)
Returns:
(float): The largest ENMAX among the POTCAR files for all the species.
[optional](list): The ENMAX value corresponding to each species.
"""
warnings.warn(("get_enmax_among_species is deprecated as of v0.3.0. Please use get_enmax_among_potentials and note "
+ "the adjustment to the signature (*args instead of list)"), DeprecationWarning)
return get_enmax_among_potentials(*symbol_lst, return_list=return_list, xc=xc)
def get_enmax_among_potentials(*names, return_list=False, xc="PBE"):
"""
Given potential names without XC information or elemental symbols, look over all the corresponding POTCAR files and
find the largest ENMAX value.
e.g. `get_enmax_among_potentials('Mg', 'Al_GW', 'Ca_pv', 'Ca_sv', xc='LDA')`
Args:
*names (str): Names of potentials or elemental symbols
return_list (bool): Whether to return the list of all ENMAX values (in the same order as `names` as a second
return value after providing the largest value). (Default is False.)
xc ("GGA"/"PBE"/"LDA"): The exchange correlation functional for which the POTCARs were generated.
(Default is "PBE".)
Returns:
(float): The largest ENMAX among the POTCAR files for all the requested names.
[optional](list): The ENMAX value corresponding to each species.
"""
def _get_just_element_from_name(name):
return name.split('_')[0]
def _get_index_of_exact_match(name, potential_names):
try:
return np.argwhere([name == strip_xc_from_potential_name(pn) for pn in potential_names])[0, 0]
except IndexError:
raise ValueError("Couldn't find {} among potential names for {}".format(name,
_get_just_element_from_name(name)))
def _get_potcar_filename(name, exch_corr):
potcar_table = VaspPotentialFile(xc=exch_corr).find(_get_just_element_from_name(name))
return potcar_table['Filename'].values[
_get_index_of_exact_match(name, potcar_table['Name'].values)
][0]
enmax_lst = []
for n in names:
with open(find_potential_file(path=_get_potcar_filename(n, xc))) as pf:
for i, line in enumerate(pf):
if i == 14:
encut_str = line.split()[2][:-1]
enmax_lst.append(float(encut_str))
break
if return_list:
return max(enmax_lst), enmax_lst
else:
return max(enmax_lst)
def strip_xc_from_potential_name(name):
return name.split('-')[0]
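# e.g. strip_xc_from_potential_name("Fe_pv-gga-pbe") -> "Fe_pv"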
class Potcar(GenericParameters):
pot_path_dict = {"GGA": "paw-gga-pbe", "PBE": "paw-gga-pbe", "LDA": "paw-lda"}
def __init__(self, input_file_name=None, table_name="potcar"):
GenericParameters.__init__(
self,
input_file_name=input_file_name,
table_name=table_name,
val_only=False,
comment_char="#",
)
self._structure = None
self.electrons_per_atom_lst = list()
self.max_cutoff_lst = list()
self.el_path_lst = list()
self.el_path_dict = dict()
self.modified_elements = dict()
def potcar_set_structure(self, structure, modified_elements):
self._structure = structure
self._set_default_path_dict()
self._set_potential_paths()
self.modified_elements = modified_elements
def modify(self, **modify):
if "xc" in modify:
xc_type = modify["xc"]
self._set_default_path_dict()
if xc_type not in self.pot_path_dict:
raise ValueError("xc type not implemented: " + xc_type)
GenericParameters.modify(self, **modify)
if self._structure is not None:
self._set_potential_paths()
def _set_default_path_dict(self):
if self._structure is None:
return
vasp_potentials = VaspPotentialFile(xc=self.get("xc"))
for i, el_obj in enumerate(self._structure.get_species_objects()):
if isinstance(el_obj.Parent, str):
el = el_obj.Parent
else:
el = el_obj.Abbreviation
if isinstance(el_obj.tags, dict):
if "pseudo_potcar_file" in el_obj.tags.keys():
new_element = el_obj.tags["pseudo_potcar_file"]
vasp_potentials.add_new_element(
parent_element=el, new_element=new_element
)
key = vasp_potentials.find_default(el).Species.values[0][0]
val = vasp_potentials.find_default(el).Name.values[0]
self[key] = val
def _set_potential_paths(self):
element_list = (
self._structure.get_species_symbols()
) # .ElementList.getSpecies()
object_list = self._structure.get_species_objects()
s.logger.debug("element list: {0}".format(element_list))
self.el_path_lst = list()
try:
xc = self.get("xc")
except tables.exceptions.NoSuchNodeError:
xc = self.get("xc")
s.logger.debug("XC: {0}".format(xc))
vasp_potentials = VaspPotentialFile(xc=xc)
for i, el_obj in enumerate(object_list):
if isinstance(el_obj.Parent, str):
el = el_obj.Parent
else:
el = el_obj.Abbreviation
if (
isinstance(el_obj.tags, dict)
and "pseudo_potcar_file" in el_obj.tags.keys()
):
new_element = el_obj.tags["pseudo_potcar_file"]
vasp_potentials.add_new_element(
parent_element=el, new_element=new_element
)
el_path = find_potential_file(
path=vasp_potentials.find_default(new_element)["Filename"].values[
0
][0]
)
if not (os.path.isfile(el_path)):
raise ValueError("such a file does not exist in the pp directory")
elif el in self.modified_elements.keys():
new_element = self.modified_elements[el]
if os.path.isabs(new_element):
el_path = new_element
else:
vasp_potentials.add_new_element(
parent_element=el, new_element=new_element
)
el_path = find_potential_file(
path=vasp_potentials.find_default(new_element)["Filename"].values[
0
][0]
)
else:
el_path = find_potential_file(
path=vasp_potentials.find_default(el)["Filename"].values[0][0]
)
if not (os.path.isfile(el_path)):
raise AssertionError()
pot_name = "pot_" + str(i)
if pot_name in self._dataset["Parameter"]:
try:
ind = self._dataset["Parameter"].index(pot_name)
except (ValueError, IndexError):
indices = np.core.defchararray.find(
self._dataset["Parameter"], pot_name
)
ind = np.where(indices == 0)[0][0]
self._dataset["Value"][ind] = el_path
self._dataset["Comment"][ind] = ""
else:
self._dataset["Parameter"].append("pot_" + str(i))
self._dataset["Value"].append(el_path)
self._dataset["Comment"].append("")
self.el_path_lst.append(el_path)
def write_file(self, file_name, cwd=None):
"""
Args:
file_name:
cwd:
Returns:
"""
self.electrons_per_atom_lst = list()
self.max_cutoff_lst = list()
self._set_potential_paths()
if cwd is not None:
file_name = posixpath.join(cwd, file_name)
f = open(file_name, "w")
for el_file in self.el_path_lst:
with open(el_file) as pot_file:
for i, line in enumerate(pot_file):
f.write(line)
if i == 1:
self.electrons_per_atom_lst.append(int(float(line)))
elif i == 14:
mystr = line.split()[2][:-1]
self.max_cutoff_lst.append(float(mystr))
f.close()
def load_default(self):
file_content = """\
xc GGA # LDA, GGA
"""
self.load_string(file_content)
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1Event(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'action': 'str',
'api_version': 'str',
'deprecated_count': 'int',
'deprecated_first_timestamp': 'datetime',
'deprecated_last_timestamp': 'datetime',
'deprecated_source': 'V1EventSource',
'event_time': 'datetime',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'note': 'str',
'reason': 'str',
'regarding': 'V1ObjectReference',
'related': 'V1ObjectReference',
'reporting_controller': 'str',
'reporting_instance': 'str',
'series': 'V1beta1EventSeries',
'type': 'str'
}
attribute_map = {
'action': 'action',
'api_version': 'apiVersion',
'deprecated_count': 'deprecatedCount',
'deprecated_first_timestamp': 'deprecatedFirstTimestamp',
'deprecated_last_timestamp': 'deprecatedLastTimestamp',
'deprecated_source': 'deprecatedSource',
'event_time': 'eventTime',
'kind': 'kind',
'metadata': 'metadata',
'note': 'note',
'reason': 'reason',
'regarding': 'regarding',
'related': 'related',
'reporting_controller': 'reportingController',
'reporting_instance': 'reportingInstance',
'series': 'series',
'type': 'type'
}
def __init__(self, action=None, api_version=None, deprecated_count=None, deprecated_first_timestamp=None, deprecated_last_timestamp=None, deprecated_source=None, event_time=None, kind=None, metadata=None, note=None, reason=None, regarding=None, related=None, reporting_controller=None, reporting_instance=None, series=None, type=None):
"""
V1beta1Event - a model defined in Swagger
"""
self._action = None
self._api_version = None
self._deprecated_count = None
self._deprecated_first_timestamp = None
self._deprecated_last_timestamp = None
self._deprecated_source = None
self._event_time = None
self._kind = None
self._metadata = None
self._note = None
self._reason = None
self._regarding = None
self._related = None
self._reporting_controller = None
self._reporting_instance = None
self._series = None
self._type = None
self.discriminator = None
if action is not None:
self.action = action
if api_version is not None:
self.api_version = api_version
if deprecated_count is not None:
self.deprecated_count = deprecated_count
if deprecated_first_timestamp is not None:
self.deprecated_first_timestamp = deprecated_first_timestamp
if deprecated_last_timestamp is not None:
self.deprecated_last_timestamp = deprecated_last_timestamp
if deprecated_source is not None:
self.deprecated_source = deprecated_source
self.event_time = event_time
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if note is not None:
self.note = note
if reason is not None:
self.reason = reason
if regarding is not None:
self.regarding = regarding
if related is not None:
self.related = related
if reporting_controller is not None:
self.reporting_controller = reporting_controller
if reporting_instance is not None:
self.reporting_instance = reporting_instance
if series is not None:
self.series = series
if type is not None:
self.type = type
@property
def action(self):
"""
Gets the action of this V1beta1Event.
What action was taken/failed regarding to the regarding object.
:return: The action of this V1beta1Event.
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""
Sets the action of this V1beta1Event.
What action was taken/failed regarding to the regarding object.
:param action: The action of this V1beta1Event.
:type: str
"""
self._action = action
@property
def api_version(self):
"""
Gets the api_version of this V1beta1Event.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1Event.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1Event.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1Event.
:type: str
"""
self._api_version = api_version
@property
def deprecated_count(self):
"""
Gets the deprecated_count of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:return: The deprecated_count of this V1beta1Event.
:rtype: int
"""
return self._deprecated_count
@deprecated_count.setter
def deprecated_count(self, deprecated_count):
"""
Sets the deprecated_count of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:param deprecated_count: The deprecated_count of this V1beta1Event.
:type: int
"""
self._deprecated_count = deprecated_count
@property
def deprecated_first_timestamp(self):
"""
Gets the deprecated_first_timestamp of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:return: The deprecated_first_timestamp of this V1beta1Event.
:rtype: datetime
"""
return self._deprecated_first_timestamp
@deprecated_first_timestamp.setter
def deprecated_first_timestamp(self, deprecated_first_timestamp):
"""
Sets the deprecated_first_timestamp of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:param deprecated_first_timestamp: The deprecated_first_timestamp of this V1beta1Event.
:type: datetime
"""
self._deprecated_first_timestamp = deprecated_first_timestamp
@property
def deprecated_last_timestamp(self):
"""
Gets the deprecated_last_timestamp of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:return: The deprecated_last_timestamp of this V1beta1Event.
:rtype: datetime
"""
return self._deprecated_last_timestamp
@deprecated_last_timestamp.setter
def deprecated_last_timestamp(self, deprecated_last_timestamp):
"""
Sets the deprecated_last_timestamp of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:param deprecated_last_timestamp: The deprecated_last_timestamp of this V1beta1Event.
:type: datetime
"""
self._deprecated_last_timestamp = deprecated_last_timestamp
@property
def deprecated_source(self):
"""
Gets the deprecated_source of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:return: The deprecated_source of this V1beta1Event.
:rtype: V1EventSource
"""
return self._deprecated_source
@deprecated_source.setter
def deprecated_source(self, deprecated_source):
"""
Sets the deprecated_source of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:param deprecated_source: The deprecated_source of this V1beta1Event.
:type: V1EventSource
"""
self._deprecated_source = deprecated_source
@property
def event_time(self):
"""
Gets the event_time of this V1beta1Event.
Required. Time when this Event was first observed.
:return: The event_time of this V1beta1Event.
:rtype: datetime
"""
return self._event_time
@event_time.setter
def event_time(self, event_time):
"""
Sets the event_time of this V1beta1Event.
Required. Time when this Event was first observed.
:param event_time: The event_time of this V1beta1Event.
:type: datetime
"""
if event_time is None:
raise ValueError("Invalid value for `event_time`, must not be `None`")
self._event_time = event_time
@property
def kind(self):
"""
Gets the kind of this V1beta1Event.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1Event.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1Event.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1Event.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1Event.
:return: The metadata of this V1beta1Event.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1Event.
:param metadata: The metadata of this V1beta1Event.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def note(self):
"""
Gets the note of this V1beta1Event.
Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
:return: The note of this V1beta1Event.
:rtype: str
"""
return self._note
@note.setter
def note(self, note):
"""
Sets the note of this V1beta1Event.
Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
:param note: The note of this V1beta1Event.
:type: str
"""
self._note = note
@property
def reason(self):
"""
Gets the reason of this V1beta1Event.
Why the action was taken.
:return: The reason of this V1beta1Event.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this V1beta1Event.
Why the action was taken.
:param reason: The reason of this V1beta1Event.
:type: str
"""
self._reason = reason
@property
def regarding(self):
"""
Gets the regarding of this V1beta1Event.
The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
:return: The regarding of this V1beta1Event.
:rtype: V1ObjectReference
"""
return self._regarding
@regarding.setter
def regarding(self, regarding):
"""
Sets the regarding of this V1beta1Event.
The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
:param regarding: The regarding of this V1beta1Event.
:type: V1ObjectReference
"""
self._regarding = regarding
@property
def related(self):
"""
Gets the related of this V1beta1Event.
Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
:return: The related of this V1beta1Event.
:rtype: V1ObjectReference
"""
return self._related
@related.setter
def related(self, related):
"""
Sets the related of this V1beta1Event.
Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
:param related: The related of this V1beta1Event.
:type: V1ObjectReference
"""
self._related = related
@property
def reporting_controller(self):
"""
Gets the reporting_controller of this V1beta1Event.
Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
:return: The reporting_controller of this V1beta1Event.
:rtype: str
"""
return self._reporting_controller
@reporting_controller.setter
def reporting_controller(self, reporting_controller):
"""
Sets the reporting_controller of this V1beta1Event.
Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
:param reporting_controller: The reporting_controller of this V1beta1Event.
:type: str
"""
self._reporting_controller = reporting_controller
@property
def reporting_instance(self):
"""
Gets the reporting_instance of this V1beta1Event.
ID of the controller instance, e.g. `kubelet-xyzf`.
:return: The reporting_instance of this V1beta1Event.
:rtype: str
"""
return self._reporting_instance
@reporting_instance.setter
def reporting_instance(self, reporting_instance):
"""
Sets the reporting_instance of this V1beta1Event.
ID of the controller instance, e.g. `kubelet-xyzf`.
:param reporting_instance: The reporting_instance of this V1beta1Event.
:type: str
"""
self._reporting_instance = reporting_instance
@property
def series(self):
"""
Gets the series of this V1beta1Event.
Data about the Event series this event represents or nil if it's a singleton Event.
:return: The series of this V1beta1Event.
:rtype: V1beta1EventSeries
"""
return self._series
@series.setter
def series(self, series):
"""
Sets the series of this V1beta1Event.
Data about the Event series this event represents or nil if it's a singleton Event.
:param series: The series of this V1beta1Event.
:type: V1beta1EventSeries
"""
self._series = series
@property
def type(self):
"""
Gets the type of this V1beta1Event.
Type of this event (Normal, Warning), new types could be added in the future.
:return: The type of this V1beta1Event.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1beta1Event.
Type of this event (Normal, Warning), new types could be added in the future.
:param type: The type of this V1beta1Event.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1Event):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
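# Minimal usage sketch (assumes the surrounding kubernetes client models are importable):
#   import datetime
#   event = V1beta1Event(event_time=datetime.datetime(2020, 1, 1), action="Scheduled", reason="Scheduling")
#   event.to_dict()["action"]  # -> "Scheduled"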
|
#task 1
nyaam = float (input('enter a length in cm: '))
if nyaam < 0:
print ('entry is invalid')
else:
res = nyaam / 2.54
print (res, 'inch')
#task 2
whoosh = int (input ('how many credits have you taken? '))
if whoosh > 0 and whoosh < 24:
print ('congrats, you a freshman!')
elif whoosh > 23 and whoosh < 54:
print ('congrats, you a sophomore!')
elif whoosh > 53 and whoosh < 84:
print ('congrats, you a junior!')
elif whoosh > 83:
print ('congrats, you a senior!')
elif whoosh <= 0:
print ('you haven\'t any credits, fool')
#task3
from random import randrange
jeffry = randrange(10)
goat = float (input ('guess the number between 0 n 10: '))
if goat == jeffry:
print ('you\'re right!')
else:
print ('that\'s not it, pal')
print (jeffry)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mp.models.segmentation.unet_fepegar import UNet2D
### UNet Wrapper ###
class UNet2D_dis(UNet2D):
r"""Wrapper for UNet2D to access encoder and decoder seperately.
"""
def __init__(self, *args, **kwargs):
super(UNet2D_dis, self).__init__(*args, **kwargs)
def forward_enc(self, x):
skip_connections, encoding = self.encoder(x)
encoding = self.bottom_block(encoding)
return skip_connections, encoding
def forward_dec(self, skip_connections, encoding):
x = self.decoder(skip_connections, encoding)
if self.monte_carlo_layer is not None:
x = self.monte_carlo_layer(x)
return self.classifier(x)
### MODULES ###
class EncoderStyle(nn.Module):
r"""Style Encoder (VAE).
"""
def __init__(self, in_channels):
super(EncoderStyle, self).__init__()
layers = []
layers += [ConvBlock(in_channels=in_channels, out_channels=256)]
layers += [ConvPoolBlock(in_channels=256, out_channels=64, pooling=False)]
layers += [ConvPoolBlock(in_channels=64, out_channels=128, pooling=True)]
layers += [ConvPoolBlock(in_channels=128, out_channels=128, pooling=False)]
layers += [ConvPoolBlock(in_channels=128, out_channels=192, pooling=True)]
layers += [ConvPoolBlock(in_channels=192, out_channels=192, pooling=False)]
layers += [ConvPoolBlock(in_channels=192, out_channels=256, pooling=True)]
global_pool = [nn.LeakyReLU(), nn.AdaptiveMaxPool2d(output_size=(3,3))]
self.global_pool = nn.Sequential(*global_pool)
self.layers = nn.Sequential(*layers)
self.dense_mu = nn.Linear(in_features=3*3*256, out_features=1)
self.dense_var = nn.Linear(in_features=3*3*256, out_features=1)
def forward(self, x):
x = self.layers(x)
x = self.global_pool(x)
mu = self.dense_mu(x.view(x.shape[0], -1))
log_var = self.dense_var(x.view(x.shape[0], -1))
return [mu, log_var]
class LatentScaler(nn.Module):
r"""Scales samples from style encoding to be injected into the generator.
"""
def __init__(self, in_features):
super(LatentScaler, self).__init__()
layers = [nn.Linear(in_features=in_features, out_features=500), nn.LeakyReLU()]
layers += [nn.Linear(in_features=500, out_features=1024), nn.LeakyReLU()]
for _ in range(0, 2):
layers += [nn.Linear(in_features=1024, out_features=1024), nn.LeakyReLU()]
layers += [nn.Linear(in_features=1024, out_features=2560), nn.Tanh()]
self.layers = nn.Sequential(*layers)
def forward(self, x):
        x = self.layers(x).reshape(x.shape[0], 10, -1)  # 10 occurrences of 256 filters each
return x
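# Shape sketch: an input of shape (N, in_features) is expanded to (N, 2560) by the dense
# stack above and reshaped to (N, 10, 256), i.e. ten 256-dimensional scale vectors per sample.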
class Generator(nn.Module):
r"""Generator using content encoding, scaled style encoding (see LatentScaler) and domain_code to generate images.
"""
def __init__(self, in_channels, out_channels, domain_code_size):
super(Generator, self).__init__()
layers_BCIN = [ResBlockBCIN(in_channels=in_channels, out_channels=in_channels, layer_id=0, stride=1, padding=1, domain_code_size=domain_code_size)]
for i in range(0,4):
layers_BCIN += [ResBlockBCIN(in_channels=in_channels, out_channels=in_channels, layer_id=i+1, stride=1, padding=1, domain_code_size=domain_code_size)]
layers = [nn.ConvTranspose2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU()]
layers += [nn.ConvTranspose2d(in_channels=in_channels, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU()]
layers += [nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU()]
layers += [nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU()]
layers += [nn.ConvTranspose2d(in_channels=64, out_channels=out_channels, kernel_size=7, stride=1, padding=3), nn.Sigmoid()]
self.layers_BCIN = MultiInSequential(*layers_BCIN)
self.layers = nn.Sequential(*layers)
def forward(self, content, latent_scale, domain_code):
content, latent_scale, domain_code = self.layers_BCIN(content, latent_scale, domain_code)
x = self.layers(content)
return x
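# Rough data flow (assuming a content encoding of shape (B, in_channels, H, W)): the five
# ResBlockBCIN blocks keep the spatial size while injecting latent_scale and domain_code,
# then four stride-2 transposed convolutions upsample by roughly 16x before the final
# 7x7 convolution with Sigmoid produces the output image.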
class DiscriminatorDomain(nn.Module):
r"""Domain Discriminator.
"""
def __init__(self, in_channels, domain_code_size, max_channels=512, kernel_size=4, stride=2):
super(DiscriminatorDomain, self).__init__()
layers = [ConvBlockBCIN(in_channels=in_channels, out_channels=64, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size)]
layers += [ConvBlockBCIN(in_channels=64, out_channels=128, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size)]
layers += [ConvBlockBCIN(in_channels=128, out_channels=max_channels//2, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size)]
layers += [ConvBlockBCIN(in_channels=max_channels//2, out_channels=max_channels, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size)]
layers += [ConvBlockBCIN(in_channels=max_channels, out_channels=1, kernel_size=kernel_size, stride=stride, domain_code_size=domain_code_size, normalization='None')]
self.layers = MultiInSequential(*layers)
self.linear = nn.Linear(in_features=7**2, out_features=1)
self.activation = nn.Sigmoid()
def forward(self, x, domain_code):
x, domain_code = self.layers(x, domain_code)
x = x.view(x.shape[0],-1)
x = self.linear(x)
return x
class DiscriminatorContent(nn.Module):
r"""Unet-style Content Discriminator.
"""
def __init__(self, in_channels, domain_code_size, max_channels=512, kernel_size=3, stride=2):
super(DiscriminatorContent, self).__init__()
self.in_channels = 16
self.in_channels_max = 128
self.out_channels = 32
self.out_channels_max = 256
padding = 1
self.conv_0 = nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels*2**1, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm_0 = nn.BatchNorm2d(self.in_channels*2**1)
self.activation_0 = nn.ReLU()
self.conv_1 = nn.Conv2d(in_channels=self.in_channels*2**1, out_channels=self.in_channels*2**2, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm_1 = nn.BatchNorm2d(self.in_channels*2**2)
self.activation_1 = nn.ReLU()
self.conv_2 = nn.Conv2d(in_channels=self.in_channels*2**2, out_channels=self.in_channels*2**3, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm_2 = nn.BatchNorm2d(self.in_channels*2**3)
self.activation_2 = nn.ReLU()
self.conv_3 = nn.Conv2d(in_channels=self.in_channels*2**3, out_channels=self.in_channels*2**4, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm_3 = nn.BatchNorm2d(self.in_channels*2**4)
self.activation_3 = nn.ReLU()
self.conv_4 = nn.Conv2d(in_channels=self.in_channels*2**4, out_channels=1, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm_4 = nn.BatchNorm2d(1)
self.activation_4 = nn.ReLU()
self.dense = nn.Linear(in_features = 8**2, out_features=domain_code_size)
self.softmax = nn.Softmax(dim=1)
def forward(self, skip_connections, content_x):
out = self.conv_0(skip_connections[0])
out = self.norm_0(out)
out = self.activation_0(out)
out = self.conv_1(skip_connections[1] + out)
out = self.norm_1(out)
out = self.activation_1(out)
out = self.conv_2(skip_connections[2] + out)
out = self.norm_2(out)
out = self.activation_2(out)
out = self.conv_3(skip_connections[3] + out)
out = self.norm_3(out)
out = self.activation_3(out)
out = self.conv_4(content_x + out)
out = self.norm_4(out)
out = self.activation_4(out)
out = self.dense(out.reshape(content_x.shape[0], -1))
out = self.softmax(out)
return out
def center_crop(self, skip_connection, x):
skip_shape = torch.tensor(skip_connection.shape)
x_shape = torch.tensor(x.shape)
crop = skip_shape[2:] - x_shape[2:]
half_crop = crop // 2
# If skip_connection is 10, 20, 30 and x is (6, 14, 12)
# Then pad will be (-2, -2, -3, -3, -9, -9)
pad = -torch.stack((half_crop, half_crop)).t().flatten()
skip_connection = F.pad(skip_connection, pad.tolist())
return skip_connection
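# Note: forward expects the list of skip connections produced by UNet2D_dis.forward_enc
# (presumably ordered from highest to lowest resolution) plus the bottleneck content
# encoding; each skip is summed with the progressively downsampled features before the
# final dense layer and softmax over domain classes.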
### BUILDING BLOCKS ###
class ConvBlock(nn.Module):
r"""Convolutional Block with normalization and activation.
"""
def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, activation=nn.LeakyReLU, normalization='Instance'):
super(ConvBlock, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.normalization = normalization
if self.normalization == 'Instance':
self.norm = nn.InstanceNorm2d(num_features=out_channels, affine=False) # not learnable
if self.normalization =='BatchNorm':
self.norm = nn.BatchNorm2d(num_features=out_channels)
self.activation = activation()
def forward(self,x):
x = self.conv(x)
if self.normalization in ['Instance', 'BatchNorm']:
x = self.norm(x)
x = self.activation(x)
return x
class ConvPoolBlock(nn.Module):
r"""Convolutional Block with normalization, activation and pooling.
"""
def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, pooling=True, activation=nn.LeakyReLU):
super(ConvPoolBlock, self).__init__()
self.pooling = pooling
self.norm= nn.InstanceNorm2d(num_features=out_channels, affine=False) # not learnable
self.activation = activation()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.pool = nn.AvgPool2d(kernel_size=kernel_size)
def forward(self, x):
x = self.norm(x)
x = self.activation(x)
x = self.conv(x)
if self.pooling:
x = self.pool(x)
return x
class ConvBlockBCIN(nn.Module):
r"""Convolutional Block with BCIN normalization and activation.
"""
def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, activation=nn.LeakyReLU, domain_code_size=10, normalization='BCIN'):
super(ConvBlockBCIN, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm = BCIN(out_channels, domain_code_size) # not learnable
self.activation = activation()
self.normalization = normalization
def forward(self, x, domain_code):
x = self.conv(x)
if self.normalization == 'BCIN':
x = self.norm(x, domain_code)
x = self.activation(x)
return x, domain_code
class ResBlockIN(nn.Module):
r"""Residual Block consisting of two convolutions with skip connection, instance normalization and activation.
"""
def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, activation=nn.ReLU):
super(ResBlockIN, self).__init__()
self.conv0 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.conv1 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm0 = nn.InstanceNorm2d(num_features=out_channels, affine=False) # not learnable
self.norm1 = nn.InstanceNorm2d(num_features=out_channels, affine=False) # not learnable
self.activation = activation()
def forward(self, x):
x_in = x
x = self.conv0(x)
x = self.norm0(x)
x = self.activation(x)
x = self.conv1(x)
x = self.norm1(x)
x += self.center_crop(x_in, x)
return x
def center_crop(self, skip_connection, x):
skip_shape = torch.tensor(skip_connection.shape)
x_shape = torch.tensor(x.shape)
crop = skip_shape[2:] - x_shape[2:]
half_crop = crop // 2
# If skip_connection is 10, 20, 30 and x is (6, 14, 12)
# Then pad will be (-2, -2, -3, -3, -9, -9)
pad = -torch.stack((half_crop, half_crop)).t().flatten()
skip_connection = F.pad(skip_connection, pad.tolist())
return skip_connection
class ResBlockBCIN(nn.Module):
r"""Residual Block consisting of two convolutions with skip connection, BCIN normalization and activation.
"""
def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=0, activation=nn.ReLU, domain_code_size=10, layer_id=0):
super(ResBlockBCIN, self).__init__()
self.conv0 = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.conv1 = nn.ConvTranspose2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm0 = BCIN(num_features=out_channels, domain_code_size=domain_code_size, affine=True) # learnable
self.norm1 = BCIN(num_features=out_channels, domain_code_size=domain_code_size, affine=True) # learnable
self.activation = activation()
self.layer_id = layer_id
def forward(self, x, latent_scale, domain_code):
x_in = x
x = self.conv0(x)
x = torch.mul(x, latent_scale[:,self.layer_id*2,:][:,:,None,None])
x = self.norm0(x, domain_code)
x = self.activation(x)
x = self.conv1(x)
x = torch.mul(x, latent_scale[:,self.layer_id*2+1,:][:,:,None,None])
x = self.norm1(x, domain_code)
x += self.center_crop(x_in, x)
return x, latent_scale, domain_code
def center_crop(self, skip_connection, x):
skip_shape = torch.tensor(skip_connection.shape)
x_shape = torch.tensor(x.shape)
crop = skip_shape[2:] - x_shape[2:]
half_crop = crop // 2
# If skip_connection is 10, 20, 30 and x is (6, 14, 12)
# Then pad will be (-2, -2, -3, -3, -9, -9)
pad = -torch.stack((half_crop, half_crop)).t().flatten()
skip_connection = F.pad(skip_connection, pad.tolist())
return skip_connection
### NORMALIZATION ###
class BCIN(nn.Module):
r"""Central Biasing Instance Normalization
https://arxiv.org/abs/1806.10050
"""
def __init__(self, num_features, domain_code_size, affine=True, instance_norm=False, batch_norm=False):
super(BCIN, self).__init__()
self.W = nn.Parameter(torch.rand(domain_code_size), requires_grad=affine)
self.b = nn.Parameter(torch.rand(1), requires_grad=affine)
self.activation = nn.Tanh()
self.instance_norm = instance_norm
if self.instance_norm:
print('Using instance_norm instead of BCIN')
self.i_norm = torch.nn.InstanceNorm2d(num_features=num_features)
self.batch_norm = batch_norm
        if self.batch_norm:
print('Using batch_norm instead of BCIN')
self.b_norm = torch.nn.BatchNorm2d(num_features=num_features)
def forward(self, x, domain_code):
x_var = torch.sqrt(torch.var(x, (1,2,3))) # instance std
x_mean = torch.mean(x, (1,2,3)) # instance mean
bias = torch.matmul(domain_code, self.W) * self.b
bias_scaled = self.activation(bias)
if self.instance_norm:
return self.i_norm(x)
if self.batch_norm:
return self.b_norm(x)
return ((x-x_mean[:,None,None,None]) / x_var[:,None,None,None]) + bias_scaled[:,None,None,None]
### HELPER MODULES ###
class MultiInSequential(nn.Sequential):
r"""Sequential class that allows multiple inputs for forward function
"""
def forward(self, *input):
for module in self._modules.values():
input = module(*input)
return input
|
"""Tornado handlers for nbgrader assignment list web service."""
import os
import json
import contextlib
import traceback
from tornado import web
from notebook.utils import url_path_join as ujoin
from nbgrader.exchange import ExchangeFactory
from nbgrader.coursedir import CourseDirectory
from nbgrader.auth import Authenticator
from nbgrader.server_extensions.assignment_list.handlers import (
AssignmentList,
default_handlers,
BaseAssignmentHandler,
)
static = os.path.join(os.path.dirname(__file__), "static")
@contextlib.contextmanager
def chdir(dirname):
    currdir = os.getcwd()
    os.chdir(dirname)
    try:
        yield
    finally:
        os.chdir(currdir)
class E2xAssignmentList(AssignmentList):
def submit_assignment(self, course_id, assignment_id):
with self.get_assignment_dir_config() as config:
try:
config = self.load_config()
config.CourseDirectory.course_id = course_id
config.CourseDirectory.assignment_id = assignment_id
coursedir = CourseDirectory(config=config)
authenticator = Authenticator(config=config)
submit = ExchangeFactory(config=config).Submit(
coursedir=coursedir, authenticator=authenticator, config=config
)
retval = submit.start()
hashcode = "Exchange not set up for hashcode"
timestamp = "Exchange not set up for timestamp"
if retval and len(retval) == 2:
hashcode, timestamp = retval
except Exception:
self.log.error(traceback.format_exc())
retvalue = {"success": False, "value": traceback.format_exc()}
else:
retvalue = {
"success": True,
"hashcode": hashcode,
"timestamp": timestamp,
}
self.log.info(retvalue)
return retvalue
class AssignmentActionHandler(BaseAssignmentHandler):
@web.authenticated
def post(self, action):
if action == "fetch":
assignment_id = self.get_argument("assignment_id")
course_id = self.get_argument("course_id")
self.manager.fetch_assignment(course_id, assignment_id)
self.finish(json.dumps(self.manager.list_assignments(course_id=course_id)))
elif action == "submit":
assignment_id = self.get_argument("assignment_id")
course_id = self.get_argument("course_id")
output = self.manager.submit_assignment(course_id, assignment_id)
if output["success"]:
response = self.manager.list_assignments(course_id=course_id)
response["hashcode"] = output["hashcode"]
response["timestamp"] = output["timestamp"]
self.finish(json.dumps(response))
else:
self.finish(json.dumps(output))
elif action == "fetch_feedback":
assignment_id = self.get_argument("assignment_id")
course_id = self.get_argument("course_id")
self.manager.fetch_feedback(course_id, assignment_id)
self.finish(json.dumps(self.manager.list_assignments(course_id=course_id)))
# -----------------------------------------------------------------------------
# URL to handler mappings
# -----------------------------------------------------------------------------
_assignment_action_regex = r"(?P<action>fetch|submit|fetch_feedback)"
e2x_default_handlers = [
(r"/assignments/%s" % _assignment_action_regex, AssignmentActionHandler),
]
def load_jupyter_server_extension(nbapp):
"""Load the nbserver"""
nbapp.log.info("Loading the assignment_list e2xgrader serverextension")
webapp = nbapp.web_app
webapp.settings["assignment_list_manager"] = E2xAssignmentList(parent=nbapp)
base_url = webapp.settings["base_url"]
webapp.add_handlers(
".*$",
[
(ujoin(base_url, pat), handler)
for pat, handler in e2x_default_handlers + default_handlers
],
)
|
# Copyright 2016 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os_xenapi.client.i18n import _
class OsXenApiException(Exception):
"""Base OsXenapi Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
code = 500
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
message = self.msg_fmt % kwargs
self.message = message
super(OsXenApiException, self).__init__(message)
def format_message(self):
# NOTE(mrodden): use the first argument to the python Exception object
# which should be our full NovaException message, (see __init__)
return self.args[0]
class PluginRetriesExceeded(OsXenApiException):
msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.")
class SessionLoginTimeout(OsXenApiException):
msg_fmt = _("Unable to log in to XenAPI (is the Dom0 disk full?)")
|
import os
import shutil
from multiprocessing.pool import Pool
import cv2
import numpy as np
from functools import partial
from path import Path
def process_scene(input_directory, output_folder):
K = np.array([[525.0, 0.0, 320.0],
[0.0, 525.0, 240.0],
[0.0, 0.0, 1.0]])
print("processing", input_directory)
image_filenames = sorted(input_directory.files("*color.png"))
pose_filenames = sorted(input_directory.files("*pose.txt"))
poses = []
for pose_filename in pose_filenames:
pose = np.loadtxt(pose_filename)
poses.append(pose)
scene = input_directory.split("/")[-2]
seq = input_directory.split("/")[-1]
current_output_dir = output_folder / scene + "-" + seq
if os.path.isdir(current_output_dir):
if os.path.exists("{}/poses.txt".format(current_output_dir)) and os.path.exists("{}/K.txt".format(current_output_dir)):
return scene
else:
shutil.rmtree(current_output_dir)
os.mkdir(current_output_dir)
os.mkdir(os.path.join(current_output_dir, "images"))
output_poses = []
for current_index in range(len(image_filenames)):
image = cv2.imread(image_filenames[current_index])
output_poses.append(poses[current_index].ravel().tolist())
cv2.imwrite("{}/images/{}.png".format(current_output_dir, str(current_index).zfill(6)), image, [cv2.IMWRITE_PNG_COMPRESSION, 3])
output_poses = np.array(output_poses)
np.savetxt("{}/poses.txt".format(current_output_dir), output_poses)
np.savetxt("{}/K.txt".format(current_output_dir), K)
return scene
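# Each processed sequence ends up as <output_folder>/<scene>-<seq>/ containing
# images/000000.png ..., poses.txt (one flattened 4x4 pose per row) and K.txt with the
# shared pinhole intrinsics defined above.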
def main():
input_folder = Path("/home/share/dataset/7scenes")
output_folder = Path("/home/nhsmt1123/master-thesis/deep-video-mvs/data/7scenes")
input_directories = [
input_folder / "redkitchen/seq-01",
input_folder / "redkitchen/seq-07",
input_folder / "chess/seq-01",
input_folder / "chess/seq-02",
input_folder / "heads/seq-02",
input_folder / "fire/seq-01",
input_folder / "fire/seq-02",
input_folder / "office/seq-01",
input_folder / "office/seq-03",
input_folder / "pumpkin/seq-03",
input_folder / "pumpkin/seq-06",
input_folder / "stairs/seq-02",
input_folder / "stairs/seq-06", # train
input_folder / "redkitchen/seq-03",
input_folder / "chess/seq-03",
input_folder / "heads/seq-01",
input_folder / "fire/seq-03",
input_folder / "fire/seq-04",
input_folder / "office/seq-02",
input_folder / "pumpkin/seq-01",
input_folder / "stairs/seq-01"] # test
pool = Pool(6)
for finished_scene in pool.imap_unordered(partial(process_scene, output_folder=output_folder), input_directories):
print("finished", finished_scene)
    pool.close()
    pool.join()
if __name__ == '__main__':
main()
|
import pygame
import math
import glob
import os
tilesize = 128 # pixels per tile
def tiletosurface(tile):
pass
def maptosurface(sx,sy,ex,ey,oholmap):
pass
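# (stubs) tiletosurface is presumably meant to turn a single map tile into a
# pygame.Surface using the sprites loaded in main, and maptosurface to compose the tiles
# between (sx, sy) and (ex, ey) of an OHOLMap into one surface; neither is implemented yet.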
def main(windowsize,tilepipe,OHOLMap):
wt = math.floor(windowsize/tilesize)
cx,cy,first = 0,0,True
if OHOLMap.data != {}:
for x in OHOLMap.data:
for y in OHOLMap.data[x]:
if not first:
break
cx,cy = x,y
first = False
print("Loading sprites")
sprites = glob.glob("./OneLifeData/sprites/*.tga")
loadedsprites = {}
print("Found {} sprites, loading...".format(len(sprites)))
for sprite in sprites:
spriteid = os.path.basename(sprite).split(".")[0]
loadedsprites[spriteid] = pygame.image.load(sprite)
# do other loading things...
tilepipe.send("READY")
# main loop goes here
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import Http404
from core.models import (Category, Article, Source, BaseUserProfile,
BookmarkArticle, ArticleLike, HashTag, Menu, Notification, Devices,
SocialAccount, Category, CategoryAssociation,
TrendingArticle, Domain, DailyDigest, DraftMedia, Comment,
Subscription)
from rest_framework.authtoken.models import Token
from rest_framework.views import APIView
from .serializers import (CategorySerializer, ArticleSerializer, UserSerializer,
SourceSerializer, LoginUserSerializer, BaseUserProfileSerializer,
BookmarkArticleSerializer, ArticleLikeSerializer, HashTagSerializer,
MenuSerializer, NotificationSerializer, TrendingArticleSerializer,
ArticleCreateUpdateSerializer, DraftMediaSerializer, CommentSerializer,
CommentListSerializer, SubsMediaSerializer, UserProfileSerializer)
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework import filters
from newscout_web.constants import SOCIAL_AUTH_PROVIDERS
from django.db.models import Q
from rest_framework.exceptions import APIException
from collections import OrderedDict
from rest_framework import generics, viewsets
from rest_framework.pagination import CursorPagination
from rest_framework.generics import ListAPIView
from rest_framework.parsers import JSONParser
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from datetime import datetime, timedelta
from django.db.models import Count, Max, Min
import pytz
import uuid
from core.utils import es, ingest_to_elastic, delete_from_elastic
from elasticsearch_dsl import Search
import math
from rest_framework.utils.urls import replace_query_param
from google.auth.transport import requests as grequests
from google.oauth2 import id_token
import facebook
from .exception_handler import (create_error_response, TokenIDMissing, ProviderMissing,
SocialAuthTokenException)
import logging
import operator
from functools import reduce
import tweepy
import json
from captcha.models import CaptchaStore
from captcha.helpers import captcha_image_url
log = logging.getLogger(__name__)
def create_response(response_data):
"""
method used to create response data in given format
"""
response = OrderedDict()
response["header"] = {"status": "1"}
response["body"] = response_data
return response
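# Example (illustrative): create_response({"Msg": "ok"}) returns
# OrderedDict([("header", {"status": "1"}), ("body", {"Msg": "ok"})]).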
def create_serializer_error_response(errors):
"""
    method used to create an error response for serializer errors
"""
error_list = []
for k, v in errors.items():
if isinstance(v, dict):
_, v = v.popitem()
d = {}
d["field"] = k
d["field_error"] = v[0]
error_list.append(d)
return OrderedDict({"header": {"status": "0"}, "errors": {
"errorList": error_list}})
class SignUpAPIView(APIView):
permission_classes = (AllowAny,)
def post(self, request, *args, **kwargs):
user_serializer = UserSerializer(data=request.data)
if user_serializer.is_valid():
user_serializer.save()
return Response(create_response({"Msg": "sign up successfully"}))
else:
return Response(
create_serializer_error_response(user_serializer.errors),
status=403)
class LoginFieldsRequired(APIException):
"""
    api exception raised when username and password are missing
"""
status_code = 401
default_detail = ("username and password are required")
default_code = "username_and_password"
class LoginAPIView(generics.GenericAPIView):
serializer_class = LoginUserSerializer
permission_classes = (AllowAny,)
def post(self, request, format=None):
serializer = LoginUserSerializer(data=request.data)
if not serializer.is_valid():
res_data = create_serializer_error_response(serializer.errors)
return Response(res_data, status=403)
user = BaseUserProfile.objects.filter(email=request.data["email"]).first()
device_name = request.data.get("device_name")
device_id = request.data.get("device_id")
if device_id and device_name:
device, _ = Devices.objects.get_or_create(user=user,
device_name=device_name,
device_id=device_id)
notification_obj, _ = Notification.objects.get_or_create(device=device)
notification = NotificationSerializer(notification_obj)
user_serializer = BaseUserProfileSerializer(user)
token, _ = Token.objects.get_or_create(user=user)
data = user_serializer.data
data["token"] = token.key
if device_id and device_name:
data["breaking_news"] = notification.data['breaking_news']
data["daily_edition"] = notification.data['daily_edition']
data["personalized"] = notification.data['personalized']
response_data = create_response({"user": data})
return Response(response_data)
class LogoutAPIView(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request, format=None):
request.user.auth_token.delete()
return Response(create_response({"Msg": "User has been logged out"}))
class UserHashTagAPIView(APIView):
"""
Save new tags and remove older tags based on user selection
"""
permission_classes = (IsAuthenticated,)
parser_classes = (JSONParser,)
def post(self, request, format=None):
user = self.request.user
hash_tags = request.data["tags"]
user_tags = HashTag.objects.filter(name__in=hash_tags)
if user_tags:
user.passion.clear()
user.passion.add(*user_tags)
return Response(create_response({"Msg": "Successfully saved tags"}))
return Response(create_error_response({"Msg": "Invalid tags"}), status=400)
class CategoryListAPIView(APIView):
permission_classes = (AllowAny,)
def get(self, request, format=None, *args, **kwargs):
"""
        List all news categories
"""
categories = CategorySerializer(Category.objects.all(), many=True)
return Response(create_response({"categories": categories.data}))
def post(self, request, format=None):
"""
Save new category to database
"""
if request.user.is_authenticated:
serializer = CategorySerializer(data=request.data, many=True)
if serializer.is_valid():
serializer.save()
return Response(create_response(serializer.data))
return Response(create_error_response(serializer.errors), status=400)
raise Http404
def put(self, request, format=None):
"""
update category in database
"""
if request.user.is_authenticated:
_id = request.data.get("id")
category = Category.objects.get(id=_id)
serializer = CategorySerializer(category, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(create_response(serializer.data))
return Response(create_error_response(serializer.errors), status=400)
raise Http404
class SourceListAPIView(APIView):
permission_classes = (AllowAny,)
def get(self, request, format=None, *args, **kwargs):
"""
List all the sources
"""
source = SourceSerializer(Source.objects.all(), many=True)
return Response(create_response({"results": source.data}))
class NoarticleFound(APIException):
"""
    api exception for no article found
"""
status_code = 404
default_detail = ("Article does not exist")
default_code = "no_article_found"
class PostpageNumberPagination(CursorPagination):
page_size = 10
page_size_query_param = 'page_size'
ordering = '-created_at'
class ArticleListAPIView(ListAPIView):
serializer_class = ArticleSerializer
permission_classes = (AllowAny,)
pagination_class = PostpageNumberPagination
filter_backends = (filters.OrderingFilter,)
ordering = ('-published_on',)
def get_queryset(self):
q = self.request.GET.get("q", "")
tag = self.request.GET.getlist("tag", "")
category = self.request.GET.getlist("category", "")
source = self.request.GET.getlist("source", "")
queryset = Article.objects.all()
        if getattr(self.request.user, "domain", None):
queryset = queryset.filter(domain=self.request.user.domain)
else:
queryset = Article.objects.none()
if source:
queryset = queryset.filter(source__name__in=source)
if category:
queryset = queryset.filter(category__name__in=category)
if tag:
queryset = queryset.filter(hash_tags__name__in=tag)
if q:
q_list = q.split(" ")
condition_1 = reduce(operator.or_, [Q(title__icontains=s) for s in q_list])
condition_2 = reduce(operator.or_, [Q(full_text__icontains=s) for s in q_list])
queryset = queryset.filter(condition_1 | condition_2)
return queryset
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
if serializer.data:
paginated_response = self.get_paginated_response(serializer.data)
return Response(create_response(paginated_response.data))
else:
return Response(create_error_response({"Msg": "News Doesn't Exist"}), status=400)
class ArticleDetailAPIView(APIView):
permission_classes = (AllowAny,)
def get(self, request, format=None, *args, **kwargs):
slug = self.kwargs.get("slug", "")
user = self.request.user
article = Article.objects.filter(slug=slug).first()
has_subscribed = False
if not self.request.user.is_anonymous and \
Subscription.objects.filter(
                user=self.request.user).exclude(subs_type='Basic').exists():
has_subscribed = True
try:
next_article = Article.objects.filter(id__gt=article.id).order_by("id")[0:1].get().slug
except Exception as error:
print(error)
next_article = Article.objects.aggregate(Min("id"))['id__min']
try:
            prev_article = Article.objects.filter(id__lt=article.id).order_by("-id")[0:1].get().slug
except Exception as error:
print(error)
prev_article = Article.objects.aggregate(Max("id"))['id__max']
if article:
response_data = ArticleSerializer(article, context={
"hash_tags_list": True, 'has_subscribed': has_subscribed}).data
if not user.is_anonymous:
book_mark_article = BookmarkArticle.objects.filter(
user=user, article=article).first()
like_article = ArticleLike.objects.filter(
user=user, article=article).first()
if book_mark_article:
response_data["isBookMark"] = True
else:
response_data["isBookMark"] = False
if like_article:
response_data["isLike"] = like_article.is_like
else:
response_data["isLike"] = 2
return Response(create_response({
"article": response_data, "next_article": next_article, "prev_article": prev_article}))
raise NoarticleFound
def post(self, request, *args, **kwargs):
if request.user.is_authenticated:
article_id = self.request.POST.get("article_id", "")
is_like = self.request.POST.get("isLike", "")
user = self.request.user
article = Article.objects.filter(id=article_id).first()
if article:
if is_like and int(is_like) <= 2:
article_like, created = ArticleLike.objects.get_or_create(
user=user, article=article)
article_like.is_like = is_like
article_like.save()
serializer = ArticleLikeSerializer(article_like)
return Response(create_response({
"Msg": "Article like status changed", "article": serializer.data
}))
else:
return Response(create_error_response({
"Msg": "Invalid Input"
}))
else:
return Response(create_error_response({"Msg": "News doesn't exist"}), status=400)
raise Http404
class ArticleBookMarkAPIView(APIView):
permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
if request.data:
article_id = request.data["article_id"]
else:
article_id = self.request.POST.get("article_id", "")
user = self.request.user
if article_id:
article = Article.objects.filter(id=article_id).first()
if article:
bookmark_article, created = \
BookmarkArticle.objects.get_or_create(user=user,
article=article)
if not created:
del_bookmark_article = BookmarkArticleSerializer(bookmark_article)
del_bookmark = del_bookmark_article.data
del_bookmark["status"] = 0
bookmark_article.delete()
return Response(create_response({
"Msg": "Article removed from bookmark list", "bookmark_article": del_bookmark
}))
else:
bookmark_article = BookmarkArticleSerializer(bookmark_article)
return Response(create_response({
"Msg": "Article bookmarked successfully", "bookmark_article": bookmark_article.data
}))
raise NoarticleFound
class ArticleRecommendationsAPIView(APIView):
permission_classes = (AllowAny,)
def format_response(self, response):
results = []
if response['hits']['hits']:
for result in response['hits']['hits']:
results.append(result["_source"])
return results
def get(self, request, *args, **kwargs):
article_id = self.kwargs.get("article_id", "")
if article_id:
results = es.search(index='recommendation', body={"query": {"match": {"id": int(article_id)}}})
if results['hits']['hits']:
recommendation = results['hits']['hits'][0]['_source']['recommendation']
search_results = es.search(index='article', body={
"query": {"terms": {"id": recommendation}}, "size": 25})
return Response(create_response({
"results": self.format_response(search_results)
}))
return Response(create_error_response({
"Msg": "Error generating recommendation"
}))
class ForgotPasswordAPIView(APIView):
permission_classes = (AllowAny,)
def genrate_password(self, password_length=10):
"""
        Returns a random password of length password_length.
"""
random = str(uuid.uuid4())
random = random.upper()
random = random.replace("-", "")
return random[0:password_length]
def send_mail_to_user(self, email, password, first_name="", last_name=""):
username = first_name + " " + last_name
email_subject = 'NewsPost: Forgot Password Request'
email_body = """
<html>
<head>
</head>
<body>
<p>
Hello """ + username + """,<br><br><b>
""" + password + """</b> is your new password
<br>
<br>
Thanks,<br>
The NewsPost Team<br>
</p>
</body>
</html>"""
msg = EmailMultiAlternatives(
email_subject, '', settings.EMAIL_FROM, [email])
ebody = email_body
msg.attach_alternative(ebody, "text/html")
msg.send(fail_silently=False)
def post(self, request, *args, **kwargs):
email = request.data["email"]
if email:
user = BaseUserProfile.objects.filter(email=email)
if user:
user = user.first()
password = self.genrate_password()
self.send_mail_to_user(
email, password, user.first_name, user.last_name)
user.set_password(password)
user.save()
return Response(create_response({
"Msg": "New password sent to your email"
}))
return Response(create_error_response({
"Msg": "Email Does Not Exist"
}))
class ChangePasswordAPIView(APIView):
permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
if request.data:
password = request.data["password"]
old_password = request.data["old_password"]
confirm_password = request.data["confirm_password"]
else:
password = self.request.POST.get("password", "")
old_password = self.request.POST.get("old_password", "")
confirm_password = self.request.POST.get("confirm_password", "")
user = self.request.user
if old_password:
if not user.check_password(old_password):
msg = "Old Password Does Not Match With User"
return Response(create_error_response({
"Msg": msg, "field": "old_password"
}))
if confirm_password != password:
msg = "Password and Confirm Password does not match"
return Response(create_error_response({
"Msg": msg, "field": "confirm_password"
}))
if old_password == password:
msg = "New password should not same as Old password"
return Response(create_error_response({
"Msg": msg, "field": "password"
}))
if user and password:
user.set_password(password)
user.save()
return Response(create_response({
"Msg": "Password changed successfully", "field": "confirm_password"
}))
else:
return Response(create_error_response({
"Msg": "Password field is required", "field": "password"
}))
else:
return Response(create_error_response({
"Msg": "Old Password field is required", "field": "old_password"
}))
class BookmarkArticleAPIView(APIView):
"""
This class is used to get user bookmark list
"""
permission_classes = (IsAuthenticated,)
def get(self, request):
user = self.request.user
bookmark_list = BookmarkArticleSerializer(BookmarkArticle.objects.filter(user=user), many=True)
return Response(create_response({"results": bookmark_list.data}))
class ArticleLikeAPIView(APIView):
"""
This class is used to get user articles
"""
permission_classes = (IsAuthenticated,)
def get(self, request):
like_list = [0, 1]
user = self.request.user
article_list = ArticleLikeSerializer(ArticleLike.objects.filter(user=user, is_like__in=like_list), many=True)
return Response(create_response({"results": article_list.data}))
class HashTagAPIView(ListAPIView):
serializer_class = HashTagSerializer
permission_classes = (AllowAny,)
def get_queryset(self):
weekly = self.request.GET.get("weekly", "")
monthly = self.request.GET.get("monthly", "")
end = datetime.utcnow()
pst = pytz.timezone('Asia/Kolkata')
end = pst.localize(end)
utc = pytz.UTC
end = end.astimezone(utc)
articles = Article.objects.all()
queryset = HashTag.objects.all()
if weekly:
weekly = int(weekly)
start = end - timedelta(days=7 * weekly)
hash_tags = articles.filter(published_on__range=(start, end)).values(
'hash_tags__name').annotate(count=Count('hash_tags')).order_by('-count')[:10]
for hashtag in hash_tags:
hashtag['name'] = hashtag.pop('hash_tags__name')
queryset = hash_tags
if monthly:
monthly = int(monthly)
start = end - timedelta(days=30 * monthly)
hash_tags = articles.filter(published_on__range=(start, end)).values(
'hash_tags__name').annotate(count=Count('hash_tags')).order_by('-count')[:10]
for hashtag in hash_tags:
hashtag['name'] = hashtag.pop('hash_tags__name')
queryset = hash_tags
if not weekly and not monthly:
start = end - timedelta(days=1)
hash_tags = articles.filter(published_on__range=(start, end)).values(
'hash_tags__name').annotate(count=Count('hash_tags')).order_by('-count')[:10]
for hashtag in hash_tags:
hashtag['name'] = hashtag.pop('hash_tags__name')
queryset = hash_tags
return queryset
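    # Note: get_queryset returns the 10 most-used hash tag names counted over the last
    # 7*weekly days, 30*monthly days, or (by default) the last day.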
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
if serializer.data:
paginated_response = self.get_paginated_response(serializer.data)
return Response(create_response(paginated_response.data))
else:
return Response(create_error_response({"Msg": "No trending tags"}), status=400)
serializer = self.get_serializer(queryset, many=True)
return Response(create_response(serializer.data))
class ArticleSearchAPI(APIView):
"""
this view is used for article search and filter
"""
permission_classes = (AllowAny,)
def format_response(self, response):
results = []
filters = {}
if response.hits.hits:
for result in response.hits.hits:
source = result["_source"]
if 'highlight' in result:
if 'title' in result['highlight']:
source['title'] = " ".join(result['highlight']['title'])
if 'blurb' in result['highlight']:
source['blurb'] = " ".join(result['highlight']['blurb'])
results.append(source)
if response.aggregations.category.buckets:
filters["category"] = sorted(
response.aggregations.category.buckets._l_,
key=operator.itemgetter("key"))
if response.aggregations.source.buckets:
filters["source"] = sorted(
response.aggregations.source.buckets._l_,
key=operator.itemgetter("key"))
if response.aggregations.hash_tags.buckets:
filters["hash_tags"] = sorted(
response.aggregations.hash_tags.buckets._l_,
key=operator.itemgetter("key"))
return results, filters
def get(self, request):
page = self.request.GET.get("page", "1")
if page.isdigit():
page = int(page)
else:
page = 1
size = self.request.GET.get("rows", "20")
if size.isdigit():
size = int(size)
else:
size = 20
query = self.request.GET.get("q", "")
source = self.request.GET.getlist("source", [])
category = self.request.GET.getlist("category", [])
domain = self.request.GET.getlist("domain", [])
tags = self.request.GET.getlist("tag", [])
sort = self.request.GET.get("sort", "desc")
if not domain:
return Response(create_serializer_error_response({"domain": ["Domain id is required"]}))
        # more_like_this query for related articles
        mlt_fields = ["hash_tags"]
        if source:
            mlt_fields = ["hash_tags", "source", "domain"]
mlt = Search(using=es, index="article").query("more_like_this", fields=mlt_fields,
like=query, min_term_freq=1, max_query_terms=12).source(mlt_fields)
mlt.execute()
sr = Search(using=es, index="article")
# highlight title and blurb containing query
sr = sr.highlight("title", "blurb", fragment_size=20000)
# generate elastic search query
must_query = [{"wildcard": {"cover_image": "*"}}]
should_query = []
if query:
query = query.lower()
must_query.append({"multi_match": {"query": query,
"fields": ["title", "blurb"], 'type': 'phrase'}})
if tags:
tags = [tag.lower().replace("-", " ") for tag in tags]
for tag in tags:
sq = {"match_phrase": {"hash_tags": tag}}
should_query.append(sq)
if must_query:
sr = sr.query("bool", must=must_query)
if should_query:
if len(should_query) > 1:
sr = sr.filter("bool", should=should_query)
else:
sr = sr.filter("bool", should=should_query[0])
if domain:
sr = sr.filter("terms", domain=list(domain))
if category:
cat_objs = Category.objects.filter(name__in=category)
category = cat_objs.values_list("id", flat=True)
cat_assn_objs = CategoryAssociation.objects.filter(
parent_cat__in=cat_objs).values_list(
"child_cat__id", flat=True)
if cat_assn_objs:
new_category = set(list(cat_assn_objs) + list(category))
sr = sr.filter("terms", category_id=list(new_category))
else:
if category:
sr = sr.filter("terms", category_id=list(category))
if source:
source = [s.lower() for s in source]
sr = sr.filter("terms", source__keyword=source)
sr = sr.sort({"article_score": {"order": sort}})
sr = sr.sort({"published_on": {"order": sort}})
# pagination
start = (page - 1) * size
end = start + size
sr = sr[start:end]
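        # e.g. page=2, size=20 -> start=20, end=40, i.e. the second page of 20 hits.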
# generate facets
sr.aggs.bucket("category", "terms", field="category.keyword")
sr.aggs.bucket("source", "terms", field="source.keyword")
sr.aggs.bucket("hash_tags", "terms", field="hash_tags.keyword", size=50)
# execute query
response = sr.execute()
results, filters = self.format_response(response)
count = response["hits"]["total"]
total_pages = math.ceil(count / size)
url = request.build_absolute_uri()
if end < count:
next_page = page + 1
next_url = replace_query_param(url, "page", next_page)
else:
next_url = None
if page != 1:
previous_page = page - 1
previous_url = replace_query_param(url, "page", previous_page)
else:
previous_url = None
data = {
"results": results,
"filters": filters,
"count": count,
"total_pages": total_pages,
"current_page": page,
"next": next_url,
"previous": previous_url
}
return Response(create_response(data))
class MenuAPIView(APIView):
"""
This Api will return all the menus
"""
permission_classes = (AllowAny,)
def get(self, request):
domain_id = self.request.GET.get("domain")
if not domain_id:
return Response(create_error_response({"domain": ["Domain id is required"]}))
domain = Domain.objects.filter(domain_id=domain_id).first()
if not domain:
return Response(create_error_response({"domain": ["Domain id is required"]}))
menus = MenuSerializer(Menu.objects.filter(domain=domain), many=True)
menus_list = menus.data
new_menulist = []
for menu in menus_list:
menu_dict = {}
menu_dict['heading'] = menu
new_menulist.append(menu_dict)
return Response(create_response({'results': new_menulist}))
class DevicesAPIView(APIView):
"""
this api will add device_id and device_name
"""
permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
user = self.request.user
device_id = self.request.POST.get("device_id", "")
device_name = self.request.POST.get("device_name", "")
if not user.is_anonymous and device_id and device_name:
user_device = Devices.objects.filter(user=user.pk)
if user_device:
user_device.update(device_id=device_id, device_name=device_name, user=user.id)
return Response(create_response({"Msg": "Device successfully created"}))
elif not user_device:
get, created = Devices.objects.get_or_create(device_id=device_id, device_name=device_name, user=user.id)
if created:
return Response(create_response({"Msg": "Device successfully created"}))
else:
return Response(create_response({"Msg": "Device already exist"}))
elif device_id and device_name:
get, created = Devices.objects.get_or_create(device_id=device_id, device_name=device_name)
if created:
return Response(create_response({"Msg": "Device successfully created"}))
else:
return Response(create_response({"Msg": "Device already exist"}))
else:
return Response(create_error_response({"Msg": "device_id and device_name field are required"}))
class NotificationAPIView(APIView):
"""
this api will add notification data
"""
permission_classes = (AllowAny,)
def post(self, request):
device_id = request.data["device_id"]
device_name = request.data["device_name"]
breaking_news = request.data["breaking_news"]
daily_edition = request.data["daily_edition"]
personalized = request.data["personalized"]
device = Devices.objects.get(device_id=device_id, device_name=device_name)
if breaking_news and daily_edition and personalized and device:
notification = Notification.objects.filter(device=device)
if notification:
notification.update(breaking_news=breaking_news, daily_edition=daily_edition, personalized=personalized)
return Response(create_response({"Msg": "Notification updated successfully"}))
Notification.objects.create(breaking_news=breaking_news, daily_edition=daily_edition,
personalized=personalized, device=device)
return Response(create_response({"Msg": "Notification created successfully"}))
else:
return Response(
create_error_response(
{"Msg": "device_id, device_name, breaking_news, daily_edition and personalized are required"}))
def get(self, request):
device_id = request.GET.get("device_id")
device_name = request.GET.get("device_name")
device = Devices.objects.filter(device_id=device_id, device_name=device_name).first()
if device:
            notification = NotificationSerializer(Notification.objects.filter(device=device), many=True)
return Response(create_response(notification.data))
return Response(create_error_response({"Msg": "Invalid device_id or device_name"}))
class SocialLoginView(generics.GenericAPIView):
"""
    this view is used for google and facebook social authentication and login
"""
permission_classes = (AllowAny,)
serializer_class = BaseUserProfileSerializer
def decode_google_token(self, token_id):
"""
this method is used to decode and verify google token
"""
request = grequests.Request()
try:
id_info = id_token.verify_oauth2_token(token_id, request)
return id_info
except Exception as e:
log.debug("error in google token verification {0}".format(e))
return False
def get_name_details(self, id_info):
"""
        this method is used to get first name and last name from id_info
details
"""
first_name = last_name = ""
if "name" in id_info:
name = id_info.get("name")
name_list = name.split(" ")
first_name = name_list[0]
if len(name_list) > 1:
last_name = " ".join(name_list[1:])
if not first_name:
if "given_name" in id_info:
first_name = id_info.get("given_name")
if not last_name:
if "family_name" in id_info:
last_name = id_info.get("family_name")
return first_name, last_name
def create_user_profile(self, first_name, last_name, username, email, image_url, sid, provider):
"""
this method is used to create base user profile object for given
social account
"""
user = BaseUserProfile.objects.filter(email=email).first()
created = ""
if not user:
user = BaseUserProfile.objects.create(
first_name=first_name,
last_name=last_name,
email=email,
username=username
)
sa_obj, created = SocialAccount.objects.get_or_create(
social_account_id=sid,
image_url=image_url,
user=user,
provider=provider
)
# create_profile_image.delay(sa_obj.id)
return user, created
def get_facebook_data(self, token_id):
"""
this method is used to get facebook user data from given access token
"""
graph = facebook.GraphAPI(access_token=token_id)
try:
res_data = graph.get_object(
id='me?fields=email,id,first_name,last_name,name,picture.width(150).height(150)')
return res_data
except Exception as e:
log.debug("error in facebook fetch data: {0}".format(e))
return False
def get_facebook_name_details(self, profile_data):
"""
this method is used to get facebook first_name last_name from profile
data
"""
name = first_name = last_name = ""
if "first_name" in profile_data:
first_name = profile_data.get("first_name")
if "last_name" in profile_data:
last_name = profile_data.get("last_name")
if "name" in profile_data:
name = profile_data.get("name")
name_list = name.split(" ")
if not first_name:
first_name = name_list[0]
if not last_name:
last_name = " ".join(name[1:])
return first_name, last_name
def get_user_serialize_data(self, email, device_id, device_name):
"""
this method will return customize user data
"""
user = BaseUserProfile.objects.filter(email=email).first()
        devices = Devices.objects.filter(user=user.id)
        if devices:
            devices.update(device_name=device_name, device_id=device_id)
            device = devices.first()
        else:
            device, created = Devices.objects.get_or_create(device_name=device_name, device_id=device_id)
            Devices.objects.filter(pk=device.pk).update(user=user)
        notification_obj, _ = Notification.objects.get_or_create(device=device)
        notification = NotificationSerializer(notification_obj)
        token, _ = Token.objects.get_or_create(user=user)
        data = BaseUserProfileSerializer(user).data
        data["token"] = token.key
        data["breaking_news"] = notification.data['breaking_news']
        data["daily_edition"] = notification.data['daily_edition']
        data["personalized"] = notification.data['personalized']
return data
def post(self, request, *args, **kwargs):
"""
        this post method collects google/facebook social auth data
        and generates an authentication api token for the user
"""
token_id = request.data.get("token_id")
provider = request.data.get("provider")
device_id = request.data.get("device_id")
device_name = request.data.get("device_name")
if not token_id:
raise TokenIDMissing()
if not provider:
raise ProviderMissing()
if not device_id:
return Response(create_error_response({"Msg": "device_id is missing or Invalid device_id"}))
if not device_name:
return Response(create_error_response({"Msg": "device_name is missing or Invalid device_name"}))
if provider not in SOCIAL_AUTH_PROVIDERS:
raise ProviderMissing()
if provider == "google":
id_info = self.decode_google_token(token_id)
if not id_info:
raise SocialAuthTokenException()
first_name, last_name = self.get_name_details(id_info)
email = id_info.get("email", "")
if not email:
raise SocialAuthTokenException()
username = email.split("@")[0]
google_id = id_info.get("sub", "")
image_url = id_info.get("picture", "")
user, created = self.create_user_profile(
first_name, last_name, username, email, image_url, google_id, provider)
user_data = self.get_user_serialize_data(email, device_id, device_name)
return Response(create_response({"user": user_data}))
if provider == "facebook":
profile_data = self.get_facebook_data(token_id)
if not profile_data:
raise SocialAuthTokenException()
first_name, last_name = self.get_facebook_name_details(
profile_data)
email = profile_data.get("email")
if not email:
raise SocialAuthTokenException()
            username = email.split("@")[0]
facebook_id = profile_data.get("id", "")
image_url = ""
if "picture" in profile_data:
if "data" in profile_data["picture"]:
image_url = profile_data["picture"]["data"]["url"]
user, created = self.create_user_profile(
first_name, last_name, username, email, image_url, facebook_id, provider)
user_data = self.get_user_serialize_data(email, device_id, device_name)
return Response(create_response({"user": user_data}))
raise ProviderMissing()
class TrendingArticleAPIView(APIView):
permission_classes = (AllowAny,)
def get(self, request, format=None, *args, **kwargs):
"""
List all the trending articles
"""
domain_id = self.request.GET.get("domain")
if not domain_id:
return Response(create_error_response({"domain": ["Domain id is required"]}))
domain = Domain.objects.filter(domain_id=domain_id).first()
if not domain:
return Response(create_error_response({"domain": ["Invalid domain name"]}))
source = TrendingArticleSerializer(TrendingArticle.objects.filter(domain=domain), many=True)
return Response(create_response({"results": source.data}))
class SocailMediaPublishing():
"""
    this class is used to publish news articles on social media
"""
def twitter(self, data):
"""
this function will tweet article title and its url in twitter
"""
try:
auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)
auth.set_access_token(settings.TWITTER_ACCESS_TOKEN, settings.TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
api.update_status(data["title"] + "\n" + data["url"])
except Exception as e:
print("Error in twitter post: ", e)
class ArticleCreateUpdateView(APIView, SocailMediaPublishing):
"""
Article create update view
"""
permission_classes = (IsAuthenticated,)
def get_tags(self, tags):
"""
this method will return tag name from tags objects
"""
tag_list = []
for tag in tags:
tag_list.append(tag["name"])
return tag_list
def publish(self, obj):
serializer = ArticleSerializer(obj)
json_data = serializer.data
if json_data["hash_tags"]:
tag_list = self.get_tags(json_data["hash_tags"])
json_data["hash_tags"] = tag_list
ingest_to_elastic([json_data], "article", "article", "id")
tweet_data = {
"title": serializer.instance.title,
"url": serializer.instance.source_url,
}
self.twitter(tweet_data)
def post(self, request):
publish = request.data.get("publish")
# origin is used to join with cover image
# to generate proper image url
origin = request.META.get("HTTP_ORIGIN")
cover_image_id = request.data.get("cover_image_id")
if cover_image_id:
DraftMedia.objects.filter(id=cover_image_id).delete()
if not request.data.get("cover_image"):
request.data["cover_image"] = "/".join(
[origin, request.user.domain.default_image.url])
context = {"publish": publish, "user": request.user}
serializer = ArticleCreateUpdateSerializer(
data=request.data, context=context)
if serializer.is_valid():
serializer.save()
if publish:
self.publish(serializer.instance)
return Response(create_response(serializer.data))
return Response(create_error_response(serializer.errors), status=400)
def put(self, request):
_id = request.data.get("id")
publish = request.data.get("publish")
# origin is used to join with cover image
# to generate proper image url
origin = request.META.get("HTTP_ORIGIN")
cover_image_id = request.data.get("cover_image_id")
if cover_image_id:
DraftMedia.objects.filter(id=cover_image_id).delete()
if not request.data.get("cover_image"):
request.data["cover_image"] = "/".join(
[origin, request.user.domain.default_image.url])
context = {"publish": publish, "user": request.user}
article = Article.objects.get(id=_id)
serializer = ArticleCreateUpdateSerializer(
article, data=request.data, context=context)
if serializer.is_valid():
serializer.save()
if publish:
self.publish(serializer.instance)
return Response(create_response(serializer.data))
return Response(create_error_response(serializer.errors), status=400)
class ChangeArticleStatusView(APIView, SocailMediaPublishing):
"""
    this view is used to activate or deactivate a given article
"""
permission_classes = (IsAuthenticated,)
def get_tags(self, tags):
"""
this method will return tag name from tags objects
"""
tag_list = []
for tag in tags:
tag_list.append(tag["name"])
return tag_list
def publish(self, obj):
serializer = ArticleSerializer(obj)
json_data = serializer.data
if obj.active:
if json_data["hash_tags"]:
tag_list = self.get_tags(json_data["hash_tags"])
json_data["hash_tags"] = tag_list
ingest_to_elastic([json_data], "article", "article", "id")
tweet_data = {
"title": serializer.instance.title,
"url": serializer.instance.source_url,
}
self.twitter(tweet_data)
else:
delete_from_elastic([json_data], "article", "article", "id")
def post(self, request):
_id = request.data.get("id")
article = Article.objects.filter(id=_id).first()
if not article:
return Response(create_error_response({"error": "Article does not exists"}), status=400)
article.active = request.data.get("activate")
article.save()
self.publish(article)
return Response(create_response({
"id": article.id, "active": article.active}))
class CategoryBulkUpdate(APIView):
"""
    update the category of a whole batch of articles in one go
"""
permission_classes = (IsAuthenticated,)
def get_tags(self, tags):
"""
this method will return tag name from tags objects
"""
tag_list = []
for tag in tags:
tag_list.append(tag["name"])
return tag_list
def post(self, request):
category_id = request.data['categories']
category = Category.objects.get(id=category_id)
for article_id in request.data['articles']:
current = Article.objects.get(id=article_id)
current.category = category
current.save()
serializer = ArticleSerializer(current)
json_data = serializer.data
delete_from_elastic([json_data], "article", "article", "id")
if json_data["hash_tags"]:
tag_list = self.get_tags(json_data["hash_tags"])
json_data["hash_tags"] = tag_list
ingest_to_elastic([json_data], "article", "article", "id")
return Response({"ok": "cool"})
class GetDailyDigestView(ListAPIView):
serializer_class = ArticleSerializer
permission_classes = (AllowAny,)
def format_response(self, response):
results = []
if response.hits.hits:
for result in response.hits.hits:
results.append(result["_source"])
return results
def get_queryset(self):
device_id = self.request.GET.get("device_id", "")
queryset = Devices.objects.filter(device_id=device_id)
dd = DailyDigest.objects.filter(device__in=queryset)
if not queryset.exists() or not dd.exists():
return []
return dd.first().articles.all().order_by("-published_on")
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
if not queryset:
sr = Search(using=es, index="article")
sort = "desc"
sr = sr.sort({"article_score": {"order": sort}})
sr = sr.sort({"published_on": {"order": sort}})
sr = sr[0:20]
response = sr.execute()
results = self.format_response(response)
return Response(create_response({"results": results}))
serializer = self.get_serializer(queryset, many=True)
if serializer.data:
return Response(create_response(serializer.data))
else:
return Response(create_error_response({"Msg": "Daily Digest Doesn't Exist"}), status=400)
class DraftMediaUploadViewSet(viewsets.ViewSet):
"""
this view is used to upload article images
"""
permission_classes = (IsAuthenticated,)
def create(self, request):
image_file = request.data.get("image")
if not image_file:
return Response(create_error_response({"error": "Image file is required."}))
draft_image = DraftMedia.objects.create(image=image_file)
serializer = DraftMediaSerializer(draft_image)
return Response(create_response(serializer.data))
def update(self, request, pk):
image_file = request.data.get("image")
if not image_file:
return Response(create_error_response({"error": "Image file is required."}))
        draft_image = DraftMedia.objects.filter(id=pk).first()
        if not draft_image:
            raise Http404
draft_image.image = image_file
draft_image.save()
serializer = DraftMediaSerializer(draft_image)
return Response(create_response(serializer.data))
def destroy(self, request, pk):
        draft_image = DraftMedia.objects.filter(id=pk).first()
        if not draft_image:
            raise Http404
draft_image.delete()
return Response(create_response({"Msg": "Image deleted successfully"}))
class CommentViewSet(viewsets.ViewSet):
serializer_class = CommentSerializer
permission_classes = (IsAuthenticated,)
pagination_class = PostpageNumberPagination
ordering = "-created_at"
def get_permissions(self):
"""
Instantiates and returns the list of permissions that this view requires.
"""
if self.action == 'list':
self.permission_classes = [AllowAny]
else:
self.permission_classes = [IsAuthenticated]
return [permission() for permission in self.permission_classes]
    def create(self, request):
        captcha_key = request.data.get("captcha_key")
        captcha_value = request.data.get("captcha_value", "")
        captcha = CaptchaStore.objects.filter(hashkey=captcha_key).first()
        if not captcha:
            return Response(create_error_response({"error": "Invalid Captcha"}))
        if captcha.response != captcha_value.lower():
            return Response(create_error_response({"error": "Invalid Captcha"}))
data = request.data.copy()
data["user"] = request.user.id
serializer = CommentSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(create_response({"result": serializer.data}))
return Response(create_error_response({"error": "Enter Valid data"}))
def list(self, request):
article_id = request.GET.get("article_id", "")
if not article_id:
return Response(
create_error_response(
{"error": "Article ID has not been entered by the user"}
)
)
article_obj = Article.objects.filter(id=article_id).first()
if not article_obj:
return Response(create_error_response({"error": "Article does not exist"})
)
comment_list = Comment.objects.filter(article=article_obj, reply=None)
serializer = CommentSerializer(comment_list, many=True)
return Response(
create_response(
{"results": serializer.data, "total_article_likes": ArticleLike.objects.filter(
article=article_obj).count()}))
def destroy(self, request, pk):
comment_obj = Comment.objects.filter(id=pk)
if not comment_obj:
return Response(create_error_response({"error": "Comment does not exist"}))
comment_obj.delete()
return Response(create_response({"Msg": "Comment deleted successfully"}))
class LikeAPIView(APIView):
permission_classes = (IsAuthenticated,)
pagination_class = PostpageNumberPagination
ordering = "-created_at"
def post(self, request):
post_data = request.data.copy()
post_data["user"] = request.user.id
serializer = ArticleLikeSerializer(data=post_data)
if serializer.is_valid():
serializer.save()
if serializer.data.get("id"):
return Response(create_response({"Msg": "Liked"}))
return Response(create_response({"Msg": "Removed Like"}))
return Response(create_error_response({"error": "Invalid Data Entered"}))
class CaptchaCommentApiView(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request):
        captcha_len = CaptchaStore.objects.count()
if captcha_len > 500:
captcha = CaptchaStore.objects.order_by('?')[:1]
to_json_response = dict()
to_json_response['status'] = 1
to_json_response['new_captch_key'] = captcha[0].hashkey
to_json_response['new_captch_image'] = captcha_image_url(to_json_response['new_captch_key'])
return Response(create_response({"result": to_json_response}))
else:
to_json_response = dict()
to_json_response['status'] = 1
to_json_response['new_captch_key'] = CaptchaStore.generate_key()
to_json_response['new_captch_image'] = captcha_image_url(to_json_response['new_captch_key'])
return Response(create_response({"result": to_json_response}))
class AutoCompleteAPIView(generics.GenericAPIView):
permission_classes = (AllowAny,)
def format_response(self, response):
results = []
if response['hits']['hits']:
for result in response['hits']['hits']:
results.append(result["_source"])
return results
def get(self, request):
result_list = []
if request.data:
query = request.data["q"]
else:
query = request.GET.get("q", "")
if query:
results = es.search(
index="auto_suggestions",
body={
"suggest": {
"results": {
"text": query,
"completion": {"field": "name_suggest"},
}
}
},
)
results = results['suggest']['results'][0]['options']
if results:
for result in results:
result_list.append(
{
"value": result["_source"]["name_suggest"],
"key": result["_source"]["desc"],
}
)
return Response(create_response({"result": result_list}))
return Response(create_response({"result": []}))
class SubsAPIView(ListAPIView):
serializer_class = SubsMediaSerializer
permission_classes = (AllowAny,)
pagination_class = PostpageNumberPagination
def get(self, request):
q = self.request.GET.get("q", None)
subs = Subscription.objects.all()
if q:
subs = subs.filter(user__email__icontains=q)
source = SubsMediaSerializer(subs, many=True)
return Response(create_response({"results": source.data}))
class UpdateSubsAPIView(APIView):
serializer_class = SubsMediaSerializer
permission_classes = (AllowAny,)
def get(self, request, pk):
source = SubsMediaSerializer(Subscription.objects.get(id=pk))
return Response(create_response({"results": source.data}))
def post(self, request, *args, **kwargs):
subs_id = self.request.POST.get('id')
subs = Subscription.objects.filter(id=subs_id)
if subs.exists():
subs = subs.first()
subs.subs_type = self.request.POST.get('subs_type')
auto_renew = self.request.POST.get('auto_renew')
if auto_renew == 'No':
subs.auto_renew = False
else:
subs.auto_renew = True
subs.save()
return Response(create_response({"results": "success"}))
return Response(create_response({"results": "error"}))
class UserProfileAPIView(APIView):
permission_classes = (IsAuthenticated, )
def get(self, request, *args, **kwargs):
user = BaseUserProfile.objects.filter(id=self.request.user.id).first()
serializer = UserProfileSerializer(user)
data = serializer.data
response_data = create_response({"user": data})
return Response(response_data)
def put(self, request, format=None):
if request.user.is_authenticated:
if request.data:
_id = request.data["id"]
else:
_id = self.request.POST.get('id')
user = BaseUserProfile.objects.get(id=_id)
serializer = UserProfileSerializer(user, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(create_response({"result":serializer.data, "Msg":"Profile updated successfully."}))
return Response(create_error_response(serializer.errors), status=400)
raise Http404
class AccessSession(APIView):
permission_classes = (AllowAny,)
def get(self, request):
print(request.META.items())
request.session["ip"] = request.META.get('REMOTE_ADDR')
return Response(create_response({"results": request.session._session_key}))
class RSSAPIView(APIView):
permission_classes = (AllowAny,)
def get(self, request):
data = {}
domain = request.GET.get("domain")
if domain:
domain_obj = Domain.objects.filter(domain_id=domain).first()
if domain_obj:
menus = Menu.objects.filter(domain=domain_obj)
for menu in menus:
all_categories = menu.submenu.all()
for category in all_categories:
data[category.name.name] = "/article/rss/?domain=" + domain + "&category=" + category.name.name
return Response(create_response({"results": data}))
            return Response(create_error_response({"error": "Domain does not exist."}))
return Response(create_error_response({"error": "Domain is required"}))
|
import pygame, time
from pygame.locals import *
from random import *
pygame.init()
# Pygame variables
white = (255, 255, 255)
crystal = (162,162,162)
black = (0, 0, 0)
rose = (236,28,115)
red = pygame.Color('#ff0000')
green = pygame.Color('#00ff62')
blue = pygame.Color('#0026ff')
yellow = (222,207,4)
width = 800
height = 600
clock = pygame.time.Clock()
pop_block = pygame.mixer.Sound("Music/pop_block.wav")
# Images
walkRight = [pygame.image.load('Driller/droite1.png'), pygame.image.load('Driller/droite2.png'),
pygame.image.load('Driller/droite3.png'),pygame.image.load('Driller/droite4.png'),
pygame.image.load('Driller/droite5.png'), pygame.image.load('Driller/droite6.png'),
pygame.image.load('Driller/droite7.png'), pygame.image.load('Driller/droite8.png'),
pygame.image.load('Driller/droite9.png')]
walkLeft = [pygame.image.load('Driller/gauche1.png'), pygame.image.load('Driller/gauche2.png'),
pygame.image.load('Driller/gauche3.png'),pygame.image.load('Driller/gauche4.png'),
pygame.image.load('Driller/gauche5.png'),pygame.image.load('Driller/gauche6.png'),
pygame.image.load('Driller/gauche7.png'),pygame.image.load('Driller/gauche8.png'),
pygame.image.load('Driller/gauche9.png')]
fall = [
pygame.image.load('Driller/fall.png'),
pygame.image.load('Driller/fall1.png')
]
centre = pygame.image.load('Driller/centre.png')
blocks = [
pygame.image.load("Blocks/block_jaune.png"),
pygame.image.load("Blocks/block_vert.png"),
pygame.image.load("Blocks/block_bleu.png"),
pygame.image.load("Blocks/block_rouge.png"),
pygame.image.load("Blocks/block_blanc.png"),
pygame.image.load("Blocks/block_crystal.png"),
pygame.image.load("Blocks/block_niveau.png")
]
blocks_fissure = [
pygame.image.load("Blocks/block.png"),
pygame.image.load("Blocks/block1.png"),
pygame.image.load("Blocks/block2.png"),
pygame.image.load("Blocks/block3.png"),
pygame.image.load("Blocks/block4.png"),
pygame.image.load("Blocks/block5.png")
]
image_drill_left = pygame.image.load("Driller/drill_left.png")
image_drill_right = pygame.image.load("Driller/drill_right.png")
image_drill_down = pygame.image.load("Driller/drill_down.png")
oxy_display = pygame.image.load("Blocks/oxy_display.png")
capsule = pygame.image.load("Blocks/capsule_oxygene.png")
dead_crash = pygame.image.load("Driller/ecraser.png")
dead_air = pygame.image.load("Driller/asph.png")
ange = pygame.image.load("Driller/ange.png")
depth_display = pygame.image.load("Blocks/depth.png")
score_display = pygame.image.load("Blocks/score.png")
level_display = pygame.image.load("Blocks/level.png")
air_display = pygame.image.load("Blocks/air.png")
air_support_display=pygame.image.load("Blocks/air_support.png")
air_pourcent_display = pygame.image.load("Blocks/pourcent.png")
lives_display = pygame.image.load("Blocks/lives.png")
# Global variables
drill_left = False
drill_right = False
compteur_drill = 0
temps_recuperer = 0
cologne = 12
ligne = 35
game_over = False
surface = pygame.display.set_mode( (width,height) )
pygame.display.set_caption("Mr Driller")
obstacles = [[None]*cologne for l in range(ligne) ]
x = 100
y = 5
gravity = 5
left = False
right = False
walkCount = 0
fallCount = 0
pourcentage = 100
points = 0
profondeur = 0
GameOver = False
Death = 0
death_depth = []
CountDeath = 3
Capsule_Air = 10
name_list = []
# SP
def saisie():
global name_list
running = True
play = False
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == KEYDOWN:
if event.key == K_RETURN:
running = False
play = True
            if event.type == pygame.KEYDOWN and len(name_list) != 30:
                # pygame.K_a..K_z share the ASCII codes of 'a'..'z', so the
                # pressed letter can be appended directly with chr()
                if pygame.K_a <= event.key <= pygame.K_z:
                    name_list.append(chr(event.key))
                elif event.key == pygame.K_SPACE:
                    name_list.append(" ")
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_BACKSPACE and len(name_list) > 0:
                    name_list.pop(-1)
#surface.fill( (0,0,0) )
ecran_saisie = pygame.image.load("Screens/EnterNameBetter.png")
ecran_saisie = pygame.transform.scale(ecran_saisie, (width, height))
surface.blit(ecran_saisie,(0,0))
string = ''.join(name_list)
font = pygame.font.Font("Screens/monospace.ttf" , 40)
texte = font.render(string , True , (0,0,0))
rectangle = texte.get_rect()
rectangle.topleft = (150,130)
surface.blit(texte,rectangle)
pygame.display.update()
clock.tick(60)
return play , string
def air():
global pourcentage , GameOver , Death , x , death_depth
pos_x = 620
pos_y = 300
font = pygame.font.Font("freesansbold.ttf", 30)
if pourcentage <= 0:
GameOver = True
Death = 1
if pourcentage > 100:
pourcentage = 100
text_temps = font.render(str(pourcentage), True, white)
list_rotato = [oxy_display for loop in range(pourcentage)]
surface.blit(text_temps, (pos_x+80, pos_y+40))
surface.blit(air_display,(pos_x-20,pos_y-50))
surface.blit(air_support_display,(pos_x-8,pos_y-3))
surface.blit(air_pourcent_display,(pos_x+135,pos_y+40 ))
longueur_barre = 0
for k in list_rotato:
surface.blit(k, (pos_x + longueur_barre, pos_y))
longueur_barre += 1.5
def score(points):
pos_x = 620
pos_y = 150
font = pygame.font.Font("freesansbold.ttf", 30)
pygame.draw.circle(surface,rose,(pos_x,pos_y+20),10,0)
pygame.draw.circle(surface,rose,(pos_x+30,pos_y+20),10,0)
text_score = font.render(str(points), True, white)
text = font.render("PTS", True, rose)
surface.blit(text_score, (pos_x+80, pos_y+30))
surface.blit(text, (pos_x+100, pos_y+60))
surface.blit(score_display,(pos_x-20,pos_y-30))
def depth(profondeur):
pos_x = 620
pos_y = 50
font = pygame.font.Font("freesansbold.ttf", 30)
pygame.draw.circle(surface, yellow, (pos_x, pos_y), 10, 0)
pygame.draw.circle(surface, yellow, (pos_x + 30, pos_y), 10, 0)
text_score = font.render(str(profondeur), True, white)
text = font.render("FT", True, yellow)
surface.blit(text_score, (pos_x + 80, pos_y))
surface.blit(text, (pos_x + 100, pos_y + 30))
surface.blit(depth_display,(600,0))
def lives(DeathCount):
pos_x = 560
pos_y = 400
font = pygame.font.Font("freesansbold.ttf", 30)
text_score = font.render(str(DeathCount), True, white)
text = font.render("x", True, red)
surface.blit(text_score, (pos_x + 180, pos_y+32))
surface.blit(text, (pos_x + 150, pos_y+30))
surface.blit(ange,(pos_x + 80, pos_y+5))
surface.blit(lives_display,(600,pos_y-25))
def levels():
pos_x=600
pos_y= 480
font = pygame.font.Font("freesansbold.ttf", 30)
text_level = font.render(str(level), True, white)
surface.blit(text_level, (pos_x+50 , pos_y+50))
surface.blit(level_display,(pos_x,pos_y))
def chrono(seconds):
time.sleep(1)
return (seconds + 1)
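# Note: chrono() sleeps for a full second, so the intro loop below effectively
# advances once per second even though clock.tick(60) is requested.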
def intro():
pygame.mixer.music.load("Intro/intro_music.mp3") # je rapporte la musique
pygame.display.flip()
font = pygame.font.Font(None, 24)
clock = pygame.time.Clock()
seconds = 0
nextimg = 1
    ''' Load the intro frames and pick the first one '''
images = [
pygame.image.load("Intro/Start_screen1.png"),
pygame.image.load("Intro/Start_screen2.png"),
pygame.image.load("Intro/Start_screen3.png"),
pygame.image.load("Intro/Start_screen4.png"),
pygame.image.load("Intro/Start_screen5.png"),
pygame.image.load("Intro/Start_screen6.png"),
pygame.image.load("Intro/Start_screen7.png"),
pygame.image.load("Intro/Start_screen8.png")
]
    pygame.mixer.music.play(0)  # start the music
running = True
play = False
while running:
        seconds = chrono(seconds)  # advance the timer (chrono blocks for one second)
        if seconds > 0 and seconds % 3 == 0:  # switch to the next image every three seconds
nextimg += 1
if nextimg <= len(images):
choix_image = images[nextimg-1]
choix_image = pygame.transform.scale(choix_image, (width, height))
        text_temps = font.render(str(seconds) + " seconds since start", 1, (255, 255, 255))  # small elapsed-time indicator
surface.blit(choix_image, (0, 0))
surface.blit(text_temps, (0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == KEYDOWN:
if event.key == K_SPACE:
running = False
play = True
pygame.display.update()
clock.tick(60)
return play
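# initialise() fills the obstacles grid: each cell holds a 50x50 Rect plus a
# block type (1-4 coloured, 5 fissured, 6 white, 7 crystal, 8 air capsule,
# 9 the bottom "level" rows), as inferred from how draw() renders them. The
# inner while-loop re-rolls a cell's type whenever three or more identical
# blocks would already touch it horizontally or vertically.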
def initialise():
global obstacles
x_cube = 0
hauteur = y+200
caps = Capsule_Air
for i in range(0,ligne-5):
for j in range(cologne):
if caps != 0:
square_type = randint(1,8)
else:
square_type = randint(1,7)
if square_type == 8:
caps -= 1
square = pygame.Rect(x_cube, hauteur, 50, 50)
if square_type == 5:
obstacles[i][j] = [square, square_type,0,0]
elif square_type == 7:
obstacles[i][j] = [square, square_type,0]
else:
obstacles[i][j] = [square, square_type]
cpt = 3
while cpt >= 3:
cpt = 0
for k in range(j - 1, j - 4, -1):
if k >= 0:
if obstacles[i][k] != None:
if (obstacles[i][k])[1] == (obstacles[i][j])[1]:
cpt += 1
for l in range(i - 1, i - 4, -1):
if l >= 0:
if obstacles[l][j] != None:
if (obstacles[l][j])[1] == (obstacles[i][j])[1]:
cpt += 1
if cpt >= 3:
square_type = randint(1, 7)
if square_type == 5:
obstacles[i][j] = [square, square_type, 0, 0]
elif square_type == 7:
obstacles[i][j] = [square, square_type, 0]
else:
obstacles[i][j] = [square, square_type]
x_cube += 50
x_cube = 0
hauteur += 50
hauteur += 400
for i in range(ligne-5 , ligne):
for j in range(cologne):
square = pygame.Rect(x_cube, hauteur, 50, 50)
obstacles[i][j] = [square , 9]
x_cube += 50
x_cube = 0
hauteur += 50
def draw():
global collision_vertical , x , y , obstacles
surface.fill(black)
pygame.draw.line(surface, white, (600, 0), (600, height))
pygame.draw.line(surface, rose, (600, 125), (width, 125))
pygame.draw.line(surface, rose, (600, 250), (width, 250))
pygame.draw.line(surface, rose, (600, 375), (width, 375))
pygame.draw.line(surface, rose, (600, 500), (width, 500))
for i in range(ligne):
for j in range(cologne):
if obstacles[i][j] != None:
if (obstacles[i][j])[1] == 1:
#pygame.draw.rect(surface, red, (obstacles[i][j])[0])
surface.blit( blocks[3] , (obstacles[i][j])[0])
elif (obstacles[i][j])[1] == 2:
#pygame.draw.rect(surface, blue, (obstacles[i][j])[0])
surface.blit( blocks[2] , (obstacles[i][j])[0])
elif (obstacles[i][j])[1] == 3:
#pygame.draw.rect(surface, yellow, (obstacles[i][j])[0])
surface.blit( blocks[0] , (obstacles[i][j])[0])
elif (obstacles[i][j])[1] == 4:
#pygame.draw.rect(surface, green, (obstacles[i][j])[0])
surface.blit( blocks[1] , (obstacles[i][j])[0])
elif (obstacles[i][j])[1] == 5:
surface.blit(blocks_fissure[ (obstacles[i][j])[2] ], (obstacles[i][j])[0])
elif (obstacles[i][j])[1] == 6:
#pygame.draw.rect(surface, white, (obstacles[i][j])[0])
surface.blit(blocks[4], (obstacles[i][j])[0])
elif (obstacles[i][j])[1] == 7:
#pygame.draw.rect(surface, crystal, (obstacles[i][j])[0])
surface.blit(blocks[5], (obstacles[i][j])[0])
elif (obstacles[i][j])[1] == 8:
surface.blit(capsule, (obstacles[i][j])[0])
else:
surface.blit(blocks[6], (obstacles[i][j])[0])
def move():
global walkCount , fallCount ,x, y , liste_blocks , compteur_drill , GameOver , Death , second_death , obstacles \
, death_depth
if walkCount + 1 >= 27:
walkCount = 0
if fallCount+1 == 6:
fallCount = 0
if Death == 2:
if second_death >= 100:
image_ange = ange
image_ange = pygame.transform.scale(image_ange, (55, 55))
surface.blit(image_ange, (x - 10, y - 10))
else:
image_death = dead_crash
image_death = pygame.transform.scale(image_death, (55, 55))
surface.blit(image_death, (x - 10, y - 10))
elif Death == 1:
if second_death >= 100:
image_ange = ange
image_ange = pygame.transform.scale(image_ange, (55, 55))
surface.blit(image_ange, (x - 10, y - 10))
else:
image_air = dead_air
image_air = pygame.transform.scale(image_air, (55, 55))
surface.blit(image_air, (x - 10, y - 10))
elif not collision_horizontal:
image_fall = pygame.transform.scale(fall[fallCount // 3], (55, 55))
surface.blit(image_fall, (x - 10, y - 10))
fallCount += 1
y += gravity
elif compteur_drill != 0:
if drill_right and not drill_left:
image_d_right = image_drill_right
image_d_right = pygame.transform.scale(image_d_right, (55, 55))
surface.blit(image_d_right, (x - 10, y - 10))
elif not drill_right and drill_left:
image_d_left = image_drill_left
image_d_left = pygame.transform.scale(image_d_left, (55, 55))
surface.blit(image_d_left, (x - 10, y - 10))
else:
image_d_down = image_drill_down
image_d_down = pygame.transform.scale(image_d_down, (55, 55))
surface.blit(image_d_down, (x - 10, y - 10))
compteur_drill -= 1
else:
if left == True:
image_left = walkLeft[walkCount//3]
image_left = pygame.transform.scale(image_left, (55, 55))
surface.blit(image_left , (x-10,y-10))
walkCount += 1
elif right == True:
image_right = walkRight[walkCount // 3]
image_right = pygame.transform.scale(image_right, (55, 55))
surface.blit(image_right , (x-10,y-10))
walkCount += 1
else:
image_centre = pygame.transform.scale(centre, (55, 55))
surface.blit(image_centre, (x - 10, y - 10))
for element in liste_blocks:
square = element[0]
compteur = element[1]
seconds_gravity = element[2]
if compteur == 50:
i,j = element[5] , element[3]
destruction_block(i,j)
liste_blocks.remove(element)
else:
if compteur == 0:
if seconds_gravity == 100:
square.x = element[3]*50
square.y += gravity
element[1] += gravity
else:
if seconds_gravity % 5 == 0:
if element[4] == -2:
element[4] = 2
else:
element[4] = -2
square.x += element[4]
else:
square.y += gravity
element[1] += gravity
i,j = element[5] , element[3]
if obstacles[i][j] != None:
if (obstacles[i][j])[1] != 8:
if (square.bottom-5 > driller.top and ( square.left-5 < driller.left < square.right-5 or
square.left+5 < driller.right < square.right+5) ):
GameOver = True
Death = 2
death_depth = [i,j]
def events():
global left , right , x , y , walkCount , collision_vertical_right , collision_vertical_left , drill_right , drill_left
keys = pygame.key.get_pressed()
if compteur_drill == 0:
if not GameOver:
if keys[pygame.K_LEFT] and x > 5:
if not collision_vertical_left:
x -= 5
left = True
right = False
drill_right = False
drill_left = False
elif keys[pygame.K_RIGHT] and x < 560:
if not collision_vertical_right:
x += 5
drill_right = False
drill_left = False
right = True
left = False
else:
right = False
left = False
drill_left = False
drill_right = False
walkCount = 0
if jump == True:
if not GameOver:
if keys[pygame.K_SPACE]:
y -= 55
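# collisions_player() scans the grid for blocks the driller rectangle touches:
# landing on a block sets collision_horizontal and updates the depth, touching
# a block sideways sets the matching vertical-collision flag, and the
# K_a / K_e / K_z keys (an AZERTY-style layout) drill left, right and down.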
def collisions_player():
global collision_vertical_right , collision_vertical_left , collision_horizontal , x,y , jump , obstacles ,\
drill_ticker , drill_right , drill_left , compteur_drill , pourcentage , points , profondeur , death_depth
keys = pygame.key.get_pressed()
liste = []
for i in range(ligne):
for j in range(cologne):
if obstacles[i][j] != None:
square = (obstacles[i][j])[0]
if driller.colliderect(square):
if y == square.y - 45:
collision_horizontal = True
liste.append((i,j))
profondeur = ( ligne*(level-1) ) + i
else:
jmp = False
if x == square.x + 45:
if (obstacles[i][j])[1] != 8:
collision_vertical_left = True
if (obstacles[i][j])[1] == 7:
if (obstacles[i][j])[2] == 0:
(obstacles[i][j])[2] += 1
if not GameOver:
if keys[pygame.K_a] and drill_ticker == 0:
collisions_blocks(i, j)
drill_left = True
drill_right = False
compteur_drill = 20
drill_ticker = 20
if i != 0:
if (obstacles[i - 1][j]) != None:
if (obstacles[i - 1][j ])[1] != 8:
if (obstacles[i - 1][j])[0].bottom + 5 == driller.top:
jmp = True
if (obstacles[i - 1][j+1]) != None:
if (obstacles[i - 1][j+1])[1] != 8:
if (obstacles[i - 1][j+1])[0].bottom + 5 == driller.top:
jmp = True
if jmp == False:
jump = True
else:
jump = True
else:
obstacles[i][j] = None
points += 1
pourcentage += 20
if x == square.x - 35:
if (obstacles[i][j])[1] != 8:
collision_vertical_right = True
if (obstacles[i][j])[1] == 7:
if (obstacles[i][j])[2] == 0:
(obstacles[i][j])[2] += 1
if not GameOver:
if keys[pygame.K_e] and drill_ticker == 0:
drill_ticker = 20
collisions_blocks(i, j)
drill_right = True
drill_left = False
compteur_drill = 20
if i != 0:
if (obstacles[i-1][j]) != None:
if (obstacles[i - 1][j])[1] != 8:
if (obstacles[i-1][j])[0].bottom+5 == driller.top:
jmp = True
if (obstacles[i-1][j-1]) != None:
if (obstacles[i - 1][j - 1])[1] != 8:
if (obstacles[i-1][j-1])[0].bottom+5 == driller.top:
jmp = True
if jmp == False:
jump = True
else:
jump = True
else:
obstacles[i][j] = None
points += 1
pourcentage += 20
for element in liste:
i = element[0]
j = element[1]
if obstacles[i][j] != None:
square = (obstacles[i][j])[0]
if len(liste) == 2:
if square.x+15 == x:
if Death == 1:
death_depth = [i,j]
x -= 5
if (obstacles[i][j])[1] != 8:
if (obstacles[i][j])[1] == 7:
if (obstacles[i][j])[2] == 0:
(obstacles[i][j])[2] += 1
if not GameOver:
if keys[pygame.K_z]: # Right
if obstacles[i][j] != None:
collisions_blocks(i, j)
x -= 5
drill_ticker = 20
drill_right = True
drill_left = False
compteur_drill = 20
else:
obstacles[i][j] = None
points += 1
pourcentage += 20
elif square.x-5 == x:
if Death == 1:
death_depth = [i, j]
x += 5
if (obstacles[i][j])[1] != 8:
if (obstacles[i][j])[1] == 7:
if (obstacles[i][j])[2] == 0:
(obstacles[i][j])[2] += 1
if not GameOver:
if keys[pygame.K_z]: # Left
if obstacles[i][j] != None:
collisions_blocks(i, j)
x += 5
                                    drill_right = False
                                    drill_left = True  # the left-drill sprite is shown when only drill_left is set
drill_ticker = 20
compteur_drill = 20
else:
obstacles[i][j] = None
points += 1
pourcentage += 20
else:
if Death == 1:
death_depth = [i, j]
if (obstacles[i][j])[1] != 8:
if (obstacles[i][j])[1] == 7:
if (obstacles[i][j])[2] == 0:
(obstacles[i][j])[2] += 1
if not GameOver:
if keys[pygame.K_z]: # Down
if obstacles[i][j] != None and drill_ticker == 0:
drill_ticker = 20
collisions_blocks(i, j)
                                drill_right = False
                                drill_left = False  # with both flags off move() shows the downward drill sprite
compteur_drill = 20
else:
obstacles[i][j] = None
points += 1
pourcentage += 20
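# gravity_blocks() finds blocks with an empty cell directly underneath and
# queues them in liste_blocks so move() can animate their fall; for coloured
# blocks the continue_sup / continue_inf scans keep the block in place while a
# same-typed neighbour in its row is still supported from below.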
def gravity_blocks():
global obstacles , gravity , liste_blocks
liste = []
for i in range(1 , ligne):
for j in range(0,cologne):
if obstacles[i][j] == None and obstacles[i-1][j] != None:
liste.append( (i-1 , i , j) )
for element in liste:
i = element[1]
i_1 = element[0]
j = element[2]
j_sup = j+1
if (obstacles[i_1][j])[1] != 6:
continue_sup = False
while j_sup < cologne and i_1+1 < ligne:
if obstacles[i_1][j_sup] != None:
if (obstacles[i_1][j])[1] == (obstacles[i_1][j_sup])[1]:
if obstacles[i_1+1][j_sup] != None:
continue_sup = True
break
else:
break
else:
break
j_sup += 1
if continue_sup:
continue
j_inf = j-1
continue_inf = False
            while j_inf >= 0 and i_1 + 1 < ligne:  # scan left-hand neighbours; j_inf must stay non-negative
if obstacles[i_1][j_inf] != None:
if (obstacles[i_1][j])[1] == (obstacles[i_1][j_inf])[1]:
if obstacles[i_1 + 1][j_inf] != None:
continue_inf = True
break
else:
break
else:
break
j_inf -= 1
if continue_inf:
continue
obstacles[i][j] = obstacles[i_1][j]
obstacles[i_1][j] = None
liste_blocks.append( [ (obstacles[i][j])[0] , 0 , 0 , j , 2, i ] )
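# collisions_blocks(i, j) flood-fills outward from the drilled cell, gathering
# every 4-connected block of the same type; fissured blocks (the 4-element
# cells) take a crack instead of breaking, the rest are cleared for points.
# Hitting a type-9 block flags NextLevel instead.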
def collisions_blocks(i,j):
global obstacles , points , NextLevel
if (obstacles[i][j])[1] == 9:
NextLevel = True
elif (obstacles[i][j])[1] != 8:
liste = [ (i,j) ]
compteur = 1
while compteur != 0:
compteur = 0
for element in liste:
position_i = element[0]
position_j = element[1]
i_sup = position_i + 1
i_inf = position_i - 1
j_sup = position_j + 1
j_inf = position_j - 1
if i_sup < ligne and obstacles[i_sup][position_j] != None:
if (i_sup , position_j) not in liste:
if (obstacles[position_i][position_j])[1] == (obstacles[i_sup][position_j])[1]:
liste.append((i_sup, position_j))
compteur += 1
if i_inf >= 0 and obstacles[i_inf][position_j] != None:
if (i_inf , position_j) not in liste:
if (obstacles[position_i][position_j])[1] == (obstacles[i_inf][position_j])[1]:
liste.append((i_inf, position_j))
compteur += 1
if j_sup < cologne and obstacles[position_i][j_sup] != None:
if (position_i,j_sup) not in liste:
if (obstacles[position_i][position_j])[1] == (obstacles[position_i][j_sup])[1]:
liste.append((position_i, j_sup))
compteur += 1
if j_inf >= 0 and obstacles[position_i][j_inf] != None:
if (position_i,j_inf) not in liste:
if (obstacles[position_i][position_j])[1] == (obstacles[position_i][j_inf])[1]:
liste.append((position_i, j_inf))
compteur += 1
pop_block.play()
for element in liste:
i = element[0]
j = element[1]
if len(obstacles[i][j]) == 4:
if (obstacles[i][j])[2] < 5:
(obstacles[i][j])[2] += 1
else:
obstacles[i][j] = None
points += 1
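# destruction_block(i, j) runs the same flood fill when a falling block lands,
# but only clears the group once it has grown to four or more blocks, giving
# the chain-reaction behaviour of the original game.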
def destruction_block(i,j):
global obstacles , merge_blocks , pourcentage , points
liste = [(i, j)]
compteur = 1
cpt_global = 1
while compteur != 0:
compteur = 0
for element in liste:
position_i = element[0]
position_j = element[1]
i_sup = position_i + 1
i_inf = position_i - 1
j_sup = position_j + 1
j_inf = position_j - 1
if obstacles[position_i][position_j] != None:
if i_sup < ligne and obstacles[i_sup][position_j] != None:
if (i_sup, position_j) not in liste:
if (obstacles[position_i][position_j])[1] == (obstacles[i_sup][position_j])[1]:
liste.append((i_sup, position_j))
compteur += 1
cpt_global += 1
if i_inf >= 0 and obstacles[i_inf][position_j] != None:
if (i_inf, position_j) not in liste:
if (obstacles[position_i][position_j])[1] == (obstacles[i_inf][position_j])[1]:
liste.append((i_inf, position_j))
compteur += 1
cpt_global += 1
if j_sup < cologne and obstacles[position_i][j_sup] != None:
if (position_i, j_sup) not in liste:
if (obstacles[position_i][position_j])[1] == (obstacles[position_i][j_sup])[1]:
liste.append((position_i, j_sup))
compteur += 1
cpt_global += 1
if j_inf >= 0 and obstacles[position_i][j_inf] != None:
if (position_i, j_inf) not in liste:
if (obstacles[position_i][position_j])[1] == (obstacles[position_i][j_inf])[1]:
liste.append((position_i, j_inf))
compteur += 1
cpt_global += 1
if cpt_global >= 4:
pop_block.play()
for element in liste:
i1 = element[0]
j1 = element[1]
points += 1
if len(obstacles[i1][j1]) == 4:
surface.blit(blocks_fissure[5], (obstacles[i1][j1])[0])
obstacles[i1][j1] = None
else:
obstacles[i1][j1] = None
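# save() keeps the high-score file Save/sauvegarde.txt, one "name : score"
# line per player; the initial open(..., "a") / close() pair only ensures the
# file exists, and an existing player's line is overwritten when the new score
# is higher.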
def save():
fopen = open("Save/sauvegarde.txt","a")
fopen.close()
fichier = open("Save/sauvegarde.txt","r")
ecraser = False
list_name = []
lines = fichier.readlines()
if len(lines) != 0:
for user in lines:
for i in range(len(user)):
if user[i] == ':':
list_name.append([user[0:i-1] , int(user[i+1:])])
for element in list_name:
if username == element[0]:
ecraser = True
if points > element[1]:
list_name.remove(element)
list_name.append([username,points])
fic = open("Save/sauvegarde.txt","w")
for element in list_name:
fic.write(element[0]+' : '+str(element[1])+'\n')
fic.close()
fichier.close()
if not ecraser:
fichier = open('Save/sauvegarde.txt' , 'a')
fic = open('Save/sauvegarde.txt' , 'r')
        first_line = fic.readline()
        if len(first_line) != 0:
fichier.write("\n"+username+" : "+str(points))
else:
fichier.write(username+" : "+str(points))
fic.close()
fichier.close()
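# load() re-reads the save file, bubble-sorts the entries by score (highest
# first) and draws the resulting high-score table over the HighScore.png
# background.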
def load():
global game_over
fichier = open("Save/sauvegarde.txt" , "r")
liste_trie = []
lignes = fichier.readlines()
for ligne in lignes:
for i in range(len(ligne)):
if ligne[i] == ':':
liste_trie.append( [ligne[0:i],int(ligne[i+1:])] )
cpt = 1
while cpt != 0:
cpt = 0
for i in range(len(liste_trie)-1):
element_1 = liste_trie[i]
element_2 = liste_trie[i+1]
if element_1[1] < element_2[1]:
temp = element_1
liste_trie[i] = element_2
liste_trie[i+1] = temp
cpt += 1
fichier.close()
for event in pygame.event.get():
if event.type == QUIT:
game_over = True
pos_x , pos_y = 250,150
surface.fill(black)
img = pygame.image.load("Screens/HighScore.png")
surface.blit(img , (250,-50))
font = pygame.font.Font("Screens/monospace.ttf",30)
texte , texte1 = font.render("Name" , True , green) , font.render("Score" , True , green)
rectangle , rectangle1 = texte.get_rect() , texte1.get_rect()
rectangle.topleft = (pos_x , pos_y)
rectangle1.topleft = (pos_x+250,pos_y)
surface.blit(texte , rectangle)
surface.blit(texte1,rectangle1)
pos_y +=50
for element in liste_trie:
texte = font.render(element[0] ,True,white)
texte1 = font.render(str(element[1]),True,white)
rectangle = texte.get_rect()
rectangle1 = texte1.get_rect()
rectangle.topleft = (pos_x,pos_y)
rectangle1.topleft = (pos_x+250,pos_y)
surface.blit(texte , rectangle)
surface.blit(texte1,rectangle1)
pos_y += 50
def pause():
global Paused , game_over
if Paused == 1:
paused = True
font = pygame.font.Font("freesansbold.ttf",40)
texte = font.render("Paused",True,white)
rectangle = texte.get_rect()
rectangle.topleft = (200,50)
while paused:
pygame.draw.rect(surface,black,rectangle,0)
surface.blit(texte,rectangle)
for event in pygame.event.get():
if event.type == QUIT:
paused = False
game_over = True
if event.type == KEYDOWN:
if event.key == K_p:
paused = False
Paused += 1
if Paused == 2:
Paused = 0
pygame.display.update()
def lose(second):
global y , GameOver , pourcentage , Death , CountDeath
if second == 80:
if len(death_depth) != 0:
i,j = death_depth[0],death_depth[1]
for k in range(i, -1, -1):
obstacles[k][j] = None
if second >= 100:
y -= gravity
if second == 200:
GameOver = False
pourcentage = 100
Death = 0
CountDeath -= 1
def gameover(death_screen):
global game_over
for event in pygame.event.get():
if event.type == QUIT:
game_over = True
if death_screen == 0:
pygame.mixer.music.stop()
pygame.mixer.music.load("Music/GameOver.wav")
pygame.mixer.music.play(0)
img = pygame.image.load("Screens/GameOver.jpg")
img = pygame.transform.scale(img, (width, height))
surface.blit(img , (0,0))
def win(win_screen):
global game_over
for event in pygame.event.get():
if event.type == QUIT:
game_over = True
if win_screen == 0:
pygame.mixer.music.stop()
pygame.mixer.music.load("Music/WinTheme.wav")
pygame.mixer.music.play(0)
surface.fill(black)
img = pygame.image.load("Screens/Win.jpg")
img = pygame.transform.scale(img, (width, height-200))
surface.blit(img , (0,200))
message = "Congratulations !!"
message1 = "Game Completed"
font = pygame.font.Font("freesansbold.ttf",50)
texte = font.render(message,True,white)
texte1 = font.render(message1,True,white)
rectangle = texte.get_rect()
rectangle.topleft = (200,50)
rectangle1 = texte1.get_rect()
rectangle1.topleft = (200,120)
surface.blit(texte , rectangle)
surface.blit(texte1 , rectangle1)
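# main() drives the game loop: it rebuilds the grid when NextLevel is set,
# scrolls the playfield by shifting every block whenever the driller crosses
# y >= 300 or y <= 100, drains one air percent every 60 frames, ages cracked
# and crystal blocks, and switches to the save / game-over / win / high-score
# screens once lives run out or level 11 is reached.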
def main():
global game_over , collision_horizontal , collision_vertical_left , collision_vertical_right , jump , driller,\
x, y , obstacles , drill_ticker , liste_blocks , merge_blocks , pourcentage , points , second_death , CountDeath , Capsule_Air , NextLevel , level , Paused
drill_ticker , second = 0 , 0
liste_blocks = []
merge_blocks = []
second_death = 0
driller , level = 1 , 1
pygame.mixer.music.load("Music/main.mp3")
pygame.mixer.music.play(-1)
NextLevel = False
cpt_save , Paused= 0 , 0
death_screen , win_screen = 0,0
while not game_over:
clock.tick(60)
if CountDeath != 0 and level != 11:
collision_horizontal = False
collision_vertical_left = False
collision_vertical_right = False
jump = False
if NextLevel:
initialise()
level += 1
pourcentage = 100
points += 10
Capsule_Air -= 1
NextLevel = False
driller = pygame.Rect(x, y, 40, 50)
for event in pygame.event.get():
if event.type == QUIT:
game_over = True
if event.type == KEYDOWN:
if event.key == K_p:
Paused += 1
if drill_ticker > 0:
drill_ticker -= 1
if not GameOver:
second_death = 0
second += 1
if second == 60:
second = 0
pourcentage -= 1
if y >= 300:
y -= 50
for i in range(ligne):
for j in range(cologne):
if obstacles[i][j] != None:
(obstacles[i][j])[0].y -= 50
if y <= 100:
y += 50
for i in range(ligne):
for j in range(cologne):
if obstacles[i][j] != None:
(obstacles[i][j])[0].y += 50
else:
second_death += 1
lose(second_death)
for element in liste_blocks:
element[2] += 1
for i in range(ligne):
for j in range(cologne):
if obstacles[i][j] != None:
if len(obstacles[i][j]) == 4:
if (obstacles[i][j])[2] == 5:
(obstacles[i][j])[3] += 1
if (obstacles[i][j])[3] == 20:
obstacles[i][j] = None
pourcentage -= 20
points += 1
if obstacles[i][j] != None:
if (obstacles[i][j])[1] == 7:
if (obstacles[i][j])[2] >= 1:
(obstacles[i][j])[2] += 1
if (obstacles[i][j])[2] == 500:
obstacles[i][j] = None
pop_block.play()
draw()
air()
collisions_player()
gravity_blocks()
move()
events()
score(points)
depth(profondeur)
lives(CountDeath)
levels()
pause()
else:
if cpt_save == 0:
save()
cpt_save += 1
if CountDeath == 0 and death_screen < 100:
gameover(death_screen)
death_screen += 1
elif level == 11 and win_screen < 100:
win(win_screen)
win_screen += 1
else:
load()
pygame.display.update()
pygame.quit()
# Launch:
launch, username = saisie()
if launch:
    play = intro()
    if play:
        pygame.mixer.music.stop()
        initialise()
        main()
    else:
        pygame.quit()
else:
    pygame.quit()
|